Dataset columns (type and observed range/values):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
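Each record below pairs these metadata columns with the full file text in `content`. As a minimal, hypothetical sketch (the `records` iterable, the helper names, and the numeric thresholds are illustrative assumptions, not part of the dataset), rows with this schema could be filtered on the summary columns like so:

```python
# Hypothetical sketch: filter rows of this schema using the summary columns.
# Assumes `records` is an iterable of dicts keyed by the column names above;
# the numeric thresholds are arbitrary illustrative values.
def keep_record(row: dict) -> bool:
    return (
        row["lang"] == "Python"
        and row["max_line_length"] <= 200              # skip minified/generated files
        and 0.25 <= row["alphanum_fraction"] <= 0.95   # skip binary-ish or symbol-heavy text
    )

def iter_kept_sources(records):
    for row in records:
        if keep_record(row):
            yield row["max_stars_repo_name"], row["max_stars_repo_path"], row["content"]
```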
hexsha: 5578dc4f671307398a9d0705dce7afae3c84c6e1 | size: 2,323 | ext: py | lang: Python
max_stars: path blueprints/vgg_cifar.py | repo vdeuschle/rysia | head c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | licenses ["Apache-2.0"] | count 2 | events 2020-08-17T15:46:30.000Z – 2020-08-20T19:20:57.000Z
max_issues: path blueprints/vgg_cifar.py | repo vdeuschle/rysia | head c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | licenses ["Apache-2.0"] | count null | events null
max_forks: path blueprints/vgg_cifar.py | repo vdeuschle/rysia | head c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | licenses ["Apache-2.0"] | count null | events null
content:
from rysia.core import blueprint as bp
architecture = bp.model(
bp.InputLayer(shape=(32, 32, 3)),
bp.Convolution(channel=64, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=64, kernel_size=(3, 3), activation='relu'),
bp.Pooling(kernel_size=(2, 2)),
bp.Convolution(channel=128, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=128, kernel_size=(3, 3), activation='relu'),
bp.Pooling(kernel_size=(2, 2)),
bp.Convolution(channel=256, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=256, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=256, kernel_size=(3, 3), activation='relu'),
bp.Pooling(kernel_size=(2, 2)),
bp.Convolution(channel=512, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=512, kernel_size=(3, 3), activation='relu'),
bp.Convolution(channel=512, kernel_size=(3, 3), activation='relu'),
bp.Pooling(kernel_size=(2, 2)),
bp.Flatten(),
bp.FullyConnected(size=512, activation='relu'),
bp.FullyConnected(size=256, activation='relu'),
bp.FullyConnected(size=10, activation='linear')
)
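# Note: the stack above appears to be a reduced VGG-style network for CIFAR
# (32x32x3 input, four blocks of 64/128/256/512-channel 3x3 convolutions with
# 2x2 pooling, followed by fully connected layers of 512, 256 and a 10-unit
# linear output).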
learning_rate = 0.01
epochs = 100
batch_size = 128
loss_function = 'cross_entropy'
optimizer = 'adam'
optimizer_mode = 'default'
state_sizes = None
train_seq_length = None
test_seq_length = None
truncated_backprop_length = None
reshape_input_data = [None]
runs = 7
python_seed = 42
numpy_seed = 42
framework_seed = 42
monitors = []
mode = 'training'
frameworks = ['mxnet', 'tensorflow', 'pytorch']
aws = True
region_name = 'aws_region'
bucket_name = 'bucket_name'
instance_types = ['c4.2xlarge', 'c5.2xlarge', 'p2.xlarge', 'p3.2xlarge']
job_name = 'imdb'
ami_id = 'ami_id'
env_min_cpu = 0
env_desired_cpu = 0
env_max_cpu = 512
subnets = ['subnet1', 'subnet2', 'subnet3']
security_group_ids = ['security_group_id']
instance_role = 'instance_role_arn'
service_role = 'service_role_arn'
account_id = 'aws_account_id'
container_images = ['cpu_image', 'gpu_image']
job_def_names = ['cpu_job_definition', 'gpu_job_definition']
job_num_vcpu = None
job_memory_size = None
tear_down_comp_env = False
train_data_path = 'train_data_path'
train_label_path = 'train_label_path'
test_data_path = None
test_label_path = None
result_folder_path = 'result_folder_path'
model_params_path = None
avg_line_length: 32.71831 | max_line_length: 72 | alphanum_fraction: 0.727077

hexsha: 3a18c3eb44bd9861ea3b6d21912ef619a2cf25d0 | size: 2,037 | ext: py | lang: Python
max_stars: path scripts/pyqtgraph-develop/examples/GLVolumeItem.py | repo kuldeepaman/tf-pose | head 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses ["Apache-2.0"] | count null | events null
max_issues: path scripts/pyqtgraph-develop/examples/GLVolumeItem.py | repo kuldeepaman/tf-pose | head 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses ["Apache-2.0"] | count null | events null
max_forks: path scripts/pyqtgraph-develop/examples/GLVolumeItem.py | repo kuldeepaman/tf-pose | head 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses ["Apache-2.0"] | count null | events null
content:
# -*- coding: utf-8 -*-
"""
Demonstrates GLVolumeItem for displaying volumetric data.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 200
w.show()
w.setWindowTitle('pyqtgraph example: GLVolumeItem')
#b = gl.GLBoxItem()
#w.addItem(b)
g = gl.GLGridItem()
g.scale(10, 10, 1)
w.addItem(g)
import numpy as np
## Hydrogen electron probability density
def psi(i, j, k, offset=(50,50,100)):
x = i-offset[0]
y = j-offset[1]
z = k-offset[2]
th = np.arctan2(z, (x**2+y**2)**0.5)
phi = np.arctan2(y, x)
r = (x**2 + y**2 + z **2)**0.5
a0 = 2
#ps = (1./81.) * (2./np.pi)**0.5 * (1./a0)**(3/2) * (6 - r/a0) * (r/a0) * np.exp(-r/(3*a0)) * np.cos(th)
ps = (1./81.) * 1./(6.*np.pi)**0.5 * (1./a0)**(3/2) * (r/a0)**2 * np.exp(-r/(3*a0)) * (3 * np.cos(th)**2 - 1)
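    # The active expression appears to correspond (up to normalization) to the real
    # hydrogen-like 3d_z^2 wavefunction psi_{3,2,0} ~ (r/a0)**2 * exp(-r/(3*a0)) * (3*cos(th)**2 - 1),
    # consistent with the "Hydrogen electron probability density" comment above.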
return ps
#return ((1./81.) * (1./np.pi)**0.5 * (1./a0)**(3/2) * (r/a0)**2 * (r/a0) * np.exp(-r/(3*a0)) * np.sin(th) * np.cos(th) * np.exp(2 * 1j * phi))**2
data = np.fromfunction(psi, (100,100,200))
positive = np.log(np.clip(data, 0, data.max())**2)
negative = np.log(np.clip(-data, 0, -data.min())**2)
d2 = np.empty(data.shape + (4,), dtype=np.ubyte)
d2[..., 0] = positive * (255./positive.max())
d2[..., 1] = negative * (255./negative.max())
d2[..., 2] = d2[...,1]
d2[..., 3] = d2[..., 0]*0.3 + d2[..., 1]*0.3
d2[..., 3] = (d2[..., 3].astype(float) / 255.) **2 * 255
d2[:, 0, 0] = [255,0,0,100]
d2[0, :, 0] = [0,255,0,100]
d2[0, 0, :] = [0,0,255,100]
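# d2 is an RGBA volume: channel 0 (red) encodes the positive lobe of the wavefunction,
# channels 1 and 2 (green/blue) the negative lobe, and alpha is derived from both;
# the three colored edge lines above mark the x, y and z directions of the array.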
v = gl.GLVolumeItem(d2)
v.translate(-50,-50,-100)
w.addItem(v)
ax = gl.GLAxisItem()
w.addItem(ax)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
avg_line_length: 29.1 | max_line_length: 152 | alphanum_fraction: 0.542464

hexsha: f2ee387253d25f8855ea851e5966c94fce6ed421 | size: 20,223 | ext: py | lang: Python
max_stars: path inventories/views.py | repo lueho/BRIT | head 1eae630c4da6f072aa4e2139bc406db4f4756391 | licenses ["MIT"] | count null | events null
max_issues: path inventories/views.py | repo lueho/BRIT | head 1eae630c4da6f072aa4e2139bc406db4f4756391 | licenses ["MIT"] | count 4 | events 2022-03-29T20:52:31.000Z – 2022-03-29T20:52:31.000Z
max_forks: path inventories/views.py | repo lueho/BRIT | head 1eae630c4da6f072aa4e2139bc406db4f4756391 | licenses ["MIT"] | count null | events null
content:
import io
import json
from bootstrap_modal_forms.generic import BSModalCreateView, BSModalUpdateView, BSModalDeleteView
from celery.result import AsyncResult
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, View
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.edit import ModelFormMixin
from rest_framework.views import APIView
from brit.views import DualUserListView, UserOwnsObjectMixin, NextOrSuccessUrlMixin, OwnedObjectModalUpdateView
from layer_manager.models import Layer
from maps.models import Catchment, GeoDataset
from maps.serializers import BaseResultMapSerializer
from materials.models import SampleSeries
from users.models import get_default_owner
from users.views import ModalLoginRequiredMixin
from .evaluations import ScenarioResult
from .forms import (
ScenarioModalModelForm,
ScenarioInventoryConfigurationAddForm,
ScenarioInventoryConfigurationUpdateForm,
SeasonalDistributionModelForm,
)
from .models import (
Scenario,
ScenarioInventoryConfiguration,
InventoryAlgorithm,
InventoryAlgorithmParameter,
InventoryAlgorithmParameterValue,
ScenarioStatus,
RunningTask
)
from .tasks import run_inventory
class SeasonalDistributionCreateView(LoginRequiredMixin, CreateView):
form_class = SeasonalDistributionModelForm
template_name = 'seasonal_distribution_create.html'
success_url = '/inventories/materials/{material_id}'
# ----------- Scenarios ------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# ----------- Scenarios CRUD -------------------------------------------------------------------------------------------
class ScenarioListView(DualUserListView):
model = Scenario
template_name = 'scenario_list.html'
# class ScenarioCreateView(LoginRequiredMixin, CreateView):
# model = Scenario
# form_class = ScenarioModelForm
# template_name = 'scenario_create.html'
# success_url = reverse_lazy('scenario_list')
#
# def form_valid(self, form):
# form.instance.owner = self.request.user
# return super().form_valid(form)
class ScenarioCreateView(LoginRequiredMixin, NextOrSuccessUrlMixin, BSModalCreateView):
form_class = ScenarioModalModelForm
template_name = 'modal_form.html'
success_url = reverse_lazy('scenario_list')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_title': 'Create new scenario',
'submit_button_text': 'Create'
})
return context
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
class ScenarioDetailView(UserPassesTestMixin, DetailView):
"""Summary of the Scenario with complete configuration. Page for final review, which also contains the
'run' button."""
model = Scenario
template_name = 'scenario_detail.html'
object = None
config = None
allow_edit = False
region_url = reverse_lazy('ajax_region_geometries')
load_region = True
load_catchment = True
catchment_url = reverse_lazy('ajax_catchment_geometries')
load_features = False
adjust_bounds_to_features = False
marker_style = {
'color': '#4061d2',
'fillOpacity': 1,
'stroke': False
}
def get(self, request, *args, **kwargs):
self.object = self.get_object()
self.config = self.object.configuration_for_template()
context = self.get_context_data(object=self.object)
context['config'] = self.config
context['allow_edit'] = self.allow_edit
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'map_config': {
'region_url': self.region_url,
'catchment_url': self.catchment_url,
'load_features': self.load_features,
'adjust_bounds_to_features': self.adjust_bounds_to_features,
'region_id': self.object.region.id,
'load_catchment': self.load_catchment,
'catchment_id': self.get_catchment_id(),
'load_region': self.load_region,
'markerStyle': self.marker_style
}
})
return context
def get_catchment_id(self):
return self.object.catchment.id
def post(self, request, *args, **kwargs):
self.object = self.get_object()
scenario = self.object
scenario.set_status(ScenarioStatus.Status.RUNNING)
run_inventory(scenario.id)
return redirect('scenario-result', scenario.id)
def test_func(self):
self.object = self.get_object()
standard_owner = get_default_owner()
if self.object.owner == standard_owner:
if self.request.user == standard_owner:
self.allow_edit = True
return True
elif self.object.owner == self.request.user:
self.allow_edit = True
return True
else:
return False
class ScenarioUpdateView(ModalLoginRequiredMixin, UserOwnsObjectMixin, NextOrSuccessUrlMixin, BSModalUpdateView):
model = Scenario
form_class = ScenarioModalModelForm
template_name = 'modal_form.html'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({
'region_id': self.object.region.id
})
return kwargs
class ScenarioModalUpdateView(OwnedObjectModalUpdateView):
template_name = 'modal_form.html'
model = Scenario
form_class = ScenarioModalModelForm
permission_required = 'inventories.change_scenario'
class ScenarioDeleteView(LoginRequiredMixin, UserOwnsObjectMixin, NextOrSuccessUrlMixin, BSModalDeleteView):
model = Scenario
template_name = 'modal_delete.html'
success_message = 'Successfully deleted.'
success_url = reverse_lazy('scenario_list')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_title': 'Delete scenario',
'submit_button_text': 'Delete'
})
return context
def get_evaluation_status(request, task_id=None):
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result,
"task_info": task_result.info
}
return JsonResponse(result, status=200)
class ScenarioAddInventoryAlgorithmView(LoginRequiredMixin, UserPassesTestMixin,
TemplateResponseMixin, ModelFormMixin, View):
model = ScenarioInventoryConfiguration
form_class = ScenarioInventoryConfigurationAddForm
template_name = 'scenario_configuration_add.html'
object = None
def test_func(self):
scenario = Scenario.objects.get(id=self.kwargs.get('pk'))
return self.request.user == scenario.owner
@staticmethod
def post(request, *args, **kwargs):
scenario_id = request.POST.get('scenario')
scenario = Scenario.objects.get(id=scenario_id)
feedstock = SampleSeries.objects.get(id=request.POST.get('feedstock'))
algorithm_id = request.POST.get('inventory_algorithm')
algorithm = InventoryAlgorithm.objects.get(id=algorithm_id)
parameters = algorithm.inventoryalgorithmparameter_set.all()
values = {}
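        # POST keys follow the convention 'parameter_<parameter pk>'; each submitted
        # value id is resolved to an InventoryAlgorithmParameterValue before being
        # attached to the scenario configuration.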
for parameter in parameters:
values[parameter] = []
parameter_id = 'parameter_' + str(parameter.pk)
if parameter_id in request.POST:
value_id = request.POST.get(parameter_id)
values[parameter].append(InventoryAlgorithmParameterValue.objects.get(id=value_id))
scenario.add_inventory_algorithm(feedstock, algorithm, values)
return redirect('scenario_detail', pk=scenario_id)
def get_object(self, **kwargs):
return Scenario.objects.get(pk=self.kwargs.get('pk'))
def get_initial(self):
return {
'feedstocks': self.object.available_feedstocks(),
'scenario': self.object
}
def get_context_data(self, **kwargs):
context = {'scenario': self.object,
'form': self.get_form()}
return super().get_context_data(**context)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class ScenarioAlgorithmConfigurationUpdateView(LoginRequiredMixin, UserPassesTestMixin,
TemplateResponseMixin, ModelFormMixin, View):
model = ScenarioInventoryConfiguration
form_class = ScenarioInventoryConfigurationUpdateForm
template_name = 'scenario_configuration_update.html'
object = None
def test_func(self):
scenario = Scenario.objects.get(id=self.kwargs.get('scenario_pk'))
return self.request.user == scenario.owner
@staticmethod
def post(request, *args, **kwargs):
scenario = Scenario.objects.get(id=request.POST.get('scenario'))
current_algorithm = InventoryAlgorithm.objects.get(id=request.POST.get('current_algorithm'))
feedstock = SampleSeries.objects.get(id=request.POST.get('feedstock'))
scenario.remove_inventory_algorithm(current_algorithm, feedstock)
new_algorithm = InventoryAlgorithm.objects.get(id=request.POST.get('inventory_algorithm'))
parameters = new_algorithm.inventoryalgorithmparameter_set.all()
values = {}
for parameter in parameters:
values[parameter] = []
parameter_id = 'parameter_' + str(parameter.pk)
if parameter_id in request.POST:
value_id = request.POST.get(parameter_id)
values[parameter].append(InventoryAlgorithmParameterValue.objects.get(id=value_id))
scenario.add_inventory_algorithm(feedstock, new_algorithm, values)
return redirect('scenario_detail', pk=request.POST.get('scenario'))
def get_object(self, **kwargs):
return Scenario.objects.get(pk=self.kwargs.get('scenario_pk'))
def get_initial(self):
scenario = Scenario.objects.get(id=self.kwargs.get('scenario_pk'))
algorithm = InventoryAlgorithm.objects.get(id=self.kwargs.get('algorithm_pk'))
config = scenario.inventory_algorithm_config(algorithm)
return config
def get_context_data(self, **kwargs):
context = self.get_initial()
context['form'] = self.get_form()
return super().get_context_data(**context)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class ScenarioRemoveInventoryAlgorithmView(LoginRequiredMixin, UserPassesTestMixin, View):
scenario = None
algorithm = None
feedstock = None
def test_func(self):
self.scenario = Scenario.objects.get(id=self.kwargs.get('scenario_pk'))
return self.scenario.owner == self.request.user
def get(self, request, *args, **kwargs):
self.scenario = Scenario.objects.get(id=self.kwargs.get('scenario_pk'))
self.algorithm = InventoryAlgorithm.objects.get(id=self.kwargs.get('algorithm_pk'))
self.feedstock = SampleSeries.objects.get(id=self.kwargs.get('feedstock_pk'))
self.scenario.remove_inventory_algorithm(algorithm=self.algorithm, feedstock=self.feedstock)
return redirect('scenario_detail', pk=self.scenario.id)
def download_scenario_summary(request, scenario_pk):
file_name = f'scenario_{scenario_pk}_summary.json'
scenario = Scenario.objects.get(id=scenario_pk)
with io.StringIO(json.dumps(scenario.summary_dict(), indent=4)) as file:
response = HttpResponse(file, content_type='application/json')
response['Content-Disposition'] = 'attachment; filename=%s' % file_name
return response
def load_catchment_options(request):
    region_id = request.GET.get('region_id') or request.GET.get('region')
    if region_id:
        catchments = Catchment.objects.filter(parent_region_id=region_id)
    else:
        catchments = Catchment.objects.none()
    return render(request, 'catchment_dropdown_list_options.html', {'catchments': catchments})
def load_geodataset_options(request):
scenario = Scenario.objects.get(id=request.GET.get('scenario'))
if request.GET.get('feedstock'):
feedstock = SampleSeries.objects.get(id=request.GET.get('feedstock'))
if request.GET.get('options') == 'create':
geodatasets = scenario.remaining_geodataset_options(feedstock=feedstock.material)
elif request.GET.get('options') == 'update':
current = GeoDataset.objects.filter(id=request.GET.get('current_geodataset'))
geodatasets = scenario.remaining_geodataset_options(feedstock=feedstock.material).union(current)
else:
geodatasets = scenario.available_geodatasets()
else:
geodatasets = GeoDataset.objects.none()
return render(request, 'geodataset_dropdown_list_options.html', {'geodatasets': geodatasets})
def load_algorithm_options(request):
scenario = Scenario.objects.get(id=request.GET.get('scenario'))
if request.GET.get('feedstock') and request.GET.get('geodataset'):
feedstock = SampleSeries.objects.get(id=request.GET.get('feedstock'))
geodataset = GeoDataset.objects.get(id=request.GET.get('geodataset'))
if request.GET.get('options') == 'create':
algorithms = scenario.remaining_inventory_algorithm_options(feedstock, geodataset)
elif request.GET.get('options') == 'update':
current_algorithm = InventoryAlgorithm.objects.filter(id=request.GET.get('current_inventory_algorithm'),
feedstock=feedstock.material, geodataset=geodataset)
algorithms = scenario.remaining_inventory_algorithm_options(feedstock, geodataset).union(current_algorithm)
else:
algorithms = scenario.available_inventory_algorithms()
else:
algorithms = InventoryAlgorithm.objects.none()
return render(request, 'algorithm_dropdown_list_options.html', {'algorithms': algorithms})
def load_parameter_options(request):
if request.GET.get('inventory_algorithm'):
algorithm = InventoryAlgorithm.objects.get(id=request.GET.get('inventory_algorithm'))
parameters = InventoryAlgorithmParameter.objects.filter(inventory_algorithm=algorithm)
context = {
'parameters': {
parameter: InventoryAlgorithmParameterValue.objects.filter(parameter=parameter) for parameter in
parameters}}
return render(request, 'parameters_dropdown_list_options.html', context)
else:
return HttpResponse("")
class ResultMapAPI(APIView):
"""Rest API to get features from automatically generated result tables. Endpoint for Leaflet maps"""
@staticmethod
def get(request, *args, **kwargs):
layer = Layer.objects.get(table_name=kwargs['layer_name'])
feature_collection = layer.get_feature_collection()
features = feature_collection.objects.all()
serializer_class = BaseResultMapSerializer
serializer_class.Meta.model = feature_collection
serializer = serializer_class(features, many=True)
data = {
'catchment_id': layer.scenario.catchment_id,
'region_id': layer.scenario.region_id,
'geoJson': serializer.data,
}
return JsonResponse(data, safe=False)
class ScenarioResultView(DetailView):
"""
View with summaries of the results of each algorithm and a total summary.
"""
template_name = 'scenario_result_detail.html'
model = Scenario
context_object_name = 'scenario'
object = None
allow_edit = False
region_url = reverse_lazy('ajax_region_geometries')
load_region = True
catchment_url = reverse_lazy('ajax_catchment_geometries')
load_catchment = True
load_features = False
adjust_bounds_to_features = False
marker_style = {
'color': '#4061d2',
'fillOpacity': 1,
'stroke': False
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
scenario = self.object
result = ScenarioResult(scenario)
context['layers'] = [layer.as_dict() for layer in result.layers]
context['charts'] = result.get_charts()
context['allow_edit'] = self.allow_edit
context['map_config'] = {
'region_url': self.region_url,
'catchment_url': self.catchment_url,
'catchment_id': self.get_catchment_id(),
'load_catchment': self.load_catchment,
'load_features': self.load_features,
'adjust_bounds_to_features': self.adjust_bounds_to_features,
'region_id': self.object.region.id,
'load_region': self.load_region,
'markerStyle': self.marker_style
}
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object()
scenario = self.object
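        # A status value of 2 presumably corresponds to ScenarioStatus.Status.RUNNING
        # (set in ScenarioDetailView.post), i.e. the evaluation is still in progress,
        # so the progress page is rendered instead of the results.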
if scenario.status == 2:
context = {
'scenario': scenario,
'task_list': {'tasks': []}
}
for task in RunningTask.objects.filter(scenario=scenario):
context['task_list']['tasks'].append({
'task_id': task.uuid,
'algorithm_name': task.algorithm.name
})
return render(request, 'evaluation_progress.html', context)
else:
context = self.get_context_data()
return self.render_to_response(context)
def get_catchment_id(self):
return self.object.catchment.id
def test_func(self):
self.object = self.get_object()
standard_owner = get_default_owner()
if self.object.owner == standard_owner:
if self.request.user == standard_owner:
self.allow_edit = True
return True
elif self.object.owner == self.request.user:
self.allow_edit = True
return True
else:
return False
class ScenarioEvaluationProgressView(DetailView):
"""
The page users land on if a scenario is being calculated. The progress of the evaluation is shown and upon
finishing the calculation, the user is redirected to the result page.
"""
template_name = 'evaluation_progress.html'
model = Scenario
class ScenarioResultDetailMapView(DetailView):
"""View of an individual result map in large size"""
model = Layer
context_object_name = 'layer'
template_name = 'result_detail_map.html'
def get_object(self, **kwargs):
scenario = Scenario.objects.get(id=self.kwargs.get('pk'))
algorithm = InventoryAlgorithm.objects.get(id=self.kwargs.get('algorithm_pk'))
feedstock = SampleSeries.objects.get(id=self.kwargs.get('feedstock_pk'))
return Layer.objects.get(scenario=scenario, algorithm=algorithm, feedstock=feedstock)
def download_scenario_result_summary(request, scenario_pk):
scenario = Scenario.objects.get(id=scenario_pk)
result = ScenarioResult(scenario)
with io.StringIO(json.dumps(result.summary_dict(), indent=4)) as file:
response = HttpResponse(file, content_type='application/json')
response['Content-Disposition'] = f'attachment; filename=scenario_{scenario_pk}_result_summary.json'
return response
avg_line_length: 39.575342 | max_line_length: 139 | alphanum_fraction: 0.673985

hexsha: d1acdb1eaacc0eb45792fa18a2baa078cb4224ce | size: 151 | ext: py | lang: Python
max_stars: path Exercicios/Mundo1/ex003.py | repo mpaullos/cursoemvideo-python | head 80732626b6b5471ec7fea6dc01d83931e5cfd8fb | licenses ["MIT"] | count null | events null
max_issues: path Exercicios/Mundo1/ex003.py | repo mpaullos/cursoemvideo-python | head 80732626b6b5471ec7fea6dc01d83931e5cfd8fb | licenses ["MIT"] | count null | events null
max_forks: path Exercicios/Mundo1/ex003.py | repo mpaullos/cursoemvideo-python | head 80732626b6b5471ec7fea6dc01d83931e5cfd8fb | licenses ["MIT"] | count null | events null
content:
# Reads two integers and prints their sum (prompts and output are in Portuguese).
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
soma = n1 + n2
print('A soma entre {} e {} vale {}!'.format(n1, n2, soma))
avg_line_length: 37.75 | max_line_length: 57 | alphanum_fraction: 0.602649

hexsha: 2a8e3513acfbfe8c954078bc9d7b57c695f1d818 | size: 4,868 | ext: py | lang: Python
max_stars: path ngraph/frontends/onnx/tests/test_ops_nonlinear.py | repo NervanaSystems/ngraph-python | head ac032c83c7152b615a9ad129d54d350f9d6a2986 | licenses ["Apache-2.0"] | count 18 | events 2018-03-19T04:16:49.000Z – 2021-02-08T14:44:58.000Z
max_issues: path ngraph/frontends/onnx/tests/test_ops_nonlinear.py | repo rsumner31/ngraph | head 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | licenses ["Apache-2.0"] | count 2 | events 2019-04-16T06:41:49.000Z – 2019-05-06T14:08:13.000Z
max_forks: path ngraph/frontends/onnx/tests/test_ops_nonlinear.py | repo rsumner31/ngraph | head 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | licenses ["Apache-2.0"] | count 11 | events 2018-06-16T15:59:08.000Z – 2021-03-06T00:45:30.000Z
content:
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, division
import numpy as np
import onnx
import pytest
from ngraph.frontends.onnx.tests.utils import convert_and_calculate
def import_and_compute(op_type, input_data, **node_attrs):
data_inputs = [np.array(input_data)]
node = onnx.helper.make_node(op_type, inputs=['x'], outputs=['y'], **node_attrs)
return convert_and_calculate(node, data_inputs, data_inputs).pop()
def assert_onnx_import_equals_callable(onnx_op_type, python_function, data, **kwargs):
data = np.array(data, dtype=np.float32)
assert np.allclose(import_and_compute(onnx_op_type, data, **kwargs),
python_function(data, **kwargs))
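# Test helper pattern: build a single ONNX node with onnx.helper.make_node, run it
# through the ngraph ONNX importer via convert_and_calculate, and compare the result
# against a plain NumPy reference implementation of the same nonlinearity.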
def test_sigmoid():
def sigmoid(x):
return 1 / (1 + np.exp(-x))
assert_onnx_import_equals_callable('Sigmoid', sigmoid, [-2, -1., 0., 1., 2.])
assert_onnx_import_equals_callable('Sigmoid', sigmoid, [0.])
assert_onnx_import_equals_callable('Sigmoid', sigmoid, [-2, -1., 0., 1., 2.])
def test_tanh():
assert_onnx_import_equals_callable('Tanh', np.tanh, [-2, -1., 0., 1., 2.])
assert_onnx_import_equals_callable('Tanh', np.tanh, [0.])
assert_onnx_import_equals_callable('Tanh', np.tanh, [-2, -1., 0., 1., 2.])
def test_relu():
def relu(x):
return np.maximum(x, 0)
assert_onnx_import_equals_callable('Relu', relu, [-2, -1., 0., 1., 2.])
assert_onnx_import_equals_callable('Relu', relu, [0.])
assert_onnx_import_equals_callable('Relu', relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])
assert_onnx_import_equals_callable('Relu', relu, [[1, 2, 3], [4, 5, 6]])
assert_onnx_import_equals_callable('Relu', relu, [[-3, -2, -1], [1, 2, 3]])
def test_leaky_relu():
def leaky_relu(x, alpha=0.01):
return np.maximum(alpha * x, x)
assert_onnx_import_equals_callable('LeakyRelu', leaky_relu, [-2, -1., 0., 1., 2.], alpha=0.5)
assert_onnx_import_equals_callable('LeakyRelu', leaky_relu, [0.])
assert_onnx_import_equals_callable('LeakyRelu', leaky_relu,
[-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], alpha=1)
assert_onnx_import_equals_callable('LeakyRelu', leaky_relu, [[1, 2, 3], [4, 5, 6]], alpha=0.2)
assert_onnx_import_equals_callable('LeakyRelu', leaky_relu, [[-3, -2, -1], [1, 2, 3]])
@pytest.mark.parametrize('x,slope', [
([-2, -1., 0., 1., 2.], 0.5),
([0.], 1),
([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1),
([[1, 2, 3], [4, 5, 6]], 0.5),
([[-3, -2, -1], [1, 2, 3]], 1),
])
def test_parametric_relu(x, slope):
    def parametric_relu(x, slope):
        return np.where(x < 0, slope * x, x)
    x, slope = np.array(x), np.array(slope)
    expected_output = parametric_relu(x, slope)
node = onnx.helper.make_node('PRelu', inputs=['x', 'slope'], outputs=['y'])
output = convert_and_calculate(node, [x, slope], [expected_output]).pop()
assert np.allclose(output, expected_output)
def test_selu():
    # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, f(x) = gamma * x for x > 0
def selu(x, alpha=1.6732, gamma=1.0507):
return np.where(x <= 0, gamma * (alpha * np.exp(x) - alpha), gamma * x)
assert_onnx_import_equals_callable('Selu', selu, [-2, -1., 0., 1., 2.])
assert_onnx_import_equals_callable('Selu', selu, [0.])
assert_onnx_import_equals_callable('Selu', selu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])
assert_onnx_import_equals_callable('Selu', selu, [[1, 2, 3], [4, 5, 6]])
assert_onnx_import_equals_callable('Selu', selu, [-2, -1., 0., 1., 2.], gamma=0.5, alpha=0.5)
def test_elu():
# f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0
def elu(x, alpha=1):
return np.where(x < 0, alpha * (np.exp(x) - 1), x)
assert_onnx_import_equals_callable('Elu', elu, [-2, -1., 0., 1., 2.])
assert_onnx_import_equals_callable('Elu', elu, [0.])
assert_onnx_import_equals_callable('Elu', elu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])
assert_onnx_import_equals_callable('Elu', elu, [[1, 2, 3], [4, 5, 6]])
assert_onnx_import_equals_callable('Elu', elu, [-2, -1., 0., 1., 2.], alpha=0.5)
avg_line_length: 42.330435 | max_line_length: 98 | alphanum_fraction: 0.615859

hexsha: b1e2167bca2d6040007b1b06c4c4ba5d6314b44a | size: 64,763 | ext: py | lang: Python
max_stars: path ivi/tektronix/tektronixBaseScope.py | repo sacherjj/python-ivi | head 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | licenses ["MIT"] | count 161 | events 2015-01-23T17:43:01.000Z – 2022-03-29T14:42:42.000Z
max_issues: path ivi/tektronix/tektronixBaseScope.py | repo sacherjj/python-ivi | head 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | licenses ["MIT"] | count 45 | events 2015-01-15T13:35:04.000Z – 2021-06-03T01:58:55.000Z
max_forks: path ivi/tektronix/tektronixBaseScope.py | repo sacherjj/python-ivi | head 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | licenses ["MIT"] | count 87 | events 2015-01-31T10:55:23.000Z – 2022-03-17T08:18:47.000Z
content:
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import array
import sys
import time
from .. import ivi
from .. import scope
from .. import scpi
from .. import extra
AcquisitionTypeMapping = {
'normal': 'sample',
'peak_detect': 'peakdetect',
'high_resolution': 'hires',
'average': 'average',
'envelope': 'envelope'}
VerticalCoupling = set(['ac', 'dc'])
TriggerTypeMapping = {
'edge': 'edge',
'runt': 'pulse',
'width': 'pulse',
'glitch': 'pulse',
'tv': 'video',
#'immediate': '',
'ac_line': 'edge',
'logic': 'logic',
'bus': 'bus'}
TriggerCouplingMapping = {
'ac': 'ac',
'dc': 'dc',
'hf_reject': 'hfrej',
'lf_reject': 'lfrej',
'noise_reject': 'noiserej'}
TVTriggerEventMapping = {'field1': 'odd',
'field2': 'even',
'any_field': 'allfields',
'any_line': 'alllines',
'line_number': 'numeric'}
TVTriggerFormatMapping = {'ntsc': 'ntsc',
'pal': 'pal',
'secam': 'secam',
'bilevelcustom': 'bilevelcustom',
'trilevelcustom': 'trilevelcustom',
'hd480p60' : 'hd480p60',
'hd576p50' : 'hd576p50',
'hd720p30' : 'hd720p30',
'hd720p50' : 'hd720p50',
'hd720p60' : 'hd720p60',
'hd875i60' : 'hd875i60',
'hd1080p24' : 'hd1080p24',
'hd1080sf24' : 'hd1080sf24',
'hd1080i50' : 'hd1080i50',
'hd1080i60' : 'hd1080i60',
'hd1080p25' : 'hd1080p25',
'hd1080p30' : 'hd1080p30',
'hd1080p50' : 'hd1080p50',
'hd1080p60' : 'hd1080p60'}
PolarityMapping = {'positive': 'positive',
'negative': 'negative'}
PolarityMapping3 = {'positive': 'positive',
'negative': 'negative',
'either': 'either'}
GlitchConditionMapping = {'less_than': 'lessthan',
'greater_than': 'morethan',
'equal': 'equal',
'unequal': 'unequal'}
WidthConditionMapping = {'within': 'within', 'outside': 'outside'}
SampleModeMapping = {'real_time': 'rtim',
'equivalent_time': 'etim',
'segmented': 'segm'}
SlopeMapping = {
'positive': 'rise',
'negative': 'fall',
'either': 'either'}
MeasurementFunctionMapping = {
'rise_time': 'rise',
'fall_time': 'fall',
'frequency': 'frequency',
'period': 'period',
'voltage_rms': 'rms',
'voltage_peak_to_peak': 'pk2pk',
'voltage_max': 'maximum',
'voltage_min': 'minimum',
'voltage_high': 'high',
'voltage_low': 'low',
'voltage_average': 'mean',
'width_negative': 'nwidth',
'width_positive': 'pwidth',
'duty_cycle_negative': 'nduty',
'duty_cycle_positive': 'pduty',
'amplitude': 'amplitude',
'voltage_cycle_rms': 'crms',
'voltage_cycle_average': 'cmean',
'overshoot': 'tovershoot',
'area': 'area',
'burst': 'burst',
'cycle_area': 'carea',
'overshoot_negative': 'novershoot',
'overshoot_positive': 'povershoot',
'edgecount_negative': 'nedgecount',
'edgecount_positive': 'pedgecount',
'pulsecount_negative': 'npulsecount',
'pulsecount_positive': 'ppulsecount',
'histogram_hits': 'hits',
'histogram_peak_hits': 'peakhits',
'histogram_median': 'median',
'histogram_sigma1': 'sigma1',
'histogram_sigma2': 'sigma2',
'histogram_sigma3': 'sigma3',
'histogram_stdev': 'stdev',
'histogram_waveforms': 'waveforms',
'phase': 'phase',
'delay': 'delay'}
MeasurementFunctionMappingDigital = {
'rise_time': 'risetime',
'fall_time': 'falltime',
'frequency': 'frequency',
'period': 'period',
'width_negative': 'nwidth',
'width_positive': 'pwidth',
'duty_cycle_positive': 'dutycycle'}
ScreenshotImageFormatMapping = {
'tif': 'tiff',
'tiff': 'tiff',
'bmp': 'bmp',
'bmp24': 'bmp',
'png': 'png',
'png24': 'png'}
TimebaseModeMapping = {
'main': 'main',
'window': 'window',
'xy': 'xy'}
TriggerModifierMapping = {'none': 'normal', 'auto': 'auto'}
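# The dictionaries above map IVI-style option names (keys) to the corresponding
# Tektronix SCPI keywords (values) used when composing commands for the instrument.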
class tektronixBaseScope(scpi.common.IdnCommand, scpi.common.Reset, scpi.common.Memory,
scpi.common.SystemSetup,
scope.Base, scope.TVTrigger, scope.RuntTrigger,
scope.GlitchTrigger, scope.WidthTrigger, scope.AcLineTrigger,
scope.WaveformMeasurement, scope.MinMaxWaveform,
scope.ContinuousAcquisition, scope.AverageAcquisition,
scope.TriggerModifier, scope.AutoSetup,
extra.common.Screenshot,
ivi.Driver):
"Tektronix generic IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._channel_label = list()
self._channel_probe_skew = list()
self._channel_scale = list()
self._channel_trigger_level = list()
self._channel_invert = list()
self._channel_probe_id = list()
self._channel_bw_limit = list()
super(tektronixBaseScope, self).__init__(*args, **kwargs)
self._memory_size = 10
self._memory_offset = 1
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._horizontal_divisions = 10
self._vertical_divisions = 10
self._acquisition_segmented_count = 2
self._acquisition_segmented_index = 1
self._timebase_mode = 'main'
self._timebase_reference = 'center'
self._timebase_position = 0.0
self._timebase_range = 1e-3
self._timebase_scale = 100e-6
self._timebase_window_position = 0.0
self._timebase_window_range = 5e-6
self._timebase_window_scale = 500e-9
self._display_screenshot_image_format_mapping = ScreenshotImageFormatMapping
self._display_vectors = True
self._display_labels = True
self._identity_description = "Tektronix generic IVI oscilloscope driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Tektronix"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 4
self._identity_specification_minor_version = 1
self._identity_supported_instrument_models = ['DPO4032', 'DPO4034', 'DPO4054',
'DPO4104', 'DPO4014B', 'DPO4034B', 'DPO4054B', 'DPO4102B', 'DPO4104B',
'MSO4032', 'MSO4034', 'MSO4054', 'MSO4104', 'MSO4014B', 'MSO4034B',
'MSO4054B', 'MSO4102B', 'MSO4104B', 'MDO4054', 'MDO4104', 'MDO4014B',
'MDO4034B', 'MDO4054B', 'MDO4104B', 'MDO3012', 'MDO3014', 'MDO3022',
'MDO3024', 'MDO3032', 'MDO3034', 'MDO3052', 'MDO3054', 'MDO3102',
'MDO3104']
self._add_property('channels[].invert',
self._get_channel_invert,
self._set_channel_invert,
None,
ivi.Doc("""
Selects whether or not to invert the channel.
"""))
self._add_property('channels[].label',
self._get_channel_label,
self._set_channel_label,
None,
ivi.Doc("""
Sets the channel label. Setting a channel label also adds the label to
the nonvolatile label list.
"""))
self._add_property('channels[].probe_id',
self._get_channel_probe_id,
None,
None,
ivi.Doc("""
Returns the type of probe attached to the channel.
"""))
self._add_property('channels[].probe_skew',
self._get_channel_probe_skew,
self._set_channel_probe_skew,
None,
ivi.Doc("""
Specifies the channel-to-channel skew factor for the channel. Each analog
channel can be adjusted + or - 100 ns for a total of 200 ns difference
between channels. This can be used to compensate for differences in cable
delay. Units are seconds.
"""))
self._add_property('channels[].scale',
self._get_channel_scale,
self._set_channel_scale,
None,
ivi.Doc("""
Specifies the vertical scale, or units per division, of the channel. Units
are volts.
"""))
self._add_property('channels[].trigger_level',
self._get_channel_trigger_level,
self._set_channel_trigger_level,
None,
ivi.Doc("""
Specifies the trigger level of the channel. Units are volts.
"""))
self._add_property('timebase.mode',
self._get_timebase_mode,
self._set_timebase_mode,
None,
ivi.Doc("""
Sets the current time base. There are four time base modes:
* 'main': normal timebase
* 'window': zoomed or delayed timebase
* 'xy': channels are plotted against each other, no timebase
* 'roll': data moves continuously from left to right
"""))
self._add_property('timebase.position',
self._get_timebase_position,
self._set_timebase_position,
None,
ivi.Doc("""
Sets the time interval between the trigger event and the display reference
point on the screen. The maximum position value depends on the time/division
settings.
"""))
self._add_property('timebase.range',
self._get_timebase_range,
self._set_timebase_range,
None,
ivi.Doc("""
Sets the full-scale horizontal time in seconds for the main window. The
range is 10 times the current time-per-division setting.
"""))
self._add_property('timebase.scale',
self._get_timebase_scale,
self._set_timebase_scale,
None,
ivi.Doc("""
Sets the horizontal scale or units per division for the main window.
"""))
self._add_property('timebase.window.position',
self._get_timebase_window_position,
self._set_timebase_window_position,
None,
ivi.Doc("""
Sets the horizontal position in the zoomed (delayed) view of the main
sweep. The main sweep range and the main sweep horizontal position
determine the range for this command. The value for this command must
keep the zoomed view window within the main sweep range.
"""))
self._add_property('timebase.window.range',
self._get_timebase_window_range,
self._set_timebase_window_range,
None,
ivi.Doc("""
Sets the fullscale horizontal time in seconds for the zoomed (delayed)
window. The range is 10 times the current zoomed view window seconds per
division setting. The main sweep range determines the range for this
command. The maximum value is one half of the timebase.range value.
"""))
self._add_property('timebase.window.scale',
self._get_timebase_window_scale,
self._set_timebase_window_scale,
None,
ivi.Doc("""
Sets the zoomed (delayed) window horizontal scale (seconds/division). The
main sweep scale determines the range for this command. The maximum value
is one half of the timebase.scale value.
"""))
self._add_property('display.vectors',
self._get_display_vectors,
self._set_display_vectors,
None,
ivi.Doc("""
When enabled, draws a line between consecutive waveform data points.
"""))
self._add_method('display.clear',
self._display_clear,
ivi.Doc("""
Clears the display and resets all associated measurements. If the
oscilloscope is stopped, all currently displayed data is erased. If the
oscilloscope is running, all the data in active channels and functions is
erased; however, new data is displayed on the next acquisition.
"""))
self._add_method('system.display_string',
self._system_display_string,
ivi.Doc("""
Writes a string to the advisory line on the instrument display. Send None
or an empty string to clear the advisory line.
"""))
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
self._channel_count = self._analog_channel_count + self._digital_channel_count
super(tektronixBaseScope, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility.reset()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_unlock_object(self):
pass
def _init_channels(self):
try:
super(tektronixBaseScope, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_label = list()
self._channel_probe_skew = list()
self._channel_invert = list()
self._channel_probe_id = list()
self._channel_scale = list()
self._channel_trigger_level = list()
self._channel_bw_limit = list()
self._analog_channel_name = list()
for i in range(self._analog_channel_count):
self._channel_name.append("ch%d" % (i+1))
self._channel_label.append("")
self._analog_channel_name.append("ch%d" % (i+1))
self._channel_probe_skew.append(0)
self._channel_scale.append(1.0)
self._channel_trigger_level.append(0.0)
self._channel_invert.append(False)
self._channel_probe_id.append("NONE")
self._channel_bw_limit.append(False)
# digital channels
self._digital_channel_name = list()
if (self._digital_channel_count > 0):
for i in range(self._digital_channel_count):
self._channel_name.append("d%d" % i)
self._channel_label.append("")
self._digital_channel_name.append("d%d" % i)
for i in range(self._analog_channel_count, self._channel_count):
self._channel_input_impedance[i] = 100000
self._channel_input_frequency_max[i] = 1e9
self._channel_probe_attenuation[i] = 1
self._channel_coupling[i] = 'dc'
self._channel_offset[i] = 0
self._channel_range[i] = 1
self._channel_count = self._analog_channel_count + self._digital_channel_count
self.channels._set_list(self._channel_name)
self._channel_name_dict = ivi.get_index_dict(self._channel_name)
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
esr = self._ask("*esr?")
error_code, error_message = self._ask("evmsg?").split(',')
error_code = int(error_code)
error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
self._write("diag:select all")
self._write("diag:loop:option once")
self._write("diag:state execute")
# wait for test to complete
res = ''
while 1:
res = self._ask("diag:result:flag?").strip('"').lower()
if res != 'in progress':
break
time.sleep(5)
code = 0 if res == 'pass' else 1
if code != 0:
message = "Self test failed"
return (code, message)
def _system_display_string(self, string = None):
if string is None:
string = ""
if not self._driver_operation_simulate:
self._write(":message:show \"%s\"" % string)
self._write(":message:state 1")
def _display_fetch_screenshot(self, format='png', invert=False):
if self._driver_operation_simulate:
return b''
if format not in self._display_screenshot_image_format_mapping:
raise ivi.ValueNotSupportedException()
format = self._display_screenshot_image_format_mapping[format]
self._write(":hardcopy:inksaver %d" % int(bool(invert)))
self._write(":save:image:fileformat %s" % format)
self._write(":hardcopy start")
return self._read_raw()
def _get_timebase_mode(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
if int(self._ask(":zoom:state?")):
self._timebase_mode = "window"
elif self._ask(":display:xy?") == "TRIGGERED":
self._timebase_mode = "xy"
else:
self._timebase_mode = "main"
self._set_cache_valid()
return self._timebase_mode
def _set_timebase_mode(self, value):
if value not in TimebaseModeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
if value == 'window':
self._write(":display:xy off")
self._write(":zoom:state 1")
elif value == 'xy':
self._write(":zoom:state 0")
self._write(":display:xy triggered")
else:
self._write(":zoom:state 0")
self._write(":display:xy off")
self._timebase_mode = value
self._set_cache_valid()
def _get_timebase_position(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_position = float(self._ask(":horizontal:delay:time?"))
self._set_cache_valid()
return self._timebase_position
def _set_timebase_position(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":horizontal:delay:mode 1")
self._write(":horizontal:delay:time %e" % value)
self._timebase_position = value
self._set_cache_valid()
self._set_cache_valid(False, 'acquisition_start_time')
self._set_cache_valid(False, 'timebase_window_position')
def _get_timebase_range(self):
return self._get_timebase_scale() * self._horizontal_divisions
def _set_timebase_range(self, value):
value = float(value)
self._set_timebase_scale(value / self._horizontal_divisions)
def _get_timebase_scale(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_scale = float(self._ask(":horizontal:scale?"))
self._timebase_range = self._timebase_scale * self._horizontal_divisions
self._set_cache_valid()
return self._timebase_scale
def _set_timebase_scale(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":horizontal:scale %e" % value)
self._timebase_scale = value
self._timebase_range = value * self._horizontal_divisions
self._set_cache_valid()
self._set_cache_valid(False, 'timebase_window_range')
def _get_timebase_window_position(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_window_position = (float(self._ask(":zoom:zoom1:position?"))-50) / 100 * self._get_timebase_range() + self._get_timebase_position()
self._set_cache_valid()
return self._timebase_window_position
def _set_timebase_window_position(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":zoom:zoom1:position %e" % (((value - self._get_timebase_position()) / self._get_timebase_range() * 100) + 50))
self._timebase_window_position = value
self._set_cache_valid()
def _get_timebase_window_range(self):
return self._get_timebase_window_scale() * self._horizontal_divisions
def _set_timebase_window_range(self, value):
value = float(value)
self._set_timebase_window_scale(value / self._horizontal_divisions)
def _get_timebase_window_scale(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._timebase_window_scale = float(self._ask(":zoom:zoom1:scale?"))
self._timebase_window_range = self._timebase_window_scale * self._horizontal_divisions
self._set_cache_valid()
return self._timebase_window_scale
def _set_timebase_window_scale(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":zoom:zoom1:scale %e" % value)
self._timebase_window_scale = value
self._timebase_window_range = value * self._horizontal_divisions
self._set_cache_valid()
def _get_display_vectors(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._display_vectors = not bool(int(self._ask(":display:style:dotsonly?")))
self._set_cache_valid()
return self._display_vectors
def _set_display_vectors(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write(":display:style:dotsonly %d" % int(not value))
self._display_vectors = value
self._set_cache_valid()
def _display_clear(self):
if not self._driver_operation_simulate:
self._write(":message:state 0")
self._write(":display:persistence clear")
def _get_acquisition_start_time(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_start_time = float(self._ask(":horizontal:delay:time?")) - self._get_acquisition_time_per_record() / 2
self._set_cache_valid()
return self._acquisition_start_time
def _set_acquisition_start_time(self, value):
value = float(value)
value = value + self._get_acquisition_time_per_record() / 2
if not self._driver_operation_simulate:
self._write(":horizontal:delay:time %e" % value)
self._acquisition_start_time = value
self._set_cache_valid()
def _get_acquisition_type(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":acquire:mode?").lower()
self._acquisition_type = [k for k,v in AcquisitionTypeMapping.items() if v==value][0]
self._set_cache_valid()
return self._acquisition_type
def _set_acquisition_type(self, value):
if value not in AcquisitionTypeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":acquire:mode %s" % AcquisitionTypeMapping[value])
self._acquisition_type = value
self._set_cache_valid()
def _get_acquisition_number_of_points_minimum(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_number_of_points_minimum = int(self._ask(":horizontal:recordlength?"))
self._set_cache_valid()
return self._acquisition_number_of_points_minimum
def _set_acquisition_number_of_points_minimum(self, value):
value = int(value)
# coerce value?
if not self._driver_operation_simulate:
self._write(":horizontal:recordlength %d" % value)
self._acquisition_number_of_points_minimum = value
self._set_cache_valid()
def _get_acquisition_record_length(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_record_length = int(self._ask(":horizontal:recordlength?"))
self._set_cache_valid()
return self._acquisition_record_length
def _get_acquisition_time_per_record(self):
return self._get_timebase_range()
def _set_acquisition_time_per_record(self, value):
self._set_timebase_range(value)
def _get_channel_label(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_label[index] = self._ask(":%s:label?" % self._channel_name[index]).strip('"')
self._set_cache_valid(index=index)
return self._channel_label[index]
def _set_channel_label(self, index, value):
value = str(value)
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate:
self._write(":%s:label \"%s\"" % (self._channel_name[index], value))
self._channel_label[index] = value
self._set_cache_valid(index=index)
def _get_channel_enabled(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_enabled[index] = bool(int(self._ask(":select:%s?" % self._channel_name[index])))
self._set_cache_valid(index=index)
return self._channel_enabled[index]
def _set_channel_enabled(self, index, value):
value = bool(value)
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate:
self._write(":select:%s %d" % (self._channel_name[index], int(value)))
self._channel_enabled[index] = value
self._set_cache_valid(index=index)
def _get_channel_input_impedance(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_input_impedance[index] = float(self._ask(":%s:termination?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_input_impedance[index]
def _set_channel_input_impedance(self, index, value):
value = float(value)
index = ivi.get_index(self._analog_channel_name, index)
if value != 50 and value != 1000000:
raise Exception('Invalid impedance selection')
if not self._driver_operation_simulate:
self._write(":%s:termination %f" % (self._channel_name[index], value))
self._channel_input_impedance[index] = value
self._set_cache_valid(index=index)
def _get_channel_input_frequency_max(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_input_frequency_max[index] = float(self._ask(":%s:bandwidth?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_input_frequency_max[index]
def _set_channel_input_frequency_max(self, index, value):
value = float(value)
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate:
self._write(":%s:bandwidth %e" % (self._channel_name[index], value))
self._channel_input_frequency_max[index] = value
self._set_cache_valid(index=index)
def _get_channel_probe_attenuation(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_probe_attenuation[index] = 1/float(self._ask(":%s:probe:gain?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_probe_attenuation[index]
def _set_channel_probe_attenuation(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
value = 1/float(value)
if not self._driver_operation_simulate:
self._write(":%s:probe:gain %e" % (self._channel_name[index], value))
self._channel_probe_attenuation[index] = value
self._set_cache_valid(index=index)
self._set_cache_valid(False, 'channel_offset', index)
self._set_cache_valid(False, 'channel_scale', index)
def _get_channel_probe_skew(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_probe_skew[index] = float(self._ask(":%s:deskew?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_probe_skew[index]
def _set_channel_probe_skew(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:deskew %e" % (self._channel_name[index], value))
self._channel_probe_skew[index] = value
self._set_cache_valid(index=index)
def _get_channel_invert(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_invert[index] = bool(int(self._ask(":%s:invert?" % self._channel_name[index])))
self._set_cache_valid(index=index)
return self._channel_invert[index]
def _set_channel_invert(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write(":%s:invert %e" % (self._channel_name[index], int(value)))
self._channel_invert[index] = value
self._set_cache_valid(index=index)
def _get_channel_probe_id(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_probe_id[index] = self._ask(":%s:probe:id?" % self._channel_name[index])
self._set_cache_valid(index=index)
return self._channel_probe_id[index]
def _get_channel_coupling(self, index):
index = ivi.get_index(self._analog_channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_coupling[index] = self._ask(":%s:coupling?" % self._channel_name[index]).lower()
self._set_cache_valid(index=index)
return self._channel_coupling[index]
def _set_channel_coupling(self, index, value):
index = ivi.get_index(self._analog_channel_name, index)
if value not in VerticalCoupling:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":%s:coupling %s" % (self._channel_name[index], value))
self._channel_coupling[index] = value
self._set_cache_valid(index=index)
def _get_channel_offset(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_offset[index] = -float(self._ask(":%s:position?" % self._channel_name[index])) * self._get_channel_scale(index)
self._set_cache_valid(index=index)
return self._channel_offset[index]
def _set_channel_offset(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:position %e" % (self._channel_name[index], -value / self._get_channel_scale(index)))
self._channel_offset[index] = value
self._set_cache_valid(index=index)
def _get_channel_range(self, index):
return self._get_channel_scale(index) * self._vertical_divisions
def _set_channel_range(self, index, value):
value = float(value)
self._set_channel_scale(index, value / self._vertical_divisions)
def _get_channel_scale(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_scale[index] = float(self._ask(":%s:scale?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_scale[index]
def _set_channel_scale(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:scale %e" % (self._channel_name[index], value))
self._channel_scale[index] = value
self._set_cache_valid(index=index)
self._set_cache_valid(False, "channel_offset", index)
def _get_channel_trigger_level(self, index):
index = ivi.get_index(self._channel_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
self._channel_trigger_level[index] = float(self._ask(":trigger:a:level:%s?" % self._channel_name[index]))
self._set_cache_valid(index=index)
return self._channel_trigger_level[index]
def _set_channel_trigger_level(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:a:level:%s %e" % (self._channel_name[index], value))
self._channel_trigger_level[index] = value
self._set_cache_valid(index=index)
self._set_cache_valid(False, "trigger_level")
def _get_measurement_status(self):
if not self._driver_operation_simulate:
if int(self._ask(":acquire:numacq?")) > 0:
return "complete"
elif int(self._ask(":acquire:state?")) > 0:
return "in_progress"
else:
return "unknown"
return self._measurement_status
    def _get_trigger_coupling(self):
        if not self._driver_operation_simulate and not self._get_cache_valid():
            value = self._ask(":trigger:a:edge:coupling?").lower()
            self._trigger_coupling = [k for k,v in TriggerCouplingMapping.items() if v==value][0]
            self._set_cache_valid()
        return self._trigger_coupling
def _set_trigger_coupling(self, value):
if value not in TriggerCouplingMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:edge:coupling %s" % TriggerCouplingMapping[value])
self._trigger_coupling = value
self._set_cache_valid()
def _get_trigger_holdoff(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_holdoff = float(self._ask(":trigger:a:holdoff?"))
self._set_cache_valid()
return self._trigger_holdoff
def _set_trigger_holdoff(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:a:holdoff %e" % value)
self._trigger_holdoff = value
self._set_cache_valid()
def _get_trigger_level(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
ch = self._get_trigger_source()
try:
self._trigger_level = self._get_channel_trigger_level(ch)
self._set_cache_valid()
except:
pass
return self._trigger_level
def _set_trigger_level(self, value):
value = float(value)
if not self._driver_operation_simulate:
ch = self._get_trigger_source()
self._set_channel_trigger_level(ch, value)
self._trigger_level = value
self._set_cache_valid()
def _get_trigger_edge_slope(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:edge:slope?").lower()
self._trigger_edge_slope = [k for k,v in SlopeMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_edge_slope
def _set_trigger_edge_slope(self, value):
if value not in SlopeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:edge:slope %s" % SlopeMapping[value])
self._trigger_edge_slope = value
self._set_cache_valid()
def _get_trigger_source(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
t = self._ask(":trigger:a:type?").lower()
if t == 'edge':
value = self._ask(":trigger:a:edge:source?").lower()
elif t == 'logic':
# TODO
value = ''
elif t == 'pulse':
pc = self._ask(":trigger:a:pulse:class?").lower()
if pc == 'runt':
value = self._ask(":trigger:a:runt:source?").lower()
elif pc == 'width':
value = self._ask(":trigger:a:pulsewidth:source?").lower()
elif pc == 'transition':
value = self._ask(":trigger:a:transition:source?").lower()
elif pc == 'timeout':
value = self._ask(":trigger:a:timeout:source?").lower()
elif t == 'bus':
# TODO
value = ''
elif t == 'video':
value = self._ask(":trigger:a:video:source?").lower()
# TODO process value
self._trigger_source = value
self._set_cache_valid()
return self._trigger_source
def _set_trigger_source(self, value):
if hasattr(value, 'name'):
value = value.name
value = str(value)
if value not in self._channel_name:
raise ivi.UnknownPhysicalNameException()
if not self._driver_operation_simulate:
t = self._ask(":trigger:a:type?").lower()
if t == 'edge':
self._write(":trigger:a:edge:source %s" % value)
elif t == 'logic':
# TODO
pass
elif t == 'pulse':
pc = self._ask(":trigger:a:pulse:class?").lower()
if pc == 'runt':
self._write(":trigger:a:runt:source %s" % value)
elif pc == 'width':
self._write(":trigger:a:pulsewidth:source %s" % value)
elif pc == 'transition':
self._write(":trigger:a:transition:source %s" % value)
elif pc == 'timeout':
self._write(":trigger:a:timeout:source %s" % value)
elif t == 'bus':
# TODO
pass
elif t == 'video':
self._write(":trigger:a:video:source %s" % value)
#self._write(":trigger:source %s" % value)
self._trigger_source = value
self._set_cache_valid()
self._set_cache_valid(False, 'trigger_level')
self._set_cache_valid(False, 'trigger_runt_threshold_high')
self._set_cache_valid(False, 'trigger_runt_threshold_low')
for i in range(self._analog_channel_count): self._set_cache_valid(False, 'channel_trigger_level', i)
def _get_trigger_type(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:type?").lower()
if value == 'edge':
src = self._ask(":trigger:a:edge:source?").lower()
if src == 'line':
value = 'ac_line'
elif value == 'logic':
# TODO
value = 'logic'
elif value == 'pulse':
pc = self._ask(":trigger:a:pulse:class?").lower()
if pc == 'width':
wh = self._ask(":trigger:a:pulsewidth:when?").lower()
if wh in GlitchConditionMapping.values():
value = 'glitch'
else:
value = 'width'
else:
value = pc
elif value == 'bus':
# TODO
value = 'bus'
elif value == 'video':
value = 'tv'
#else:
# value = [k for k,v in TriggerTypeMapping.items() if v==value][0]
self._trigger_type = value
self._set_cache_valid()
return self._trigger_type
def _set_trigger_type(self, value):
if value not in TriggerTypeMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:type %s" % TriggerTypeMapping[value])
if value == 'ac_line':
self._write(":trigger:a:edge:source line")
elif value in ['runt', 'width', 'glitch', 'transition', 'timeout']:
self._write(":trigger:a:pulse:class %s" % value)
if value == 'glitch':
t = self._ask(":trigger:a:pulsewidth:when?").lower()
if t not in GlitchConditionMapping.values():
self._write(":trigger:a:pulsewidth:when %s" % GlitchConditionMapping[self._trigger_glitch_condition])
elif value == 'width':
t = self._ask(":trigger:a:pulsewidth:when?").lower()
if t not in WidthConditionMapping.values():
self._write(":trigger:a:pulsewidth:when %s" % WidthConditionMapping[self._trigger_width_condition])
self._trigger_type = value
self._set_cache_valid()
self._set_cache_valid(False, 'trigger_source')
self._set_cache_valid(False, 'trigger_level')
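    # How IVI trigger types map onto this instrument (see the getter/setter
    # above): 'ac_line' is an edge trigger whose source is forced to "line";
    # 'tv' corresponds to the video type; and the pulse classes cover 'runt',
    # 'width', 'glitch', 'transition' and 'timeout', where 'glitch' and 'width'
    # share the pulse-width class and are distinguished only by the
    # ":trigger:a:pulsewidth:when" condition.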
def _measurement_abort(self):
pass
def _get_trigger_tv_trigger_event(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:video:sync?").lower()
# may need processing
self._trigger_tv_trigger_event = [k for k,v in TVTriggerEventMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_tv_trigger_event
def _set_trigger_tv_trigger_event(self, value):
if value not in TVTriggerEventMapping:
raise ivi.ValueNotSupportedException()
# may need processing
if not self._driver_operation_simulate:
self._write(":trigger:a:video:sync %s" % TVTriggerEventMapping[value])
self._trigger_tv_trigger_event = value
self._set_cache_valid()
def _get_trigger_tv_line_number(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = int(self._ask(":trigger:a:video:line?"))
# may need processing
self._trigger_tv_line_number = value
self._set_cache_valid()
return self._trigger_tv_line_number
def _set_trigger_tv_line_number(self, value):
value = int(value)
# may need processing
if not self._driver_operation_simulate:
self._write(":trigger:a:video:line %e" % value)
self._trigger_tv_line_number = value
self._set_cache_valid()
def _get_trigger_tv_polarity(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:video:polarity?").lower()
self._trigger_tv_polarity = [k for k,v in PolarityMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_tv_polarity
def _set_trigger_tv_polarity(self, value):
if value not in PolarityMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:video:polarity %s" % PolarityMapping[value])
self._trigger_tv_polarity = value
self._set_cache_valid()
def _get_trigger_tv_signal_format(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:video:standard?").lower()
self._trigger_tv_signal_format = [k for k,v in TVTriggerFormatMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_tv_signal_format
def _set_trigger_tv_signal_format(self, value):
if value not in TVTriggerFormatMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:video:standard %s" % TVTriggerFormatMapping[value])
self._trigger_tv_signal_format = value
self._set_cache_valid()
def _get_trigger_runt_threshold_high(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
ch = self._ask(":trigger:a:runt:source?")
self._trigger_runt_threshold_high = float(self._ask(":trigger:a:upperthreshold:%s?" % ch))
self._set_cache_valid()
return self._trigger_runt_threshold_high
def _set_trigger_runt_threshold_high(self, value):
value = float(value)
if not self._driver_operation_simulate:
ch = self._get_trigger_source()
self._write(":trigger:a:upperthreshold:%s %e" % (ch, value))
self._trigger_runt_threshold_high = value
self._set_cache_valid()
def _get_trigger_runt_threshold_low(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
ch = self._ask(":trigger:a:runt:source?")
self._trigger_runt_threshold_low = float(self._ask(":trigger:a:lowerthreshold:%s?" % ch))
self._set_cache_valid()
return self._trigger_runt_threshold_low
def _set_trigger_runt_threshold_low(self, value):
value = float(value)
if not self._driver_operation_simulate:
ch = self._get_trigger_source()
self._write(":trigger:a:lowerthreshold:%s %e" % (ch, value))
self._trigger_runt_threshold_low = value
self._set_cache_valid()
self._set_cache_valid(False, 'trigger_level')
for i in range(self._analog_channel_count): self._set_cache_valid(False, 'channel_trigger_level', i)
def _get_trigger_runt_polarity(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:runt:polarity?").lower()
self._trigger_runt_polarity = [k for k,v in PolarityMapping3.items() if v==value][0]
self._set_cache_valid()
return self._trigger_runt_polarity
def _set_trigger_runt_polarity(self, value):
if value not in PolarityMapping3:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:runt:polarity %s" % PolarityMapping3[value])
self._trigger_runt_polarity = value
self._set_cache_valid()
# TODO: need runt condition and width
def _get_trigger_glitch_condition(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:pulsewidth:when?").lower()
if value in GlitchConditionMapping.values():
self._trigger_glitch_condition = [k for k,v in GlitchConditionMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_glitch_condition
def _set_trigger_glitch_condition(self, value):
if value not in GlitchConditionMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:when %s" % GlitchConditionMapping[value])
self._trigger_glitch_condition = value
self._set_cache_valid()
def _get_trigger_glitch_polarity(self):
return self._get_trigger_width_polarity()
def _set_trigger_glitch_polarity(self, value):
self._set_trigger_width_polarity(value)
def _get_trigger_glitch_width(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_glitch_width = float(self._ask(":trigger:a:pulsewidth:width?"))
self._set_cache_valid()
return self._trigger_glitch_width
def _set_trigger_glitch_width(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:width %e" % value)
self._trigger_glitch_width = value
self._set_cache_valid()
def _get_trigger_width_condition(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:pulsewidth:when?").lower()
if value in WidthConditionMapping.values():
self._trigger_width_condition = [k for k,v in WidthConditionMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_width_condition
def _set_trigger_width_condition(self, value):
if value not in WidthConditionMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:when %s" % WidthConditionMapping[value])
self._trigger_width_condition = value
self._set_cache_valid()
def _get_trigger_width_threshold_high(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_width_threshold_high = float(self._ask(":trigger:a:pulsewidth:highlimit?"))
self._set_cache_valid()
return self._trigger_width_threshold_high
def _set_trigger_width_threshold_high(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:highlimit %e" % value)
self._trigger_width_threshold_high = value
self._set_cache_valid()
def _get_trigger_width_threshold_low(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_width_threshold_low = float(self._ask(":trigger:a:pulsewidth:lowlimit?"))
self._set_cache_valid()
return self._trigger_width_threshold_low
def _set_trigger_width_threshold_low(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:lowlimit %e" % value)
self._trigger_width_threshold_low = value
self._set_cache_valid()
def _get_trigger_width_polarity(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:pulsewidth:polarity?").lower()
self._trigger_width_polarity = [k for k,v in PolarityMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_width_polarity
def _set_trigger_width_polarity(self, value):
if value not in PolarityMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:pulsewidth:polarity %s" % PolarityMapping[value])
self._trigger_width_polarity = value
self._set_cache_valid()
def _get_trigger_ac_line_slope(self):
return self._get_trigger_edge_slope()
def _set_trigger_ac_line_slope(self, value):
self._set_trigger_edge_slope(value)
def _measurement_fetch_waveform(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return ivi.TraceYT()
self._write(":data:source %s" % self._channel_name[index])
self._write(":data:encdg fastest")
self._write(":data:width 2")
self._write(":data:start 1")
self._write(":data:stop 1e10")
trace = ivi.TraceYT()
# Read preamble
pre = self._ask(":wfmoutpre?").split(';')
acq_format = pre[7].strip().upper()
points = int(pre[6])
point_size = int(pre[0])
point_enc = pre[2].strip().upper()
point_fmt = pre[3].strip().upper()
byte_order = pre[4].strip().upper()
trace.x_increment = float(pre[10])
trace.x_origin = float(pre[11])
trace.x_reference = int(float(pre[12]))
trace.y_increment = float(pre[14])
trace.y_reference = int(float(pre[15]))
trace.y_origin = float(pre[16])
        if acq_format != 'Y':
            raise ivi.UnexpectedResponseException()
        if point_enc != 'BINARY':
            raise ivi.UnexpectedResponseException()
# Read waveform data
raw_data = self._ask_for_ieee_block(":curve?")
self._read_raw() # flush buffer
# Store in trace object
        # slice length must match the number of bytes per point (points * point_size)
        if point_fmt == 'RP' and point_size == 1:
            trace.y_raw = array.array('B', raw_data[0:points])
        elif point_fmt == 'RP' and point_size == 2:
            trace.y_raw = array.array('H', raw_data[0:points*2])
        elif point_fmt == 'RI' and point_size == 1:
            trace.y_raw = array.array('b', raw_data[0:points])
        elif point_fmt == 'RI' and point_size == 2:
            trace.y_raw = array.array('h', raw_data[0:points*2])
elif point_fmt == 'FP' and point_size == 4:
trace.y_increment = 1
trace.y_reference = 0
trace.y_origin = 0
trace.y_raw = array.array('f', raw_data[0:points*4])
        else:
            raise ivi.UnexpectedResponseException()
if (byte_order == 'LSB') != (sys.byteorder == 'little'):
trace.y_raw.byteswap()
return trace
def _measurement_read_waveform(self, index, maximum_time):
return self._measurement_fetch_waveform(index)
def _measurement_initiate(self):
if not self._driver_operation_simulate:
self._write(":acquire:stopafter sequence")
self._write(":acquire:state run")
self._set_cache_valid(False, 'trigger_continuous')
def _get_reference_level_high(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._reference_level_high = float(self._ask(":measurement:reflevel:percent:high?"))
self._set_cache_valid()
return self._reference_level_high
def _set_reference_level_high(self, value):
value = float(value)
if value < 0: value = 0
if value > 100: value = 100
if not self._driver_operation_simulate:
self._write(":measurement:reflevel:method percent")
self._write(":measurement:reflevel:percent:high %e" % value)
self._reference_level_high = value
self._set_cache_valid()
def _get_reference_level_low(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._reference_level_low = float(self._ask(":measurement:reflevel:percent:low?"))
self._set_cache_valid()
return self._reference_level_low
def _set_reference_level_low(self, value):
value = float(value)
if value < 0: value = 0
if value > 100: value = 100
if not self._driver_operation_simulate:
self._write(":measurement:reflevel:method percent")
self._write(":measurement:reflevel:percent:low %e" % value)
self._reference_level_low = value
self._set_cache_valid()
def _get_reference_level_middle(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._reference_level_middle = float(self._ask(":measurement:reflevel:percent:mid1?"))
self._set_cache_valid()
return self._reference_level_middle
def _set_reference_level_middle(self, value):
value = float(value)
if value < 0: value = 0
if value > 100: value = 100
if not self._driver_operation_simulate:
self._write(":measurement:reflevel:method percent")
self._write(":measurement:reflevel:percent:mid1 %e" % value)
self._write(":measurement:reflevel:percent:mid2 %e" % value)
self._reference_level_middle = value
self._set_cache_valid()
def _measurement_fetch_waveform_measurement(self, index, measurement_function, ref_channel = None):
index = ivi.get_index(self._channel_name, index)
if index < self._analog_channel_count:
if measurement_function not in MeasurementFunctionMapping:
raise ivi.ValueNotSupportedException()
func = MeasurementFunctionMapping[measurement_function]
else:
if measurement_function not in MeasurementFunctionMappingDigital:
raise ivi.ValueNotSupportedException()
func = MeasurementFunctionMappingDigital[measurement_function]
if not self._driver_operation_simulate:
self._write(":measurement:immed:type %s" % func)
self._write(":measurement:immed:source1 %s" % self._channel_name[index])
if measurement_function in ['ratio', 'phase', 'delay']:
if hasattr(ref_channel, 'name'):
ref_channel = ref_channel.name
ref_index = ivi.get_index(self._channel_name, ref_channel)
self._write(":measurement:immed:source2 %s" % self._channel_name[ref_index])
return float(self._ask(":measurement:immed:value?"))
return 0
def _measurement_read_waveform_measurement(self, index, measurement_function, maximum_time):
return self._measurement_fetch_waveform_measurement(index, measurement_function)
def _get_acquisition_number_of_envelopes(self):
return self._acquisition_number_of_envelopes
def _set_acquisition_number_of_envelopes(self, value):
self._acquisition_number_of_envelopes = value
def _measurement_fetch_waveform_min_max(self, index):
index = ivi.get_index(self._channel_name, index)
data = list()
return data
def _measurement_read_waveform_min_max(self, index, maximum_time):
return self._measurement_fetch_waveform_min_max(index)
def _get_trigger_continuous(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._trigger_continuous = self._ask(":acquire:stopafter?").lower() == 'runstop'
self._set_cache_valid()
return self._trigger_continuous
def _set_trigger_continuous(self, value):
value = bool(value)
if not self._driver_operation_simulate:
if value:
self._write(":acquire:stopafter runstop")
self._write(":acquire:state run")
else:
self._write(":acquire:stopafter sequence")
self._trigger_continuous = value
self._set_cache_valid()
def _get_acquisition_number_of_averages(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_number_of_averages = int(self._ask(":acquire:numavg?"))
self._set_cache_valid()
return self._acquisition_number_of_averages
def _set_acquisition_number_of_averages(self, value):
if value not in [2, 4, 8, 16, 32, 64, 128, 256, 512]:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write(":acquire:numavg %d" % value)
self._acquisition_number_of_averages = value
self._set_cache_valid()
def _get_trigger_modifier(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":trigger:a:mode?").lower()
self._trigger_modifier = [k for k,v in TriggerModifierMapping.items() if v==value][0]
self._set_cache_valid()
return self._trigger_modifier
def _set_trigger_modifier(self, value):
if value not in TriggerModifierMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:a:mode %s" % TriggerModifierMapping[value])
self._trigger_modifier = value
self._set_cache_valid()
def _measurement_auto_setup(self):
if not self._driver_operation_simulate:
self._write(":autoset execute")
| 43.996603
| 158
| 0.623257
|
e36ad755e4f82ff66aa727d3a110dfda597c1faf
| 1,960
|
py
|
Python
|
tornado_ipware/ip.py
|
lumbrefrio/tornado-ipware
|
a557c4a57c6354a6db61c45c3f6bce4ae34c2783
|
[
"MIT"
] | null | null | null |
tornado_ipware/ip.py
|
lumbrefrio/tornado-ipware
|
a557c4a57c6354a6db61c45c3f6bce4ae34c2783
|
[
"MIT"
] | null | null | null |
tornado_ipware/ip.py
|
lumbrefrio/tornado-ipware
|
a557c4a57c6354a6db61c45c3f6bce4ae34c2783
|
[
"MIT"
] | null | null | null |
from . import utils as util
from . import defaults as defs
def get_client_ip(
request,
proxy_order='left-most',
proxy_count=None,
proxy_trusted_ips=None,
request_header_order=None,
):
client_ip = None
routable = False
if proxy_count is None:
proxy_count = -1
if proxy_trusted_ips is None:
proxy_trusted_ips = []
if request_header_order is None:
request_header_order = defs.IPWARE_META_PRECEDENCE_ORDER
for key in request_header_order:
value = util.get_request_header(request, key)
if value:
ips, ip_count = util.get_ips_from_string(value)
if ip_count < 1:
# we are expecting at least one IP address to process
continue
if proxy_count == 0 and ip_count > 1:
# we are not expecting requests via any proxies
continue
if proxy_count > 0 and proxy_count != ip_count - 1:
# we are expecting requests via `proxy_count` number of proxies
continue
if proxy_trusted_ips and ip_count < 2:
# we are expecting requests via at least one trusted proxy
continue
if proxy_order == 'right-most' and ip_count > 1:
                # the header lists addresses in the custom `<proxy2>, <proxy1>, <client>`
                # order, so reverse it back to the usual left-most-client order
ips.reverse()
if proxy_trusted_ips:
for proxy in proxy_trusted_ips:
if proxy in ips[-1]:
client_ip, routable = util.get_ip_info(ips[0])
if client_ip and routable:
return client_ip, routable
else:
client_ip, routable = util.get_ip_info(util.get_best_ip(client_ip, ips[0]))
if client_ip and routable:
return client_ip, routable
return client_ip, routable
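# Worked example of the chain-length rules applied above (a sketch only; the
# helper below is hypothetical and deliberately bypasses util.* so it can run
# on a plain header string).
def _explain_forwarded_for(value, proxy_count=None):
    # "client, proxy1, proxy2": the left-most entry is the claimed client.
    ips = [ip.strip() for ip in value.split(',') if ip.strip()]
    if not ips:
        return None
    if proxy_count is not None and proxy_count != len(ips) - 1:
        # mirrors the check above: the chain must contain exactly
        # `proxy_count` proxies after the client address
        return None
    return ips[0]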
| 32.131148
| 106
| 0.578061
|
a3125dc4e4024c4f1f946d3877005b9d61dc4aad
| 9,058
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/arrays/integer/test_dtypes.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/arrays/integer/test_dtypes.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/arrays/integer/test_dtypes.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import Int8Dtype, UInt32Dtype
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
if op in {"sum", "prod", "min", "max"}:
assert isinstance(result, np.int64)
else:
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_copy():
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
# copy=True -> ensure both data and mask are actual copies
result = arr.astype("Int64", copy=True)
assert result is not arr
assert not np.shares_memory(result._data, arr._data)
assert not np.shares_memory(result._mask, arr._mask)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
# copy=False
result = arr.astype("Int64", copy=False)
assert result is arr
assert np.shares_memory(result._data, arr._data)
assert np.shares_memory(result._mask, arr._mask)
result[0] = 10
assert arr[0] == 10
result[0] = pd.NA
assert arr[0] is pd.NA
# astype to different dtype -> always needs a copy -> even with copy=False
# we need to ensure that also the mask is actually copied
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
result = arr.astype("Int32", copy=False)
assert not np.shares_memory(result._data, arr._data)
assert not np.shares_memory(result._mask, arr._mask)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
def test_astype_to_larger_numpy():
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_astype_dt64():
# GH#32435
arr = pd.array([1, 2, 3, pd.NA]) * 10 ** 9
result = arr.astype("datetime64[ns]")
expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_construct_cast_invalid(dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str():
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype="<U21")
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean():
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
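# Quick illustration of the behaviour exercised above (a sketch, not a test):
# nullable integer arrays keep a data buffer plus a boolean mask, so converting
# to a dtype that cannot hold pd.NA needs an explicit na_value, while an astype
# to the same dtype with copy=False returns the array itself.
def _demo_nullable_int():
    arr = pd.array([1, 2, None], dtype="Int64")
    as_float = arr.to_numpy(dtype="float64", na_value=np.nan)  # [1.0, 2.0, nan]
    same_obj = arr.astype("Int64", copy=False)                 # same object as arr
    return as_float, same_obj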
| 31.234483
| 82
| 0.631817
|
8d0cb5f5e0a0c7e6d66508db95ce15565aa33bb2
| 685
|
py
|
Python
|
test/mac/gyptest-postbuild-defaults.py
|
Jet-Streaming/gyp
|
6488d24fc7bdd8bc563998d77d9a0950781bbaf5
|
[
"BSD-3-Clause"
] | null | null | null |
test/mac/gyptest-postbuild-defaults.py
|
Jet-Streaming/gyp
|
6488d24fc7bdd8bc563998d77d9a0950781bbaf5
|
[
"BSD-3-Clause"
] | null | null | null |
test/mac/gyptest-postbuild-defaults.py
|
Jet-Streaming/gyp
|
6488d24fc7bdd8bc563998d77d9a0950781bbaf5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a postbuild invoking |defaults| works.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'postbuild-defaults'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
result_file = test.built_file_path('result', chdir=CHDIR)
test.must_exist(result_file)
test.must_contain(result_file, '''\
Test
${PRODUCT_NAME}
''')
test.pass_test()
| 22.833333
| 73
| 0.678832
|
c8459be99440754474e0b768f4032e4902442a8b
| 408
|
py
|
Python
|
legacy/test_actor.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 7
|
2020-09-28T23:36:40.000Z
|
2022-02-22T02:00:32.000Z
|
legacy/test_actor.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 4
|
2020-11-13T18:48:52.000Z
|
2022-02-10T01:29:47.000Z
|
legacy/test_actor.py
|
lzcaisg/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 3
|
2020-11-23T17:31:59.000Z
|
2021-04-08T10:55:03.000Z
|
from Actor import Actor
import datetime
actor = Actor(todayDate=datetime.datetime(2016, 12, 10),
startDate=datetime.datetime(2016, 9, 10),
etfList=["FTSE 100", "Nasdaq 100", "Shanghai Composite"],
initValue=1000000)
print(actor.etfLocalMinMaxDict['FTSE 100']['max']["180"].to_string())
print(actor.etfLocalMinMaxDict['Shanghai Composite']['min']["60"].to_string())
| 40.8
| 78
| 0.671569
|
2ecae0fc450a182b8c5bf47f49f86b60f1dcfda7
| 6,613
|
py
|
Python
|
Lib/test/test_resource.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 1,872
|
2015-01-02T18:56:47.000Z
|
2022-03-31T07:34:39.000Z
|
Lib/test/test_resource.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 675
|
2015-02-27T09:01:01.000Z
|
2022-03-31T14:03:25.000Z
|
Lib/test/test_resource.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | 278
|
2015-01-02T03:48:20.000Z
|
2022-03-29T20:40:44.000Z
|
import contextlib
import sys
import os
import unittest
from test import support
import time
resource = support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
def test_args(self):
self.assertRaises(TypeError, resource.getrlimit)
self.assertRaises(TypeError, resource.getrlimit, 42, 42)
self.assertRaises(TypeError, resource.setrlimit)
self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
def test_fsize_ismax(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
# number on a platform with large file support. On these platforms,
# we need to test that the get/setrlimit functions properly convert
# the number to a C long long and that the conversion doesn't raise
# an error.
self.assertEqual(resource.RLIM_INFINITY, max)
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# Check to see what happens when the RLIMIT_FSIZE is small. Some
# versions of Python were terminated by an uncaught SIGXFSZ, but
# pythonrun.c has been fixed to ignore that exception. If so, the
# write() should return EFBIG when the limit is exceeded.
# At least one platform has an unlimited RLIMIT_FSIZE and attempts
# to change it raise ValueError instead.
try:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
limit_set = True
except ValueError:
limit_set = False
f = open(support.TESTFN, "wb")
try:
f.write(b"X" * 1024)
try:
f.write(b"Y")
f.flush()
# On some systems (e.g., Ubuntu on hppa) the flush()
# doesn't always cause the exception, but the close()
# does eventually. Try flushing several times in
# an attempt to ensure the file is really synced and
# the exception raised.
for i in range(5):
time.sleep(.1)
f.flush()
except OSError:
if not limit_set:
raise
if limit_set:
# Close will attempt to flush the byte we wrote
# Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
finally:
f.close()
finally:
if limit_set:
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
support.unlink(support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
too_big = 10**50
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
except (OverflowError, ValueError):
pass
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
except (OverflowError, ValueError):
pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
self.assertRaises(TypeError, resource.getrusage, 42, 42)
usageself = resource.getrusage(resource.RUSAGE_SELF)
usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
# May not be available on all systems.
try:
usageboth = resource.getrusage(resource.RUSAGE_BOTH)
except (ValueError, AttributeError):
pass
try:
usage_thread = resource.getrusage(resource.RUSAGE_THREAD)
except (ValueError, AttributeError):
pass
# Issue 6083: Reference counting bug
def test_setrusage_refcount(self):
try:
limits = resource.getrlimit(resource.RLIMIT_CPU)
except AttributeError:
pass
else:
class BadSequence:
def __len__(self):
return 2
def __getitem__(self, key):
if key in (0, 1):
return len(tuple(range(1000000)))
raise IndexError
resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
def test_pagesize(self):
pagesize = resource.getpagesize()
self.assertIsInstance(pagesize, int)
self.assertGreaterEqual(pagesize, 0)
@unittest.skipUnless(sys.platform == 'linux', 'test requires Linux')
def test_linux_constants(self):
for attr in ['MSGQUEUE', 'NICE', 'RTPRIO', 'RTTIME', 'SIGPENDING']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@support.requires_freebsd_version(9)
def test_freebsd_contants(self):
for attr in ['SWAP', 'SBSIZE', 'NPTS']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@unittest.skipUnless(hasattr(resource, 'prlimit'), 'no prlimit')
@support.requires_linux_version(2, 6, 36)
def test_prlimit(self):
self.assertRaises(TypeError, resource.prlimit)
if os.geteuid() != 0:
self.assertRaises(PermissionError, resource.prlimit,
1, resource.RLIMIT_AS)
self.assertRaises(ProcessLookupError, resource.prlimit,
-1, resource.RLIMIT_AS)
limit = resource.getrlimit(resource.RLIMIT_AS)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS), limit)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS, limit),
limit)
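# Illustrative sketch (not part of the test suite): the same getrlimit/setrlimit
# calls exercised above let a process raise its own soft limit up to the hard
# limit.  RLIMIT_NOFILE is assumed to exist, which holds on Linux and the BSDs;
# some platforms reject an unlimited soft value, so treat this as a sketch.
def _raise_soft_nofile_limit():
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # An unprivileged process may raise the soft limit, but only up to the
    # current hard limit; anything beyond that raises ValueError.
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    return resource.getrlimit(resource.RLIMIT_NOFILE)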
def test_main(verbose=None):
support.run_unittest(ResourceTest)
if __name__ == "__main__":
test_main()
| 39.598802
| 80
| 0.576743
|
ea06631a8b86dbab76914971df02be27908be89a
| 13,477
|
py
|
Python
|
amazonadapi/amazonclient.py
|
barce/amazonadapi
|
76558774f26a6d1e2b496b9cbc0be1c603c85346
|
[
"MIT"
] | 3
|
2018-10-17T19:06:17.000Z
|
2021-04-04T13:52:39.000Z
|
amazonadapi/amazonclient.py
|
barce/amazonadapi
|
76558774f26a6d1e2b496b9cbc0be1c603c85346
|
[
"MIT"
] | 5
|
2018-09-12T20:07:40.000Z
|
2018-10-15T16:34:52.000Z
|
amazonadapi/amazonclient.py
|
barce/amazonadapi
|
76558774f26a6d1e2b496b9cbc0be1c603c85346
|
[
"MIT"
] | 3
|
2018-02-05T21:04:28.000Z
|
2018-10-27T09:45:56.000Z
|
#!/usr/bin/env python
from future.standard_library import install_aliases
install_aliases()
import json
import jwt
import requests
import time
import os
import sys
import re
from urllib3._collections import HTTPHeaderDict
use_environment_variables = None
try:
from django.conf import settings
except ImportError:
use_environment_variables = True
class AmazonOrder:
id = None
advertiserId = None
name = None
startDateTime = None
endDateTime = None
status = None
budget = {}
deliveryCaps = []
def __init__(self):
self.status = 'INACTIVE'
class AmazonLineItem:
id = None
orderId = None
advertiserId = None
name = None
type = None
startDateTime = None
endDateTime = None
status = None
budget = {}
deliveryCaps = []
def __init__(self):
self.status = 'INACTIVE'
class AmazonClient:
client_id = None
client_secret = None
api_key = None
id_host = None
one_host = None
aud = None
payload = None
encoded_payload = None
oauth_url = None
payload_url = None
headers = None
authorized_headers = None
token = None
refresh_token = None
profile_id = None
region = None
region_list = {}
host = None
data = None
page_token = None
page_size = None
next_page_url = None
redirect_uri = None
def __init__(self):
self.client_id = os.environ['AMZN_AD_CLIENT_ID']
self.client_secret = os.environ['AMZN_AD_CLIENT_SECRET']
# self.auth_url = "https://www.amazon.com/ap/oa?client_id=" + self.client_id + "&scope=advertising::campaign_management&repsonse_type=code&redirect_url=https%3A//www.accuenplatform.com/accounts/login/%3Fnext%3D/backstage/api/advertiser"
self.auth_url = os.environ['AMZN_AUTH_URL']
self.profile_id = os.environ['AMZN_DEFAULT_PROFILE_ID']
self.redirect_uri="https://www.accuenplatform.com/accounts/login/%3Fnext%3D/backstage/api/advertiser"
try:
self.refresh_token = os.environ['AMZN_REFRESH_TOKEN']
except KeyError as e:
print("error missing:")
print(e)
self.region_list = {"UK": "advertising-api-eu.amazon.com", "IN": "advertising-api-eu.amazon.com", "US": "advertising-api.amazon.com", "JP": "advertising-api-fe.amazon.com"}
try:
self.host = self.region_list[os.environ['AMZN_REGION']]
except KeyError as e:
self.host = 'advertising-api.amazon.com'
def connect(self):
get_token_url = "https://api.amazon.com/auth/o2/token"
payload = "grant_type=authorization_code&code=" + self.amzn_code + "&redirect_uri=" + self.redirect_uri + "&client_id=" + self.client_id + "&client_secret=" + self.client_secret
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
print(get_token_url)
print(payload)
print(headers)
r = requests.post(get_token_url, data=payload, headers=headers)
results_json = r.json()
return results_json
def get_amazon_auth_url(self):
print("Go to this URL:")
print(self.auth_url)
def cli_auth_dance(self):
self.get_amazon_auth_url()
if sys.version_info < (3, 0):
self.amzn_code = raw_input("Enter Amazon auth code: ")
else:
self.amzn_code = input("Enter Amazon auth code: ")
print("Auth code, {}, entered.".format(self.amzn_code))
self.raw_token_results = self.connect()
print("raw_token_results:")
print(self.raw_token_results)
self.token = self.raw_token_results['access_token']
self.refresh_token = self.raw_token_results['refresh_token']
profiles_json = self.get_profiles()
self.profile_id = str(profiles_json[0]['profileId'])
return self.token
def auto_refresh_token(self):
i_sentinel = 1
i_counter = 0
while i_sentinel > 0:
get_token_url = "https://api.amazon.com/auth/o2/token"
payload = "grant_type=refresh_token&client_id=" + self.client_id + "&client_secret=" + self.client_secret + "&refresh_token=" + self.refresh_token
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
r = requests.post(get_token_url, data=payload, headers=headers)
results_json = r.json()
if 'access_token' in results_json:
self.token = results_json['access_token']
return results_json
i_counter += 1
time.sleep(1)
if i_counter >= 5:
i_sentinel = 0
return results_json
def set_region(self, region='US'):
self.region = region
try:
self.host = self.region_list[region]
except KeyError as e:
self.host = self.region_list["US"]
self.region = "US"
return self.host
# curl -X GET -H "Content-Type:application/json" -H "Authorization: Bearer $AMZN_TOKEN" https://advertising-api.amazon.com/v1/profiles
def get_profiles(self):
url = "https://" + self.host + "/v1/profiles"
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token}
# r = requests.get(url, headers=headers)
# results_json = r.json()
r = self.make_request(url, headers, 'GET')
return r
# -H Authorization: Bearer self.token
# -H Host: advertising-api.amazon
# -H Amazon-Advertising-API-Scope: PROFILE_ID
# -H Content-Type: application/json
# url: https://advertising-api.amazon.com/da/v1/advertisers
def get_advertisers(self):
i_sentinel = 1
ids = []
response_json = {}
while i_sentinel == 1:
if self.page_token == None:
if self.page_size == None:
url = "https://" + self.host + "/da/v1/advertisers"
else:
url = "https://" + self.host + "/da/v1/advertisers?page_size=" + str(self.page_size)
self.page_size = None
else:
url = "https://" + self.host + "/da/v1/advertisers?page_token=" + self.page_token
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
r = self.make_request(url, headers, 'GET')
try:
print(r.headers['Link'])
self.next_page_url = r.headers['Link']
except:
i_sentinel = 0
if self.next_page_url != None:
p = re.compile('.*page_token=(.*)>')
matches = p.findall(self.next_page_url)
self.page_token = matches[0]
self.page_token = None
self.page_size = None
return r
# -H Authorization: Bearer self.token
# -H Host: advertising-api.amazon
# -H Amazon-Advertising-API-Scope: PROFILE_ID
# -H Content-Type: application/json
# url: https://advertising-api.amazon.com/da/v1/advertisers/AD_ID/orders
def get_orders(self, ad_id):
i_sentinel = 1
while i_sentinel == 1:
if self.page_token == None:
if self.page_size == None:
url = "https://" + self.host + "/da/v1/advertisers/" + str(ad_id) + "/orders"
else:
url = "https://" + self.host + "/da/v1/advertisers/" + str(ad_id) + "/orders?page_size=" + str(self.page_size)
self.page_size = None
else:
url = "https://" + self.host + "/da/v1/advertisers/" + str(ad_id) + "/orders?page_token=" + self.page_token
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
r = self.make_request(url, headers, 'GET')
try:
self.next_page_url = r.headers['Link']
except:
i_sentinel = 0
if self.next_page_url != None:
p = re.compile('.*page_token=(.*)>')
matches = p.findall(self.next_page_url)
self.page_token = matches[0]
self.page_token = None
self.page_size = None
return r
# -H Authorization: Bearer self.token
# -H Host: advertising-api.amazon
# -H Amazon-Advertising-API-Scope: PROFILE_ID
# -H Content-Type: application/json
# url: https://advertising-api.amazon.com/da/v1/orders/ORDER_ID
def get_order(self, order_id):
url = "https://" + self.host + "/da/v1/orders/" + order_id
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
r = self.make_request(url, headers, 'GET')
return r
# -H Authorization: Bearer self.token
# -H Host: advertising-api.amazon
# -H Amazon-Advertising-API-Scope: PROFILE_ID
# -H Content-Type: application/json
# url: https://advertising-api.amazon.com/da/v1/orders/ORDER_ID/line-items
def get_line_items(self, order_id):
i_sentinel = 1
ids = []
response_json = {}
while i_sentinel == 1:
if self.page_token == None:
if self.page_size == None:
url = "https://" + self.host + "/da/v1/orders/" + order_id + "/line-items"
else:
url = "https://" + self.host + "/da/v1/orders/" + order_id + "/line-items?page_size=" + str(self.page_size)
self.page_size = None
else:
url = "https://" + self.host + "/da/v1/orders/" + order_id + "/line-items?page_token=" + self.page_token
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
r = self.make_request(url, headers, 'GET')
try:
self.next_page_url = r.headers['Link']
except:
i_sentinel = 0
if self.next_page_url != None:
p = re.compile('.*page_token=(.*)>')
matches = p.findall(self.next_page_url)
self.page_token = matches[0]
self.page_token = None
self.page_size = None
return r
def get_line_item(self, line_item_id):
url = "https://" + self.host + "/da/v1/line-items/" + line_item_id
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
r = self.make_request(url, headers, 'GET')
return r
def create_order(self, order):
url = "https://" + self.host + "/da/v1/orders"
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
self.data = order
r = self.make_request(url, headers, 'POST', self.data)
return r
def update_order(self, order):
url = "https://" + self.host + "/da/v1/orders"
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
self.data = order
r = self.make_request(url, headers, 'PUT', self.data)
return r
def create_line_item(self, line_item):
url = "https://" + self.host + "/da/v1/line-items"
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
self.data = line_item
r = self.make_request(url, headers, 'POST', self.data)
return r
def update_line_item(self, line_item):
# url = "https://" + self.host + "/da/v1/line-items/" + line_item.id # <-- expected behavior for update
url = "https://" + self.host + "/da/v1/line-items"
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'Host': self.host, 'Amazon-Advertising-API-Scope': self.profile_id}
self.data = line_item
r = self.make_request(url, headers, 'PUT', self.data)
return r
def error_check_json(self, results_json):
# if results_json['error']['httpStatusCode'] in ['401'] or results_json['code'] in ['401']:
refresh_results_json = self.auto_refresh_token()
return refresh_results_json
# create response_json method to abstract away the creation of return response that matt wants
def generate_json_response(self, r, results_json, request_body):
response_json = {
'response_code': r.status_code,
'request_body': request_body
}
# if request is successful, ensure msg_type is success
if r.status_code in [200, 201]:
response_json['msg_type'] = 'success'
response_json['msg'] = ''
response_json['data'] = results_json
else:
response_json['msg_type'] = 'error'
# display the error message that comes back from request
response_json['msg'] = results_json['error']
response_json['data'] = results_json['error']
return response_json
# make_request(method_type) --> pass in method_type
def make_request(self, url, headers, method_type, data=None):
request_body = url, headers, data
r, results_json = self.make_new_request(url, self.token, method_type, headers, data)
if r.status_code in [400, 401]:
# refresh access token
self.token = self.error_check_json(results_json)['access_token']
# apply headers with new token, return response and response dict
r, results_json = self.make_new_request(url, self.token, method_type, headers)
# use results_json to create updated json dict
response_json = self.generate_json_response(r, results_json, request_body)
return json.dumps(response_json)
def make_new_request(self, url, token, method_type, headers, data=None):
# modify headers with new access token
headers['Authorization'] = 'Bearer ' + token
if method_type == 'GET':
r = requests.get(url, headers=headers)
if method_type == 'POST':
r = requests.post(url, headers=headers, verify=False, data=json.dumps(data))
if method_type == 'PUT':
r = requests.put(url, headers=headers, verify=False, data=json.dumps(data))
results_json = r.json()
return r, results_json
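# Minimal usage sketch (illustrative only, not part of the library).  It assumes
# the AMZN_* environment variables read in AmazonClient.__init__ -- including
# AMZN_REFRESH_TOKEN -- are set.  Note that make_request() returns a JSON
# *string*, so responses are decoded with json.loads before use.
if __name__ == "__main__":
    client = AmazonClient()
    client.set_region("US")
    client.auto_refresh_token()                   # trade the refresh token for an access token
    profiles = json.loads(client.get_profiles())  # decode the JSON string returned by make_request
    print(profiles)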
| 34.468031
| 240
| 0.662759
|
d33cd77ce18a1ed2136790689ce81045bcd3c7ce
| 758
|
py
|
Python
|
chapter-09/Exercise_9_9.py
|
yuetsin/CS-902
|
ae945f55944830ed3c6e4d71cfff2798cdf571ac
|
[
"MIT"
] | 1
|
2019-05-16T11:00:40.000Z
|
2019-05-16T11:00:40.000Z
|
chapter-09/Exercise_9_9.py
|
yuetsin/CS-902
|
ae945f55944830ed3c6e4d71cfff2798cdf571ac
|
[
"MIT"
] | null | null | null |
chapter-09/Exercise_9_9.py
|
yuetsin/CS-902
|
ae945f55944830ed3c6e4d71cfff2798cdf571ac
|
[
"MIT"
] | null | null | null |
# Exercise 9.9
from random import random
from math import sin, cos, pi
def oneStep():
    stepAngle = random() * 2 * pi   # uniform random heading in [0, 2*pi)
return (cos(stepAngle), sin(stepAngle))
def goHiking(steps):
Xpos = Ypos = 0
for i in range(steps):
Movement = oneStep()
Xpos += Movement[0]
Ypos += Movement[1]
return (Xpos, Ypos)
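# Optional sanity check (a sketch, not part of the exercise): for a unit-step
# random walk the root-mean-square distance from the origin after `steps` steps
# is about sqrt(steps), even though the averaged endpoint printed by main()
# drifts toward (0, 0) as more hikes are averaged.
def rmsDistance(trials, steps):
    total = 0.0
    for i in range(trials):
        x, y = goHiking(steps)
        total += x * x + y * y
    return (total / trials) ** 0.5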
def main():
times = input("How many times do you want to go hiking? >>> ")
steps = input("How many steps do you want to go hiking? >>> ")
totalX = totalY = 0.0
move = goHiking(steps)
for i in range(times):
totalX += move[0]
totalY += move[1]
print "Final destination will be about", totalX / times, totalY / times
if __name__ == '__main__':
main()
| 24.451613 | 75 | 0.612137 |
195370e5cba51682190098714416dac29f8693ad | 182 | py | Python | tests/testapp/application_urls.py | barseghyanartur/feincms3-language-sites | f02e46e895a8c0d52b0bdad60352926073ffbd4b | ["BSD-3-Clause"] | 2 | 2021-09-14T07:23:32.000Z | 2021-10-03T12:15:11.000Z | tests/testapp/application_urls.py | barseghyanartur/feincms3-language-sites | f02e46e895a8c0d52b0bdad60352926073ffbd4b | ["BSD-3-Clause"] | null | null | null | tests/testapp/application_urls.py | barseghyanartur/feincms3-language-sites | f02e46e895a8c0d52b0bdad60352926073ffbd4b | ["BSD-3-Clause"] | 1 | 2021-09-14T07:23:34.000Z | 2021-09-14T07:23:34.000Z |
from django.http import HttpResponse
from django.urls import path
app_name = "application"
urlpatterns = [
path("", lambda request: HttpResponse(request.path), name="root"),
]
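
# Hedged usage sketch (not part of this test app): a project-level urlconf would
# typically mount these routes with include(); the "app/" prefix is hypothetical:
#
#   from django.urls import include, path
#   urlpatterns = [
#       path("app/", include("testapp.application_urls")),
#   ]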
| 20.222222 | 70 | 0.736264 |
d6fecc0794f5810a2b814da663443fa75093ad7f | 88,971 | py | Python | tests/integration/awslambda/test_lambda.py | TheRakeshPurohit/localstack | 063e07827934a7c7ff00e6d7cf6e243bcce0eb99 | ["Apache-2.0"] | null | null | null | tests/integration/awslambda/test_lambda.py | TheRakeshPurohit/localstack | 063e07827934a7c7ff00e6d7cf6e243bcce0eb99 | ["Apache-2.0"] | null | null | null | tests/integration/awslambda/test_lambda.py | TheRakeshPurohit/localstack | 063e07827934a7c7ff00e6d7cf6e243bcce0eb99 | ["Apache-2.0"] | null | null | null |
import base64
import json
import logging
import os
import re
import shutil
import time
from io import BytesIO
from typing import Dict, TypeVar
import pytest
from botocore.exceptions import ClientError
from botocore.response import StreamingBody
from localstack.constants import LAMBDA_TEST_ROLE, TEST_AWS_ACCOUNT_ID
from localstack.services.awslambda import lambda_api
from localstack.services.awslambda.lambda_api import (
LAMBDA_DEFAULT_HANDLER,
get_lambda_policy_name,
use_docker,
)
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_DOTNET6,
LAMBDA_RUNTIME_DOTNETCORE31,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_NODEJS12X,
LAMBDA_RUNTIME_NODEJS14X,
LAMBDA_RUNTIME_PROVIDED,
LAMBDA_RUNTIME_PROVIDED_AL2,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_PYTHON37,
LAMBDA_RUNTIME_PYTHON38,
LAMBDA_RUNTIME_PYTHON39,
LAMBDA_RUNTIME_RUBY27,
)
from localstack.services.install import (
GO_RUNTIME_VERSION,
INSTALL_PATH_LOCALSTACK_FAT_JAR,
TEST_LAMBDA_JAVA,
download_and_extract,
)
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
cp_r,
get_arch,
get_os,
load_file,
mkdir,
new_tmp_dir,
retry,
run_safe,
save_file,
short_uid,
to_bytes,
to_str,
unzip,
)
from localstack.utils.generic.wait_utils import wait_until
from localstack.utils.sync import poll_condition
from localstack.utils.testutil import (
check_expected_lambda_log_events_length,
create_lambda_archive,
get_lambda_log_events,
)
from .functions import lambda_integration
from .lambda_test_util import concurrency_update_done, get_invoke_init_type, update_done
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, "functions/lambda_integration.py")
TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, "functions/lambda_echo.py")
TEST_LAMBDA_PYTHON_VERSION = os.path.join(THIS_FOLDER, "functions/lambda_python_version.py")
TEST_LAMBDA_PYTHON_UNHANDLED_ERROR = os.path.join(
THIS_FOLDER, "functions/lambda_unhandled_error.py"
)
TEST_LAMBDA_PYTHON3 = os.path.join(THIS_FOLDER, "functions/lambda_python3.py")
TEST_LAMBDA_INTEGRATION_NODEJS = os.path.join(THIS_FOLDER, "functions/lambda_integration.js")
TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, "functions/lambda_handler.js")
TEST_LAMBDA_GOLANG_ZIP = os.path.join(THIS_FOLDER, "functions/golang/handler.zip")
TEST_LAMBDA_RUBY = os.path.join(THIS_FOLDER, "functions/lambda_integration.rb")
TEST_LAMBDA_DOTNETCORE2 = os.path.join(THIS_FOLDER, "functions/dotnetcore2/dotnetcore2.zip")
TEST_LAMBDA_DOTNETCORE31 = os.path.join(THIS_FOLDER, "functions/dotnetcore31/dotnetcore31.zip")
TEST_LAMBDA_DOTNET6 = os.path.join(THIS_FOLDER, "functions/dotnet6/dotnet6.zip")
TEST_LAMBDA_CUSTOM_RUNTIME = os.path.join(THIS_FOLDER, "functions/custom-runtime")
TEST_LAMBDA_HTTP_RUST = os.path.join(THIS_FOLDER, "functions/rust-lambda/function.zip")
TEST_LAMBDA_JAVA_WITH_LIB = os.path.join(
THIS_FOLDER, "functions/java/lambda_echo/lambda-function-with-lib-0.0.1.jar"
)
TEST_LAMBDA_JAVA_MULTIPLE_HANDLERS = os.path.join(
THIS_FOLDER,
"functions",
"java",
"lambda_multiple_handlers",
"build",
"distributions",
"lambda-function-with-multiple-handlers.zip",
)
TEST_LAMBDA_ENV = os.path.join(THIS_FOLDER, "functions/lambda_environment.py")
TEST_LAMBDA_SEND_MESSAGE_FILE = os.path.join(THIS_FOLDER, "functions/lambda_send_message.py")
TEST_LAMBDA_PUT_ITEM_FILE = os.path.join(THIS_FOLDER, "functions/lambda_put_item.py")
TEST_LAMBDA_START_EXECUTION_FILE = os.path.join(THIS_FOLDER, "functions/lambda_start_execution.py")
TEST_LAMBDA_FUNCTION_PREFIX = "lambda-function"
TEST_GOLANG_LAMBDA_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/example-handler-{os}-{arch}.tar.gz"
TEST_LAMBDA_LIBS = [
"requests",
"psutil",
"urllib3",
"chardet",
"certifi",
"idna",
"pip",
"dns",
]
PYTHON_TEST_RUNTIMES = (
[
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_PYTHON37,
LAMBDA_RUNTIME_PYTHON38,
LAMBDA_RUNTIME_PYTHON39,
]
if use_docker()
else [LAMBDA_RUNTIME_PYTHON38]
)
NODE_TEST_RUNTIMES = (
[
LAMBDA_RUNTIME_NODEJS12X,
LAMBDA_RUNTIME_NODEJS14X,
]
if use_docker()
else [LAMBDA_RUNTIME_NODEJS14X]
)
JAVA_TEST_RUNTIMES = (
[
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
]
if use_docker()
else [LAMBDA_RUNTIME_JAVA11]
)
def is_old_provider():
return (
os.environ.get("TEST_TARGET") != "AWS_CLOUD"
and os.environ.get("PROVIDER_OVERRIDE_LAMBDA") != "asf"
)
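
# Hedged note (not in the original file): the helper above treats the run as
# targeting the legacy Lambda provider whenever the suite is neither pointed at
# real AWS (TEST_TARGET=AWS_CLOUD) nor at the ASF provider
# (PROVIDER_OVERRIDE_LAMBDA=asf); tests use it roughly like this:
#
#   @pytest.mark.skipif(not is_old_provider(), reason="only valid on the legacy provider")
#   def test_something(...): ...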
PROVIDED_TEST_RUNTIMES = [
LAMBDA_RUNTIME_PROVIDED,
# TODO remove skip once we use correct images
pytest.param(
LAMBDA_RUNTIME_PROVIDED_AL2,
marks=pytest.mark.skipif(
is_old_provider(), reason="curl missing in provided.al2 lambci image"
),
),
]
T = TypeVar("T")
def read_streams(payload: T) -> T:
new_payload = {}
for k, v in payload.items():
if isinstance(v, Dict):
new_payload[k] = read_streams(v)
elif isinstance(v, StreamingBody):
new_payload[k] = to_str(v.read())
else:
new_payload[k] = v
return new_payload
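
# Hedged illustration (not in the original file): read_streams converts any
# botocore StreamingBody values in an invoke() response into plain strings so
# the result can be JSON-decoded or snapshotted. Assuming a boto3 `lambda_client`:
#
#   raw = lambda_client.invoke(FunctionName="my-func", Payload=b"{}")
#   readable = read_streams(raw)              # "Payload" is now a str, not a stream
#   body = json.loads(readable["Payload"])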
# API only functions (no lambda execution itself)
class TestLambdaAPI:
@pytest.mark.only_localstack
def test_create_lambda_function(self, lambda_client):
"""Basic test that creates and deletes a Lambda function"""
func_name = f"lambda_func-{short_uid()}"
kms_key_arn = f"arn:{aws_stack.get_partition()}:kms:{aws_stack.get_region()}:{TEST_AWS_ACCOUNT_ID}:key11"
vpc_config = {
"SubnetIds": ["subnet-123456789"],
"SecurityGroupIds": ["sg-123456789"],
}
tags = {"env": "testing"}
kwargs = {
"FunctionName": func_name,
"Runtime": LAMBDA_RUNTIME_PYTHON37,
"Handler": LAMBDA_DEFAULT_HANDLER,
"Role": LAMBDA_TEST_ROLE,
"KMSKeyArn": kms_key_arn,
"Code": {
"ZipFile": create_lambda_archive(
load_file(TEST_LAMBDA_PYTHON_ECHO), get_content=True
)
},
"Timeout": 3,
"VpcConfig": vpc_config,
"Tags": tags,
"Environment": {"Variables": {"foo": "bar"}},
}
result = lambda_client.create_function(**kwargs)
function_arn = result["FunctionArn"]
assert testutil.response_arn_matches_partition(lambda_client, function_arn)
partial_function_arn = ":".join(function_arn.split(":")[3:])
# Get function by Name, ARN and partial ARN
for func_ref in [func_name, function_arn, partial_function_arn]:
rs = lambda_client.get_function(FunctionName=func_ref)
assert rs["Configuration"].get("KMSKeyArn", "") == kms_key_arn
assert rs["Configuration"].get("VpcConfig", {}) == vpc_config
assert rs["Tags"] == tags
# clean up
lambda_client.delete_function(FunctionName=func_name)
with pytest.raises(Exception) as exc:
lambda_client.delete_function(FunctionName=func_name)
assert "ResourceNotFoundException" in str(exc)
@pytest.mark.skip_snapshot_verify
def test_add_lambda_permission_aws(
self, lambda_client, iam_client, create_lambda_function, account_id, snapshot
):
"""Testing the add_permission call on lambda, by adding a new resource-based policy to a lambda function"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"lambda_func-{short_uid()}"
lambda_create_response = create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
lambda_arn = lambda_create_response["CreateFunctionResponse"]["FunctionArn"]
snapshot.match("create_lambda", lambda_create_response)
# create lambda permission
action = "lambda:InvokeFunction"
sid = "s3"
principal = "s3.amazonaws.com"
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
snapshot.match("add_permission", resp)
# fetch lambda policy
get_policy_result = lambda_client.get_policy(FunctionName=function_name)
snapshot.match("get_policy", get_policy_result)
assert lambda_arn == json.loads(get_policy_result["Policy"])["Statement"][0]["Resource"]
# TODO permissions cannot be added to $LATEST
@pytest.mark.skipif(
not is_old_provider(), reason="test does not make valid assertions against AWS"
)
def test_add_lambda_permission(self, lambda_client, iam_client, create_lambda_function):
function_name = f"lambda_func-{short_uid()}"
lambda_create_response = create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
lambda_arn = lambda_create_response["CreateFunctionResponse"]["FunctionArn"]
# create lambda permission
action = "lambda:InvokeFunction"
sid = "s3"
principal = "s3.amazonaws.com"
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
# fetch lambda policy
policy = lambda_client.get_policy(FunctionName=function_name)["Policy"]
assert isinstance(policy, str)
policy = json.loads(to_str(policy))
assert action == policy["Statement"][0]["Action"]
assert sid == policy["Statement"][0]["Sid"]
assert lambda_arn == policy["Statement"][0]["Resource"]
assert principal == policy["Statement"][0]["Principal"]["Service"]
assert (
aws_stack.s3_bucket_arn("test-bucket")
== policy["Statement"][0]["Condition"]["ArnLike"]["AWS:SourceArn"]
)
# fetch IAM policy
# this is not a valid assertion in general (especially against AWS)
policies = iam_client.list_policies(Scope="Local", MaxItems=500)["Policies"]
policy_name = get_lambda_policy_name(function_name)
matching = [p for p in policies if p["PolicyName"] == policy_name]
assert len(matching) == 1
assert ":policy/" in matching[0]["Arn"]
# remove permission that we just added
resp = lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid,
Qualifier="qual1",
RevisionId="r1",
)
assert 200 == resp["ResponseMetadata"]["HTTPStatusCode"]
@pytest.mark.skip_snapshot_verify
def test_remove_multi_permissions(self, lambda_client, create_lambda_function, snapshot):
"""Tests creation and subsequent removal of multiple permissions, including the changes in the policy"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
action = "lambda:InvokeFunction"
sid = "s3"
principal = "s3.amazonaws.com"
permission_1_add = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
)
snapshot.match("add_permission_1", permission_1_add)
sid_2 = "sqs"
principal_2 = "sqs.amazonaws.com"
permission_2_add = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid_2,
Principal=principal_2,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
snapshot.match("add_permission_2", permission_2_add)
policy_response = lambda_client.get_policy(
FunctionName=function_name,
)
snapshot.match("policy_after_2_add", policy_response)
with pytest.raises(ClientError) as e:
lambda_client.remove_permission(
FunctionName=function_name,
StatementId="non-existent",
)
snapshot.match("expect_error_remove_permission", e.value.response)
assert e.value.response["Error"]["Code"] == "ResourceNotFoundException"
lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid_2,
)
policy = json.loads(
lambda_client.get_policy(
FunctionName=function_name,
)["Policy"]
)
snapshot.match("policy_after_removal", policy)
assert policy["Statement"][0]["Sid"] == sid
lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid,
)
with pytest.raises(ClientError) as ctx:
lambda_client.get_policy(FunctionName=function_name)
snapshot.match("expect_exception_get_policy", ctx.value.response)
assert ctx.value.response["Error"]["Code"] == "ResourceNotFoundException"
@pytest.mark.skipif(
not is_old_provider(), reason="test does not make valid assertions against AWS"
)
def test_add_lambda_multiple_permission(
self, iam_client, lambda_client, create_lambda_function
):
"""Test adding multiple permissions"""
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
# create lambda permissions
action = "lambda:InvokeFunction"
principal = "s3.amazonaws.com"
statement_ids = ["s4", "s5"]
for sid in statement_ids:
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
assert "Statement" in resp
# fetch IAM policy
# this is not a valid assertion in general (especially against AWS)
policies = iam_client.list_policies(Scope="Local", MaxItems=500)["Policies"]
policy_name = get_lambda_policy_name(function_name)
matching = [p for p in policies if p["PolicyName"] == policy_name]
assert 1 == len(matching)
assert ":policy/" in matching[0]["Arn"]
# validate both statements
policy = matching[0]
versions = iam_client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"]
assert 1 == len(versions)
statements = versions[0]["Document"]["Statement"]
for i in range(len(statement_ids)):
assert action == statements[i]["Action"]
assert lambda_api.func_arn(function_name) == statements[i]["Resource"]
assert principal == statements[i]["Principal"]["Service"]
assert (
aws_stack.s3_bucket_arn("test-bucket")
== statements[i]["Condition"]["ArnLike"]["AWS:SourceArn"]
)
# check statement_ids in reverse order
assert statement_ids[abs(i - 1)] == statements[i]["Sid"]
# remove permission that we just added
resp = lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid,
Qualifier="qual1",
RevisionId="r1",
)
assert 200 == resp["ResponseMetadata"]["HTTPStatusCode"]
@pytest.mark.skip_snapshot_verify
@pytest.mark.snapshot
def test_lambda_asynchronous_invocations(
self,
lambda_client,
create_lambda_function,
sqs_queue,
sqs_queue_arn,
lambda_su_role,
snapshot,
):
"""Testing API actions of function event config"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
role=lambda_su_role,
)
queue_arn = sqs_queue_arn(sqs_queue)
destination_config = {
"OnSuccess": {"Destination": queue_arn},
"OnFailure": {"Destination": queue_arn},
}
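        # Hedged note (not in the original test): both the OnSuccess and OnFailure
        # destinations point at the same SQS queue, so every asynchronous invocation
        # outcome ends up as a message on that queue.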
# adding event invoke config
response = lambda_client.put_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=2,
MaximumEventAgeInSeconds=123,
DestinationConfig=destination_config,
)
snapshot.match("put_function_event_invoke_config", response)
        # overwriting the event invoke config
response = lambda_client.put_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=2,
DestinationConfig=destination_config,
)
snapshot.match("put_function_event_invoke_config_overwritemaxeventage", response)
# updating event invoke config
response = lambda_client.update_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=1,
)
snapshot.match("put_function_event_invoke_config_maxattempt1", response)
# clean up
lambda_client.delete_function_event_invoke_config(FunctionName=function_name)
def test_function_concurrency(self, lambda_client, create_lambda_function, snapshot):
"""Testing the api of the put function concurrency action"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
# TODO botocore.errorfactory.InvalidParameterValueException:
# An error occurred (InvalidParameterValueException) when calling the PutFunctionConcurrency operation: Specified ReservedConcurrentExecutions for function decreases account's UnreservedConcurrentExecution below its minimum value of [50].
response = lambda_client.put_function_concurrency(
FunctionName=function_name, ReservedConcurrentExecutions=123
)
snapshot.match("put_function_concurrency", response)
assert "ReservedConcurrentExecutions" in response
response = lambda_client.get_function_concurrency(FunctionName=function_name)
snapshot.match("get_function_concurrency", response)
assert "ReservedConcurrentExecutions" in response
lambda_client.delete_function_concurrency(FunctionName=function_name)
@pytest.mark.skip_snapshot_verify
def test_function_code_signing_config(self, lambda_client, create_lambda_function, snapshot):
"""Testing the API of code signing config"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
response = lambda_client.create_code_signing_config(
Description="Testing CodeSigning Config",
AllowedPublishers={
"SigningProfileVersionArns": [
f"arn:aws:signer:{aws_stack.get_region()}:000000000000:/signing-profiles/test",
]
},
CodeSigningPolicies={"UntrustedArtifactOnDeployment": "Enforce"},
)
snapshot.match("create_code_signing_config", response)
assert "Description" in response["CodeSigningConfig"]
assert "SigningProfileVersionArns" in response["CodeSigningConfig"]["AllowedPublishers"]
assert (
"UntrustedArtifactOnDeployment" in response["CodeSigningConfig"]["CodeSigningPolicies"]
)
code_signing_arn = response["CodeSigningConfig"]["CodeSigningConfigArn"]
response = lambda_client.update_code_signing_config(
CodeSigningConfigArn=code_signing_arn,
CodeSigningPolicies={"UntrustedArtifactOnDeployment": "Warn"},
)
snapshot.match("update_code_signing_config", response)
assert (
"Warn"
== response["CodeSigningConfig"]["CodeSigningPolicies"]["UntrustedArtifactOnDeployment"]
)
response = lambda_client.get_code_signing_config(CodeSigningConfigArn=code_signing_arn)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
snapshot.match("get_code_signing_config", response)
response = lambda_client.put_function_code_signing_config(
CodeSigningConfigArn=code_signing_arn, FunctionName=function_name
)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
snapshot.match("put_function_code_signing_config", response)
response = lambda_client.get_function_code_signing_config(FunctionName=function_name)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
snapshot.match("get_function_code_signing_config", response)
assert code_signing_arn == response["CodeSigningConfigArn"]
assert function_name == response["FunctionName"]
response = lambda_client.delete_function_code_signing_config(FunctionName=function_name)
assert 204 == response["ResponseMetadata"]["HTTPStatusCode"]
response = lambda_client.delete_code_signing_config(CodeSigningConfigArn=code_signing_arn)
assert 204 == response["ResponseMetadata"]["HTTPStatusCode"]
# TODO not executed
def create_multiple_lambda_permissions(self, lambda_client, create_lambda_function, snapshot):
"""Test creating multiple lambda permissions and checking the policy"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
# FIXME no zip file/function?
create_lambda_function(
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON37,
libs=TEST_LAMBDA_LIBS,
)
action = "lambda:InvokeFunction"
sid = "logs"
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal="logs.amazonaws.com",
)
snapshot.match("add_permission_response_1", resp)
assert "Statement" in resp
sid = "kinesis"
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal="kinesis.amazonaws.com",
)
snapshot.match("add_permission_response_2", resp)
assert "Statement" in resp
policy_response = lambda_client.get_policy(
FunctionName=function_name,
)
snapshot.match("policy_after_2_add", policy_response)
class TestLambdaBaseFeatures:
@pytest.mark.skip_snapshot_verify
def test_dead_letter_queue(
self,
lambda_client,
create_lambda_function,
sqs_client,
sqs_create_queue,
sqs_queue_arn,
lambda_su_role,
snapshot,
):
"""Creates a lambda with a defined dead letter queue, and check failed lambda invocation leads to a message"""
# create DLQ and Lambda function
snapshot.add_transformer(snapshot.transform.lambda_api())
snapshot.add_transformer(snapshot.transform.sqs_api())
queue_name = f"test-{short_uid()}"
lambda_name = f"test-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
create_lambda_response = create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON,
func_name=lambda_name,
libs=TEST_LAMBDA_LIBS,
runtime=LAMBDA_RUNTIME_PYTHON36,
DeadLetterConfig={"TargetArn": queue_arn},
role=lambda_su_role,
)
snapshot.match("create_lambda_with_dlq", create_lambda_response)
# invoke Lambda, triggering an error
payload = {lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1}
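        # Hedged note (not in the original test): the raise-error flag makes the handler
        # fail, so the async invocation is retried and eventually routed to the DLQ.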
lambda_client.invoke(
FunctionName=lambda_name,
Payload=json.dumps(payload),
InvocationType="Event",
)
# assert that message has been received on the DLQ
def receive_dlq():
result = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
assert len(result["Messages"]) > 0
msg_attrs = result["Messages"][0]["MessageAttributes"]
assert "RequestID" in msg_attrs
assert "ErrorCode" in msg_attrs
assert "ErrorMessage" in msg_attrs
snapshot.match("sqs_dlq_message", result)
# on AWS, event retries can be quite delayed, so we have to wait up to 6 minutes here, potential flakes
retry(receive_dlq, retries=120, sleep=3)
# update DLQ config
update_function_config_response = lambda_client.update_function_configuration(
FunctionName=lambda_name, DeadLetterConfig={}
)
snapshot.match("delete_dlq", update_function_config_response)
# invoke Lambda again, assert that status code is 200 and error details contained in the payload
result = lambda_client.invoke(
FunctionName=lambda_name, Payload=json.dumps(payload), LogType="Tail"
)
result = read_streams(result)
payload = json.loads(to_str(result["Payload"]))
snapshot.match("result_payload", payload)
assert 200 == result["StatusCode"]
assert "Unhandled" == result["FunctionError"]
assert "$LATEST" == result["ExecutedVersion"]
assert "Test exception" in payload["errorMessage"]
assert "Exception" in payload["errorType"]
assert isinstance(payload["stackTrace"], list)
log_result = result.get("LogResult")
assert log_result
logs = to_str(base64.b64decode(to_str(log_result)))
assert "START" in logs
assert "Test exception" in logs
assert "END" in logs
assert "REPORT" in logs
@pytest.mark.parametrize(
"condition,payload",
[
("Success", {}),
("RetriesExhausted", {lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1}),
],
)
@pytest.mark.skip_snapshot_verify
def test_assess_lambda_destination_invocation(
self,
condition,
payload,
lambda_client,
sqs_client,
create_lambda_function,
sqs_create_queue,
sqs_queue_arn,
lambda_su_role,
snapshot,
):
        """Testing the destination config API and operation (for both the OnSuccess and OnFailure cases)"""
        snapshot.add_transformer(snapshot.transform.lambda_api())
        snapshot.add_transformer(snapshot.transform.sqs_api())
        # message body contains ARN
        snapshot.add_transformer(snapshot.transform.key_value("MD5OfBody"))
# create DLQ and Lambda function
queue_name = f"test-{short_uid()}"
lambda_name = f"test-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
queue_arn = sqs_queue_arn(queue_url)
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON,
func_name=lambda_name,
libs=TEST_LAMBDA_LIBS,
role=lambda_su_role,
)
put_event_invoke_config_response = lambda_client.put_function_event_invoke_config(
FunctionName=lambda_name,
DestinationConfig={
"OnSuccess": {"Destination": queue_arn},
"OnFailure": {"Destination": queue_arn},
},
)
snapshot.match("put_function_event_invoke_config", put_event_invoke_config_response)
lambda_client.invoke(
FunctionName=lambda_name,
Payload=json.dumps(payload),
InvocationType="Event",
)
def receive_message():
rs = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
assert len(rs["Messages"]) > 0
msg = rs["Messages"][0]["Body"]
msg = json.loads(msg)
assert condition == msg["requestContext"]["condition"]
snapshot.match("destination_message", rs)
retry(receive_message, retries=120, sleep=3)
@pytest.mark.skip_snapshot_verify(paths=["$..LogResult"])
def test_large_payloads(self, caplog, lambda_client, create_lambda_function, snapshot):
"""Testing large payloads sent to lambda functions (~5MB)"""
snapshot.add_transformer(snapshot.transform.lambda_api())
# Set the loglevel to INFO for this test to avoid breaking a CI environment (due to excessive log outputs)
caplog.set_level(logging.INFO)
function_name = f"large_payload-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
payload = {"test": "test123456" * 100 * 1000 * 5} # 5MB payload
payload_bytes = to_bytes(json.dumps(payload))
result = lambda_client.invoke(FunctionName=function_name, Payload=payload_bytes)
result = read_streams(result)
snapshot.match("invocation_response", result)
assert 200 == result["ResponseMetadata"]["HTTPStatusCode"]
result_data = result["Payload"]
result_data = json.loads(to_str(result_data))
assert payload == result_data
parametrize_python_runtimes = pytest.mark.parametrize(
"runtime",
PYTHON_TEST_RUNTIMES,
)
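
# Hedged note (not in the original file): applying this marker fans a test out over
# every configured Python runtime, e.g.
#
#   @parametrize_python_runtimes
#   def test_example(lambda_client, create_lambda_function, runtime):
#       ...  # runs once per entry in PYTHON_TEST_RUNTIMES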
class TestPythonRuntimes:
@pytest.fixture(
params=PYTHON_TEST_RUNTIMES,
)
def python_function_name(self, request, lambda_client, create_lambda_function):
function_name = f"python-test-function-{short_uid()}"
create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_PYTHON,
libs=TEST_LAMBDA_LIBS,
runtime=request.param,
)
return function_name
@pytest.mark.skip_snapshot_verify(
paths=["$..Payload.context.memory_limit_in_mb", "$..logs.logs"]
)
def test_invocation_type_not_set(self, lambda_client, python_function_name, snapshot):
"""Test invocation of a lambda with no invocation type set, but LogType="Tail""" ""
snapshot.add_transformer(snapshot.transform.lambda_api())
snapshot.add_transformer(
snapshot.transform.key_value("LogResult", reference_replacement=False)
)
result = lambda_client.invoke(
FunctionName=python_function_name, Payload=b"{}", LogType="Tail"
)
result = read_streams(result)
snapshot.match("invoke", result)
result_data = json.loads(result["Payload"])
# assert response details
assert 200 == result["StatusCode"]
assert {} == result_data["event"]
# assert that logs are contained in response
logs = result.get("LogResult", "")
logs = to_str(base64.b64decode(to_str(logs)))
snapshot.add_transformer(
snapshot.transform.regex(
re.compile(r"Duration: \d+(\.\d{2})? ms"), "Duration: <duration> ms"
)
)
snapshot.add_transformer(
snapshot.transform.regex(re.compile(r"Used: \d+ MB"), "Used: <memory> MB")
)
snapshot.match("logs", {"logs": logs})
assert "START" in logs
assert "Lambda log message" in logs
assert "END" in logs
assert "REPORT" in logs
@pytest.mark.skip_snapshot_verify(
paths=["$..LogResult", "$..Payload.context.memory_limit_in_mb"]
)
def test_invocation_type_request_response(self, lambda_client, python_function_name, snapshot):
"""Test invocation with InvocationType RequestResponse explicitely set"""
snapshot.add_transformer(snapshot.transform.lambda_api())
result = lambda_client.invoke(
FunctionName=python_function_name,
Payload=b"{}",
InvocationType="RequestResponse",
)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
result_data = json.loads(result_data)
assert "application/json" == result["ResponseMetadata"]["HTTPHeaders"]["content-type"]
assert 200 == result["StatusCode"]
assert isinstance(result_data, dict)
@pytest.mark.skip_snapshot_verify(paths=["$..LogResult", "$..ExecutedVersion"])
def test_invocation_type_event(self, lambda_client, python_function_name, snapshot):
"""Check invocation response for type event"""
snapshot.add_transformer(snapshot.transform.lambda_api())
result = lambda_client.invoke(
FunctionName=python_function_name, Payload=b"{}", InvocationType="Event"
)
result = read_streams(result)
snapshot.match("invoke-result", result)
assert 202 == result["StatusCode"]
@pytest.mark.skip_snapshot_verify(paths=["$..LogResult", "$..ExecutedVersion"])
def test_invocation_type_dry_run(self, lambda_client, python_function_name, snapshot):
"""Check invocation response for type dryrun"""
snapshot.add_transformer(snapshot.transform.lambda_api())
result = lambda_client.invoke(
FunctionName=python_function_name, Payload=b"{}", InvocationType="DryRun"
)
result = read_streams(result)
snapshot.match("invoke-result", result)
assert 204 == result["StatusCode"]
@parametrize_python_runtimes
@pytest.mark.skip_snapshot_verify
def test_lambda_environment(self, lambda_client, create_lambda_function, runtime, snapshot):
"""Tests invoking a lambda function with environment variables set on creation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"env-test-function-{short_uid()}"
env_vars = {"Hello": "World"}
creation_result = create_lambda_function(
handler_file=TEST_LAMBDA_ENV,
libs=TEST_LAMBDA_LIBS,
func_name=function_name,
envvars=env_vars,
runtime=runtime,
)
snapshot.match("creation-result", creation_result)
# invoke function and assert result contains env vars
result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
result = read_streams(result)
snapshot.match("invocation-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert json.loads(result_data) == env_vars
# get function config and assert result contains env vars
result = lambda_client.get_function_configuration(FunctionName=function_name)
snapshot.match("get-configuration-result", result)
assert result["Environment"] == {"Variables": env_vars}
@parametrize_python_runtimes
@pytest.mark.skip_snapshot_verify
def test_invocation_with_qualifier(
self,
lambda_client,
s3_client,
s3_bucket,
runtime,
check_lambda_logs,
lambda_su_role,
wait_until_lambda_ready,
snapshot,
):
"""Tests invocation of python lambda with a given qualifier"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test_lambda_{short_uid()}"
bucket_key = "test_lambda.zip"
# upload zip file to S3
zip_file = create_lambda_archive(
load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=runtime
)
s3_client.upload_fileobj(BytesIO(zip_file), s3_bucket, bucket_key)
# create lambda function
response = lambda_client.create_function(
FunctionName=function_name,
Runtime=runtime,
Role=lambda_su_role,
Publish=True,
Handler="handler.handler",
Code={"S3Bucket": s3_bucket, "S3Key": bucket_key},
Timeout=10,
)
snapshot.match("creation-response", response)
assert "Version" in response
qualifier = response["Version"]
wait_until_lambda_ready(function_name=function_name, qualifier=qualifier)
# invoke lambda function
data_before = b'{"foo": "bar with \'quotes\\""}'
result = lambda_client.invoke(
FunctionName=function_name, Payload=data_before, Qualifier=qualifier
)
result = read_streams(result)
snapshot.match("invocation-response", result)
data_after = json.loads(result["Payload"])
assert json.loads(to_str(data_before)) == data_after["event"]
context = data_after["context"]
assert response["Version"] == context["function_version"]
assert context.get("aws_request_id")
assert function_name == context["function_name"]
assert f"/aws/lambda/{function_name}" == context["log_group_name"]
assert context.get("log_stream_name")
assert context.get("memory_limit_in_mb")
# assert that logs are present
expected = [".*Lambda log message - print function.*"]
if use_docker():
# Note that during regular test execution, nosetests captures the output from
# the logging module - hence we can only expect this when running in Docker
expected.append(".*Lambda log message - logging module.*")
def check_logs():
check_lambda_logs(function_name, expected_lines=expected)
retry(check_logs, retries=10)
lambda_client.delete_function(FunctionName=function_name)
@parametrize_python_runtimes
@pytest.mark.skip_snapshot_verify
def test_upload_lambda_from_s3(
self,
lambda_client,
s3_client,
s3_bucket,
runtime,
lambda_su_role,
wait_until_lambda_ready,
snapshot,
):
"""Test invocation of a python lambda with its deployment package uploaded to s3"""
snapshot.add_transformer(snapshot.transform.lambda_api())
snapshot.add_transformer(snapshot.transform.s3_api())
function_name = f"test_lambda_{short_uid()}"
bucket_key = "test_lambda.zip"
# upload zip file to S3
zip_file = testutil.create_lambda_archive(
load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=runtime
)
s3_client.upload_fileobj(BytesIO(zip_file), s3_bucket, bucket_key)
# create lambda function
create_response = lambda_client.create_function(
FunctionName=function_name,
Runtime=runtime,
Handler="handler.handler",
Role=lambda_su_role,
Code={"S3Bucket": s3_bucket, "S3Key": bucket_key},
Timeout=10,
)
snapshot.match("creation-response", create_response)
wait_until_lambda_ready(function_name=function_name)
# invoke lambda function
data_before = b'{"foo": "bar with \'quotes\\""}'
result = lambda_client.invoke(FunctionName=function_name, Payload=data_before)
result = read_streams(result)
snapshot.match("invocation-response", result)
data_after = json.loads(result["Payload"])
assert json.loads(to_str(data_before)) == data_after["event"]
context = data_after["context"]
assert "$LATEST" == context["function_version"]
assert function_name == context["function_name"]
# clean up
lambda_client.delete_function(FunctionName=function_name)
@parametrize_python_runtimes
def test_handler_in_submodule(self, lambda_client, create_lambda_function, runtime):
"""Test invocation of a lambda handler which resides in a submodule (= not root module)"""
function_name = f"test-function-{short_uid()}"
zip_file = testutil.create_lambda_archive(
load_file(TEST_LAMBDA_PYTHON),
get_content=True,
libs=TEST_LAMBDA_LIBS,
runtime=runtime,
file_name="localstack_package/def/main.py",
)
create_lambda_function(
func_name=function_name,
zip_file=zip_file,
handler="localstack_package.def.main.handler",
runtime=runtime,
)
# invoke function and assert result
result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
result_data = json.loads(result["Payload"].read())
assert 200 == result["StatusCode"]
assert json.loads("{}") == result_data["event"]
@parametrize_python_runtimes
def test_lambda_send_message_to_sqs(
self,
lambda_client,
create_lambda_function,
sqs_client,
sqs_create_queue,
runtime,
lambda_su_role,
):
"""Send sqs message to sqs queue inside python lambda"""
function_name = f"test-function-{short_uid()}"
queue_name = f"lambda-queue-{short_uid()}"
queue_url = sqs_create_queue(QueueName=queue_name)
create_lambda_function(
handler_file=TEST_LAMBDA_SEND_MESSAGE_FILE,
func_name=function_name,
runtime=runtime,
role=lambda_su_role,
)
event = {
"message": f"message-from-test-lambda-{short_uid()}",
"queue_name": queue_name,
"region_name": sqs_client.meta.region_name,
}
lambda_client.invoke(FunctionName=function_name, Payload=json.dumps(event))
# assert that message has been received on the Queue
def receive_message():
rs = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
assert len(rs["Messages"]) > 0
return rs["Messages"][0]
message = retry(receive_message, retries=15, sleep=2)
assert event["message"] == message["Body"]
@parametrize_python_runtimes
def test_lambda_put_item_to_dynamodb(
self,
lambda_client,
create_lambda_function,
dynamodb_create_table,
runtime,
dynamodb_resource,
lambda_su_role,
dynamodb_client,
):
"""Put item into dynamodb from python lambda"""
table_name = f"ddb-table-{short_uid()}"
function_name = f"test-function-{short_uid()}"
dynamodb_create_table(table_name=table_name, partition_key="id")
create_lambda_function(
handler_file=TEST_LAMBDA_PUT_ITEM_FILE,
func_name=function_name,
runtime=runtime,
role=lambda_su_role,
)
data = {short_uid(): f"data-{i}" for i in range(3)}
event = {
"table_name": table_name,
"region_name": dynamodb_client.meta.region_name,
"items": [{"id": k, "data": v} for k, v in data.items()],
}
def wait_for_table_created():
return (
dynamodb_client.describe_table(TableName=table_name)["Table"]["TableStatus"]
== "ACTIVE"
)
assert poll_condition(wait_for_table_created, timeout=30)
lambda_client.invoke(FunctionName=function_name, Payload=json.dumps(event))
rs = dynamodb_resource.Table(table_name).scan()
items = rs["Items"]
assert len(items) == len(data.keys())
for item in items:
assert data[item["id"]] == item["data"]
@parametrize_python_runtimes
def test_lambda_start_stepfunctions_execution(
self, lambda_client, stepfunctions_client, create_lambda_function, runtime, lambda_su_role
):
"""Start stepfunctions machine execution from lambda"""
function_name = f"test-function-{short_uid()}"
resource_lambda_name = f"test-resource-{short_uid()}"
state_machine_name = f"state-machine-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_START_EXECUTION_FILE,
func_name=function_name,
runtime=runtime,
role=lambda_su_role,
)
resource_lambda_arn = create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=resource_lambda_name,
runtime=runtime,
role=lambda_su_role,
)["CreateFunctionResponse"]["FunctionArn"]
state_machine_def = {
"StartAt": "step1",
"States": {
"step1": {
"Type": "Task",
"Resource": resource_lambda_arn,
"ResultPath": "$.result_value",
"End": True,
}
},
}
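        # Hedged note (not in the original test): this is a minimal Amazon States
        # Language definition - a single Task state that invokes the echo Lambda,
        # stores its output under $.result_value and then ends the execution.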
rs = stepfunctions_client.create_state_machine(
name=state_machine_name,
definition=json.dumps(state_machine_def),
roleArn=lambda_su_role,
)
sm_arn = rs["stateMachineArn"]
try:
lambda_client.invoke(
FunctionName=function_name,
Payload=json.dumps(
{
"state_machine_arn": sm_arn,
"region_name": stepfunctions_client.meta.region_name,
"input": {},
}
),
)
time.sleep(1)
rs = stepfunctions_client.list_executions(stateMachineArn=sm_arn)
# assert that state machine get executed 1 time
assert 1 == len([ex for ex in rs["executions"] if ex["stateMachineArn"] == sm_arn])
finally:
# clean up
stepfunctions_client.delete_state_machine(stateMachineArn=sm_arn)
@pytest.mark.skipif(
not use_docker(), reason="Test for docker python runtimes not applicable if run locally"
)
@parametrize_python_runtimes
def test_python_runtime_correct_versions(self, lambda_client, create_lambda_function, runtime):
"""Test different versions of python runtimes to report back the correct python version"""
function_name = f"test_python_executor_{short_uid()}"
create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_PYTHON_VERSION,
runtime=runtime,
)
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b"{}",
)
result = json.loads(to_str(result["Payload"].read()))
assert result["version"] == runtime
@pytest.mark.skipif(
not use_docker(), reason="Test for docker python runtimes not applicable if run locally"
)
@parametrize_python_runtimes
@pytest.mark.skip_snapshot_verify
def test_python_runtime_unhandled_errors(
self, lambda_client, create_lambda_function, runtime, snapshot
):
"""Test unhandled errors during python lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test_python_executor_{short_uid()}"
creation_response = create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_PYTHON_UNHANDLED_ERROR,
runtime=runtime,
)
snapshot.match("creation_response", creation_response)
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b"{}",
)
result = read_streams(result)
snapshot.match("invocation_response", result)
assert result["StatusCode"] == 200
assert result["ExecutedVersion"] == "$LATEST"
assert result["FunctionError"] == "Unhandled"
payload = json.loads(result["Payload"])
assert payload["errorType"] == "CustomException"
assert payload["errorMessage"] == "some error occurred"
assert "stackTrace" in payload
if (
runtime == "python3.9" and not is_old_provider()
): # TODO: remove this after the legacy provider is gone
assert "requestId" in payload
else:
assert "requestId" not in payload
parametrize_node_runtimes = pytest.mark.parametrize(
"runtime",
NODE_TEST_RUNTIMES,
)
class TestNodeJSRuntimes:
@pytest.mark.skipif(
not use_docker(), reason="Test for docker nodejs runtimes not applicable if run locally"
)
@parametrize_node_runtimes
@pytest.mark.skip_snapshot_verify
def test_nodejs_lambda_with_context(
self, lambda_client, create_lambda_function, runtime, check_lambda_logs, snapshot
):
"""Test context of nodejs lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
creation_response = create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_INTEGRATION_NODEJS,
handler="lambda_integration.handler",
runtime=runtime,
)
snapshot.match("creation", creation_response)
ctx = {
"custom": {"foo": "bar"},
"client": {"snap": ["crackle", "pop"]},
"env": {"fizz": "buzz"},
}
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b"{}",
ClientContext=to_str(base64.b64encode(to_bytes(json.dumps(ctx)))),
)
result = read_streams(result)
snapshot.match("invocation", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
client_context = json.loads(result_data)["context"]["clientContext"]
# TODO in the old provider, for some reason this is necessary. That is invalid behavior
if is_old_provider():
client_context = json.loads(client_context)
assert "bar" == client_context.get("custom").get("foo")
# assert that logs are present
expected = [".*Node.js Lambda handler executing."]
def check_logs():
check_lambda_logs(function_name, expected_lines=expected)
retry(check_logs, retries=15)
@parametrize_node_runtimes
@pytest.mark.skip_snapshot_verify
def test_invoke_nodejs_lambda(
self, lambda_client, create_lambda_function, runtime, logs_client, snapshot
):
"""Test simple nodejs lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
result = create_lambda_function(
func_name=function_name,
zip_file=testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True),
runtime=runtime,
handler="lambda_handler.handler",
)
snapshot.match("creation-result", result)
rs = lambda_client.invoke(
FunctionName=function_name,
Payload=json.dumps({"event_type": "test_lambda"}),
)
assert 200 == rs["ResponseMetadata"]["HTTPStatusCode"]
rs = read_streams(rs)
snapshot.match("invocation-result", rs)
payload = rs["Payload"]
response = json.loads(payload)
assert "response from localstack lambda" in response["body"]
def assert_events():
events = get_lambda_log_events(function_name, logs_client=logs_client)
assert len(events) > 0
retry(assert_events, retries=10)
@parametrize_node_runtimes
@pytest.mark.skip_snapshot_verify(paths=["$..LogResult"])
def test_invoke_nodejs_lambda_with_payload_containing_quotes(
self, lambda_client, create_lambda_function, runtime, logs_client, snapshot
):
"""Test nodejs invocation of payload with quotes"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test_lambda_{short_uid()}"
create_lambda_function(
func_name=function_name,
zip_file=testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True),
runtime=runtime,
handler="lambda_handler.handler",
)
test_string = "test_string' with some quotes"
body = f'{{"test_var": "{test_string}"}}'
rs = lambda_client.invoke(
FunctionName=function_name,
Payload=body,
)
assert 200 == rs["ResponseMetadata"]["HTTPStatusCode"]
rs = read_streams(rs)
snapshot.match("invoke-result", rs)
response = json.loads(rs["Payload"])
assert "response from localstack lambda" in response["body"]
def assert_events():
events = get_lambda_log_events(function_name, logs_client=logs_client)
assert len(events) > 0
assert test_string in str(events[0])
retry(assert_events, retries=10)
class TestCustomRuntimes:
@pytest.mark.skipif(
not use_docker(), reason="Test for docker provided runtimes not applicable if run locally"
)
@pytest.mark.parametrize(
"runtime",
PROVIDED_TEST_RUNTIMES,
)
@pytest.mark.skip_snapshot_verify
def test_provided_runtimes(
self, lambda_client, create_lambda_function, runtime, check_lambda_logs, snapshot
):
"""Test simple provided lambda (with curl as RIC) invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
result = create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_CUSTOM_RUNTIME,
handler="function.handler",
runtime=runtime,
)
snapshot.match("create-result", result)
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b'{"text": "bar with \'quotes\\""}',
)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
result_data = result_data.strip()
# jsonify in pro (re-)formats the event json so we allow both versions here
assert result_data in (
"""Echoing request: '{"text": "bar with \'quotes\\""}'""",
"""Echoing request: '{"text":"bar with \'quotes\\""}'""",
)
# assert that logs are present
expected = [".*Custom Runtime Lambda handler executing."]
def check_logs():
check_lambda_logs(function_name, expected_lines=expected)
retry(check_logs, retries=20)
class TestDotNetCoreRuntimes:
@pytest.mark.skipif(
not use_docker(), reason="Dotnet functions only supported with docker executor"
)
@pytest.mark.parametrize(
"zip_file,handler,runtime,expected_lines",
[
(
TEST_LAMBDA_DOTNETCORE31,
"dotnetcore31::dotnetcore31.Function::FunctionHandler",
LAMBDA_RUNTIME_DOTNETCORE31,
["Running .NET Core 3.1 Lambda"],
),
(
TEST_LAMBDA_DOTNET6,
"dotnet6::dotnet6.Function::FunctionHandler",
LAMBDA_RUNTIME_DOTNET6,
["Running .NET 6 Lambda"],
),
],
ids=["dotnetcore3.1", "dotnet6"],
)
@pytest.mark.skip_snapshot_verify
def test_dotnet_lambda(
self,
zip_file,
handler,
runtime,
expected_lines,
lambda_client,
create_lambda_function,
snapshot,
):
"""Test simple dotnet lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
create_result = create_lambda_function(
func_name=function_name,
zip_file=load_file(zip_file, mode="rb"),
handler=handler,
runtime=runtime,
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert "{}" == result_data.strip()
# TODO make lambda log checks more resilient to various formats
# self.check_lambda_logs(func_name, expected_lines=expected_lines)
class TestRubyRuntimes:
@pytest.mark.skipif(not use_docker(), reason="ruby runtimes not supported in local invocation")
@pytest.mark.skip_snapshot_verify
def test_ruby_lambda_running_in_docker(self, lambda_client, create_lambda_function, snapshot):
"""Test simple ruby lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
function_name = f"test-function-{short_uid()}"
create_result = create_lambda_function(
func_name=function_name,
handler_file=TEST_LAMBDA_RUBY,
handler="lambda_integration.handler",
runtime=LAMBDA_RUNTIME_RUBY27,
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert "{}" == to_str(result_data).strip()
class TestGolangRuntimes:
@pytest.mark.skip_snapshot_verify
@pytest.mark.skip_offline
def test_golang_lambda(self, lambda_client, tmp_path, create_lambda_function, snapshot):
"""Test simple golang lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
# fetch platform-specific example handler
url = TEST_GOLANG_LAMBDA_URL_TEMPLATE.format(
version=GO_RUNTIME_VERSION,
os=get_os(),
arch=get_arch(),
)
handler = tmp_path / "go-handler"
download_and_extract(url, handler)
# create function
func_name = f"test_lambda_{short_uid()}"
create_result = create_lambda_function(
func_name=func_name,
handler_file=handler,
handler="handler",
runtime=LAMBDA_RUNTIME_GOLANG,
)
snapshot.match("create-result", create_result)
# invoke
result = lambda_client.invoke(
FunctionName=func_name, Payload=json.dumps({"name": "pytest"})
)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert result["StatusCode"] == 200
assert result_data.strip() == '"Hello pytest!"'
parametrize_java_runtimes = pytest.mark.parametrize(
"runtime",
JAVA_TEST_RUNTIMES,
)
class TestJavaRuntimes:
@pytest.fixture(scope="class")
def test_java_jar(self) -> bytes:
# The TEST_LAMBDA_JAVA jar file is downloaded with `make init-testlibs`.
java_file = load_file(TEST_LAMBDA_JAVA, mode="rb")
if not java_file:
raise Exception(
f"Test dependency {TEST_LAMBDA_JAVA} not found."
"Please make sure to run 'make init-testlibs' to ensure the file is available."
)
return java_file
@pytest.fixture(scope="class")
def test_java_zip(self, tmpdir_factory, test_java_jar) -> bytes:
tmpdir = tmpdir_factory.mktemp("tmp-java-zip")
zip_lib_dir = os.path.join(tmpdir, "lib")
zip_jar_path = os.path.join(zip_lib_dir, "test.lambda.jar")
mkdir(zip_lib_dir)
cp_r(
INSTALL_PATH_LOCALSTACK_FAT_JAR,
os.path.join(zip_lib_dir, "executor.lambda.jar"),
)
save_file(zip_jar_path, test_java_jar)
return testutil.create_zip_file(tmpdir, get_content=True)
@pytest.fixture(
params=JAVA_TEST_RUNTIMES,
)
def simple_java_lambda(self, create_lambda_function, test_java_zip, request):
function_name = f"java-test-function-{short_uid()}"
create_lambda_function(
func_name=function_name,
zip_file=test_java_zip,
runtime=request.param,
handler="cloud.localstack.sample.LambdaHandler",
)
return function_name
@pytest.mark.skip_snapshot_verify(
paths=["$..invoke-result.LogResult", "$..invoke-result.Payload"]
)
def test_java_runtime(self, lambda_client, simple_java_lambda, snapshot):
"""Tests a simple java lambda invocation"""
snapshot.add_transformer(snapshot.transform.lambda_api())
result = lambda_client.invoke(
FunctionName=simple_java_lambda,
Payload=b'{"echo":"echo"}',
)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
# TODO: find out why the assertion below does not work in Travis-CI! (seems to work locally)
assert "LinkedHashMap" in to_str(result_data)
assert result_data is not None
@pytest.mark.skip_snapshot_verify(
paths=["$..invoke-result.LogResult", "$..invoke-result.Payload"]
)
def test_java_runtime_with_large_payload(
self, lambda_client, simple_java_lambda, caplog, snapshot
):
"""Tests a invocation against a java lambda with a 5MB payload"""
snapshot.add_transformer(snapshot.transform.lambda_api())
# Set the loglevel to INFO for this test to avoid breaking a CI environment (due to excessive log outputs)
caplog.set_level(logging.INFO)
payload = {"test": "test123456" * 100 * 1000 * 5} # 5MB payload
payload = to_bytes(json.dumps(payload))
result = lambda_client.invoke(FunctionName=simple_java_lambda, Payload=payload)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert "LinkedHashMap" in result_data
assert result_data is not None
@pytest.mark.skip_snapshot_verify
def test_java_runtime_with_lib(self, lambda_client, create_lambda_function, snapshot):
"""Test lambda creation/invocation with different deployment package types (jar, zip, zip-with-gradle)"""
snapshot.add_transformer(snapshot.transform.lambda_api())
java_jar_with_lib = load_file(TEST_LAMBDA_JAVA_WITH_LIB, mode="rb")
# create ZIP file from JAR file
jar_dir = new_tmp_dir()
zip_dir = new_tmp_dir()
unzip(TEST_LAMBDA_JAVA_WITH_LIB, jar_dir)
zip_lib_dir = os.path.join(zip_dir, "lib")
shutil.move(os.path.join(jar_dir, "lib"), zip_lib_dir)
jar_without_libs_file = testutil.create_zip_file(jar_dir)
shutil.copy(jar_without_libs_file, os.path.join(zip_lib_dir, "lambda.jar"))
java_zip_with_lib = testutil.create_zip_file(zip_dir, get_content=True)
java_zip_with_lib_gradle = load_file(
os.path.join(
THIS_FOLDER,
"functions",
"java",
"lambda_echo",
"build",
"distributions",
"lambda-function-built-by-gradle.zip",
),
mode="rb",
)
for archive_desc, archive in [
("jar-with-lib", java_jar_with_lib),
("zip-with-lib", java_zip_with_lib),
("zip-with-lib-gradle", java_zip_with_lib_gradle),
]:
lambda_name = f"test-function-{short_uid()}"
create_result = create_lambda_function(
func_name=lambda_name,
zip_file=archive,
runtime=LAMBDA_RUNTIME_JAVA11,
handler="cloud.localstack.sample.LambdaHandlerWithLib",
)
snapshot.match(f"create-result-{archive_desc}", create_result)
result = lambda_client.invoke(FunctionName=lambda_name, Payload=b'{"echo":"echo"}')
result = read_streams(result)
snapshot.match(f"invoke-result-{archive_desc}", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert "echo" in to_str(result_data)
def test_sns_event(self, lambda_client, simple_java_lambda):
result = lambda_client.invoke(
FunctionName=simple_java_lambda,
InvocationType="Event",
Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}',
)
assert 202 == result["StatusCode"]
def test_ddb_event(self, lambda_client, simple_java_lambda):
result = lambda_client.invoke(
FunctionName=simple_java_lambda,
InvocationType="Event",
Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}',
)
assert 202 == result["StatusCode"]
@parametrize_java_runtimes
def test_kinesis_invocation(
self, lambda_client, create_lambda_function, test_java_zip, runtime
):
payload = (
b'{"Records": [{'
b'"kinesis": {"data": "dGVzdA==", "partitionKey": "partition"},'
b'"eventID": "shardId-000000000001:12345678901234567890123456789012345678901234567890",'
b'"eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/test"}]}'
)
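        # Hedged note (not in the original test): "dGVzdA==" is base64 for "test",
        # mirroring the record shape Kinesis delivers to a Lambda event source mapping.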
# deploy lambda - Java with Kinesis input object
function_name = f"test-lambda-{short_uid()}"
create_lambda_function(
func_name=function_name,
zip_file=test_java_zip,
runtime=runtime,
handler="cloud.localstack.awssdkv1.sample.KinesisLambdaHandler",
)
result = lambda_client.invoke(FunctionName=function_name, Payload=payload)
result_data = result["Payload"].read()
assert 200 == result["StatusCode"]
assert '"test "' == to_str(result_data).strip()
def test_kinesis_event(self, lambda_client, simple_java_lambda):
payload = (
b'{"Records": [{'
b'"kinesis": {"data": "dGVzdA==", "partitionKey": "partition"},'
b'"eventID": "shardId-000000000001:12345678901234567890123456789012345678901234567890",'
b'"eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/test"}]}'
)
result = lambda_client.invoke(
FunctionName=simple_java_lambda,
InvocationType="Event",
Payload=payload,
)
result_data = result["Payload"].read()
assert 202 == result["StatusCode"]
assert "" == to_str(result_data).strip()
@parametrize_java_runtimes
def test_stream_handler(self, lambda_client, create_lambda_function, test_java_jar, runtime):
function_name = f"test-lambda-{short_uid()}"
create_lambda_function(
func_name=function_name,
zip_file=test_java_jar,
runtime=runtime,
handler="cloud.localstack.awssdkv1.sample.LambdaStreamHandler",
)
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b'{"echo":"echo"}',
)
result_data = result["Payload"].read()
assert 200 == result["StatusCode"]
assert "{}" == to_str(result_data).strip()
@parametrize_java_runtimes
@pytest.mark.skip_snapshot_verify
def test_serializable_input_object(
self, lambda_client, create_lambda_function, test_java_zip, runtime, snapshot
):
snapshot.add_transformer(snapshot.transform.lambda_api())
# deploy lambda - Java with serializable input object
function_name = f"test-lambda-{short_uid()}"
create_result = create_lambda_function(
func_name=function_name,
zip_file=test_java_zip,
runtime=runtime,
handler="cloud.localstack.awssdkv1.sample.SerializedInputLambdaHandler",
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(
FunctionName=function_name,
Payload=b'{"bucket": "test_bucket", "key": "test_key"}',
)
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert json.loads(result_data) == {
"validated": True,
"bucket": "test_bucket",
"key": "test_key",
}
@pytest.mark.skip_snapshot_verify
def test_trigger_java_lambda_through_sns(
self,
lambda_client,
s3_client,
sns_client,
sns_subscription,
simple_java_lambda,
s3_bucket,
sns_create_topic,
logs_client,
snapshot,
):
snapshot.add_transformer(snapshot.transform.lambda_api())
snapshot.add_transformer(snapshot.transform.s3_api())
snapshot.add_transformer(snapshot.transform.key_value("Sid"))
topic_name = f"topic-{short_uid()}"
key = f"key-{short_uid()}"
function_name = simple_java_lambda
function_result = lambda_client.get_function(FunctionName=function_name)
snapshot.match("get-function", function_result)
function_arn = function_result["Configuration"]["FunctionArn"]
permission_id = f"test-statement-{short_uid()}"
topic_arn = sns_create_topic(Name=topic_name)["TopicArn"]
s3_sns_policy = f"""{{
"Version": "2012-10-17",
"Id": "example-ID",
"Statement": [
{{
"Sid": "Example SNS topic policy",
"Effect": "Allow",
"Principal": {{
"Service": "s3.amazonaws.com"
}},
"Action": [
"SNS:Publish"
],
"Resource": "{topic_arn}",
"Condition": {{
"ArnLike": {{
"aws:SourceArn": "arn:aws:s3:*:*:{s3_bucket}"
}}
}}
}}
]
}}
"""
sns_client.set_topic_attributes(
TopicArn=topic_arn, AttributeName="Policy", AttributeValue=s3_sns_policy
)
s3_client.put_bucket_notification_configuration(
Bucket=s3_bucket,
NotificationConfiguration={
"TopicConfigurations": [{"TopicArn": topic_arn, "Events": ["s3:ObjectCreated:*"]}]
},
)
add_permission_response = lambda_client.add_permission(
FunctionName=function_name,
StatementId=permission_id,
Action="lambda:InvokeFunction",
Principal="sns.amazonaws.com",
SourceArn=topic_arn,
)
snapshot.match("add-permission", add_permission_response)
sns_subscription(
TopicArn=topic_arn,
Protocol="lambda",
Endpoint=function_arn,
)
events_before = (
run_safe(
get_lambda_log_events,
function_name,
regex_filter="Records",
logs_client=logs_client,
)
or []
)
s3_client.put_object(Bucket=s3_bucket, Key=key, Body="something")
        # Check that we got an event confirming the lambda was invoked
retry(
function=check_expected_lambda_log_events_length,
retries=30,
sleep=1,
expected_length=len(events_before) + 1,
function_name=function_name,
regex_filter="Records",
logs_client=logs_client,
)
# clean up
s3_client.delete_objects(Bucket=s3_bucket, Delete={"Objects": [{"Key": key}]})
@pytest.mark.parametrize(
"handler,expected_result",
[
(
"cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequestCustom",
"CUSTOM",
),
("cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom", "INTERFACE"),
(
"cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequest",
"INTERFACE",
),
],
)
# this test is only compiled against java 11
@pytest.mark.skip_snapshot_verify
def test_java_custom_handler_method_specification(
self,
lambda_client,
create_lambda_function,
handler,
expected_result,
check_lambda_logs,
snapshot,
):
snapshot.add_transformer(snapshot.transform.lambda_api())
java_handler_multiple_handlers = load_file(TEST_LAMBDA_JAVA_MULTIPLE_HANDLERS, mode="rb")
expected = ['.*"echo": "echo".*']
function_name = f"lambda_handler_test_{short_uid()}"
create_result = create_lambda_function(
func_name=function_name,
zip_file=java_handler_multiple_handlers,
runtime=LAMBDA_RUNTIME_JAVA11,
handler=handler,
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(FunctionName=function_name, Payload=b'{"echo":"echo"}')
result = read_streams(result)
snapshot.match("invoke-result", result)
result_data = result["Payload"]
assert 200 == result["StatusCode"]
assert expected_result == result_data.strip('"\n ')
def check_logs():
check_lambda_logs(function_name, expected_lines=expected)
retry(check_logs, retries=20)
TEST_LAMBDA_CACHE_NODEJS = os.path.join(THIS_FOLDER, "functions", "lambda_cache.js")
TEST_LAMBDA_CACHE_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_cache.py")
TEST_LAMBDA_TIMEOUT_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_timeout.py")
TEST_LAMBDA_INTROSPECT_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_introspect.py")
class TestLambdaBehavior:
@pytest.mark.parametrize(
["lambda_fn", "lambda_runtime"],
[
(
TEST_LAMBDA_CACHE_NODEJS,
LAMBDA_RUNTIME_NODEJS12X,
), # TODO: can we do some kind of nested parametrize here?
(TEST_LAMBDA_CACHE_PYTHON, LAMBDA_RUNTIME_PYTHON38),
],
ids=["nodejs", "python"],
)
@pytest.mark.xfail(
os.environ.get("TEST_TARGET") != "AWS_CLOUD",
reason="lambda caching not supported currently",
) # TODO: should be removed after the lambda rework
def test_lambda_cache_local(
self, lambda_client, create_lambda_function, lambda_fn, lambda_runtime
):
"""tests the local context reuse of packages in AWS lambda"""
func_name = f"test_lambda_{short_uid()}"
create_lambda_function(
func_name=func_name,
handler_file=lambda_fn,
runtime=lambda_runtime,
client=lambda_client,
)
result = lambda_client.invoke(FunctionName=func_name)
result_data = result["Payload"].read()
assert result["StatusCode"] == 200
assert json.loads(result_data)["counter"] == 0
result = lambda_client.invoke(FunctionName=func_name)
result_data = result["Payload"].read()
assert result["StatusCode"] == 200
assert json.loads(result_data)["counter"] == 1
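    # For reference, a handler compatible with the counter assertions above could
    # look roughly like the following (hypothetical sketch; the real
    # functions/lambda_cache.py may differ):
    #
    #   counter = -1  # module-level state survives warm (reused) invocations
    #
    #   def handler(event, context):
    #       global counter
    #       counter += 1
    #       return {"counter": counter}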
@pytest.mark.parametrize(
["lambda_fn", "lambda_runtime"],
[
(TEST_LAMBDA_TIMEOUT_PYTHON, LAMBDA_RUNTIME_PYTHON38),
],
ids=["python"],
)
@pytest.mark.xfail(
os.environ.get("TEST_TARGET") != "AWS_CLOUD",
reason="lambda timeouts not supported currently",
) # TODO: should be removed after the lambda rework
def test_lambda_timeout_logs(
self,
lambda_client,
create_lambda_function,
lambda_fn,
lambda_runtime,
logs_client,
snapshot,
):
"""tests the local context reuse of packages in AWS lambda"""
snapshot.add_transformer(snapshot.transform.lambda_api())
func_name = f"test_lambda_{short_uid()}"
create_result = create_lambda_function(
func_name=func_name,
handler_file=lambda_fn,
runtime=lambda_runtime,
client=lambda_client,
timeout=1,
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(FunctionName=func_name, Payload=json.dumps({"wait": 2}))
snapshot.match("invoke-result", result)
assert result["StatusCode"] == 200
log_group_name = f"/aws/lambda/{func_name}"
ls_result = logs_client.describe_log_streams(logGroupName=log_group_name)
log_stream_name = ls_result["logStreams"][0]["logStreamName"]
def assert_events():
log_events = logs_client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name
)["events"]
assert any(["starting wait" in e["message"] for e in log_events])
assert not any(["done waiting" in e["message"] for e in log_events])
retry(assert_events, retries=15)
@pytest.mark.parametrize(
["lambda_fn", "lambda_runtime"],
[
(TEST_LAMBDA_TIMEOUT_PYTHON, LAMBDA_RUNTIME_PYTHON38),
],
ids=["python"],
)
@pytest.mark.skip_snapshot_verify
def test_lambda_no_timeout_logs(
self,
lambda_client,
create_lambda_function,
lambda_fn,
lambda_runtime,
logs_client,
snapshot,
):
"""tests the local context reuse of packages in AWS lambda"""
snapshot.add_transformer(snapshot.transform.lambda_api())
func_name = f"test_lambda_{short_uid()}"
create_result = create_lambda_function(
func_name=func_name,
handler_file=lambda_fn,
runtime=lambda_runtime,
client=lambda_client,
timeout=2,
)
snapshot.match("create-result", create_result)
result = lambda_client.invoke(FunctionName=func_name, Payload=json.dumps({"wait": 1}))
snapshot.match("invoke-result", result)
assert result["StatusCode"] == 200
log_group_name = f"/aws/lambda/{func_name}"
def _log_stream_available():
result = logs_client.describe_log_streams(logGroupName=log_group_name)["logStreams"]
return len(result) > 0
wait_until(_log_stream_available, strategy="linear")
ls_result = logs_client.describe_log_streams(logGroupName=log_group_name)
log_stream_name = ls_result["logStreams"][0]["logStreamName"]
def _assert_log_output():
log_events = logs_client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name
)["events"]
return any(["starting wait" in e["message"] for e in log_events]) and any(
["done waiting" in e["message"] for e in log_events]
)
wait_until(_assert_log_output, strategy="linear")
@pytest.mark.skip(reason="very slow (only execute when needed)")
def test_lambda_provisioned_concurrency_moves_with_alias(
self, lambda_client, logs_client, create_lambda_function, snapshot
):
"""
create fn ⇒ publish version ⇒ create alias for version ⇒ put concurrency on alias
⇒ new version with change ⇒ change alias to new version ⇒ concurrency moves with alias? same behavior for calls to alias/version?
"""
snapshot.add_transformer(snapshot.transform.lambda_api())
func_name = f"test_lambda_{short_uid()}"
alias_name = f"test_alias_{short_uid()}"
create_result = create_lambda_function(
func_name=func_name,
handler_file=TEST_LAMBDA_INTROSPECT_PYTHON,
runtime=LAMBDA_RUNTIME_PYTHON38,
client=lambda_client,
timeout=2,
)
snapshot.match("create-result", create_result)
fn = lambda_client.get_function_configuration(FunctionName=func_name, Qualifier="$LATEST")
snapshot.match("get-function-configuration", fn)
assert fn["State"] == "Active"
first_ver = lambda_client.publish_version(
FunctionName=func_name, RevisionId=fn["RevisionId"], Description="my-first-version"
)
snapshot.match("publish_version_1", first_ver)
assert first_ver["State"] == "Active"
assert fn["RevisionId"] != first_ver["RevisionId"]
get_function_configuration = lambda_client.get_function_configuration(
FunctionName=func_name, Qualifier=first_ver["Version"]
)
snapshot.match("get_function_configuration_version_1", first_ver)
assert get_function_configuration["RevisionId"] == first_ver["RevisionId"]
# There's no ProvisionedConcurrencyConfiguration yet
assert get_invoke_init_type(lambda_client, func_name, first_ver["Version"]) == "on-demand"
# Create Alias and add ProvisionedConcurrencyConfiguration to it
alias = lambda_client.create_alias(
FunctionName=func_name, FunctionVersion=first_ver["Version"], Name=alias_name
)
snapshot.match("create_alias", alias)
assert alias["FunctionVersion"] == first_ver["Version"]
assert alias["RevisionId"] != first_ver["RevisionId"]
get_function_result = lambda_client.get_function(
FunctionName=func_name, Qualifier=first_ver["Version"]
)
versioned_revision_id_before = get_function_result["Configuration"]["RevisionId"]
snapshot.match("get_function_before_provisioned", get_function_result)
lambda_client.put_provisioned_concurrency_config(
FunctionName=func_name, Qualifier=alias_name, ProvisionedConcurrentExecutions=1
)
assert wait_until(concurrency_update_done(lambda_client, func_name, alias_name))
get_function_result = lambda_client.get_function(
FunctionName=func_name, Qualifier=alias_name
)
snapshot.match("get_function_after_provisioned", get_function_result)
versioned_revision_id_after = get_function_result["Configuration"]["RevisionId"]
assert versioned_revision_id_before != versioned_revision_id_after
# Alias AND Version now both use provisioned-concurrency (!)
assert (
get_invoke_init_type(lambda_client, func_name, first_ver["Version"])
== "provisioned-concurrency"
)
assert (
get_invoke_init_type(lambda_client, func_name, alias_name) == "provisioned-concurrency"
)
# Update lambda configuration and publish new version
lambda_client.update_function_configuration(FunctionName=func_name, Timeout=10)
assert wait_until(update_done(lambda_client, func_name))
lambda_conf = lambda_client.get_function_configuration(FunctionName=func_name)
snapshot.match("get_function_after_update", lambda_conf)
# Move existing alias to the new version
new_version = lambda_client.publish_version(
FunctionName=func_name, RevisionId=lambda_conf["RevisionId"]
)
snapshot.match("publish_version_2", new_version)
new_alias = lambda_client.update_alias(
FunctionName=func_name, FunctionVersion=new_version["Version"], Name=alias_name
)
snapshot.match("update_alias", new_alias)
assert new_alias["RevisionId"] != new_version["RevisionId"]
# lambda should now be provisioning new "hot" execution environments for this new alias->version pointer
# the old one should be de-provisioned
get_provisioned_config_result = lambda_client.get_provisioned_concurrency_config(
FunctionName=func_name, Qualifier=alias_name
)
snapshot.match("get_provisioned_config_after_alias_move", get_provisioned_config_result)
assert wait_until(
concurrency_update_done(lambda_client, func_name, alias_name),
strategy="linear",
wait=30,
max_retries=20,
_max_wait=600,
) # this is SLOW (~6-8 min)
# concurrency should still only work for the alias now
# NOTE: the old version has been de-provisioned and will run 'on-demand' now!
assert get_invoke_init_type(lambda_client, func_name, first_ver["Version"]) == "on-demand"
assert (
get_invoke_init_type(lambda_client, func_name, new_version["Version"])
== "provisioned-concurrency"
)
assert (
get_invoke_init_type(lambda_client, func_name, alias_name) == "provisioned-concurrency"
)
# ProvisionedConcurrencyConfig should only be "registered" to the alias, not the referenced version
with pytest.raises(Exception) as e:
lambda_client.get_provisioned_concurrency_config(
FunctionName=func_name, Qualifier=new_version["Version"]
)
e.match("ProvisionedConcurrencyConfigNotFoundException")
@pytest.mark.skip(reason="very slow (only execute when needed)")
def test_lambda_provisioned_concurrency_doesnt_apply_to_latest(
self, lambda_client, logs_client, create_lambda_function
):
"""create fn ⇒ publish version ⇒ provisioned concurrency @version ⇒ test if it applies to call to $LATEST"""
func_name = f"test_lambda_{short_uid()}"
create_lambda_function(
func_name=func_name,
handler_file=TEST_LAMBDA_INTROSPECT_PYTHON,
runtime=LAMBDA_RUNTIME_PYTHON38,
client=lambda_client,
timeout=2,
)
fn = lambda_client.get_function_configuration(FunctionName=func_name, Qualifier="$LATEST")
assert fn["State"] == "Active"
first_ver = lambda_client.publish_version(
FunctionName=func_name, RevisionId=fn["RevisionId"], Description="my-first-version"
)
assert first_ver["State"] == "Active"
assert fn["RevisionId"] != first_ver["RevisionId"]
assert (
lambda_client.get_function_configuration(
FunctionName=func_name, Qualifier=first_ver["Version"]
)["RevisionId"]
== first_ver["RevisionId"]
)
# Normal published version without ProvisionedConcurrencyConfiguration
assert get_invoke_init_type(lambda_client, func_name, first_ver["Version"]) == "on-demand"
# Create ProvisionedConcurrencyConfiguration for this Version
versioned_revision_id_before = lambda_client.get_function(
FunctionName=func_name, Qualifier=first_ver["Version"]
)["Configuration"]["RevisionId"]
lambda_client.put_provisioned_concurrency_config(
FunctionName=func_name,
Qualifier=first_ver["Version"],
ProvisionedConcurrentExecutions=1,
)
assert wait_until(concurrency_update_done(lambda_client, func_name, first_ver["Version"]))
versioned_revision_id_after = lambda_client.get_function(
FunctionName=func_name, Qualifier=first_ver["Version"]
)["Configuration"]["RevisionId"]
assert versioned_revision_id_before != versioned_revision_id_after
assert (
get_invoke_init_type(lambda_client, func_name, first_ver["Version"])
== "provisioned-concurrency"
)
# $LATEST does *NOT* use provisioned concurrency
assert get_invoke_init_type(lambda_client, func_name, "$LATEST") == "on-demand"
# TODO: why is this flaky?
# assert lambda_client.get_function(FunctionName=func_name, Qualifier='$LATEST')['Configuration']['RevisionId'] == lambda_client.get_function(FunctionName=func_name, Qualifier=first_ver['Version'])['Configuration']['RevisionId']
| 38.415803
| 247
| 0.64584
|
06d783690782d48e595dd4b1b084b7b4f832fb63
| 20,724
|
py
|
Python
|
interact/core/topology_dataframe.py
|
MD-Studio/MDInteract
|
55bb51d27ec8d4095118837a997eea7467c2ac53
|
[
"Apache-2.0"
] | 4
|
2019-06-24T12:56:28.000Z
|
2021-03-27T17:32:17.000Z
|
interact/core/topology_dataframe.py
|
MD-Studio/MDInteract
|
55bb51d27ec8d4095118837a997eea7467c2ac53
|
[
"Apache-2.0"
] | 1
|
2020-09-25T12:15:27.000Z
|
2020-10-11T10:28:30.000Z
|
interact/core/topology_dataframe.py
|
MD-Studio/MDInteract
|
55bb51d27ec8d4095118837a997eea7467c2ac53
|
[
"Apache-2.0"
] | 5
|
2019-09-12T02:35:04.000Z
|
2021-07-30T18:05:28.000Z
|
# -*- coding: utf-8 -*-
import logging
import numpy
from pandas import DataFrame, Series, concat
from scipy.spatial.distance import cdist
from interact import constants, reference_data, __module__
from interact.core.topology_base import TopologyBaseClass
from interact.core.topology_series import TopologySeries
from interact.core.sssr import sssr
logger = logging.getLogger(__module__)
class TopologyDataFrame(TopologyBaseClass, DataFrame):
"""
TopologyDataFrame class
An extended Pandas DataFrame for working with MDTraj molecular structure
topologies and associated atom coordinates.
A TopologyDataFrame object is initiated from a pandas topology DataFrame
obtained using `mdtraj.System.to_dataframe` method. An internal
coordinate representation that is persistent over dataframe selections
is initiated using the `set_coordinates` method.
"""
def __init__(self, *args, **kwargs):
super(TopologyDataFrame, self).__init__(*args, **kwargs)
# Set parent the first time
if not hasattr(self, '_parent'):
self._parent = self
if not hasattr(self, '_coordinates'):
self._coordinates = None
if not hasattr(self, '_distance_matrix'):
self._distance_matrix = None
self.unitcell_vectors = None
self.unitcell_lengths = None
self.unitcell_angles = None
self.time = None
def contains(self, other):
"""
Check if the atom selection in other is contained in self
Implementing `contains` as magic method `__contains__` is not possible
as the native __contains__ is used internally by Pandas.
:param other: Other topology DataFrame
:type other: :interact:TopologyDataFrame
:rtype: :py:bool
"""
return set(other.get_index()).issubset(set(self.get_index()))
def __getitem__(self, item):
return self._wrapped_pandas_method('__getitem__', item)
@property
def _constructor(self):
return TopologyDataFrame
@property
def _constructor_sliced(self):
return TopologySeries
def _wrapped_pandas_method(self, mtd, *args, **kwargs):
"""
Wrap a generic pandas method to ensure it returns a TopologySeries
"""
result = getattr(super(TopologyDataFrame, self), mtd)(*args, **kwargs)
if isinstance(result, TopologySeries):
for name in self._metadata:
object.__setattr__(result, name, getattr(self, name, None))
return result
def squeeze(self, *args, **kwargs):
return self._wrapped_pandas_method('squeeze', *args, **kwargs)
def iterrows(self):
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
for name in self._metadata:
setattr(s, name, getattr(self, name, None))
yield k, s
def labels(self):
"""
Unified method to return TopologyDataFrame column labels or
TopologySeries axis labels as list.
:return: data labels
:rtype: :py:list
"""
return self.columns.tolist()
def get_index(self):
return list(self.index)
def charge(self, partial=False):
"""
Get the partial or formal charge of the selection
        Computes charge as the sum of partial charges in the 'charge'
        column. Rounds to the nearest integer value unless `partial` is
True.
:param partial: Return partial charge of selection
:type partial: :py:bool
:return: charge of selection
:rtype: :py:int or :py:float if partial
"""
if 'charge' not in self.columns:
raise AttributeError('No charge column')
partial_charge = self['charge'].sum()
if not partial:
return int(round(partial_charge))
return partial_charge
def center(self, mass=False):
"""
Computes 3D coordinates of the geometrical center or the center of mass
of atoms in the selection.
Given the atoms of a ring it will calculate the ring center.
:param mass: calculate center of mass
:type mass: :py:bool
:return: coordinate representing center
:rtype: :numpy:ndarray
"""
coords = self.coord
if mass:
elements = reference_data['element_data']
atom_mass = Series(elements.atomicMass.values, index=elements.symbol).to_dict()
            scale = numpy.array([atom_mass.get(element, 12.0) for element in self['element']]).reshape(-1, 1)
else:
scale = numpy.ones((len(coords), 1))
scaled = coords * scale
        return numpy.sum(scaled, axis=0) / numpy.sum(scale)
def distances(self, target=None, max_distmat_size=75000000):
"""
Compute pairwise distance matrix
Compute a Euclidean distance matrix between all atom pairs of the
current source selection and the target selection using the
`scipy.spatial.distance.cdist` method.
If the `target` is not set it equals the current source selection
resulting in a square pairwise distance matrix.
        Once built, the distance matrix is referenced in all TopologyDataFrame
and TopologySeries selections made from the parent frame until
`distances` is called again or a new set of coordinates is registered.
        Structure your workflow such that you build the distance matrix
from the initial parent frame once and reuse it in subsequent
selections.
Note:
Although building a pairwise distance matrix is pretty fast, the
memory load increases quickly with every new atom added to the
selection. A full MD system of a biomolecular structure in a box
with explicit solvent will probably not fit in memory anymore.
An upper limit on the matrix size (max_distmat_size) is enforced
to prevent problems.
:param target: target atom selection
:type target: :interact:TopologyDataFrame
:param max_distmat_size: Maximum size of pairwise distance matrix to
prevent memory flooding.
:type max_distmat_size: :py:int
:return: pairwise distance matrix with rows for source
selection and columns for the target.
:rtype: :pandas:DataFrame
:raises: OverflowError, matrix size > max_distmat_size
"""
if target is None:
target = self
# Get dataframe/series index for source and target set
source_atoms = self.get_index()
target_atoms = target.get_index()
# Restrict size of pairwise matrix
matrix_size = len(source_atoms) * len(target_atoms)
if matrix_size > max_distmat_size:
            raise OverflowError('Pairwise distance matrix size too large {0} > {1}'.format(matrix_size,
max_distmat_size))
# Calculate pairwise distance matrix and make DataFrame out of it
# using source and target index.
distances = cdist(self.coord, target.coord, metric='euclidean')
self._distance_matrix = DataFrame(distances, index=source_atoms, columns=target_atoms)
# Set in parent if called from child. Should not be needed and
# creates one more retained object
if self._parent._distance_matrix is None:
self._parent._distance_matrix = self._distance_matrix
return self._distance_matrix
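    # Typical usage (illustrative sketch; `system` is a placeholder for a parent
    # TopologyDataFrame with coordinates set):
    #
    #   system.distances()                      # build the pairwise matrix once on the parent
    #   ligand = system[system.is_ligand()]
    #   ligand_contacts = ligand.contacts()     # child selections reuse the cached matrix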
def extend(self, mode='resSeq'):
"""
Extend the current atom selection based on similarity in topology
column types defined by the `mode` argument.
        Selection is always restricted to a match in chainID to avoid
        duplicate selections in, for instance, a dimer complex.
for example:
Default `resSeq` mode will extend the current selection with
all additional atoms that share the same residue number as
the ones in the selection.
        :param mode: selection extension criterion
:type mode: :py:str
:return: new TopologyDataFrame with extended atom selection
:rtype: :interact:TopologyDataFrame
:raises: AttributeError
"""
if mode not in self.labels():
raise AttributeError('TopologyDataFrame has no column named {0}'.format(mode))
chainid_restriction = set(self['chainID'])
return self._parent[(self._parent[mode].isin(self[mode].unique())) &
(self._parent['chainID'].isin(chainid_restriction))]
def contacts(self, target=None, intra=False):
"""
Get the distance between the atoms in the current selection (source)
and the target.
If the target is not specified it is set to the full system (_parent)
without the source.
The returned contact DataFrame is a representation of the pairwise
distance matrix between the source and target containing all atom
pairs. A distance cutoff is not applied but that can be easily done
        by querying the resulting contact DataFrame.
        :param target: system DataFrame containing a target selection
:type target: :interact:TopologyDataFrame
:param intra: calculate intra-molecular contacts
:type intra: :py:bool
:return: all pairwise contacts between source and target
:rtype: :pandas:DataFrame
"""
source_index = set(self.get_index())
# Get intra-selection contacts (target == self)
if intra:
target_index = source_index
# Inter-selection contacts.
# Get index of source (current selection) and target (without source)
else:
if target is not None:
target_index = set(target.get_index()).difference(source_index)
else:
                target_index = set(self._parent.get_index()).difference(source_index)
# Get slice of contact matrix representing the selection, reformat to row based Dataframe
contacts = self._distance_matrix.loc[source_index, target_index]
contacts = contacts.unstack().reset_index()
contacts.columns = ['target', 'source', 'distance']
# Get selection for source and target from parent, reindex and concatenate into new DataFrame
source = self._parent.loc[(self._parent.index.isin(contacts['source'])), :].copy()
source.insert(0, 'index', source.index)
source = source.loc[contacts['source'], :]
source.index = range(len(source))
target = self._parent.loc[(self._parent.index.isin(contacts['target'])), :].copy()
target.insert(0, 'index', target.index)
target = target.loc[contacts['target'], :]
target.index = range(len(target))
columns = source.columns.tolist()
contacts_frame = concat([source, target, contacts['distance']], axis=1)
multi_index = [(['source'] * len(columns) + ['target'] * (len(columns) + 1)), columns * 2 + ['distance']]
contacts_frame.columns = multi_index
contacts_frame.columns.names = [0, 1]
# Add angle column (for contacts with angle constraints)
contacts_frame['target', 'angle'] = numpy.nan
# Add a contact column and fill it with 'nd' (type not determined)
contacts_frame['contact'] = 'nd'
return contacts_frame.sort_values(by=('source', 'serial'))
def covalent_bonds(self, cutoff=constants['max_covalent_bond_dist']):
"""
Return covalently bonded atoms in the selection as ContactFrame
:param cutoff: covalent bond upper distance
:type cutoff: :py:float
:return: Covalently bonded atoms
:rtype: :pandas:DataFrame
"""
# Get all intra selection distances
cf = self.contacts(intra=True)
# Get atom pairs within covalent bonded distance
return cf[(cf[('target', 'distance')] >= 0.05) & (cf[('target', 'distance')] <= cutoff)]
def is_amino_acid(self):
"""
Quick selector for residues of type amino acid (aa) according to their
three-letter code described in the `residue_data` reference set.
The returned pandas Series object can be used in additional data
queries.
For custom amino acid selection use the pandas `isin` selector.
:return: boolean series of the same length as the DataFrame
indicating if a row is an amino acid.
:rtype: :interact:TopologySeries
"""
data = reference_data['residue_data']
aa = data.loc[data['type'] == 'aa', 'three-letter']
return self['resName'].isin(set(aa))
def is_amino_acid_backbone(self, backbone=('C', 'CA', 'CB', 'N', 'O', 'H', 'HA')):
"""
Quick selector for amino acid backbone atoms by first selecting
all amino acids using `is_amino_acid` followed by selecting the
backbone atoms defined in the `backbone` attribute
The returned pandas Series object can be used in additional data
queries.
:param backbone: backbone atom names
:type backbone: :py:list
:return: boolean series of the same length as the DataFrame
indicating if a row is an amino acid backbone atom.
:rtype: :interact:TopologySeries
"""
return self.is_amino_acid() & self['name'].isin(backbone)
def is_nucleic_acid(self):
"""
Quick selector for residues of type nucleic acid (na) according to
their two-letter code described in the `residue_data` reference set.
The returned pandas Series object can be used in additional data
queries.
        For custom nucleic acid selections use the pandas `isin` selector.
:return: boolean series of the same length as the DataFrame
indicating if a row is a nucleic acid.
:rtype: :interact:TopologySeries
"""
data = reference_data['residue_data']
na = list(data.loc[data['type'] == 'na', 'two-letter'])
na.extend(list(data.loc[data['type'] == 'na', 'three-letter']))
return self['resName'].isin(set(na))
def is_nucleic_acid_backbone(self, backbone=('P', 'O1P', 'O2P', "O5'", "C5'", "1H5'", "2H5'", "C4'", "H4'",
"C3'", "O3'", "H3'", "C2'", "1H2'", "2H2'", "C1'", "H1'", "O4'")):
"""
Quick selector for nucleic acid backbone atoms by first selecting
all nucleic acids using `is_nucleic_acid` followed by selecting the
backbone atoms defined in the `backbone` attribute
The returned pandas Series object can be used in additional data
queries.
:param backbone: backbone atom names
:type backbone: :py:list
:return: boolean series of the same length as the DataFrame
            indicating if a row is a nucleic acid backbone atom.
:rtype: :interact:TopologySeries
"""
return self.is_nucleic_acid() & self['name'].isin(backbone)
def is_ligand(self):
"""
Quick selector for ligand residues identified as those residues not
part of the amino-acid, nucleic-acid, solvent and ion residue/element
groups.
The returned pandas Series object can be used in additional data
queries.
For custom ligand selection use the pandas `isin` selector.
:return: boolean series of the same length as the DataFrame
indicating if a row is a ligand.
:rtype: :interact:TopologySeries
"""
data = reference_data['residue_data']
known_types = list(data.loc[data['type'].isin(('sol', 'ion')), 'two-letter'])
known_types.extend(list(data.loc[data['type'].isin(('sol', 'ion')), 'three-letter']))
known_types.extend(list(reference_data['element_data']['symbol']))
return ~self.is_nucleic_acid() & ~self.is_amino_acid() & ~self['resName'].isin(set(known_types))
def is_ring(self, **kwargs):
"""
Quick selector for rings in the system.
This method provides an accumulated view on all rings in the system.
Use the `find_rings` method to obtain individual isolated rings in a
selection.
Ring type (aromatic, planar) can be further specified using the keyword
arguments accepted by the interact.core.sssr method
:param kwargs: keyword arguments passed along to the `sssr` method
:return: boolean series of the same length as the DataFrame
indicating if a row is part of a ring.
:rtype: :interact:TopologySeries
"""
serials = []
for residue in self.residues():
detected = residue.find_rings(**kwargs)
for ring in detected:
serials.extend(list(ring.index))
return self.index.isin(serials)
def residues(self, extend=False):
"""
Residue iterator
Iterate over all residues in the current TopologyDataFrame by residue
number and yield new TopologyDataFrame with the residue selection.
If 'extend', return all atoms of the given residue.
:param extend: extend to all atoms of the residue in case of subset
:type extend: :py:bool
:return: TopologyDataFrame
:rtype: :interact:TopologyDataFrame
:raises: AttributeError
"""
for residue in self['resSeq'].unique():
residue_frame = self[self['resSeq'] == residue]
if extend:
yield residue_frame.extend(mode='resSeq')
else:
yield residue_frame
def find_rings(self, **kwargs):
"""
Find rings in the structure
Uses SSSR implemented in the `interact.core.sssr` method for finding
the Smallest Subset of Smallest Rings.
:param kwargs: keyword arguments passed along to the sssr method
:return: list of rings as TopologyDataFrames
:rtype: :py:list
"""
return sssr(self, **kwargs)
def find_charged_centers(self, negative=True, positive=True,
neg_atoms=('O.co2', 'S.O2', 'S.3'), pos_atoms=('N.4', 'N.pl3', 'N.ar')):
"""
Find charged centers in the current residue selection
Extend the neighbourhood of the input atoms to include covalently bonded
        atoms that are not of element type 'C' or 'H'.
:param negative: include negative charged centers
:type negative: :py:bool
:param positive: include positive charged centers
:type positive: :py:bool
        :param neg_atoms: sybyl atom types of negatively charged atoms to include
        :type neg_atoms: :py:tuple
        :param pos_atoms: sybyl atom types of positively charged atoms to include
:type pos_atoms: :py:tuple
"""
centers = []
for residue in self.residues(extend=True):
            charge = residue.charge()
if charge <= -1 and negative:
charge_selection = residue[residue['attype'].isin(neg_atoms)]
elif charge >= 1 and positive:
charge_selection = residue[residue['attype'].isin(pos_atoms)]
else:
continue
if not charge_selection.empty:
# Get direct covalent neighbours not of element type C, H
resseq = charge_selection['resSeq'].unique()[0]
parent = charge_selection._parent
n = list(charge_selection.index)
sel = charge_selection
while not sel.empty:
neighbours = sel.neighbours(cutoff=constants['max_covalent_bond_dist'])
neighbours = neighbours[~neighbours['element'].isin(('C', 'H')) & (neighbours['resSeq'] == resseq)]
serials = [i for i in list(neighbours.index) if i not in n]
n.extend(serials)
sel = parent[parent.index.isin(serials)]
centers.append((parent[parent.index.isin(n)], charge))
return centers
| 37.68
| 119
| 0.618655
|
e06dd96e75fcb6625fd86ce3cd96170940051c46
| 6,413
|
py
|
Python
|
avalanche/benchmarks/utils/data_loader.py
|
ryanlindeborg/avalanche
|
32333776e729bad22f369f8923bc32416c9edcf9
|
[
"MIT"
] | 12
|
2021-04-16T15:49:59.000Z
|
2022-02-27T18:04:58.000Z
|
avalanche/benchmarks/utils/data_loader.py
|
ryanlindeborg/avalanche
|
32333776e729bad22f369f8923bc32416c9edcf9
|
[
"MIT"
] | null | null | null |
avalanche/benchmarks/utils/data_loader.py
|
ryanlindeborg/avalanche
|
32333776e729bad22f369f8923bc32416c9edcf9
|
[
"MIT"
] | 2
|
2021-06-22T04:11:52.000Z
|
2021-11-12T03:27:18.000Z
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 01-12-2020 #
# Author(s): Antonio Carta #
# E-mail: contact@continualai.org #
# Website: avalanche.continualai.org #
################################################################################
from torch.utils.data.dataloader import DataLoader
from typing import Dict
class MultiTaskDataLoader:
def __init__(self, data_dict: Dict, oversample_small_tasks: bool = False,
**kwargs):
""" Custom data loader for multi-task training.
The dictionary `data_dict` maps task ids into their
corresponding datasets.
When iterating over the data, it returns sequentially a different
batch for each task (i.e. first a batch for task 1, then task 2,
and so on). If `oversample_small_tasks == True` smaller tasks are
oversampled to match the largest task.
It is suggested to use this loader only if tasks have approximately the
same length.
:param data_dict: a dictionary with task ids as keys and Datasets
as values.
:param oversample_small_tasks: whether smaller tasks should be
oversampled to match the largest one.
:param kwargs: data loader arguments used to instantiate the loader for
each task separately. See pytorch :class:`DataLoader`.
"""
self.data_dict = data_dict
self.loaders_dict: Dict[int, DataLoader] = {}
self.oversample_small_tasks = oversample_small_tasks
for task_id, data in self.data_dict.items():
self.loaders_dict[task_id] = DataLoader(data, **kwargs)
self.max_len = max([len(d) for d in self.loaders_dict.values()])
def __iter__(self):
iter_dataloaders = {}
for t in self.loaders_dict.keys():
iter_dataloaders[t] = iter(self.loaders_dict[t])
max_len = max([len(d) for d in iter_dataloaders.values()])
try:
for it in range(max_len):
# list() is necessary because we may remove keys from the
# dictionary. This would break the generator.
for t in list(self.data_dict.keys()):
t_loader = iter_dataloaders[t]
try:
                        x, y, *_ = next(t_loader)
except StopIteration:
# StopIteration is thrown if dataset ends.
# reinitialize data loader
if self.oversample_small_tasks:
# reinitialize data loader
                            iter_dataloaders[t] = iter(self.loaders_dict[t])
                            self.current_dataloader = iter_dataloaders[t]
                            x, y, *_ = next(iter_dataloaders[t])
else:
del iter_dataloaders[t]
continue
yield t, x, y
except StopIteration:
return
def __len__(self):
return self.max_len * len(self.loaders_dict)
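# Usage sketch (illustrative; the datasets and DataLoader kwargs are placeholders):
#
#   loader = MultiTaskDataLoader({0: task0_dataset, 1: task1_dataset},
#                                oversample_small_tasks=True,
#                                batch_size=32, shuffle=True)
#   for task_id, x, y in loader:
#       ...  # one (x, y) mini-batch per task id, cycling over tasks each step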
class MultiTaskMultiBatchDataLoader:
def __init__(self, data_dict: Dict, oversample_small_tasks: bool = False,
**kwargs):
""" Custom data loader for multi-task training.
The dictionary `data_dict` maps task ids into their
corresponding datasets.
mini-batches emitted by this dataloader are dictionaries with task
labels as keys and mini-batches as values. Therefore, each mini-batch
        contains separate data for each task (i.e. the batch under key 1 belongs to task 1).
If `oversample_small_tasks == True` smaller tasks are oversampled to
match the largest task.
It is suggested to use this loader only if tasks have approximately the
same length.
:param data_dict: a dictionary with task ids as keys and Datasets
as values.
        :param oversample_small_tasks: whether smaller tasks should be
oversampled to match the largest one.
:param kwargs: data loader arguments used to instantiate the loader for
each task separately. See pytorch :class:`DataLoader`.
"""
self.data_dict = data_dict
self.loaders_dict: Dict[int, DataLoader] = {}
self.oversample_small_tasks = oversample_small_tasks
for task_id, data in self.data_dict.items():
self.loaders_dict[task_id] = DataLoader(data, **kwargs)
self.max_len = max([len(d) for d in self.loaders_dict.values()])
def __iter__(self):
iter_dataloaders = {}
for t in self.loaders_dict.keys():
iter_dataloaders[t] = iter(self.loaders_dict[t])
max_len = max([len(d) for d in iter_dataloaders.values()])
try:
for it in range(max_len):
mb_curr = {}
# list() is necessary because we may remove keys from the
# dictionary. This would break the generator.
for t in list(self.data_dict.keys()):
t_loader = iter_dataloaders[t]
try:
x, y, *_ = next(t_loader)
except StopIteration:
# StopIteration is thrown if dataset ends.
if self.oversample_small_tasks:
# reinitialize data loader
                            iter_dataloaders[t] = iter(self.loaders_dict[t])
                            self.current_dataloader = iter_dataloaders[t]
                            x, y, *_ = next(iter_dataloaders[t])
else:
del iter_dataloaders[t]
continue
mb_curr[t] = x, y
yield mb_curr
except StopIteration:
return
def __len__(self):
return self.max_len
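# Usage sketch for the multi-batch variant (illustrative; names are placeholders):
#
#   loader = MultiTaskMultiBatchDataLoader({0: task0_dataset, 1: task1_dataset},
#                                          batch_size=32)
#   for mb in loader:
#       # mb is a dict: {task_id: (x, y)} with one mini-batch per task
#       ...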
| 45.161972
| 80
| 0.52877
|
f5e49e1739f75184ca42fed0eccfc0275e567f4d
| 1,540
|
py
|
Python
|
cifar10/cifar10_pre_process.py
|
Arvind-Ideas2IT/pytorch-pipeline
|
dc9f8a7b8f45a18dc5cc045a692e15eb17f2d99e
|
[
"MIT"
] | null | null | null |
cifar10/cifar10_pre_process.py
|
Arvind-Ideas2IT/pytorch-pipeline
|
dc9f8a7b8f45a18dc5cc045a692e15eb17f2d99e
|
[
"MIT"
] | null | null | null |
cifar10/cifar10_pre_process.py
|
Arvind-Ideas2IT/pytorch-pipeline
|
dc9f8a7b8f45a18dc5cc045a692e15eb17f2d99e
|
[
"MIT"
] | 1
|
2021-02-16T08:48:02.000Z
|
2021-02-16T08:48:02.000Z
|
import sys, argparse, logging
import os
import torch.utils.data
from PIL import Image
import json
import pandas as pd
import numpy as np
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import webdataset as wds
from pathlib import Path
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
import json
import subprocess
output_path = json.loads(sys.argv[2])[0]
trainset = torchvision.datasets.CIFAR10(root="./", train=True, download=True)
testset = torchvision.datasets.CIFAR10(root="./", train=False, download=True)
Path(output_path + "/train").mkdir(parents=True, exist_ok=True)
Path(output_path + "/val").mkdir(parents=True, exist_ok=True)
Path(output_path + "/test").mkdir(parents=True, exist_ok=True)
random_seed = 25
y = trainset.targets
trainset, valset, y_train, y_val = train_test_split(
trainset, y, stratify=y, shuffle=True, test_size=0.2, random_state=random_seed
)
for name in [(trainset, "train"), (valset, "val"), (testset, "test")]:
with wds.ShardWriter(
output_path + "/" + str(name[1]) + "/" + str(name[1]) + "-%d.tar", maxcount=1000
) as sink:
for index, (image, cls) in enumerate(name[0]):
sink.write({"__key__": "%06d" % index, "ppm": image, "cls": cls})
entry_point=["ls", "-R", output_path]
run_code = subprocess.run(entry_point, stdout=subprocess.PIPE)
print(run_code.stdout)
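# The shards written above can later be streamed back with WebDataset, e.g.
# (sketch; "N" is a placeholder for the actual highest shard index, and the
# default decoder is assumed to handle the "ppm" image key):
#
#   ds = wds.WebDataset(output_path + "/train/train-{0..N}.tar")
#   ds = ds.decode("pil").to_tuple("ppm", "cls")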
| 32.765957
| 93
| 0.681818
|
e8e854481b0e934e4271724e1e97b7b382f331bf
| 4,493
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/service/coordinated_read_ft_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 10
|
2021-05-25T17:43:04.000Z
|
2022-03-08T10:46:09.000Z
|
tensorflow/python/data/experimental/kernel_tests/service/coordinated_read_ft_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/data/experimental/kernel_tests/service/coordinated_read_ft_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fault tolerance testst for tf.data service coordinated reads."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class CoordinatedReadFTTest(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(workers_to_add=[1, 3, 10])))
def testAddWorkers(self, workers_to_add):
starting_workers = 3
cluster = data_service_test_base.TestCluster(num_workers=starting_workers)
# Round robin reads can cause slow cluster shutdown.
data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
num_consumers = 7
ds = self.make_coordinated_read_dataset(cluster, num_consumers)
get_next = self.getNext(ds, requires_initialization=True)
results = []
zeros_seen = 0
for _ in range(25):
results.append(self.evaluate(get_next()))
if results[-1] == 0:
zeros_seen += 1
for _ in range(workers_to_add):
cluster.add_worker()
# Read until all new workers have joined.
while zeros_seen < starting_workers + workers_to_add:
results.append(self.evaluate(get_next()))
if results[-1] == 0:
zeros_seen += 1
# Read some more.
for _ in range(25):
results.append(self.evaluate(get_next()))
self.checkCoordinatedReadGroups(results, num_consumers)
@combinations.generate(test_base.eager_only_combinations())
def testRestartWorker(self):
num_workers = 3
# Set a shutdown quiet period to prevent workers from shutting down partway
# through a round.
cluster = data_service_test_base.TestCluster(
num_workers, worker_shutdown_quiet_period_ms=2000)
# Round robin reads can cause slow cluster shutdown.
data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
num_consumers = 5
ds = self.make_coordinated_read_dataset(cluster, num_consumers)
get_next = self.getNext(ds, requires_initialization=True)
results = []
self.read(get_next, results, 20)
cluster.workers[1].stop()
# Check that we can continue to read even with a worker stopped.
self.read(get_next, results, 20)
cluster.workers[1].restart()
# Read until we get results from the restarted worker, then read some more.
while results[-1] != 0:
results.append(self.evaluate(get_next()))
self.read(get_next, results, 20)
self.checkCoordinatedReadGroups(results, num_consumers)
@combinations.generate(test_base.eager_only_combinations())
def testMultiStartStop(self):
num_workers = 3
# Set a shutdown quiet period to prevent workers from shutting down partway
# through a round.
cluster = data_service_test_base.TestCluster(
num_workers, worker_shutdown_quiet_period_ms=2000)
# Round robin reads can cause slow cluster shutdown.
data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
num_consumers = 5
ds = self.make_coordinated_read_dataset(cluster, num_consumers)
get_next = self.getNext(ds, requires_initialization=True)
results = []
self.read(get_next, results, 20)
for i in range(num_workers):
cluster.workers[i].stop()
self.read(get_next, results, 20)
cluster.workers[i].restart()
self.read(get_next, results, 20)
cluster.add_worker()
cluster.restart_dispatcher()
for i in range(num_workers):
cluster.workers[i].stop()
self.read(get_next, results, 20)
self.checkCoordinatedReadGroups(results, num_consumers)
if __name__ == "__main__":
test.main()
| 37.756303
| 104
| 0.718228
|
b4f9e74a03a71d7129afa6dd86d7e72809eee736
| 1,793
|
py
|
Python
|
frappe/templates/pages/integrations/razorpay_checkout.py
|
GitlabABC/frappe
|
be3aaf715c1c978240bfe276bfcbb699c1dc33ba
|
[
"MIT"
] | null | null | null |
frappe/templates/pages/integrations/razorpay_checkout.py
|
GitlabABC/frappe
|
be3aaf715c1c978240bfe276bfcbb699c1dc33ba
|
[
"MIT"
] | null | null | null |
frappe/templates/pages/integrations/razorpay_checkout.py
|
GitlabABC/frappe
|
be3aaf715c1c978240bfe276bfcbb699c1dc33ba
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import json
import frappe
from frappe import _
from frappe.utils import cint, flt
no_cache = 1
expected_keys = (
"amount",
"title",
"description",
"reference_doctype",
"reference_docname",
"payer_name",
"payer_email",
"order_id",
"currency",
)
def get_context(context):
context.no_cache = 1
context.api_key = get_api_key()
try:
doc = frappe.get_doc("Integration Request", frappe.form_dict["token"])
payment_details = json.loads(doc.data)
for key in expected_keys:
context[key] = payment_details[key]
context["token"] = frappe.form_dict["token"]
context["amount"] = flt(context["amount"])
context["subscription_id"] = (
payment_details["subscription_id"] if payment_details.get("subscription_id") else ""
)
except Exception as e:
frappe.redirect_to_message(
_("Invalid Token"),
_("Seems token you are using is invalid!"),
http_status_code=400,
indicator_color="red",
)
frappe.local.flags.redirect_location = frappe.local.response.location
raise frappe.Redirect
def get_api_key():
api_key = frappe.db.get_value("Razorpay Settings", None, "api_key")
if cint(frappe.form_dict.get("use_sandbox")):
api_key = frappe.conf.sandbox_api_key
return api_key
@frappe.whitelist(allow_guest=True)
def make_payment(razorpay_payment_id, options, reference_doctype, reference_docname, token):
data = {}
if isinstance(options, str):
data = json.loads(options)
data.update(
{
"razorpay_payment_id": razorpay_payment_id,
"reference_docname": reference_docname,
"reference_doctype": reference_doctype,
"token": token,
}
)
data = frappe.get_doc("Razorpay Settings").create_request(data)
frappe.db.commit()
return data
| 22.4125
| 92
| 0.731177
|
5c6c04ae0e226dd143d55c85ce2a1026b409607b
| 130
|
py
|
Python
|
utils/__init__.py
|
Kylin9511/CRNet
|
22c9abd61490af46af63d737ca470f0943efa973
|
[
"MIT"
] | 52
|
2019-10-25T17:00:34.000Z
|
2022-03-28T12:36:45.000Z
|
utils/__init__.py
|
WANDERMEN/CRNet
|
45427797c702bd88d7667ab703e9774900340676
|
[
"MIT"
] | 6
|
2019-10-30T03:14:34.000Z
|
2022-03-25T15:20:02.000Z
|
utils/__init__.py
|
WANDERMEN/CRNet
|
45427797c702bd88d7667ab703e9774900340676
|
[
"MIT"
] | 23
|
2019-11-01T02:14:52.000Z
|
2022-01-09T10:37:39.000Z
|
from . import logger
from .logger import log_level, line_seg
from .init import *
from .scheduler import *
from .solver import *
| 16.25
| 39
| 0.753846
|
e406a519c8d982209cc618d6d7ee3cff06d4c268
| 1,665
|
py
|
Python
|
bbsauth.py
|
HenryHu/pybbs
|
cb2a870f3879d5432739d0748a01842660793ba9
|
[
"BSD-2-Clause-FreeBSD"
] | 14
|
2015-01-18T16:27:20.000Z
|
2021-06-20T19:27:32.000Z
|
bbsauth.py
|
HenryHu/pybbs
|
cb2a870f3879d5432739d0748a01842660793ba9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
bbsauth.py
|
HenryHu/pybbs
|
cb2a870f3879d5432739d0748a01842660793ba9
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-04-11T07:08:04.000Z
|
2018-08-02T15:39:27.000Z
|
"""bbsauth -- verifies session token
<http://www.ietf.org/rfc/rfc4616.txt>
Copyright (c) 2009, Coptix, Inc. All rights reserved.
See the LICENSE file for license terms and warranty disclaimer.
"""
from __future__ import absolute_import
from sasl import mechanism as mech, auth
__all__ = ('BBSAuth',)
class BBSAuth(mech.Mechanism):
"""The bbsauth mechanism simply submits the optional authorization
id, the authentication id, and token separated by null
bytes."""
NULL = u'\x00'
def __init__(self, auth):
self.auth = auth
def verify(self, *args):
return self.auth.verify_token(*args)
state = mech.AuthState
## Server
def challenge(self):
return self.state(self.verify_challenge, None, '')
def verify_challenge(self, entity, response):
try:
token = response.decode('utf-8')
except ValueError as exc:
return self.state(False, entity, None)
try:
result = self.verify(token)
if result:
entity = entity or self.auth.username()
return self.state(result, entity, None)
except auth.PasswordError as exc:
return self.state(False, entity, None)
## Client
def respond(self, data):
assert data == ''
auth = self.auth
zid = auth.authorization_id()
cid = auth.username()
response = self.NULL.join((
u'' if (not zid or zid == cid) else zid,
(cid or u''),
(auth.token() or u'')
)).encode('utf-8')
self.authorized = zid or cid
return self.state(None, zid or cid, response)
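# Example of the message produced by respond() (illustrative values only):
#
#   b'authzid\x00username\x00session-token'
#
# i.e. the optional authorization id, the authentication id and the session
# token separated by NUL bytes, mirroring the RFC 4616 PLAIN layout.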
| 25.615385
| 70
| 0.601802
|
60e065fca11b08c0e5f4263776eab5ffbfc1f083
| 9,495
|
py
|
Python
|
RockPhysicsInversionDriver.py
|
aadm/SeReMpy
|
16486812b35d6c0e8d8c9787cd1e7cfe05a5e960
|
[
"MIT"
] | null | null | null |
RockPhysicsInversionDriver.py
|
aadm/SeReMpy
|
16486812b35d6c0e8d8c9787cd1e7cfe05a5e960
|
[
"MIT"
] | null | null | null |
RockPhysicsInversionDriver.py
|
aadm/SeReMpy
|
16486812b35d6c0e8d8c9787cd1e7cfe05a5e960
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 07:24:51 2020
@author: dariograna
Reference: Grana and de Figueiredo, 2021, SeReMpy
"""
#% Rock physics inversion Driver %%
# In this script we apply the Bayesian rock physics inversion to predict
# the petrophysical properties (porosity, clay volume, and water saturation).
# We implement 4 different options:
# Gaussian distribution and linear model
# Gaussian mixture distribution and linear model (Grana, 2016)
# Gaussian mixture distribution and non-linear model (Grana and Della Rossa, 2010)
# Non-parametric distribution and non-linear model (Grana, 2018).
# The linear rock physics model is a multi-linear regression and it is
# estimated from a training dataset.
# In this implementation of the non-linear model we assume that the joint
# distribution of model and data can be estimated from a training dataset
# (generated, for example, using a rock physics model)
import numpy as np
from scipy.linalg import toeplitz
from numpy.linalg import multi_dot
from scipy import stats
from scipy.io import loadmat
import matplotlib.pyplot as plt
from RockPhysics import *
from Inversion import *
#% Available data and parameters
# Load data (well logs)
x = np.loadtxt('Data/data4.dat')
Clay = x[:,0].reshape(-1, 1)
Depth = x[:,1].reshape(-1, 1)
Facies = x[:,2].reshape(-1, 1)
Phi = x[:,3].reshape(-1, 1)
Rho = x[:,4].reshape(-1, 1)
Rhorpm = x[:,5].reshape(-1, 1)
Sw = x[:,6].reshape(-1, 1)
Vp = x[:,7].reshape(-1, 1)
Vprpm = x[:,8].reshape(-1, 1)
Vs = x[:,9].reshape(-1, 1)
Vsrpm = x[:,10].reshape(-1, 1)
Facies = Facies-1
Facies = Facies.astype(int)
# training dataset
mtrain = np.hstack([Phi, Clay, Sw])
nv = mtrain.shape[1]
dtrain = np.hstack([Vprpm, Vsrpm, Rhorpm])
nd = dtrain.shape[1]
nf = max(np.unique(Facies))+1
# domain to evaluate the posterior PDF
phidomain = np.arange(0,0.405,0.005)
cdomain = np.arange(0,0.81,0.01)
swdomain = np.arange(0,1.01,0.01)
P, V, S = np.mgrid[0:0.405:0.005, 0:0.81:0.01, 0:1.01:0.01]
mdomain = np.stack((P,V,S), axis=3)
# measured data (elastic logs)
dcond = np.hstack([Vp, Vs, Rho])
ns = dcond.shape[0]
# matrix associated to the linear rock physics operator
R = np.zeros((nd, nv + 1))
X = np.hstack([mtrain, np.ones(Phi.shape)])
R[0, :] = (np.linalg.lstsq(X,Vprpm,rcond=None)[0]).T
R[1, :] = (np.linalg.lstsq(X,Vsrpm,rcond=None)[0]).T
R[2, :] = (np.linalg.lstsq(X,Rhorpm,rcond=None)[0]).T
# Error
sigmaerr = 10 ** -2 * np.eye(nd)
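# The rows of R define the multi-linear rock physics regression
#   d = R @ [phi, clay, sw, 1]^T
# e.g. Vp ~ R[0,0]*Phi + R[0,1]*Clay + R[0,2]*Sw + R[0,3], with sigmaerr used as
# the (diagonal) covariance of the modelling error.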
#% Gaussian linear case
# prior model
mum = np.mean(mtrain,axis=0)
mum = mum.reshape(1,nv)
sm = np.cov(mtrain.T)
# linearization
G = R[:,0:nv]
datacond = dcond - R[:,-1].T
# inversion
[mupost, sigmapost, Ppost] = RockPhysicsLinGaussInversion(mum, sm, G, mdomain, datacond, sigmaerr)
# posterior mean
Phipost = mupost[:, 0]
Cpost = mupost[:, 1]
Swpost = mupost[:, 2]
Philp = mupost[:, 0] - 1.96 * np.sqrt(sigmapost[0,0])
Clp = mupost[:, 1] - 1.96 * np.sqrt(sigmapost[1,1])
Swlp = mupost[:, 2] - 1.96 * np.sqrt(sigmapost[2,2])
Phiup = mupost[:, 0] + 1.96 * np.sqrt(sigmapost[0,0])
Cup = mupost[:, 1] + 1.96 * np.sqrt(sigmapost[1,1])
Swup = mupost[:, 2] + 1.96 * np.sqrt(sigmapost[2,2])
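# The +/- 1.96 * sqrt(variance) terms give the 95% confidence bounds of the
# Gaussian posterior marginals.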
# marginal posterior distributions
Ppostphi = np.zeros((ns, len(phidomain)))
Ppostclay = np.zeros((ns, len(cdomain)))
Ppostsw = np.zeros((ns, len(swdomain)))
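# Ppost has shape (ns, len(phidomain), len(cdomain), len(swdomain)); the marginal
# for each property is obtained by summing out the other two axes and normalizing.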
for i in range(ns):
Ppostphi[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=1)
Ppostclay[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=0)
Ppostsw[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=1)), axis=0)
Ppostphi[i,:]= Ppostphi[i,:] / sum(Ppostphi[i,:])
Ppostclay[i,:]= Ppostclay[i,:] / sum(Ppostclay[i,:])
Ppostsw[i,:]= Ppostsw[i,:] / sum(Ppostsw[i,:])
# plots
plt.figure(1)
plt.subplot(131)
plt.pcolor(phidomain, Depth, Ppostphi)
plt.colorbar()
plt.plot(Phi, Depth, 'k')
plt.plot(Phipost, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Porosity')
plt.xlim([0, 0.4])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(132)
plt.pcolor(cdomain, Depth, Ppostclay)
plt.colorbar()
plt.plot(Clay, Depth, 'k')
plt.plot(Cpost, Depth, 'r')
plt.xlabel('Clay volume')
plt.xlim([0, 0.8])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(133)
plt.pcolor(swdomain, Depth, Ppostsw)
plt.colorbar()
plt.plot(Sw, Depth, 'k')
plt.plot(Swpost, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Water saturation')
plt.xlim([0, 1])
plt.ylim([max(Depth), min(Depth)])
plt.suptitle('Rock physics linear inversion')
plt.show()
#% Gaussian mixture linear case
# prior model
pf = np.zeros((nf,1))
mum = np.zeros((nf,nv))
sm = np.zeros((nv,nv,nf))
for k in range(nf):
pf[k,0] = np.sum(Facies == k) / ns
mum[k,:] = np.mean(mtrain[Facies[:,0] == k,:],axis=0)
sm[:,:,k] = np.cov(mtrain[Facies[:,0] == k,:].T)
mupost, sigmapost, pfpost, Ppost = RockPhysicsLinGaussMixInversion(pf, mum, sm, G, mdomain, datacond, sigmaerr)
# marginal posterior distributions
Ppostphi = np.zeros((ns, len(phidomain)))
Ppostclay = np.zeros((ns, len(cdomain)))
Ppostsw = np.zeros((ns, len(swdomain)))
Phimap = np.zeros((ns, 1))
Cmap = np.zeros((ns, 1))
Swmap = np.zeros((ns, 1))
for i in range(ns):
Ppostphi[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=1)
Ppostclay[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=0)
Ppostsw[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=1)), axis=0)
Ppostphi[i,:]= Ppostphi[i,:] / sum(Ppostphi[i,:])
Ppostclay[i,:]= Ppostclay[i,:] / sum(Ppostclay[i,:])
Ppostsw[i,:]= Ppostsw[i,:] / sum(Ppostsw[i,:])
Phimapind = np.argmax(Ppostphi[i,:])
Cmapind = np.argmax(Ppostclay[i,:])
Swmapind = np.argmax(Ppostsw[i,:])
Phimap[i,0]= phidomain[Phimapind]
Cmap[i,0]= cdomain[Cmapind]
Swmap[i,0]= swdomain[Swmapind]
# plots
plt.figure(2)
plt.subplot(131)
plt.pcolor(phidomain, Depth, Ppostphi)
plt.colorbar()
plt.plot(Phi, Depth, 'k')
plt.plot(Phimap, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Porosity')
plt.xlim([0, 0.4])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(132)
plt.pcolor(cdomain, Depth, Ppostclay)
plt.colorbar()
plt.plot(Clay, Depth, 'k')
plt.plot(Cmap, Depth, 'r')
plt.xlabel('Clay volume')
plt.xlim([0, 0.8])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(133)
plt.pcolor(swdomain, Depth, Ppostsw)
plt.colorbar()
plt.plot(Sw, Depth, 'k')
plt.plot(Swmap, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Water saturation')
plt.xlim([0, 1])
plt.ylim([max(Depth), min(Depth)])
plt.suptitle('Rock physics linear inversion - Gaussian Mix prior')
plt.show()
#% Gaussian mixture case
# The joint Gaussian mixture distribution is estimated from the training dataset
mupost, sigmapost, pfpost, Ppost = RockPhysicsGaussMixInversion(Facies, mtrain, dtrain, mdomain, dcond, sigmaerr)
# The joint Gaussian distribution can also be used
# mupost, sigmapost, Ppost = RockPhysicsGaussInversion(mtrain, dtrain, mdomain, dcond, sigmaerr);
# marginal posterior distributions
Ppostphi = np.zeros((ns, len(phidomain)))
Ppostclay = np.zeros((ns, len(cdomain)))
Ppostsw = np.zeros((ns, len(swdomain)))
Phimap = np.zeros((ns, 1))
Cmap = np.zeros((ns, 1))
Swmap = np.zeros((ns, 1))
for i in range(ns):
Ppostphi[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=1)
Ppostclay[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=2)), axis=0)
Ppostsw[i,:]= np.sum(np.squeeze(np.sum(np.squeeze(Ppost[i,:,:,:]), axis=1)), axis=0)
Ppostphi[i,:]= Ppostphi[i,:] / sum(Ppostphi[i,:])
Ppostclay[i,:]= Ppostclay[i,:] / sum(Ppostclay[i,:])
Ppostsw[i,:]= Ppostsw[i,:] / sum(Ppostsw[i,:])
Phimapind = np.argmax(Ppostphi[i,:])
Cmapind = np.argmax(Ppostclay[i,:])
Swmapind = np.argmax(Ppostsw[i,:])
Phimap[i,0]= phidomain[Phimapind]
Cmap[i,0]= cdomain[Cmapind]
Swmap[i,0]= swdomain[Swmapind]
# plots
plt.figure(3)
plt.subplot(131)
plt.pcolor(phidomain, Depth, Ppostphi)
plt.colorbar()
plt.plot(Phi, Depth, 'k')
plt.plot(Phimap, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Porosity')
plt.xlim([0, 0.4])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(132)
plt.pcolor(cdomain, Depth, Ppostclay)
plt.colorbar()
plt.plot(Clay, Depth, 'k')
plt.plot(Cmap, Depth, 'r')
plt.xlabel('Clay volume')
plt.xlim([0, 0.8])
plt.ylim([max(Depth), min(Depth)])
plt.subplot(133)
plt.pcolor(swdomain, Depth, Ppostsw)
plt.colorbar()
plt.plot(Sw, Depth, 'k')
plt.plot(Swmap, Depth, 'r')
plt.ylabel('Depth (m)')
plt.xlabel('Water saturation')
plt.xlim([0, 1])
plt.ylim([max(Depth), min(Depth)])
plt.suptitle('Rock physics inversion - Gaussian Mix joint distribution')
plt.show()
# % Non-parametric case (Kernel density estimation)
## Inefficient implementation ##
# # phidomain = np.arange(0,0.425,0.025)
# # cdomain = np.arange(0,0.85,0.05)
# # swdomain = np.arange(0,1.05,0.05)
# P, V, S, VP, VS, R= np.mgrid[0:0.42:0.02, 0:0.85:0.05, 0:1.05:0.05, min(Vp):max(Vp):(max(Vp)-min(Vp))/30, min(Vs):max(Vs):(max(Vs)-min(Vs))/30, min(Rho):max(Rho):(max(Rho)-min(Rho))/30]
# jointdomain = np.vstack([P.ravel(), V.ravel(), S.ravel(), VP.ravel(), VS.ravel(), R.ravel()])
# datadomain = np.vstack([VP[0,0,0,:,0,0], VS[0,0,0,0,:,0], R[0,0,0,0,0,:]])
# phidomain=P[:,0,0,0,0,0]
# cdomain=V[0,:,0,0,0,0]
# swdomain=S[0,0,:,0,0,0]
# jointdim = P.shape
# mdim = P[:,:,:,0,0,0].shape
# # # inversion
# Ppost = RockPhysicsKDEInversion(mtrain, dtrain, jointdomain, datadomain, dcond, jointdim, mdim)
| 32.406143
| 187
| 0.661295
|
ff793e04829b0530c5aaa13414fc7efa98c78d4d
| 603
|
py
|
Python
|
ganexa_event_manager/users/migrations/0002_auto_20220315_0511.py
|
luiscberrocal/ganexa_event_manager
|
c74d2b687c713cc9c90e8bf33dcf48a4eecf0000
|
[
"MIT"
] | null | null | null |
ganexa_event_manager/users/migrations/0002_auto_20220315_0511.py
|
luiscberrocal/ganexa_event_manager
|
c74d2b687c713cc9c90e8bf33dcf48a4eecf0000
|
[
"MIT"
] | 6
|
2022-03-18T10:41:25.000Z
|
2022-03-31T10:38:16.000Z
|
ganexa_event_manager/users/migrations/0002_auto_20220315_0511.py
|
luiscberrocal/ganexa_event_manager
|
c74d2b687c713cc9c90e8bf33dcf48a4eecf0000
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-03-15 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=80, verbose_name='First name'),
),
migrations.AddField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=80, verbose_name='Last name'),
),
]
| 25.125
| 89
| 0.588723
|
7582d7e4e7ccdf08edeacffb8ac393bed5aa65e1
| 16,372
|
py
|
Python
|
lib/exabgp/bgp/message/update/nlri/flow.py
|
lochiiconnectivity/exabgp
|
2cb8a99af89969ff4b0b5561de6168a18179b704
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/bgp/message/update/nlri/flow.py
|
lochiiconnectivity/exabgp
|
2cb8a99af89969ff4b0b5561de6168a18179b704
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/bgp/message/update/nlri/flow.py
|
lochiiconnectivity/exabgp
|
2cb8a99af89969ff4b0b5561de6168a18179b704
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
"""
flow.py
Created by Thomas Mangin on 2010-01-14.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
# Do not use __slots__ here, we never create enough of them to be worth it
# And it really breaks complex inheritance
from struct import pack
from struct import unpack
from exabgp.protocol.ip import IP
from exabgp.protocol.ip import NoIP
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message import OUT
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.nlri.cidr import CIDR
from exabgp.protocol import Protocol
from exabgp.protocol import NamedProtocol
from exabgp.protocol.ip.icmp import ICMPType
from exabgp.protocol.ip.icmp import ICMPCode
from exabgp.protocol.ip.icmp import NamedICMPType
from exabgp.protocol.ip.icmp import NamedICMPCode
from exabgp.protocol.ip.fragment import Fragment
from exabgp.protocol.ip.fragment import NamedFragment
from exabgp.protocol.ip.tcp.flag import TCPFlag
from exabgp.protocol.ip.tcp.flag import NamedTCPFlag
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.update.nlri.qualifier.rd import RouteDistinguisher
# =================================================================== Flow Components
class IComponent (object):
# all have ID
# should have an interface for serialisation and put it here
pass
class CommonOperator (object):
	# power (2,x) is the same as 1 << x which is what the RFC says the len is
power = {0:1, 1:2, 2:4, 3:8,}
rewop = {1:0, 2:1, 4:2, 8:3,}
len_position = 0x30
EOL = 0x80 # 0b10000000
AND = 0x40 # 0b01000000
LEN = 0x30 # 0b00110000
NOP = 0x00
OPERATOR = 0xFF ^ (EOL | LEN)
@staticmethod
def eol (data):
return data & CommonOperator.EOL
@staticmethod
def operator (data):
return data & CommonOperator.OPERATOR
@staticmethod
def length (data):
return 1 << ((data & CommonOperator.LEN) >> 4)
class NumericOperator (CommonOperator):
# reserved = 0x08 # 0b00001000
LT = 0x04 # 0b00000100
GT = 0x02 # 0b00000010
EQ = 0x01 # 0b00000001
class BinaryOperator (CommonOperator):
# reserved = 0x0C # 0b00001100
NOT = 0x02 # 0b00000010
MATCH = 0x01 # 0b00000001
def _len_to_bit (value):
return NumericOperator.rewop[value] << 4
def _bit_to_len (value):
return NumericOperator.power[(value & CommonOperator.len_position) >> 4]
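# Worked example (illustrative, not part of the original module):
#   _len_to_bit(4)    -> 0x20  (rewop[4] == 2, shifted into bits 4-5)
#   _bit_to_len(0x20) -> 4     (power[(0x20 & 0x30) >> 4] == power[2] == 4)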
def _number (string):
value = 0
for c in string:
value = (value << 8) + ord(c)
return value
# def short (value):
# return (ord(value[0]) << 8) + ord(value[1])
# Interface ..................
class IPv4 (object):
afi = AFI.ipv4
class IPv6 (object):
afi = AFI.ipv6
class IPrefix (object):
pass
# Protocol
class IPrefix4 (IPrefix,IComponent,IPv4):
	# not used, just present for simplifying the nlri generation
operations = 0x0
# NAME
def __init__ (self,raw,netmask):
self.nlri = CIDR(raw,netmask)
def pack (self):
raw = self.nlri.pack()
return "%s%s" % (chr(self.ID),raw)
def __str__ (self):
return str(self.nlri)
class IPrefix6 (IPrefix,IComponent,IPv6):
	# not used, just present for simplifying the nlri generation
operations = 0x0
# NAME
def __init__ (self,raw,netmask,offset):
self.nlri = CIDR(raw,netmask)
self.offset = offset
def pack (self):
raw = self.nlri.packed_ip()
return "%s%s%s%s" % (chr(self.ID),chr(self.nlri.mask),chr(self.offset),raw)
def __str__ (self):
return "%s/%s" % (self.nlri,self.offset)
class IOperation (IComponent):
	# need to implement encode which encodes the value of the operator
def __init__ (self,operations,value):
self.operations = operations
self.value = value
self.first = None # handled by pack/str
def pack (self):
l,v = self.encode(self.value)
op = self.operations | _len_to_bit(l)
return "%s%s" % (chr(op),v)
def encode (self,value):
		raise NotImplementedError('this method must be implemented by subclasses')
def decode (self,value):
		raise NotImplementedError('this method must be implemented by subclasses')
#class IOperationIPv4 (IOperation):
# def encode (self,value):
# return 4, socket.pton(socket.AF_INET,value)
class IOperationByte (IOperation):
def encode (self,value):
return 1,chr(value)
def decode (self,bgp):
return ord(bgp[0]),bgp[1:]
class IOperationByteShort (IOperation):
def encode (self,value):
if value < (1<<8):
return 1,chr(value)
return 2,pack('!H',value)
def decode (self,bgp):
return unpack('!H',bgp[:2])[0],bgp[2:]
# String representation for Numeric and Binary Tests
class NumericString (object):
_string = {
NumericOperator.LT : '<',
NumericOperator.GT : '>',
NumericOperator.EQ : '=',
NumericOperator.LT|NumericOperator.EQ : '<=',
NumericOperator.GT|NumericOperator.EQ : '>=',
NumericOperator.AND|NumericOperator.LT : '&<',
NumericOperator.AND|NumericOperator.GT : '&>',
NumericOperator.AND|NumericOperator.EQ : '&=',
NumericOperator.AND|NumericOperator.LT|NumericOperator.EQ : '&<=',
NumericOperator.AND|NumericOperator.GT|NumericOperator.EQ : '&>=',
}
def __str__ (self):
return "%s%s" % (self._string[self.operations & (CommonOperator.EOL ^ 0xFF)], self.value)
class BinaryString (object):
_string = {
BinaryOperator.NOT : '!',
BinaryOperator.MATCH : '=',
BinaryOperator.AND|BinaryOperator.NOT : '&!',
BinaryOperator.AND|BinaryOperator.MATCH : '&=',
}
def __str__ (self):
return "%s%s" % (self._string[self.operations & (CommonOperator.EOL ^ 0xFF)], self.value)
# Components ..............................
def converter (function,klass=int):
def _integer (value):
try:
return klass(value)
except ValueError:
return function(value)
return _integer
def decoder (function,klass=int):
def _inner (value):
return klass(function(value))
return _inner
def PacketLength (data):
_str_bad_length = "cloudflare already found that invalid max-packet length for for you .."
number = int(data)
if number > 0xFFFF:
raise ValueError(_str_bad_length)
return number
def PortValue (data):
_str_bad_port = "you tried to set an invalid port number .."
number = int(data)
if number < 0 or number > 0xFFFF:
raise ValueError(_str_bad_port)
return number
def DSCPValue (data):
_str_bad_dscp = "you tried to filter a flow using an invalid dscp for a component .."
number = int(data)
if number < 0 or number > 0xFFFF:
raise ValueError(_str_bad_dscp)
return number
def ClassValue (data):
_str_bad_class = "you tried to filter a flow using an invalid traffic class for a component .."
number = int(data)
if number < 0 or number > 0xFFFF:
raise ValueError(_str_bad_class)
return number
def LabelValue (data):
_str_bad_label = "you tried to filter a flow using an invalid traffic label for a component .."
number = int(data)
if number < 0 or number > 0xFFFFF: # 20 bits 5 bytes
raise ValueError(_str_bad_label)
return number
# Protocol Shared
class FlowDestination (object):
ID = 0x01
NAME = 'destination'
class FlowSource (object):
ID = 0x02
NAME = 'source'
# Prefix
class Flow4Destination (IPrefix4,FlowDestination):
pass
# Prefix
class Flow4Source (IPrefix4,FlowSource):
pass
# Prefix
class Flow6Destination (IPrefix6,FlowDestination):
pass
# Prefix
class Flow6Source (IPrefix6,FlowSource):
pass
class FlowIPProtocol (IOperationByte,NumericString,IPv4):
ID = 0x03
NAME = 'protocol'
converter = staticmethod(converter(NamedProtocol,Protocol))
decoder = staticmethod(decoder(ord,Protocol))
class FlowNextHeader (IOperationByte,NumericString,IPv6):
ID = 0x03
NAME = 'next-header'
converter = staticmethod(converter(NamedProtocol,Protocol))
decoder = staticmethod(decoder(ord,Protocol))
class FlowAnyPort (IOperationByteShort,NumericString,IPv4,IPv6):
ID = 0x04
NAME = 'port'
converter = staticmethod(converter(PortValue))
decoder = staticmethod(_number)
class FlowDestinationPort (IOperationByteShort,NumericString,IPv4,IPv6):
ID = 0x05
NAME = 'destination-port'
converter = staticmethod(converter(PortValue))
decoder = staticmethod(_number)
class FlowSourcePort (IOperationByteShort,NumericString,IPv4,IPv6):
ID = 0x06
NAME = 'source-port'
converter = staticmethod(converter(PortValue))
decoder = staticmethod(_number)
class FlowICMPType (IOperationByte,BinaryString,IPv4,IPv6):
ID = 0x07
NAME = 'icmp-type'
converter = staticmethod(converter(NamedICMPType))
decoder = staticmethod(decoder(_number,ICMPType))
class FlowICMPCode (IOperationByte,BinaryString,IPv4,IPv6):
ID = 0x08
NAME = 'icmp-code'
converter = staticmethod(converter(NamedICMPCode))
decoder = staticmethod(decoder(_number,ICMPCode))
class FlowTCPFlag (IOperationByte,BinaryString,IPv4,IPv6):
ID = 0x09
NAME = 'tcp-flags'
converter = staticmethod(converter(NamedTCPFlag))
decoder = staticmethod(decoder(ord,TCPFlag))
class FlowPacketLength (IOperationByteShort,NumericString,IPv4,IPv6):
ID = 0x0A
NAME = 'packet-length'
converter = staticmethod(converter(PacketLength))
decoder = staticmethod(_number)
# RFC2474
class FlowDSCP (IOperationByteShort,NumericString,IPv4):
ID = 0x0B
NAME = 'dscp'
converter = staticmethod(converter(DSCPValue))
decoder = staticmethod(_number)
# RFC2460
class FlowTrafficClass (IOperationByte,NumericString,IPv6):
ID = 0x0B
NAME = 'traffic-class'
converter = staticmethod(converter(ClassValue))
decoder = staticmethod(_number)
# BinaryOperator
class FlowFragment (IOperationByteShort,NumericString,IPv4):
ID = 0x0C
NAME = 'fragment'
converter = staticmethod(converter(NamedFragment))
decoder = staticmethod(decoder(ord,Fragment))
# draft-raszuk-idr-flow-spec-v6-01
class FlowFlowLabel (IOperationByteShort,NumericString,IPv6):
ID = 0x0D
NAME = 'flow-label'
converter = staticmethod(converter(LabelValue))
decoder = staticmethod(_number)
# ..........................................................
decode = {AFI.ipv4: {}, AFI.ipv6: {}}
factory = {AFI.ipv4: {}, AFI.ipv6: {}}
for content in dir():
klass = globals().get(content,None)
if not isinstance(klass,type(IComponent)):
continue
if not issubclass(klass,IComponent):
continue
if issubclass(klass,IPv4):
afi = AFI.ipv4
elif issubclass(klass,IPv6):
afi = AFI.ipv6
else:
continue
ID = getattr(klass,'ID',None)
if not ID:
continue
factory[afi][ID] = klass
name = getattr(klass,'NAME')
if issubclass(klass, IOperation):
if issubclass(klass, BinaryString):
decode[afi][ID] = 'binary'
elif issubclass(klass, NumericString):
decode[afi][ID] = 'numeric'
else:
raise RuntimeError('invalid class defined (string)')
elif issubclass(klass, IPrefix):
decode[afi][ID] = 'prefix'
else:
		raise RuntimeError('invalid class defined (type)')
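# Added note (assuming only the class definitions above): after this scan,
# factory[AFI.ipv4][0x03] is FlowIPProtocol and decode[AFI.ipv4][0x03] == 'numeric',
# while prefix components such as Flow4Destination (ID 0x01) register as 'prefix'.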
# ..........................................................
def _unique ():
value = 0
while True:
yield value
value += 1
unique = _unique()
class Flow (NLRI):
def __init__ (self,afi=AFI.ipv4,safi=SAFI.flow_ip,nexthop=None,rd=None):
NLRI.__init__(self,afi,safi)
self.rules = {}
self.action = OUT.announce
self.nexthop = IP.unpack(nexthop) if nexthop else NoIP
self.rd = rd
self.unique = unique.next()
def __len__ (self):
return len(self.pack())
def add (self,rule):
ID = rule.ID
if ID in (FlowDestination.ID,FlowSource.ID):
if ID in self.rules:
return False
if ID == FlowDestination.ID:
pair = self.rules.get(FlowSource.ID,[])
else:
pair = self.rules.get(FlowDestination.ID,[])
if pair:
if rule.afi != pair[0].afi:
return False
self.rules.setdefault(ID,[]).append(rule)
return True
# The API requires addpath, but it is irrelevant here.
def pack (self,addpath=None):
ordered_rules = []
		# the order is an RFC requirement
for ID in sorted(self.rules.keys()):
rules = self.rules[ID]
# for each component get all the operation to do
			# the format used does not prevent two opposing rules, meaning that no packet can ever match
for rule in rules:
rule.operations &= (CommonOperator.EOL ^ 0xFF)
rules[-1].operations |= CommonOperator.EOL
# and add it to the last rule
if ID not in (FlowDestination.ID,FlowSource.ID):
ordered_rules.append(chr(ID))
ordered_rules.append(''.join(rule.pack() for rule in rules))
components = ''.join(ordered_rules)
if self.safi == SAFI.flow_vpn:
components = self.rd.pack() + components
l = len(components)
if l < 0xF0:
data = "%s%s" % (chr(l),components)
elif l < 0x0FFF:
data = "%s%s" % (pack('!H',l | 0xF000),components)
else:
raise Notify(3,0,"rule too big for NLRI - how to handle this - does this work ?")
# data = "%s" % chr(0)
return data
def extensive (self):
string = []
for index in sorted(self.rules):
rules = self.rules[index]
s = []
for idx,rule in enumerate(rules):
# only add ' ' after the first element
if idx and not rule.operations & NumericOperator.AND:
s.append(' ')
s.append(rule)
string.append(' %s %s' % (rules[0].NAME,''.join(str(_) for _ in s)))
nexthop = ' next-hop %s' % self.nexthop if self.nexthop is not NoIP else ''
rd = str(self.rd) if self.rd else ''
return 'flow' + rd + ''.join(string) + nexthop
def __str__ (self):
return self.extensive()
def _json (self):
string = []
for index in sorted(self.rules):
rules = self.rules[index]
s = []
for idx,rule in enumerate(rules):
# only add ' ' after the first element
if idx and not rule.operations & NumericOperator.AND:
s.append(', ')
s.append('"%s"' % rule)
string.append(' "%s": [ %s ]' % (rules[0].NAME,''.join(str(_) for _ in s)))
nexthop = ', "next-hop": "%s"' % self.nexthop if self.nexthop is not NoIP else ''
rd = ', %s' % self.rd.json() if self.rd else ''
compatibility = ', "string": "%s"' % self.extensive()
return '{' + rd + ','.join(string) + nexthop + compatibility +' }'
def json (self):
# this is a stop gap so flow route parsing does not crash exabgp
# delete unique when this is fixed
return '"flow-%d": %s' % (self.unique,self._json())
def index (self):
return self.pack()
@classmethod
def unpack (cls,afi,safi,bgp,has_multiple_path,nexthop,action):
total = len(bgp)
length,bgp = ord(bgp[0]),bgp[1:]
if length & 0xF0 == 0xF0: # bigger than 240
extra,bgp = ord(bgp[0]),bgp[1:]
length = ((length & 0x0F) << 16) + extra
if length > len(bgp):
			raise Notify(3,10,'invalid length at the start of the flow')
bgp = bgp[:length]
nlri = Flow(afi,safi,nexthop)
nlri.action = action
if safi == SAFI.flow_vpn:
nlri.rd = RouteDistinguisher(bgp[:8])
bgp = bgp[8:]
seen = []
while bgp:
what,bgp = ord(bgp[0]),bgp[1:]
if what not in decode.get(afi,{}):
raise Notify(3,10,'unknown flowspec component received for address family %d' % what)
seen.append(what)
if sorted(seen) != seen:
raise Notify(3,10,'components are not sent in the right order %s' % seen)
decoder = decode[afi][what]
klass = factory[afi][what]
if decoder == 'prefix':
if afi == AFI.ipv4:
_,rd,_,mask,size,prefix,left = NLRI._nlri(afi,safi,bgp,action,False)
adding = klass(prefix,mask)
if not nlri.add(adding):
raise Notify(3,10,'components are incompatible (two sources, two destinations, mix ipv4/ipv6) %s' % seen)
# logger.parser(LazyFormat("added flow %s (%s) payload " % (klass.NAME,adding),od,bgp[:-len(left)]))
bgp = left
else:
byte,bgp = bgp[1],bgp[0]+bgp[2:]
offset = ord(byte)
_,rd,_,mask,size,prefix,left = NLRI._nlri(afi,safi,bgp,action,False)
adding = klass(prefix,mask,offset)
if not nlri.add(adding):
raise Notify(3,10,'components are incompatible (two sources, two destinations, mix ipv4/ipv6) %s' % seen)
# logger.parser(LazyFormat("added flow %s (%s) payload " % (klass.NAME,adding),od,bgp[:-len(left)]))
bgp = left
else:
end = False
while not end:
byte,bgp = ord(bgp[0]),bgp[1:]
end = CommonOperator.eol(byte)
operator = CommonOperator.operator(byte)
length = CommonOperator.length(byte)
value,bgp = bgp[:length],bgp[length:]
adding = klass.decoder(value)
nlri.add(klass(operator,adding))
# logger.parser(LazyFormat("added flow %s (%s) operator %d len %d payload " % (klass.NAME,adding,byte,length),od,value))
return total-len(bgp),nlri
for safi in (SAFI.flow_ip,SAFI.flow_vpn):
for afi in (AFI.ipv4, AFI.ipv6):
Flow.register_nlri(afi,safi)
| 27.986325
| 125
| 0.689836
|
804c5d561600d69d59b80e5b2dd0b0e4b67416ba
| 2,878
|
py
|
Python
|
fidesctl/src/fidesapi/main.py
|
Tannahooks/fides
|
d4e1b3bdf3490a31ae5a7876b65ce527884d25d8
|
[
"Apache-2.0"
] | null | null | null |
fidesctl/src/fidesapi/main.py
|
Tannahooks/fides
|
d4e1b3bdf3490a31ae5a7876b65ce527884d25d8
|
[
"Apache-2.0"
] | null | null | null |
fidesctl/src/fidesapi/main.py
|
Tannahooks/fides
|
d4e1b3bdf3490a31ae5a7876b65ce527884d25d8
|
[
"Apache-2.0"
] | null | null | null |
"""
Contains the code that sets up the API.
"""
from datetime import datetime
from enum import Enum
from logging import WARNING
from typing import Callable, Dict
from fastapi import FastAPI, Request, Response
from loguru import logger as log
from uvicorn import Config, Server
from fidesapi import crud, database, db_session, view, visualize
from fidesapi.logger import setup as setup_logging
from fidesctl.core.config import FidesctlConfig, get_config
app = FastAPI(title="fidesctl")
CONFIG: FidesctlConfig = get_config()
def configure_routes() -> None:
"Include all of the routers not defined in this module."
routers = crud.routers + visualize.routers
for router in routers:
log.debug(f'Adding router to fidesctl: {" ".join(router.tags)}')
app.include_router(router)
app.include_router(view.router)
# Configure the routes here so we can generate the openapi json file
configure_routes()
def configure_db(database_url: str) -> None:
"Set up the db to be used by the app."
database.create_db_if_not_exists(database_url)
db_session.global_init(database_url)
database.init_db(database_url)
@app.on_event("startup")
def setup_server() -> None:
"Run all of the required setup steps for the webserver."
setup_logging(
CONFIG.api.log_level,
serialize=CONFIG.api.log_serialization,
desination=CONFIG.api.log_destination,
)
configure_db(CONFIG.api.database_url)
@app.middleware("http")
async def log_request(request: Request, call_next: Callable) -> Response:
"Log basic information about every request handled by the server."
start = datetime.now()
response = await call_next(request)
handler_time = datetime.now() - start
log.bind(
method=request.method,
status_code=response.status_code,
handler_time=f"{handler_time.microseconds * 0.001}ms",
path=request.url.path,
).info("Request received")
return response
@app.get("/health", tags=["Health"])
async def health() -> Dict:
"Confirm that the API is running and healthy."
return {"data": {"message": "Fidesctl API service is healthy!"}}
class DBActions(str, Enum):
"The available path parameters for the `/admin/db/{action}` endpoint."
init = "init"
reset = "reset"
@app.post("/admin/db/{action}", tags=["Admin"])
async def db_action(action: DBActions) -> Dict:
"""
Initiate one of the enumerated DBActions.
"""
action_text = "initialized"
if action == DBActions.reset:
database.reset_db(CONFIG.api.database_url)
action_text = DBActions.reset
configure_db(CONFIG.api.database_url)
return {"data": {"message": f"Fidesctl database {action_text}"}}
def start_webserver() -> None:
"Run the webserver."
server = Server(Config(app, host="0.0.0.0", port=8080, log_level=WARNING))
server.run()
| 29.367347
| 78
| 0.706393
|
398811e51d14355898a6134e31708bc98e1715ff
| 2,155
|
py
|
Python
|
src/classes/Property.py
|
PayParty/dbtools
|
086d4df9072e81befd4257540ab7d3034c44b721
|
[
"MIT"
] | null | null | null |
src/classes/Property.py
|
PayParty/dbtools
|
086d4df9072e81befd4257540ab7d3034c44b721
|
[
"MIT"
] | null | null | null |
src/classes/Property.py
|
PayParty/dbtools
|
086d4df9072e81befd4257540ab7d3034c44b721
|
[
"MIT"
] | null | null | null |
from bson import ObjectId
class Property:
# Property
#
# Represents a simple property contained in a model.
#
def __init__(self, from_plain=None, **kwargs):
# Constructor
#
# Properties
# _class: string with object class
# name: string with property name
# type: string with property type
# optional: whether or not the property is optional (bool)
#
if from_plain:
self.__class = 'Property'
self.name = from_plain.get('name', '')
self.type = from_plain.get('type', 'None')
self.optional = from_plain.get('optional', False)
else:
self.__class = 'Property'
self.name = kwargs.get('name', '')
self.type = kwargs.get('type', 'None')
self.optional = kwargs.get('optional', False)
def __repr__(self):
return (
'Property object \'{name}\' of type {type}.'.format(name=self.name, type=self.type)
)
def analyze(self, document_property):
types = {
'String': [str],
'Number': [int, float],
'Boolean': [bool],
'ObjectID': [ObjectId],
'Any': [str, int, float, bool, ObjectId, dict, list]
}
        if document_property is None:
if self.optional:
return None
else:
return 'missing property'
else:
if type(document_property) in types[self.type]:
return None
else:
return 'incorrect property type'
def to_plain(self):
# to_plain
#
# Returns a plain python object representing the Property object
#
return {
'__class': self.__class,
'name': self.name,
'type': self.type,
'optional': self.optional
}
@property
def name(self):
return self.__name
@name.setter
def name(self, new_name):
if isinstance(new_name, str):
self.__name = new_name
@property
def type(self):
return self.__type
@type.setter
def type(self, new_type):
if isinstance(new_type, str):
self.__type = new_type
@property
def optional(self):
return self.__optional
@optional.setter
def optional(self, new_optional):
if isinstance(new_optional, bool):
self.__optional = new_optional
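# Usage sketch (illustrative, not part of the original module):
#   prop = Property(name='age', type='Number')
#   prop.analyze(42)     # -> None (valid)
#   prop.analyze('42')   # -> 'incorrect property type'
#   prop.analyze(None)   # -> 'missing property' (optional defaults to False)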
| 22.447917
| 89
| 0.620882
|
41092111f70e26e08ed312e033de3e34d7e1fcce
| 217
|
py
|
Python
|
tests/integration/testdata/logs/python-apigw-sfn/apigw-function/app.py
|
praneetap/aws-sam-cli
|
2a713566c8de72a68eb8954584674a61a2d807ac
|
[
"Apache-2.0"
] | 2,285
|
2017-08-11T16:57:31.000Z
|
2018-05-08T20:38:25.000Z
|
tests/integration/testdata/logs/python-apigw-sfn/apigw-function/app.py
|
praneetap/aws-sam-cli
|
2a713566c8de72a68eb8954584674a61a2d807ac
|
[
"Apache-2.0"
] | 314
|
2017-08-11T17:29:27.000Z
|
2018-05-08T20:51:47.000Z
|
tests/integration/testdata/logs/python-apigw-sfn/apigw-function/app.py
|
praneetap/aws-sam-cli
|
2a713566c8de72a68eb8954584674a61a2d807ac
|
[
"Apache-2.0"
] | 284
|
2017-08-11T17:35:48.000Z
|
2018-05-08T20:15:59.000Z
|
def handler(event, context):
print("Hello world from HelloWorldServerlessApi/hello function")
print("Hello world from ApiGwFunction function")
print("this should be filtered ApiGwFunction")
return {}
| 31
| 68
| 0.741935
|
416a13c10d0d11eb00aede9096e16fcdca6732bb
| 885
|
py
|
Python
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/tree_alignments/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/tree_alignments/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/tree_alignments/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import warnings
warnings.warn("please use the pm4py.algo.conformance.alignments.process_tree package.")
from pm4py.algo.conformance.alignments.process_tree import *
| 40.227273
| 87
| 0.748023
|
2701ad89bf85792091760d18d393b7124981569b
| 2,171
|
py
|
Python
|
packages/Python/lldbsuite/test/lang/swift/variables/cgtypes/TestCGTypes.py
|
xiaobai/swift-lldb
|
9238527ce430e6837108a16d2a91b147551fb83c
|
[
"Apache-2.0"
] | 765
|
2015-12-03T16:44:59.000Z
|
2022-03-07T12:41:10.000Z
|
packages/Python/lldbsuite/test/lang/swift/variables/cgtypes/TestCGTypes.py
|
xiaobai/swift-lldb
|
9238527ce430e6837108a16d2a91b147551fb83c
|
[
"Apache-2.0"
] | 1,815
|
2015-12-11T23:56:05.000Z
|
2020-01-10T19:28:43.000Z
|
packages/Python/lldbsuite/test/lang/swift/variables/cgtypes/TestCGTypes.py
|
xiaobai/swift-lldb
|
9238527ce430e6837108a16d2a91b147551fb83c
|
[
"Apache-2.0"
] | 284
|
2015-12-03T16:47:25.000Z
|
2022-03-12T05:39:48.000Z
|
# TestCGTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we are able to properly format basic CG types
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftCoreGraphicsTypes(TestBase):
mydir = TestBase.compute_mydir(__file__)
@swiftTest
@skipUnlessDarwin
def test_swift_coregraphics_types(self):
"""Test that we are able to properly format basic CG types"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
def do_test(self):
"""Test that we are able to properly format basic CG types"""
self.build()
lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))
self.expect('frame variable f', substrs=[' = 1'])
self.expect('frame variable p', substrs=[' = (x = 1, y = 1)'])
self.expect('frame variable r', substrs=[
' = (origin = (x = 0, y = 0), size = (width = 0, height = 0))'])
self.expect('expr f', substrs=[' = 1'])
self.expect('expr p', substrs=[' = (x = 1, y = 1)'])
self.expect(
'expr r',
substrs=[' = (origin = (x = 0, y = 0), size = (width = 0, height = 0))'])
self.expect('po f', substrs=['1.0'])
self.expect('po p', substrs=['x : 1.0', 'y : 1.0'])
self.expect(
'po r',
substrs=[
'x : 0.0',
'y : 0.0',
'width : 0.0',
'height : 0.0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
| 31.014286
| 85
| 0.573929
|
480d1df18c2e9a74bc42bf0c110e981cf89f2f2b
| 1,488
|
py
|
Python
|
test/cmd/at/test_cmd_at_get_imsi.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
test/cmd/at/test_cmd_at_get_imsi.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
test/cmd/at/test_cmd_at_get_imsi.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Testing AtGetIMSI commands.
"""
__author__ = 'Lukasz Blaszkiewicz, Kamil Kania, Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'kamil.kania@nokia.com, grzegorz.latuszek@nokia.com'
import pytest
# --------------------------- testing base class ---------------------------
def test_calling_at_cmd_get_imsi_returns_expected_result(buffer_connection):
from moler.cmd.at import get_imsi
at_cmd_get_imsi = get_imsi.AtCmdGetIMSI(connection=buffer_connection.moler_connection,
**get_imsi.COMMAND_KWARGS_ver_execute)
buffer_connection.remote_inject_response([get_imsi.COMMAND_OUTPUT_ver_execute])
result = at_cmd_get_imsi()
assert result == get_imsi.COMMAND_RESULT_ver_execute
def test_at_cmd_get_imsi_raises_AtCommandModeNotSupported_when_instantiated_in_read_mode():
from moler.cmd.at.get_imsi import AtCmdGetIMSI, AtCommandModeNotSupported
with pytest.raises(AtCommandModeNotSupported):
AtCmdGetIMSI(operation="read")
def test_calling_at_cmd_get_imsi_in_test_mode_returns_empty_result(buffer_connection):
from moler.cmd.at import get_imsi
buffer_connection.remote_inject_response([get_imsi.COMMAND_OUTPUT_ver_test])
at_cmd_get_imsi = get_imsi.AtCmdGetIMSI(connection=buffer_connection.moler_connection,
**get_imsi.COMMAND_KWARGS_ver_test)
result = at_cmd_get_imsi()
assert result == {}
| 41.333333
| 91
| 0.733199
|
237b6353d53104d1ccca9ecedea6efd6910a2128
| 23,439
|
py
|
Python
|
heisenbridge/private_room.py
|
sumnerevans/heisenbridge
|
caf122a0f064b8bc2cd9fbd0f9ae703d323b7d04
|
[
"MIT"
] | null | null | null |
heisenbridge/private_room.py
|
sumnerevans/heisenbridge
|
caf122a0f064b8bc2cd9fbd0f9ae703d323b7d04
|
[
"MIT"
] | null | null | null |
heisenbridge/private_room.py
|
sumnerevans/heisenbridge
|
caf122a0f064b8bc2cd9fbd0f9ae703d323b7d04
|
[
"MIT"
] | null | null | null |
import asyncio
import html
import logging
import re
import unicodedata
from datetime import datetime
from datetime import timezone
from html import escape
from typing import List
from typing import Optional
from typing import Tuple
from urllib.parse import urlparse
from mautrix.api import Method
from mautrix.api import SynapseAdminPath
from heisenbridge.command_parse import CommandManager
from heisenbridge.command_parse import CommandParser
from heisenbridge.command_parse import CommandParserError
from heisenbridge.parser import IRCMatrixParser
from heisenbridge.parser import IRCRecursionContext
from heisenbridge.room import Room
class NetworkRoom:
pass
def unix_to_local(timestamp: Optional[str]):
try:
dt = datetime.fromtimestamp(int(timestamp), timezone.utc)
return dt.strftime("%c %Z") # intentionally UTC for now
except ValueError:
logging.debug("Tried to convert '{timestamp}' to int")
return timestamp
def connected(f):
def wrapper(*args, **kwargs):
self = args[0]
if not self.network or not self.network.conn or not self.network.conn.connected:
self.send_notice("Need to be connected to use this command.")
return asyncio.sleep(0)
return f(*args, **kwargs)
return wrapper
# this is very naive and will break html tag close/open order right now
def parse_irc_formatting(input: str, pills=None) -> Tuple[str, Optional[str]]:
plain = []
formatted = []
have_formatting = False
bold = False
italic = False
underline = False
for m in re.finditer(
r"(\x02|\x03([0-9]{1,2})?(,([0-9]{1,2}))?|\x1D|\x1F|\x16|\x0F)?([^\x02\x03\x1D\x1F\x16\x0F]*)", input
):
# fg is group 2, bg is group 4 but we're ignoring them now
(ctrl, text) = (m.group(1), m.group(5))
if ctrl:
have_formatting = True
if ctrl[0] == "\x02":
if not bold:
formatted.append("<b>")
else:
formatted.append("</b>")
bold = not bold
if ctrl[0] == "\x03":
"""
ignoring color codes for now
"""
elif ctrl[0] == "\x1D":
if not italic:
formatted.append("<i>")
else:
formatted.append("</i>")
italic = not italic
elif ctrl[0] == "\x1F":
if not underline:
formatted.append("<u>")
else:
formatted.append("</u>")
underline = not underline
elif ctrl[0] == "\x16":
"""
ignore reverse
"""
elif ctrl[0] == "\x0F":
if bold:
formatted.append("</b>")
if italic:
formatted.append("</i>")
if underline:
formatted.append("</u>")
bold = italic = underline = False
if text:
plain.append(text)
# escape any existing html in the text
text = escape(text)
# create pills
if pills:
def replace_pill(m):
word = m.group(0).lower()
if word in pills:
mxid, displayname = pills[word]
return f'<a href="https://matrix.to/#/{escape(mxid)}">{escape(displayname)}</a>'
return m.group(0)
# this will also match some non-nick characters so pillify fails on purpose
text = re.sub(r"[^\s\?!:;,\.]+(\.[A-Za-z0-9])?", replace_pill, text)
# if the formatted version has a link, we took some pills
if "<a href" in text:
have_formatting = True
formatted.append(text)
if bold:
formatted.append("</b>")
if italic:
formatted.append("</i>")
if underline:
formatted.append("</u>")
return ("".join(plain), "".join(formatted) if have_formatting else None)
def split_long(nick, user, host, target, message):
out = []
# this is an easy template to calculate the overhead of the sender and target
template = f":{nick}!{user}@{host} PRIVMSG {target} :\r\n"
maxlen = 512 - len(template.encode())
dots = "..."
words = []
for word in message.split(" "):
words.append(word)
line = " ".join(words)
if len(line.encode()) + len(dots) > maxlen:
words.pop()
out.append(" ".join(words) + dots)
words = [dots, word]
out.append(" ".join(words))
return out
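# Illustrative overhead calculation (hypothetical values): for nick "alice", user
# "alice", host "example.com" and target "#chan", the template
# ":alice!alice@example.com PRIVMSG #chan :\r\n" is 42 octets, leaving 470 octets
# of message body per line before the "..." continuation is used.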
# generate an edit that follows usual IRC conventions
def line_diff(a, b):
a = a.split()
b = b.split()
pre = None
post = None
mlen = min(len(a), len(b))
for i in range(0, mlen):
if a[i] != b[i]:
break
pre = i + 1
for i in range(1, mlen + 1):
if a[-i] != b[-i]:
break
post = -i
rem = a[pre:post]
add = b[pre:post]
if len(add) == 0 and len(rem) > 0:
return "-" + (" ".join(rem))
if len(rem) == 0 and len(add) > 0:
return "+" + (" ".join(add))
if len(add) > 0:
return "* " + (" ".join(add))
return None
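# Worked examples (illustrative):
#   line_diff("this is a tset line", "this is a test line") -> "* test"
#   line_diff("hello cruel world", "hello world")           -> "-cruel"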
class PrivateRoom(Room):
# irc nick of the other party, name for consistency
name: str
network: Optional[NetworkRoom]
network_id: str
network_name: Optional[str]
media: List[List[str]]
# for compatibility with plumbed rooms
max_lines = 0
force_forward = False
commands: CommandManager
def init(self) -> None:
self.name = None
self.network = None
self.network_id = None
self.network_name = None # deprecated
self.media = []
self.commands = CommandManager()
if type(self) == PrivateRoom:
cmd = CommandParser(prog="WHOIS", description="WHOIS the other user")
self.commands.register(cmd, self.cmd_whois)
self.mx_register("m.room.message", self.on_mx_message)
self.mx_register("m.room.redaction", self.on_mx_redaction)
def from_config(self, config: dict) -> None:
if "name" not in config:
raise Exception("No name key in config for ChatRoom")
self.name = config["name"]
if "network_id" in config:
self.network_id = config["network_id"]
if "media" in config:
self.media = config["media"]
# only used for migration
if "network" in config:
self.network_name = config["network"]
if self.network_name is None and self.network_id is None:
raise Exception("No network or network_id key in config for PrivateRoom")
def to_config(self) -> dict:
return {"name": self.name, "network": self.network_name, "network_id": self.network_id, "media": self.media[:5]}
@staticmethod
def create(network: NetworkRoom, name: str) -> "PrivateRoom":
logging.debug(f"PrivateRoom.create(network='{network.name}', name='{name}')")
irc_user_id = network.serv.irc_user_id(network.name, name)
room = PrivateRoom(
None,
network.user_id,
network.serv,
[network.user_id, irc_user_id, network.serv.user_id],
[],
)
room.name = name.lower()
room.network = network
room.network_id = network.id
room.network_name = network.name
asyncio.ensure_future(room._create_mx(name))
return room
async def _create_mx(self, displayname) -> None:
if self.id is None:
irc_user_id = await self.network.serv.ensure_irc_user_id(self.network.name, displayname, update_cache=False)
self.id = await self.network.serv.create_room(
"{} ({})".format(displayname, self.network.name),
"Private chat with {} on {}".format(displayname, self.network.name),
[self.network.user_id, irc_user_id],
)
self.serv.register_room(self)
await self.az.intent.user(irc_user_id).ensure_joined(self.id)
await self.save()
# start event queue now that we have an id
self._queue.start()
# attach to network space
if self.network.space:
await self.network.space.attach(self.id)
def is_valid(self) -> bool:
if self.network_id is None and self.network_name is None:
return False
if self.name is None:
return False
if self.user_id is None:
return False
if not self.in_room(self.user_id):
return False
return True
def cleanup(self) -> None:
logging.debug(f"Cleaning up network connected room {self.id}.")
# cleanup us from network space if we have it
if self.network and self.network.space:
asyncio.ensure_future(self.network.space.detach(self.id))
# cleanup us from network rooms
if self.network and self.name in self.network.rooms:
logging.debug(f"... and we are attached to network {self.network.id}, detaching.")
del self.network.rooms[self.name]
# if leaving this room invalidated the network, clean it up
if not self.network.is_valid():
logging.debug(f"... and we invalidated network {self.network.id} while cleaning up.")
self.network.serv.unregister_room(self.network.id)
self.network.cleanup()
asyncio.ensure_future(self.network.serv.leave_room(self.network.id, self.network.members))
super().cleanup()
def send_notice(
self,
text: str,
user_id: Optional[str] = None,
formatted=None,
fallback_html: Optional[str] = None,
forward=False,
):
if (self.force_forward or forward) and user_id is None:
self.network.send_notice(text=f"{self.name}: {text}", formatted=formatted, fallback_html=fallback_html)
else:
super().send_notice(text=text, user_id=user_id, formatted=formatted, fallback_html=fallback_html)
def send_notice_html(self, text: str, user_id: Optional[str] = None, forward=False) -> None:
if (self.force_forward or forward) and user_id is None:
self.network.send_notice_html(text=f"{self.name}: {text}")
else:
super().send_notice_html(text=text, user_id=user_id)
def pills(self):
# if pills are disabled, don't generate any
if self.network.pills_length < 1:
return None
ret = {}
ignore = list(map(lambda x: x.lower(), self.network.pills_ignore))
# push our own name first
lnick = self.network.conn.real_nickname.lower()
if self.user_id in self.displaynames and len(lnick) >= self.network.pills_length and lnick not in ignore:
ret[lnick] = (self.user_id, self.displaynames[self.user_id])
# assuming displayname of a puppet matches nick
for member in self.members:
if not member.startswith("@" + self.serv.puppet_prefix) or not member.endswith(":" + self.serv.server_name):
continue
if member in self.displaynames:
nick = self.displaynames[member]
lnick = nick.lower()
if len(nick) >= self.network.pills_length and lnick not in ignore:
ret[lnick] = (member, nick)
return ret
def on_privmsg(self, conn, event) -> None:
if self.network is None:
return
irc_user_id = self.serv.irc_user_id(self.network.name, event.source.nick)
(plain, formatted) = parse_irc_formatting(event.arguments[0], self.pills())
if event.source.nick == self.network.conn.real_nickname:
self.send_message(f"You said: {plain}", formatted=(f"You said: {formatted}" if formatted else None))
return
self.send_message(
plain,
irc_user_id,
formatted=formatted,
fallback_html=f"<b>Message from {str(event.source)}</b>: {html.escape(plain)}",
)
# lazy update displayname if we detect a change
if (
not self.serv.is_user_cached(irc_user_id, event.source.nick)
and irc_user_id not in self.lazy_members
and irc_user_id in self.members
):
asyncio.ensure_future(self.serv.ensure_irc_user_id(self.network.name, event.source.nick))
def on_privnotice(self, conn, event) -> None:
if self.network is None:
return
(plain, formatted) = parse_irc_formatting(event.arguments[0])
if event.source.nick == self.network.conn.real_nickname:
self.send_notice(f"You noticed: {plain}", formatted=(f"You noticed: {formatted}" if formatted else None))
return
# if the local user has left this room notify in network
if self.user_id not in self.members:
source = self.network.source_text(conn, event)
self.network.send_notice_html(
f"Notice from <b>{source}:</b> {formatted if formatted else html.escape(plain)}"
)
return
irc_user_id = self.serv.irc_user_id(self.network.name, event.source.nick)
self.send_notice(
plain,
irc_user_id,
formatted=formatted,
fallback_html=f"<b>Notice from {str(event.source)}</b>: {formatted if formatted else html.escape(plain)}",
)
def on_ctcp(self, conn, event) -> None:
if self.network is None:
return
irc_user_id = self.serv.irc_user_id(self.network.name, event.source.nick)
command = event.arguments[0].upper()
if command == "ACTION" and len(event.arguments) > 1:
(plain, formatted) = parse_irc_formatting(event.arguments[1])
if event.source.nick == self.network.conn.real_nickname:
self.send_emote(f"(you) {plain}")
return
self.send_emote(
plain, irc_user_id, fallback_html=f"<b>Emote from {str(event.source)}</b>: {html.escape(plain)}"
)
else:
(plain, formatted) = parse_irc_formatting(" ".join(event.arguments))
self.send_notice_html(f"<b>{str(event.source)}</b> requested <b>CTCP {html.escape(plain)}</b> (ignored)")
def on_ctcpreply(self, conn, event) -> None:
if self.network is None:
return
(plain, formatted) = parse_irc_formatting(" ".join(event.arguments))
self.send_notice_html(f"<b>{str(event.source)}</b> sent <b>CTCP REPLY {html.escape(plain)}</b> (ignored)")
def _process_event_content(self, event, prefix, reply_to=None):
content = event.content
if content.formatted_body:
lines = str(
IRCMatrixParser.parse(content.formatted_body, IRCRecursionContext(displaynames=self.displaynames))
).split("\n")
elif content.body:
body = content.body
for user_id, displayname in self.displaynames.items():
body = body.replace(user_id, displayname)
# FluffyChat prefixes mentions in fallback with @
body = body.replace("@" + displayname, displayname)
lines = body.split("\n")
else:
logging.warning("_process_event_content called with no usable body")
return
# drop all whitespace-only lines
lines = [x for x in lines if not re.match(r"^\s*$", x)]
# handle replies
if reply_to and reply_to.sender != event.sender:
# resolve displayname
sender = reply_to.sender
if sender in self.displaynames:
sender = self.displaynames[sender]
# prefix first line with nickname of the reply_to source
first_line = sender + ": " + lines.pop(0)
lines.insert(0, first_line)
messages = []
for i, line in enumerate(lines):
# prefix first line if needed
if i == 0 and prefix and len(prefix) > 0:
line = prefix + line
# filter control characters except ZWSP
line = "".join(c for c in line if unicodedata.category(c)[0] != "C" or c == "\u200B")
messages += split_long(
self.network.conn.real_nickname,
self.network.real_user,
self.network.real_host,
self.name,
line,
)
return messages
async def _send_message(self, event, func, prefix=""):
# try to find out if this was a reply
reply_to = None
if event.content.get_reply_to():
rel_event = event
# traverse back all edits
while rel_event.content.get_edit():
rel_event = await self.az.intent.get_event(self.id, rel_event.content.get_edit())
# see if the original is a reply
if rel_event.content.get_reply_to():
reply_to = await self.az.intent.get_event(self.id, rel_event.content.get_reply_to())
if event.content.get_edit():
messages = self._process_event_content(event, prefix, reply_to)
event_id = event.content.relates_to.event_id
prev_event = self.last_messages[event.sender]
if prev_event and prev_event.event_id == event_id:
old_messages = self._process_event_content(prev_event, prefix, reply_to)
mlen = max(len(messages), len(old_messages))
edits = []
for i in range(0, mlen):
try:
old_msg = old_messages[i]
except IndexError:
old_msg = ""
try:
new_msg = messages[i]
except IndexError:
new_msg = ""
edit = line_diff(old_msg, new_msg)
if edit:
edits.append(prefix + edit)
# use edits only if one line was edited
if len(edits) == 1:
messages = edits
# update last message _content_ to current so re-edits work
self.last_messages[event.sender].content = event.content
else:
                # last event was not found so we fall back to the full message BUT we can reconstruct enough of it
self.last_messages[event.sender] = event
else:
# keep track of the last message
self.last_messages[event.sender] = event
messages = self._process_event_content(event, prefix, reply_to)
for i, message in enumerate(messages):
if self.max_lines > 0 and i == self.max_lines - 1 and len(messages) > self.max_lines:
self.react(event.event_id, "\u2702") # scissors
if self.use_pastebin:
content_uri = await self.az.intent.upload_media(
"\n".join(messages).encode("utf-8"), mime_type="text/plain; charset=UTF-8"
)
if self.max_lines == 1:
func(
self.name,
f"{prefix}{self.serv.mxc_to_url(str(content_uri))} (long message, {len(messages)} lines)",
)
else:
func(
self.name,
f"... long message truncated: {self.serv.mxc_to_url(str(content_uri))} ({len(messages)} lines)",
)
self.react(event.event_id, "\U0001f4dd") # memo
self.media.append([event.event_id, str(content_uri)])
await self.save()
else:
if self.max_lines == 1:
# best effort is to send the first line and give up
func(self.name, message)
else:
func(self.name, "... long message truncated")
return
func(self.name, message)
# show number of lines sent to IRC
if self.max_lines == 0 and len(messages) > 1:
self.react(event.event_id, f"\u2702 {len(messages)} lines")
async def on_mx_message(self, event) -> None:
if event.sender != self.user_id:
return
if self.network is None or self.network.conn is None or not self.network.conn.connected:
self.send_notice("Not connected to network.")
return
if str(event.content.msgtype) == "m.emote":
await self._send_message(event, self.network.conn.action)
elif str(event.content.msgtype) in ["m.image", "m.file", "m.audio", "m.video"]:
self.network.conn.privmsg(self.name, self.serv.mxc_to_url(event.content.url, event.content.body))
self.react(event.event_id, "\U0001F517") # link
self.media.append([event.event_id, event.content.url])
await self.save()
elif str(event.content.msgtype) == "m.text":
# allow commanding the appservice in rooms
match = re.match(r"^\s*@?([^:,\s]+)[\s:,]*(.+)$", event.content.body)
if match and match.group(1).lower() == self.serv.registration["sender_localpart"]:
try:
await self.commands.trigger(match.group(2))
except CommandParserError as e:
self.send_notice(str(e))
finally:
return
await self._send_message(event, self.network.conn.privmsg)
await self.az.intent.send_receipt(event.room_id, event.event_id)
async def on_mx_redaction(self, event) -> None:
for media in self.media:
if media[0] == event.redacts:
url = urlparse(media[1])
if self.serv.synapse_admin:
try:
await self.az.intent.api.request(
Method.POST, SynapseAdminPath.v1.media.quarantine[url.netloc][url.path[1:]]
)
self.network.send_notice(
f"Associated media {media[1]} for redacted event {event.redacts} "
+ f"in room {self.name} was quarantined."
)
except Exception:
self.network.send_notice(
f"Failed to quarantine media! Associated media {media[1]} "
+ f"for redacted event {event.redacts} in room {self.name} is left available."
)
else:
self.network.send_notice(
f"No permission to quarantine media! Associated media {media[1]} "
+ f"for redacted event {event.redacts} in room {self.name} is left available."
)
return
@connected
async def cmd_whois(self, args) -> None:
self.network.conn.whois(f"{self.name} {self.name}")
| 35.567527
| 124
| 0.55894
|
64c09a058b4774ff4bf88d9b1dba9a494a4179e1
| 322
|
py
|
Python
|
setup.py
|
shinoaki-tatsuya/mp4tojson
|
3b21e8966b56774105481ba88e76fe35e0e4c123
|
[
"MIT"
] | null | null | null |
setup.py
|
shinoaki-tatsuya/mp4tojson
|
3b21e8966b56774105481ba88e76fe35e0e4c123
|
[
"MIT"
] | null | null | null |
setup.py
|
shinoaki-tatsuya/mp4tojson
|
3b21e8966b56774105481ba88e76fe35e0e4c123
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import mp4tojson
setup(
name='mp4tojson',
version=mp4tojson.__version__,
packages=find_packages(exclude=['tests']),
entry_points='''
[console_scripts]
mp4tojson = mp4tojson.main:main
''',
install_requires=[
'click',
])
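# Usage sketch (assumed typical setuptools workflow, not part of the original file):
#   pip install -e .
#   mp4tojson          # console script wired to mp4tojson.main:main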
| 21.466667
| 46
| 0.636646
|
9364babed1813daf4504ebae83fe160b7ee54f6b
| 8,040
|
py
|
Python
|
examples/pybullet/gym/pybullet_envs/bullet/kuka.py
|
msieb1/bullet3
|
0803a1f53e22ddd212eafe1194b1dd9e891a7eaf
|
[
"Zlib"
] | null | null | null |
examples/pybullet/gym/pybullet_envs/bullet/kuka.py
|
msieb1/bullet3
|
0803a1f53e22ddd212eafe1194b1dd9e891a7eaf
|
[
"Zlib"
] | null | null | null |
examples/pybullet/gym/pybullet_envs/bullet/kuka.py
|
msieb1/bullet3
|
0803a1f53e22ddd212eafe1194b1dd9e891a7eaf
|
[
"Zlib"
] | null | null | null |
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import pybullet as p
import numpy as np
import copy
import math
import pybullet_data
import time
from pdb import set_trace
class Kuka:
def __init__(self, urdfRootPath=pybullet_data.getDataPath(), timeStep=0.01):
self.urdfRootPath = urdfRootPath
self.timeStep = timeStep
self.maxVelocity = .35
self.maxForce = 200.
self.fingerAForce = 2
self.fingerBForce = 2.5
self.fingerTipForce = 2
self.useInverseKinematics = 1
self.useSimulation = 1
self.useNullSpace = 0
self.useOrientation = 1
self.kukaEndEffectorIndex = 6
self.kukaGripperIndex = 7
#lower limits for null space
self.ll=[-.967,-2 ,-2.96,0.19,-2.96,-2.09,-3.05]
#upper limits for null space
self.ul=[.967,2 ,2.96,2.29,2.96,2.09,3.05]
#joint ranges for null space
self.jr=[5.8,4,5.8,4,5.8,4,6]
#restposes for null space
self.rp=[0,0,0,0.5*math.pi,0,-math.pi*0.5*0.66,0]
#joint damping coefficents
self.jd=[0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001,0.00001]
self.reset()
def reset(self):
objects = p.loadSDF(os.path.join(self.urdfRootPath,"kuka_iiwa/kuka_with_gripper2.sdf"))
self.kukaUid = objects[0]
#for i in range (p.getNumJoints(self.kukaUid)):
# print(p.getJointInfo(self.kukaUid,i))
p.resetBasePositionAndOrientation(self.kukaUid,[-0.100000,0.000000,0.070000],[0.000000,0.000000,0.000000,1.000000])
self.jointPositions=[ 0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684, -0.006539, 0.000048, -0.299912, 0.000000, -0.000043, 0.299960, 0.000000, -0.000200 ]
self.numJoints = p.getNumJoints(self.kukaUid)
for jointIndex in range (self.numJoints):
p.resetJointState(self.kukaUid,jointIndex,self.jointPositions[jointIndex])
p.setJointMotorControl2(self.kukaUid,jointIndex,p.POSITION_CONTROL,targetPosition=self.jointPositions[jointIndex],force=self.maxForce)
#self.trayUid = p.loadURDF(os.path.join(self.urdfRootPath,"tray/tray.urdf"), 0.640000,0.075000,-0.190000,0.000000,0.000000,1.000000,0.000000)
self.endEffectorPos = [0.537,0.0,0.5]
self.endEffectorAngle = 0
self.motorNames = []
self.motorIndices = []
for i in range (self.numJoints):
jointInfo = p.getJointInfo(self.kukaUid,i)
qIndex = jointInfo[3]
if qIndex > -1:
#print("motorname")
#print(jointInfo[1])
self.motorNames.append(str(jointInfo[1]))
self.motorIndices.append(i)
def getActionDimension(self):
if (self.useInverseKinematics):
return len(self.motorIndices)
return 6 #position x,y,z and roll/pitch/yaw euler angles of end effector
def getObservationDimension(self):
return len(self.getObservation())
def getObservation(self):
observation = []
state = p.getLinkState(self.kukaUid,self.kukaGripperIndex)
pos = state[0]
orn = state[1]
euler = p.getEulerFromQuaternion(orn)
observation.extend(list(pos))
observation.extend(list(euler))
return observation
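  # Added note: when useInverseKinematics is set, applyAction expects
  # motorCommands = [dx, dy, dz, da, fingerAngle], i.e. end-effector deltas plus the
  # gripper angle; otherwise motorCommands holds absolute joint targets for the
  # motors listed in self.motorIndices.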
def applyAction(self, motorCommands):
#print ("self.numJoints")
#print (self.numJoints)
if (self.useInverseKinematics):
xlow = 0.4
xhigh = 0.8
ylow = -0.2
yhigh = 0.2
      zlow = 0.2
      zhigh = 0.6
dx = motorCommands[0]
dy = motorCommands[1]
dz = motorCommands[2]
da = motorCommands[3]
fingerAngle = motorCommands[4]
state = p.getLinkState(self.kukaUid,self.kukaEndEffectorIndex)
actualEndEffectorPos = state[0]
#print("pos[2] (getLinkState(kukaEndEffectorIndex)")
#print(actualEndEffectorPos[2])
self.endEffectorPos[0] = self.endEffectorPos[0]+dx
if (self.endEffectorPos[0]>xhigh):
self.endEffectorPos[0]=xhigh
if (self.endEffectorPos[0]<xlow):
self.endEffectorPos[0]=xlow
self.endEffectorPos[1] = self.endEffectorPos[1]+dy
if (self.endEffectorPos[1]<ylow):
self.endEffectorPos[1]=ylow
if (self.endEffectorPos[1]>yhigh):
self.endEffectorPos[1]=yhigh
self.endEffectorPos[2] = self.endEffectorPos[2]+dz
if (self.endEffectorPos[2]<zlow):
self.endEffectorPos[2]=zlow
if (self.endEffectorPos[2]>zhigh):
self.endEffectorPos[2]=zhigh
#print ("self.endEffectorPos[2]")
#print (self.endEffectorPos[2])
#print("actualEndEffectorPos[2]")
#print(actualEndEffectorPos[2])
#if (dz<0 or actualEndEffectorPos[2]<0.5):
self.endEffectorAngle = self.endEffectorAngle + da
pos = self.endEffectorPos
orn = p.getQuaternionFromEuler([0,-math.pi,0]) # -math.pi,yaw])
if (self.useNullSpace==1):
if (self.useOrientation==1):
jointPoses = p.calculateInverseKinematics(self.kukaUid,self.kukaEndEffectorIndex,pos,orn,self.ll,self.ul,self.jr,self.rp)
else:
jointPoses = p.calculateInverseKinematics(self.kukaUid,self.kukaEndEffectorIndex,pos,lowerLimits=self.ll, upperLimits=self.ul, jointRanges=self.jr, restPoses=self.rp)
else:
if (self.useOrientation==1):
jointPoses = p.calculateInverseKinematics(self.kukaUid,self.kukaEndEffectorIndex,pos,orn,jointDamping=self.jd)
else:
jointPoses = p.calculateInverseKinematics(self.kukaUid,self.kukaEndEffectorIndex,pos)
#print("jointPoses")
#print(jointPoses)
#print("self.kukaEndEffectorIndex")
#print(self.kukaEndEffectorIndex)
if (self.useSimulation):
for i in range (self.kukaEndEffectorIndex+1):
#print(i)
p.setJointMotorControl2(bodyUniqueId=self.kukaUid,jointIndex=i,controlMode=p.POSITION_CONTROL,targetPosition=jointPoses[i],targetVelocity=0,force=self.maxForce,maxVelocity=self.maxVelocity, positionGain=0.3,velocityGain=1)
else:
#reset the joint state (ignoring all dynamics, not recommended to use during simulation)
for i in range (self.numJoints):
p.resetJointState(self.kukaUid,i,jointPoses[i])
#fingers
p.setJointMotorControl2(self.kukaUid,7,p.POSITION_CONTROL,targetPosition=self.endEffectorAngle,force=self.maxForce)
p.setJointMotorControl2(self.kukaUid,8,p.POSITION_CONTROL,targetPosition=-fingerAngle,force=self.fingerAForce)
p.setJointMotorControl2(self.kukaUid,11,p.POSITION_CONTROL,targetPosition=fingerAngle,force=self.fingerBForce)
p.setJointMotorControl2(self.kukaUid,10,p.POSITION_CONTROL,targetPosition=0,force=self.fingerTipForce)
p.setJointMotorControl2(self.kukaUid,13,p.POSITION_CONTROL,targetPosition=0,force=self.fingerTipForce)
else:
for action in range (len(motorCommands)):
motor = self.motorIndices[action]
p.setJointMotorControl2(self.kukaUid,motor,p.POSITION_CONTROL,targetPosition=motorCommands[action],force=self.maxForce)
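  # applyAction summary: with useInverseKinematics enabled, motorCommands is
  # interpreted as [dx, dy, dz, da, fingerAngle], i.e. position deltas for the
  # end effector, a wrist-angle delta and an absolute finger angle, with the
  # resulting position clamped to the x/y/z bounds above; otherwise each entry
  # is an absolute joint-position target for the corresponding motorIndices entry.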
def moveKukaEndtoPos(self, newxy, orn=None):
if orn is None:
orn = p.getQuaternionFromEuler([0, -math.pi,0]) # so gripper is always pointing down
kuka_min_height = 0.0 #limit min height
newxy[2] = max(kuka_min_height, newxy[2])
for kuka_sec in range(500):
jointPoses = p.calculateInverseKinematics(
self.kukaUid,
self.kukaEndEffectorIndex,
newxy,
orn,
lowerLimits=self.ll,
upperLimits=self.ul,
jointRanges=self.jr,
restPoses=self.rp)
for i in range(self.kukaEndEffectorIndex+1):
p.setJointMotorControl2(bodyIndex=self.kukaUid,jointIndex=i,controlMode=p.POSITION_CONTROL,\
targetPosition=jointPoses[i],targetVelocity=0,force=self.maxForce,positionGain=0.03,velocityGain=1)
p.stepSimulation()
time.sleep(0.001)
| 40.2
| 232
| 0.6801
|
9a213abc44ec3762e4b3676f12a5c930d48e419c
| 633
|
py
|
Python
|
rapidfire/__init__.py
|
wanshot/rapid_fire
|
edee22a86b609805c602bb701bd6812c214f5c7b
|
[
"MIT"
] | 2
|
2017-02-16T00:05:20.000Z
|
2017-03-06T09:00:49.000Z
|
rapidfire/__init__.py
|
wanshot/rapid_fire
|
edee22a86b609805c602bb701bd6812c214f5c7b
|
[
"MIT"
] | 14
|
2017-02-19T14:52:18.000Z
|
2017-05-28T07:09:28.000Z
|
rapidfire/__init__.py
|
wanshot/RapidFire
|
edee22a86b609805c602bb701bd6812c214f5c7b
|
[
"MIT"
] | null | null | null |
__version__ = '1.0.0'
__license__ = 'MIT'
__author__ = 'wanshot'
__author_email__ = 'nishikawa0228@sj9.so-net.ne.jp'
__logo__ = """
____ _ _________
/ __ \____ _____ (_)___/ / ____(_)_______
/ /_/ / __ `/ __ \/ / __ / /_ / / ___/ _ \\
/ _, _/ /_/ / /_/ / / /_/ / __/ / / / / __/
/_/ |_|\__,_/ .___/_/\__,_/_/ /_/_/ \___/
/_/
"""
from .core import Core
def task(*args, **kwargs):
# call @task
if len(args) == 1 and callable(args[0]):
return Core(args[0], **kwargs)
def inner(obj):
# call @task()
return Core(obj, **kwargs)
return inner
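# Usage sketch (illustrative; the decorated names below are made up): both forms
# wrap the function in a Core object.
#
#     @task
#     def hello():
#         print('hello')
#
#     @task()
#     def goodbye():
#         print('goodbye')
#
# `hello` and `goodbye` are then Core instances built from the original functions.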
| 23.444444
| 51
| 0.496051
|
2646eb026ed9e4bf15b8e8bfb6d0edab5104a3bc
| 6,936
|
py
|
Python
|
utils.py
|
meta-inf/qbdiv
|
27ea43d5a453b9367aabadf08349a4cbc287e9b2
|
[
"MIT"
] | 2
|
2022-03-04T20:48:44.000Z
|
2022-03-14T04:19:28.000Z
|
utils.py
|
meta-inf/qbdiv
|
27ea43d5a453b9367aabadf08349a4cbc287e9b2
|
[
"MIT"
] | null | null | null |
utils.py
|
meta-inf/qbdiv
|
27ea43d5a453b9367aabadf08349a4cbc287e9b2
|
[
"MIT"
] | null | null | null |
# import torch as th
from __future__ import annotations
from typing import Iterable, Callable, Any, Union, Tuple
import numpy as onp
import scipy
import jax
from jax import numpy as np
class UniformNystromSampler(object):
"""
the sampler will not modify its state
"""
def __init__(self, pkey: jax.random.PRNGKey, rho=0.5, a=1, mn=60):
self.pkey, self.rho, self.a, self.mn = pkey, rho, a, mn
def __call__(self, z_all: np.ndarray) -> np.ndarray:
n = z_all.shape[0]
m = min(max(self.mn, int(self.a * n**self.rho)), n)
idcs = jax.random.permutation(self.pkey, n)[:m]
return z_all[idcs]
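# Usage sketch (illustrative): with the defaults rho=0.5, a=1, mn=60 the sampler
# keeps m = min(max(60, floor(sqrt(n))), n) rows of z_all, drawn uniformly without
# replacement.
#
#     sampler = UniformNystromSampler(jax.random.PRNGKey(0))
#     z_inducing = sampler(z_all)   # (n, d) array -> (m, d) subset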
def split_pkey(k: Union[jax.random.PRNGKey, None], num: int = 2):
if k is not None:
return jax.random.split(k, num)
return tuple([None] * num)
class PRNGKeyHolder(object):
""" For use inside a function. """
def __init__(self, pkey):
self.pkey = pkey
def gen_key(self):
self.pkey, ckey = jax.random.split(self.pkey)
return ckey
def ceil_div(a, b): return (a + b - 1) // b
def gen_bs_mask(pkey, N, ratio, n_particles):
if ratio + 1e-4 < 1:
Mk = np.ones((N, n_particles), dtype=np.bool_)
idcs = np.arange(N)
bs_n_removed = max(int(N * (1 - ratio)), 1)
for i in range(n_particles):
pkey, ckey = jax.random.split(pkey)
excluded = jax.random.choice(ckey, idcs, (bs_n_removed, 1), replace=False)
Mk = jax.ops.index_update(Mk, jax.ops.index[excluded, i], False)
else:
Mk = np.zeros((N, n_particles), dtype=np.float32)
for i in range(n_particles):
pkey, ckey = jax.random.split(pkey)
idcs = jax.random.randint(ckey, (N, 1), 0, N)
Mk = jax.ops.index_add(Mk, jax.ops.index[idcs, i], 1)
return Mk
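# Descriptive note: for ratio < 1 the result is a boolean (N, n_particles) keep-mask
# in which each particle drops max(int(N*(1-ratio)), 1) uniformly chosen rows; for
# ratio >= 1 it is a float matrix of with-replacement resample counts (N draws per
# particle).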
def l2_regularizer(params: Any) -> np.ndarray:
return jax.tree_util.tree_reduce(
lambda x, y: x+y,
jax.tree_map(lambda p: (p**2).sum(), params))
def ci_coverage(
actual: np.ndarray, pmean: np.ndarray, psd: np.ndarray, r: float = 0.95) -> np.ndarray:
assert actual.shape == pmean.shape == psd.shape
k = scipy.stats.norm.ppf((1+r) / 2)
return np.logical_and(pmean - k*psd <= actual, actual <= pmean + k*psd).mean()
def mse(a: np.ndarray, b: np.ndarray) -> np.ndarray:
assert a.shape == b.shape
return ((a-b) ** 2).mean()
def normal_loglh(mean, sd, val):
return -0.5 * (np.log(np.pi*2) + 2*np.log(sd) + ((mean-val)/sd)**2)
class TensorDataLoader(object):
""" Lightweight DataLoader . TensorDataset for jax """
def __init__(self, *arrs, batch_size=None, shuffle=False, rng=None, dtype=np.float32):
assert batch_size is not None
self.arrs = [a.astype(dtype) for a in arrs]
self.N, self.B = arrs[0].shape[0], batch_size
self.shuffle = shuffle
assert all(a.shape[0] == self.N for a in arrs[1:])
self.rng = rng if rng is not None else onp.random.RandomState(23)
def __iter__(self):
idcs = onp.arange(self.N)
if self.shuffle:
self.rng.shuffle(idcs)
self.arrs_cur = [a[idcs] for a in self.arrs]
self.i = 0
return self
def __next__(self):
if self.i < self.N:
old_i = self.i
self.i = min(self.i + self.B, self.N)
return tuple(a[old_i:self.i] for a in self.arrs)
else:
raise StopIteration
def __len__(self):
return (self.N+self.B-1) // self.B
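# Usage sketch (illustrative):
#
#     loader = TensorDataLoader(xs, ys, batch_size=128, shuffle=True)
#     for xb, yb in loader:       # tuples of equally indexed mini-batches
#         ...
#     n_batches = len(loader)     # == ceil(N / 128)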
def split_list_args(s, typ=float):
if len(s.strip()) == 0:
return []
return list(map(typ, s.split(',')))
def data_split(*arrs, split_ratio=0.8, rng=None):
    if rng is None:
rng = onp.random
N = arrs[0].shape[0]
assert all(a.shape[0] == N for a in arrs)
idcs = onp.arange(N)
rng.shuffle(idcs)
split = int(N * split_ratio)
train_tuple = tuple(a[idcs[:split]] for a in arrs)
test_tuple = tuple(a[idcs[split:]] for a in arrs)
return train_tuple, test_tuple
def log_linspace(s, e, n):
return onp.exp(onp.linspace(onp.log(s), onp.log(e), n))
class Accumulator(object):
def __init__(self):
self.a = []
def append(self, d):
# if isinstance(d, th.Tensor):
# d = d.item()
if isinstance(d, jax.numpy.ndarray) and hasattr(d, '_device'):
d = float(d)
self.a.append(d)
def average(self):
return onp.mean(self.a)
def minimum(self, s=0):
return onp.min(self.a[s:])
def maximum(self, s=0):
return onp.max(self.a[s:])
def argmin(self):
return onp.argmin(self.a)
def __getitem__(self, i):
return self.a[i]
class StatsDict(dict):
def __init__(self, *args):
if len(args) == 0:
super().__init__()
else:
assert len(args) == 1
a = args[0]
if isinstance(a, dict):
super().__init__(a.items())
else:
super().__init__(a)
def add_prefix(self, pref: str, sep: str = '/') -> StatsDict:
return StatsDict((pref + sep + k, v) for k, v in self.items())
    def filter(self, pred_or_key: Union[Callable[[str], bool], str]):
pred = pred_or_key if callable(pred_or_key) else lambda k: k == pred_or_key
return StatsDict((k, v) for k, v in self.items() if not pred(k))
class StatsAccumulator(object):
def __init__(self):
pass
def append(self, d: dict):
if not hasattr(self, 'stats'):
self.stats = dict((k, Accumulator()) for k in d)
for k in d:
v = float(d[k])
self.stats[k].append(v)
def dump(self) -> StatsDict:
return StatsDict((k, self.stats[k].average()) for k in self.stats)
def __getitem__(self, k: str) -> Accumulator:
return self.stats[k]
def traverse_ds(
step_fn: Callable[[Any], Any], dset: Iterable[Any], has_stats: bool,
rng: Union[np.ndarray, None] = None
) -> Tuple[float, StatsDict]:
stats = StatsAccumulator()
for data in dset:
if rng is not None:
rng, c_rng = jax.random.split(rng)
ret = step_fn(data, rng=c_rng)
else:
ret = step_fn(data)
if has_stats:
loss, rdct = ret
rdct['_loss'] = loss
else:
rdct = StatsDict({'_loss': ret})
stats.append(rdct)
ret = stats.dump()
return ret['_loss'], ret
class DummyContext(object):
def __init__(self, v):
self.v = v
def __enter__(self):
return self.v
def __exit__(self, *args, **kw):
pass
def set_postfix(self):
pass
def add_bool_flag(parser, name, default=None):
parser.add_argument('-'+name, action='store_true', default=default)
parser.add_argument('-no_'+name, action='store_false', dest=name)
| 28.195122
| 95
| 0.575836
|
864da24b695fd8123732b16c273cfa57f14655df
| 1,027
|
py
|
Python
|
ingaia-helpers/config.py
|
emersonfdias/ingaia-helpers
|
f9670072a3339c7cbd619f27e80278b109f67819
|
[
"MIT"
] | null | null | null |
ingaia-helpers/config.py
|
emersonfdias/ingaia-helpers
|
f9670072a3339c7cbd619f27e80278b109f67819
|
[
"MIT"
] | null | null | null |
ingaia-helpers/config.py
|
emersonfdias/ingaia-helpers
|
f9670072a3339c7cbd619f27e80278b109f67819
|
[
"MIT"
] | null | null | null |
"""
api.helpers.config
~~~~~~~~~~~~~~~~~~~~~~~~~
Implements project configuration helpers.
"""
import yaml
class Dotable(dict):
__getattr__= dict.__getitem__
def __init__(self, d):
self.update(**dict((k, self.parse(v))
for k, v in d.iteritems()))
@classmethod
def parse(cls, v):
if isinstance(v, dict):
return cls(v)
elif isinstance(v, list):
return [cls.parse(i) for i in v]
else:
return v
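# Usage sketch (illustrative): Dotable.parse turns nested dicts into dot-accessible
# objects (relying on the Python 2 iteritems call above).
#
#     cfg = Dotable.parse({'db': {'host': 'localhost', 'port': 5432}})
#     cfg.db.host    # -> 'localhost'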
def env():
from google.appengine.api.app_identity import get_application_id
appname = get_application_id()
if appname.find('-development') > 0 or appname.find('-dev') > 0:
return 'dev'
elif appname.find('-staging') > 0 or appname.find('-st') > 0:
return 'staging'
else:
return 'prod'
def version():
import os; return os.environ['CURRENT_VERSION_ID']
with open('config/%s.yaml' % env(), 'r') as yamlfile:
config = Dotable.parse(yaml.load(yamlfile))
| 23.883721
| 68
| 0.573515
|
1de1909c4179fc455525beb87a0303ecd36b9979
| 1,148
|
py
|
Python
|
metricbeat/scripts/generate_imports_helper.py
|
swift1911/beats
|
f9e06417b654c6e32bb7d05d34fb63b7a759f77a
|
[
"Apache-2.0"
] | 11
|
2018-04-01T06:15:48.000Z
|
2021-04-29T06:02:58.000Z
|
metricbeat/scripts/generate_imports_helper.py
|
swift1911/beats
|
f9e06417b654c6e32bb7d05d34fb63b7a759f77a
|
[
"Apache-2.0"
] | 13
|
2018-03-23T21:04:55.000Z
|
2019-02-06T21:27:01.000Z
|
metricbeat/scripts/generate_imports_helper.py
|
swift1911/beats
|
f9e06417b654c6e32bb7d05d34fb63b7a759f77a
|
[
"Apache-2.0"
] | 4
|
2019-02-08T16:32:49.000Z
|
2021-06-23T14:55:56.000Z
|
from os.path import abspath, isdir, join
from os import listdir
comment = """Package include imports all Module and MetricSet packages so that they register
their factories with the global registry. This package can be imported in the
main package to automatically register all of the standard supported Metricbeat
modules."""
def get_importable_lines(go_beat_path, import_line):
path = abspath("module")
imported_lines = []
modules = [m for m in listdir(path) if isdir(join(path, m)) and m != "_meta"]
for module in modules:
module_import = import_line.format(beat_path=go_beat_path, module="module", name=module)
imported_lines.append(module_import)
module_path = join(path, module)
metricsets = [m for m in listdir(module_path) if isdir(join(module_path, m)) and m not in ["_meta", "vendor"]]
for metricset in metricsets:
metricset_name = "{}/{}".format(module, metricset)
metricset_import = import_line.format(beat_path=go_beat_path, module="module", name=metricset_name)
imported_lines.append(metricset_import)
return sorted(imported_lines)
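# Illustrative example (the actual template is supplied by the caller): with
# import_line = '\t_ "{beat_path}/{module}/{name}"' and
# go_beat_path = 'github.com/elastic/beats/metricbeat', a module "system" with a
# metricset "cpu" yields Go blank-import lines for both
# ".../metricbeat/module/system" and ".../metricbeat/module/system/cpu".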
| 42.518519
| 118
| 0.718641
|
834f97fb57966ed7136d2ddaf2ab7a0ec4e86e09
| 1,184
|
py
|
Python
|
postgres/migrations/migrations/versions/3dcd7657d563_.py
|
rbroc/neuroscout
|
da6b0097050c1a468baf03d3c71161adbf688d5b
|
[
"BSD-3-Clause"
] | null | null | null |
postgres/migrations/migrations/versions/3dcd7657d563_.py
|
rbroc/neuroscout
|
da6b0097050c1a468baf03d3c71161adbf688d5b
|
[
"BSD-3-Clause"
] | 9
|
2019-11-05T16:02:19.000Z
|
2022-03-11T23:52:52.000Z
|
postgres/migrations/migrations/versions/3dcd7657d563_.py
|
rbroc/neuroscout
|
da6b0097050c1a468baf03d3c71161adbf688d5b
|
[
"BSD-3-Clause"
] | 1
|
2019-07-22T18:08:55.000Z
|
2019-07-22T18:08:55.000Z
|
"""empty message
Revision ID: 3dcd7657d563
Revises: 9efb0d7634e3
Create Date: 2019-05-22 19:23:56.537827
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3dcd7657d563'
down_revision = '9efb0d7634e3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('result')
op.add_column('analysis', sa.Column('traceback', sa.Text(), nullable=True))
op.drop_column('analysis', 'compile_traceback')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('analysis', sa.Column('compile_traceback', sa.TEXT(), autoincrement=False, nullable=True))
op.drop_column('analysis', 'traceback')
op.create_table('result',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('analysis_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='result_analysis_id_fkey'),
sa.PrimaryKeyConstraint('id', name='result_pkey')
)
# ### end Alembic commands ###
| 31.157895
| 108
| 0.701014
|
d5d40f573c6a7992d8c580173eb490cb1838befd
| 140
|
py
|
Python
|
test/__init__.py
|
JosephMcGrath/Chao-Examiner
|
a1b5b016ccfd579fbe0ae7a9ab9810ab280b3432
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
JosephMcGrath/Chao-Examiner
|
a1b5b016ccfd579fbe0ae7a9ab9810ab280b3432
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
JosephMcGrath/Chao-Examiner
|
a1b5b016ccfd579fbe0ae7a9ab9810ab280b3432
|
[
"MIT"
] | null | null | null |
"""
Tests for the chao_examiner package.
"""
import sys
sys.path.append("src")
import chao_examiner
chao_examiner.setup_logging("test")
| 11.666667
| 36
| 0.75
|
b5f23acd5972d0bbe44ccc4bba9d69319eb1e888
| 3,085
|
py
|
Python
|
configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
|
BB88Lee/mmdetection3d
|
62aeeadf70ac1229c595e3a4fe09d8a49df808f1
|
[
"Apache-2.0"
] | 136
|
2021-06-03T06:37:56.000Z
|
2022-03-29T13:29:03.000Z
|
configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
|
BB88Lee/mmdetection3d
|
62aeeadf70ac1229c595e3a4fe09d8a49df808f1
|
[
"Apache-2.0"
] | 38
|
2021-06-05T12:41:30.000Z
|
2022-03-23T07:31:28.000Z
|
configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
|
BB88Lee/mmdetection3d
|
62aeeadf70ac1229c595e3a4fe09d8a49df808f1
|
[
"Apache-2.0"
] | 14
|
2020-08-12T13:14:32.000Z
|
2022-01-28T16:34:21.000Z
|
voxel_size = [0.2, 0.2, 8]
model = dict(
type='CenterPoint',
pts_voxel_layer=dict(
max_num_points=20, voxel_size=voxel_size, max_voxels=(30000, 40000)),
pts_voxel_encoder=dict(
type='PillarFeatureNet',
in_channels=5,
feat_channels=[64],
with_distance=False,
voxel_size=(0.2, 0.2, 8),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
legacy=False),
pts_middle_encoder=dict(
type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)),
pts_backbone=dict(
type='SECOND',
in_channels=64,
out_channels=[64, 128, 256],
layer_nums=[3, 5, 5],
layer_strides=[2, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=False)),
pts_neck=dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
out_channels=[128, 128, 128],
upsample_strides=[0.5, 1, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
upsample_cfg=dict(type='deconv', bias=False),
use_conv_for_no_stride=True),
pts_bbox_head=dict(
type='CenterHead',
in_channels=sum([128, 128, 128]),
tasks=[
dict(num_class=1, class_names=['car']),
dict(num_class=2, class_names=['truck', 'construction_vehicle']),
dict(num_class=2, class_names=['bus', 'trailer']),
dict(num_class=1, class_names=['barrier']),
dict(num_class=2, class_names=['motorcycle', 'bicycle']),
dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
],
common_heads=dict(
reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
share_conv_channel=64,
bbox_coder=dict(
type='CenterPointBBoxCoder',
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_num=500,
score_threshold=0.1,
out_size_factor=4,
voxel_size=voxel_size[:2],
code_size=9),
seperate_head=dict(
type='SeparateHead', init_bias=-2.19, final_kernel=3),
loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),
loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),
norm_bbox=True))
# model training and testing settings
train_cfg = dict(
pts=dict(
grid_size=[512, 512, 1],
voxel_size=voxel_size,
out_size_factor=4,
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2]))
test_cfg = dict(
pts=dict(
post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_per_img=500,
max_pool_nms=False,
min_radius=[4, 12, 10, 1, 0.85, 0.175],
score_threshold=0.1,
pc_range=[-51.2, -51.2],
out_size_factor=4,
voxel_size=voxel_size[:2],
nms_type='rotate',
pre_max_size=1000,
post_max_size=83,
nms_thr=0.2))
| 36.72619
| 77
| 0.572447
|
fdc0bd1b448005fad76d752deb47bfb177420ba8
| 776
|
py
|
Python
|
game/data/scripts/custom/7001_ClanItems/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 10
|
2019-07-27T13:12:11.000Z
|
2022-01-15T19:13:26.000Z
|
game/data/scripts/custom/7001_ClanItems/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 1
|
2021-08-06T12:15:01.000Z
|
2021-08-09T10:18:47.000Z
|
game/data/scripts/custom/7001_ClanItems/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 2
|
2020-02-20T23:02:26.000Z
|
2020-11-22T09:27:51.000Z
|
# Created by L2Emu Team
import sys
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
qn = "7001_ClanItems"
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onFirstTalk (Self,npc,player):
st = player.getQuestState(qn)
npcId = npc.getNpcId()
if npcId in [32024,32025] :
if player.isClanLeader() :
htmltext = str(npcId) + ".htm"
else :
htmltext = str(npcId) + "-no.htm"
return htmltext
QUEST = Quest(7001,qn,"custom")
QUEST.addStartNpc(32024)
QUEST.addStartNpc(32025)
QUEST.addFirstTalkId(32024)
QUEST.addFirstTalkId(32025)
| 26.758621
| 79
| 0.722938
|
6148e4fdd5599aff18b9c6d8af04b75ab214fbf1
| 81
|
py
|
Python
|
egs2/slurp_entity/asr1/local/evaluation/metrics/__init__.py
|
roshansh-cmu/espnet
|
5fa6dcc4e649dc66397c629d0030d09ecef36b80
|
[
"Apache-2.0"
] | null | null | null |
egs2/slurp_entity/asr1/local/evaluation/metrics/__init__.py
|
roshansh-cmu/espnet
|
5fa6dcc4e649dc66397c629d0030d09ecef36b80
|
[
"Apache-2.0"
] | null | null | null |
egs2/slurp_entity/asr1/local/evaluation/metrics/__init__.py
|
roshansh-cmu/espnet
|
5fa6dcc4e649dc66397c629d0030d09ecef36b80
|
[
"Apache-2.0"
] | null | null | null |
from .distance import Distance
from .metrics import ErrorMetric, compute_metrics
| 27
| 49
| 0.851852
|
5a8ccc7a90068c91644e1667a207cbe0377655df
| 28,319
|
py
|
Python
|
cirq-core/cirq/work/observable_measurement.py
|
BearerPipelineTest/Cirq
|
e00767a2ef1233e82e9089cf3801a77e4cc3aea3
|
[
"Apache-2.0"
] | 1
|
2022-02-05T22:17:39.000Z
|
2022-02-05T22:17:39.000Z
|
cirq-core/cirq/work/observable_measurement.py
|
BearerPipelineTest/Cirq
|
e00767a2ef1233e82e9089cf3801a77e4cc3aea3
|
[
"Apache-2.0"
] | 4
|
2022-01-16T14:12:15.000Z
|
2022-02-24T03:58:46.000Z
|
cirq-core/cirq/work/observable_measurement.py
|
Nexuscompute/Cirq
|
640ef8f82d6a56ec95361388ce7976e096cca906
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import dataclasses
import itertools
import os
import tempfile
import warnings
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
import sympy
from cirq import circuits, study, ops, value, protocols
from cirq._doc import document
from cirq.work.observable_grouping import group_settings_greedy, GROUPER_T
from cirq.work.observable_measurement_data import (
BitstringAccumulator,
ObservableMeasuredResult,
flatten_grouped_results,
)
from cirq.work.observable_settings import InitObsSetting, observables_to_settings, _MeasurementSpec
if TYPE_CHECKING:
import cirq
from cirq.value.product_state import _NamedOneQubitState
MAX_REPETITIONS_PER_JOB = 3_000_000
document(
MAX_REPETITIONS_PER_JOB,
"""The maximum repetitions allowed in a single batch job.
This depends on the Sampler executing your batch job. It is set to be
tens of minutes assuming ~kilosamples per second.
""",
)
def _with_parameterized_layers(
circuit: 'cirq.AbstractCircuit', qubits: Sequence['cirq.Qid'], needs_init_layer: bool
) -> 'cirq.Circuit':
"""Return a copy of the input circuit with parameterized single-qubit rotations.
These rotations flank the circuit: the initial two layers of X and Y gates
are given parameter names "{qubit}-Xi" and "{qubit}-Yi" and are used
to set up the initial state. If `needs_init_layer` is False,
these two layers of gates are omitted.
The final two layers of X and Y gates are given parameter names
"{qubit}-Xf" and "{qubit}-Yf" and are use to change the frame of the
qubit before measurement, effectively measuring in bases other than Z.
"""
x_beg_mom = circuits.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xi') for q in qubits])
y_beg_mom = circuits.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yi') for q in qubits])
x_end_mom = circuits.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xf') for q in qubits])
y_end_mom = circuits.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yf') for q in qubits])
meas_mom = circuits.Moment([ops.measure(*qubits, key='z')])
if needs_init_layer:
total_circuit = circuits.Circuit([x_beg_mom, y_beg_mom])
total_circuit += circuit.unfreeze()
else:
total_circuit = circuit.unfreeze()
total_circuit.append([x_end_mom, y_end_mom, meas_mom])
return total_circuit
class StoppingCriteria(abc.ABC):
"""An abstract object that queries a BitstringAccumulator to figure out
whether that `meas_spec` is complete."""
@abc.abstractmethod
def more_repetitions(self, accumulator: BitstringAccumulator) -> int:
"""Return the number of additional repetitions to take.
StoppingCriteria should be respectful and have some notion of a
maximum number of repetitions per chunk.
"""
@dataclasses.dataclass(frozen=True)
class VarianceStoppingCriteria(StoppingCriteria):
"""Stop sampling when average variance per term drops below a variance bound."""
variance_bound: float
repetitions_per_chunk: int = 10_000
def more_repetitions(self, accumulator: BitstringAccumulator) -> int:
if len(accumulator.bitstrings) == 0:
return self.repetitions_per_chunk
cov = accumulator.covariance()
n_terms = cov.shape[0]
sum_variance = np.sum(cov)
var_of_the_e = sum_variance / len(accumulator.bitstrings)
vpt = var_of_the_e / n_terms
if vpt <= self.variance_bound:
# Done
return 0
return self.repetitions_per_chunk
def _json_dict_(self):
return protocols.dataclass_json_dict(self)
@dataclasses.dataclass(frozen=True)
class RepetitionsStoppingCriteria(StoppingCriteria):
"""Stop sampling when the number of repetitions has been reached."""
total_repetitions: int
repetitions_per_chunk: int = 10_000
def more_repetitions(self, accumulator: BitstringAccumulator) -> int:
done = accumulator.n_repetitions
todo = self.total_repetitions - done
if todo <= 0:
return 0
to_do_next = min(self.repetitions_per_chunk, todo)
return to_do_next
def _json_dict_(self):
return protocols.dataclass_json_dict(self)
_OBS_TO_PARAM_VAL: Dict[Tuple['cirq.Pauli', bool], Tuple[float, float]] = {
(ops.X, False): (0, -1 / 2),
(ops.X, True): (0, +1 / 2),
(ops.Y, False): (1 / 2, 0),
(ops.Y, True): (-1 / 2, 0),
(ops.Z, False): (0, 0),
(ops.Z, True): (1, 0),
}
"""Mapping from single-qubit Pauli observable to the X- and Y-rotation parameter values. The
second element in the key is whether to measure in the positive or negative (flipped) basis
for readout symmetrization."""
_STATE_TO_PARAM_VAL: Dict['_NamedOneQubitState', Tuple[float, float]] = {
value.KET_PLUS: (0, +1 / 2),
value.KET_MINUS: (0, -1 / 2),
value.KET_IMAG: (-1 / 2, 0),
value.KET_MINUS_IMAG: (+1 / 2, 0),
value.KET_ZERO: (0, 0),
value.KET_ONE: (1, 0),
}
"""Mapping from an initial _NamedOneQubitState to the X- and Y-rotation parameter values."""
def _get_params_for_setting(
setting: InitObsSetting,
flips: Iterable[bool],
qubits: Sequence['cirq.Qid'],
needs_init_layer: bool,
) -> Dict[str, float]:
"""Return the parameter dictionary for the given setting.
This must be used in conjunction with a circuit generated by
`_with_parameterized_layers`. `flips` (used for readout symmetrization)
should be of the same length as `qubits` and will modify the parameters
to also include a bit flip (`X`). Code responsible for running the
circuit should make sure to flip bits back prior to analysis.
Like `_with_parameterized_layers`, we omit params for initialization gates
if we know that `setting.init_state` is the all-zeros state and
`needs_init_layer` is False.
"""
setting = _pad_setting(setting, qubits)
params = {}
for qubit, flip in itertools.zip_longest(qubits, flips):
if qubit is None or flip is None:
raise ValueError("`qubits` and `flips` must be equal length")
# When getting the one-qubit state / observable for this qubit,
# you may be wondering what if there's no observable specified
# for that qubit. We mandate that by the time you get to this stage,
# each _max_setting has
# weight(in_state) == weight(out_operator) == len(qubits)
# See _pad_setting
pauli = setting.observable[qubit]
xf_param, yf_param = _OBS_TO_PARAM_VAL[pauli, flip]
params[f'{qubit}-Xf'] = xf_param
params[f'{qubit}-Yf'] = yf_param
if needs_init_layer:
state = setting.init_state[qubit]
xi_param, yi_param = _STATE_TO_PARAM_VAL[state]
params[f'{qubit}-Xi'] = xi_param
params[f'{qubit}-Yi'] = yi_param
return params
def _pad_setting(
max_setting: InitObsSetting,
qubits: Sequence['cirq.Qid'],
pad_init_state_with=value.KET_ZERO,
pad_obs_with: 'cirq.Gate' = ops.Z,
) -> InitObsSetting:
"""Pad `max_setting`'s `init_state` and `observable` with `pad_xx_with` operations
(defaults: |0> and Z) so each max_setting has the same qubits. We need this
to be the case so we can fill in all the parameters, see `_get_params_for_setting`.
"""
obs = max_setting.observable
assert obs.coefficient == 1, "Only the max_setting should be padded."
for qubit in qubits:
        if qubit not in obs:
obs *= pad_obs_with(qubit)
init_state = max_setting.init_state
init_state_original_qubits = init_state.qubits
for qubit in qubits:
        if qubit not in init_state_original_qubits:
init_state *= pad_init_state_with(qubit)
return InitObsSetting(init_state=init_state, observable=obs)
def _aggregate_n_repetitions(next_chunk_repetitions: Set[int]) -> int:
"""A stopping criteria can request a different number of more_repetitions for each
measurement spec. For batching efficiency, we take the max and issue a warning in this case."""
if len(next_chunk_repetitions) == 1:
return list(next_chunk_repetitions)[0]
reps = max(next_chunk_repetitions)
warnings.warn(
f"The stopping criteria specified a various numbers of "
f"repetitions to perform next. To be able to submit as a single "
f"sweep, the largest value will be used: {reps}."
)
return reps
def _check_meas_specs_still_todo(
meas_specs: List[_MeasurementSpec],
accumulators: Dict[_MeasurementSpec, BitstringAccumulator],
stopping_criteria: StoppingCriteria,
) -> Tuple[List[_MeasurementSpec], int]:
"""Filter `meas_specs` in case some are done.
In the sampling loop in `measure_grouped_settings`, we submit
each `meas_spec` in chunks. This function contains the logic for
removing `meas_spec`s from the loop if they are done.
"""
still_todo = []
repetitions_set: Set[int] = set()
for meas_spec in meas_specs:
accumulator = accumulators[meas_spec]
more_repetitions = stopping_criteria.more_repetitions(accumulator)
if more_repetitions < 0:
raise ValueError(
"Stopping criteria's `more_repetitions` should return 0 or a positive number."
)
if more_repetitions == 0:
continue
repetitions_set.add(more_repetitions)
still_todo.append(meas_spec)
if len(still_todo) == 0:
return still_todo, 0
repetitions = _aggregate_n_repetitions(repetitions_set)
total_repetitions = len(still_todo) * repetitions
if total_repetitions > MAX_REPETITIONS_PER_JOB:
old_repetitions = repetitions
repetitions = MAX_REPETITIONS_PER_JOB // len(still_todo)
if repetitions < 10:
raise ValueError(
"You have requested too many parameter settings to batch your job effectively. "
"Consider fewer sweeps or manually splitting sweeps into multiple jobs."
)
warnings.warn(
f"The number of requested sweep parameters is high. To avoid a batched job with more "
f"than {MAX_REPETITIONS_PER_JOB} shots, the number of shots per call to run_sweep "
f"(per parameter value) will be throttled from {old_repetitions} to {repetitions}."
)
return still_todo, repetitions
@dataclasses.dataclass(frozen=True)
class _FlippyMeasSpec:
"""Internally, each MeasurementSpec class is split into two
_FlippyMeasSpecs to support readout symmetrization.
Bitstring results are combined, so this should be opaque to the user.
"""
meas_spec: _MeasurementSpec
flips: np.ndarray
qubits: Sequence['cirq.Qid']
def param_tuples(self, *, needs_init_layer=True):
yield from _get_params_for_setting(
self.meas_spec.max_setting,
flips=self.flips,
qubits=self.qubits,
needs_init_layer=needs_init_layer,
).items()
yield from self.meas_spec.circuit_params.items()
def _subdivide_meas_specs(
meas_specs: Iterable[_MeasurementSpec],
repetitions: int,
qubits: Sequence['cirq.Qid'],
readout_symmetrization: bool,
) -> Tuple[List[_FlippyMeasSpec], int]:
"""Split measurement specs into sub-jobs for readout symmetrization
In readout symmetrization, we first run the "normal" circuit followed
by running the circuit with flipped measurement.
One _MeasurementSpec is split into two _FlippyMeasSpecs. These are run
separately but accumulated according to their shared _MeasurementSpec.
"""
n_qubits = len(qubits)
flippy_mspecs = []
for meas_spec in meas_specs:
all_normal = np.zeros(n_qubits, dtype=bool)
flippy_mspecs.append(_FlippyMeasSpec(meas_spec=meas_spec, flips=all_normal, qubits=qubits))
if readout_symmetrization:
all_flipped = np.ones(n_qubits, dtype=bool)
flippy_mspecs.append(
_FlippyMeasSpec(meas_spec=meas_spec, flips=all_flipped, qubits=qubits)
)
if readout_symmetrization:
repetitions //= 2
return flippy_mspecs, repetitions
def _to_sweep(param_tuples):
"""Turn param tuples into a sweep."""
to_sweep = [dict(pt) for pt in param_tuples]
to_sweep = study.to_sweep(to_sweep)
return to_sweep
def _parse_checkpoint_options(
checkpoint: bool, checkpoint_fn: Optional[str], checkpoint_other_fn: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
"""Parse the checkpoint-oriented options in `measure_grouped_settings`.
This function contains the validation and defaults logic. Please see
`measure_grouped_settings` for documentation on these args.
Returns:
checkpoint_fn, checkpoint_other_fn: Parsed or default filenames for primary and previous
checkpoint files.
Raises:
ValueError: If a `checkpoint_fn` was specified, but `checkpoint` was False, if the
`checkpoint_fn` is not of the form filename.json, or if `checkout_fn` and
`checkpoint_other_fn` are the same filename.
"""
if not checkpoint:
if checkpoint_fn is not None or checkpoint_other_fn is not None:
raise ValueError(
"Checkpoint filenames were provided but `checkpoint` was set to False."
)
return None, None
if checkpoint_fn is None:
checkpoint_dir = tempfile.mkdtemp()
chk_basename = 'observables'
checkpoint_fn = f'{checkpoint_dir}/{chk_basename}.json'
if checkpoint_other_fn is None:
checkpoint_dir = os.path.dirname(checkpoint_fn)
chk_basename = os.path.basename(checkpoint_fn)
chk_basename, dot, ext = chk_basename.rpartition('.')
if chk_basename == '' or dot != '.' or ext == '':
raise ValueError(
f"You specified `checkpoint_fn={checkpoint_fn!r}` which does not follow the "
f"pattern of 'filename.extension'. Please follow this pattern or fully specify "
f"`checkpoint_other_fn`."
)
if ext != 'json':
raise ValueError(
"Please use a `.json` filename or fully "
"specify checkpoint_fn and checkpoint_other_fn"
)
if checkpoint_dir == '':
checkpoint_other_fn = f'{chk_basename}.prev.json'
else:
checkpoint_other_fn = f'{checkpoint_dir}/{chk_basename}.prev.json'
if checkpoint_fn == checkpoint_other_fn:
raise ValueError(
f"`checkpoint_fn` and `checkpoint_other_fn` were set to the same "
f"filename: {checkpoint_fn}. Please use two different filenames."
)
return checkpoint_fn, checkpoint_other_fn
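# Worked example (follows the logic above): with checkpoint=True and
# checkpoint_fn='runs/observables.json', checkpoint_other_fn defaults to
# 'runs/observables.prev.json'; with checkpoint_fn=None both files are placed in a
# fresh temporary directory.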
@dataclasses.dataclass(frozen=True)
class CheckpointFileOptions:
"""Options to configure "checkpointing" to save intermediate results.
Args:
checkpoint: If set to True, save cumulative raw results at the end
of each iteration of the sampling loop. Load in these results
with `cirq.read_json`.
checkpoint_fn: The filename for the checkpoint file. If `checkpoint`
is set to True and this is not specified, a file in a temporary
directory will be used.
checkpoint_other_fn: The filename for another checkpoint file, which
contains the previous checkpoint. This lets us avoid losing data if
a failure occurs during checkpoint writing. If `checkpoint`
is set to True and this is not specified, a file in a temporary
directory will be used. If `checkpoint` is set to True and
`checkpoint_fn` is specified but this argument is *not* specified,
"{checkpoint_fn}.prev.json" will be used.
"""
checkpoint: bool = False
checkpoint_fn: Optional[str] = None
checkpoint_other_fn: Optional[str] = None
def __post_init__(self):
fn, other_fn = _parse_checkpoint_options(
self.checkpoint, self.checkpoint_fn, self.checkpoint_other_fn
)
object.__setattr__(self, 'checkpoint_fn', fn)
object.__setattr__(self, 'checkpoint_other_fn', other_fn)
def maybe_to_json(self, obj: Any):
"""Call `cirq.to_json with `value` according to the configuration options in this class.
If `checkpoint=False`, nothing will happen. Otherwise, we will use `checkpoint_fn` and
`checkpoint_other_fn` as the destination JSON file as described in the class docstring.
"""
if not self.checkpoint:
return
assert self.checkpoint_fn is not None, 'mypy'
assert self.checkpoint_other_fn is not None, 'mypy'
if os.path.exists(self.checkpoint_fn):
os.replace(self.checkpoint_fn, self.checkpoint_other_fn)
protocols.to_json(obj, self.checkpoint_fn)
def _needs_init_layer(grouped_settings: Dict[InitObsSetting, List[InitObsSetting]]) -> bool:
"""Helper function to go through init_states and determine if any of them need an
initialization layer of single-qubit gates."""
for max_setting in grouped_settings.keys():
if any(st is not value.KET_ZERO for _, st in max_setting.init_state):
return True
return False
def measure_grouped_settings(
circuit: 'cirq.AbstractCircuit',
grouped_settings: Dict[InitObsSetting, List[InitObsSetting]],
sampler: 'cirq.Sampler',
stopping_criteria: StoppingCriteria,
*,
readout_symmetrization: bool = False,
circuit_sweep: 'cirq.Sweepable' = None,
readout_calibrations: Optional[BitstringAccumulator] = None,
checkpoint: CheckpointFileOptions = CheckpointFileOptions(),
) -> List[BitstringAccumulator]:
"""Measure a suite of grouped InitObsSetting settings.
This is a low-level API for accessing the observable measurement
framework. See also `measure_observables` and `measure_observables_df`.
Args:
circuit: The circuit. This can contain parameters, in which case
you should also specify `circuit_sweep`.
grouped_settings: A series of setting groups expressed as a dictionary.
The key is the max-weight setting used for preparing single-qubit
basis-change rotations. The value is a list of settings
compatible with the maximal setting you desire to measure.
Automated routing algorithms like `group_settings_greedy` can
be used to construct this input.
sampler: A sampler.
stopping_criteria: A StoppingCriteria object that can report
whether enough samples have been sampled.
readout_symmetrization: If set to True, each `meas_spec` will be
split into two runs: one normal and one where a bit flip is
incorporated prior to measurement. In the latter case, the
measured bit will be flipped back classically and accumulated
together. This causes readout error to appear symmetric,
p(0|0) = p(1|1).
circuit_sweep: Additional parameter sweeps for parameters contained
in `circuit`. The total sweep is the product of the circuit sweep
with parameter settings for the single-qubit basis-change rotations.
readout_calibrations: The result of `calibrate_readout_error`.
checkpoint: Options to set up optional checkpointing of intermediate
data for each iteration of the sampling loop. See the documentation
for `CheckpointFileOptions` for more. Load in these results with
`cirq.read_json`.
Raises:
        ValueError: If readout calibration is specified, but `readout_symmetrization`
is not True.
"""
if readout_calibrations is not None and not readout_symmetrization:
raise ValueError("Readout calibration only works if `readout_symmetrization` is enabled.")
qubits = sorted({q for ms in grouped_settings.keys() for q in ms.init_state.qubits})
qubit_to_index = {q: i for i, q in enumerate(qubits)}
needs_init_layer = _needs_init_layer(grouped_settings)
measurement_param_circuit = _with_parameterized_layers(circuit, qubits, needs_init_layer)
# meas_spec provides a key for accumulators.
# meas_specs_todo is a mutable list. We will pop things from it as various
# specs are measured to the satisfaction of the stopping criteria
accumulators = {}
meas_specs_todo = []
for max_setting, param_resolver in itertools.product(
grouped_settings.keys(), study.to_resolvers(circuit_sweep)
):
circuit_params = param_resolver.param_dict
meas_spec = _MeasurementSpec(max_setting=max_setting, circuit_params=circuit_params)
accumulator = BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=grouped_settings[max_setting],
qubit_to_index=qubit_to_index,
readout_calibration=readout_calibrations,
)
accumulators[meas_spec] = accumulator
meas_specs_todo += [meas_spec]
while True:
meas_specs_todo, repetitions = _check_meas_specs_still_todo(
meas_specs=meas_specs_todo,
accumulators=accumulators,
stopping_criteria=stopping_criteria,
)
if len(meas_specs_todo) == 0:
break
flippy_meas_specs, repetitions = _subdivide_meas_specs(
meas_specs=meas_specs_todo,
repetitions=repetitions,
qubits=qubits,
readout_symmetrization=readout_symmetrization,
)
resolved_params = [
flippy_ms.param_tuples(needs_init_layer=needs_init_layer)
for flippy_ms in flippy_meas_specs
]
resolved_params = _to_sweep(resolved_params)
results = sampler.run_sweep(
program=measurement_param_circuit, params=resolved_params, repetitions=repetitions
)
assert len(results) == len(
flippy_meas_specs
), 'Not as many results received as sweeps requested!'
for flippy_ms, result in zip(flippy_meas_specs, results):
accumulator = accumulators[flippy_ms.meas_spec]
bitstrings = np.logical_xor(flippy_ms.flips, result.measurements['z'])
accumulator.consume_results(bitstrings.astype(np.uint8, casting='safe'))
checkpoint.maybe_to_json(list(accumulators.values()))
return list(accumulators.values())
_GROUPING_FUNCS: Dict[str, GROUPER_T] = {'greedy': group_settings_greedy}
def _parse_grouper(grouper: Union[str, GROUPER_T] = group_settings_greedy) -> GROUPER_T:
"""Logic for turning a named grouper into one of the build-in groupers in support of the
high-level `measure_observables` API."""
if isinstance(grouper, str):
try:
grouper = _GROUPING_FUNCS[grouper.lower()]
except KeyError:
raise ValueError(f"Unknown grouping function {grouper}")
return grouper
def _get_all_qubits(
circuit: 'cirq.AbstractCircuit', observables: Iterable['cirq.PauliString']
) -> List['cirq.Qid']:
"""Helper function for `measure_observables` to get all qubits from a circuit and a
collection of observables."""
qubit_set = set()
for obs in observables:
qubit_set |= set(obs.qubits)
qubit_set |= circuit.all_qubits()
return sorted(qubit_set)
def measure_observables(
circuit: 'cirq.AbstractCircuit',
observables: Iterable['cirq.PauliString'],
sampler: Union['cirq.Simulator', 'cirq.Sampler'],
stopping_criteria: StoppingCriteria,
*,
readout_symmetrization: bool = False,
circuit_sweep: Optional['cirq.Sweepable'] = None,
grouper: Union[str, GROUPER_T] = group_settings_greedy,
readout_calibrations: Optional[BitstringAccumulator] = None,
checkpoint: CheckpointFileOptions = CheckpointFileOptions(),
) -> List[ObservableMeasuredResult]:
"""Measure a collection of PauliString observables for a state prepared by a Circuit.
If you need more control over the process, please see `measure_grouped_settings` for a
lower-level API. If you would like your results returned as a pandas DataFrame,
please see `measure_observables_df`.
Args:
circuit: The circuit used to prepare the state to measure. This can contain parameters,
in which case you should also specify `circuit_sweep`.
observables: A collection of PauliString observables to measure. These will be grouped
into simultaneously-measurable groups, see `grouper` argument.
sampler: The sampler.
stopping_criteria: A StoppingCriteria object to indicate how precisely to sample
measurements for estimating observables.
readout_symmetrization: If set to True, each run will be split into two: one normal and
one where a bit flip is incorporated prior to measurement. In the latter case, the
measured bit will be flipped back classically and accumulated together. This causes
readout error to appear symmetric, p(0|0) = p(1|1).
circuit_sweep: Additional parameter sweeps for parameters contained in `circuit`. The
total sweep is the product of the circuit sweep with parameter settings for the
single-qubit basis-change rotations.
grouper: Either "greedy" or a function that groups lists of `InitObsSetting`. See the
documentation for the `grouped_settings` argument of `measure_grouped_settings` for
full details.
readout_calibrations: The result of `calibrate_readout_error`.
checkpoint: Options to set up optional checkpointing of intermediate data for each
iteration of the sampling loop. See the documentation for `CheckpointFileOptions` for
more. Load in these results with `cirq.read_json`.
Returns:
A list of ObservableMeasuredResult; one for each input PauliString.
"""
qubits = _get_all_qubits(circuit, observables)
settings = list(observables_to_settings(observables, qubits))
actual_grouper = _parse_grouper(grouper)
grouped_settings = actual_grouper(settings)
accumulators = measure_grouped_settings(
circuit=circuit,
grouped_settings=grouped_settings,
sampler=sampler,
stopping_criteria=stopping_criteria,
circuit_sweep=circuit_sweep,
readout_symmetrization=readout_symmetrization,
readout_calibrations=readout_calibrations,
checkpoint=checkpoint,
)
return flatten_grouped_results(accumulators)
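# Usage sketch (illustrative; assumes standard cirq objects):
#
#     q0, q1 = cirq.LineQubit.range(2)
#     circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
#     results = measure_observables(
#         circuit,
#         observables=[cirq.Z(q0) * cirq.Z(q1)],
#         sampler=cirq.Simulator(),
#         stopping_criteria=RepetitionsStoppingCriteria(10_000),
#     )
#     # results is a list of ObservableMeasuredResult, one per input PauliString.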
def measure_observables_df(
circuit: 'cirq.AbstractCircuit',
observables: Iterable['cirq.PauliString'],
sampler: Union['cirq.Simulator', 'cirq.Sampler'],
stopping_criteria: StoppingCriteria,
*,
readout_symmetrization: bool = False,
circuit_sweep: Optional['cirq.Sweepable'] = None,
grouper: Union[str, GROUPER_T] = group_settings_greedy,
readout_calibrations: Optional[BitstringAccumulator] = None,
checkpoint: CheckpointFileOptions = CheckpointFileOptions(),
):
"""Measure observables and return resulting data as a Pandas dataframe.
Please see `measure_observables` for argument documentation.
"""
results = measure_observables(
circuit=circuit,
observables=observables,
sampler=sampler,
stopping_criteria=stopping_criteria,
readout_symmetrization=readout_symmetrization,
circuit_sweep=circuit_sweep,
grouper=grouper,
readout_calibrations=readout_calibrations,
checkpoint=checkpoint,
)
df = pd.DataFrame(res.as_dict() for res in results)
return df
| 40.340456
| 99
| 0.694269
|
891d5b6d543f30860cddad3a823ee0f978da10f8
| 8,514
|
py
|
Python
|
tests/python/pants_test/engine/test_engine.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/engine/test_engine.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/engine/test_engine.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from dataclasses import dataclass, field
from textwrap import dedent
from typing import List
from pants.engine.rules import RootRule, rule
from pants.engine.scheduler import ExecutionError
from pants.engine.selectors import Get
from pants.reporting.async_workunit_handler import AsyncWorkunitHandler
from pants.testutil.engine.util import assert_equal_with_printing, remove_locations_from_traceback
from pants_test.engine.scheduler_test_base import SchedulerTestBase
class A:
pass
class B:
pass
class C:
pass
class D:
pass
def fn_raises(x):
raise Exception(f'An exception for {type(x).__name__}')
@rule
def nested_raise(x: B) -> A:
fn_raises(x)
@dataclass(frozen=True)
class Fib:
val: int
@rule
def fib(n: int) -> Fib:
if n < 2:
yield Fib(n)
x, y = yield Get(Fib, int(n-2)), Get(Fib, int(n-1))
yield Fib(x.val + y.val)
@dataclass(frozen=True)
class MyInt:
val: int
@dataclass(frozen=True)
class MyFloat:
val: float
@rule
def upcast(n: MyInt) -> MyFloat:
yield MyFloat(float(n.val))
class EngineTest(unittest.TestCase, SchedulerTestBase):
assert_equal_with_printing = assert_equal_with_printing
def scheduler(self, rules, include_trace_on_error):
return self.mk_scheduler(rules=rules, include_trace_on_error=include_trace_on_error)
def test_recursive_multi_get(self):
# Tests that a rule that "uses itself" multiple times per invoke works.
rules = [
fib,
RootRule(int),
]
fib_10, = self.mk_scheduler(rules=rules).product_request(Fib, subjects=[10])
self.assertEqual(55, fib_10.val)
def test_no_include_trace_error_raises_boring_error(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing('1 Exception encountered:\n Exception: An exception for B', str(cm.exception))
def test_no_include_trace_error_multiple_paths_raises_executionerror(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[B(), B()]))
self.assert_equal_with_printing(dedent('''
2 Exceptions encountered:
Exception: An exception for B
Exception: An exception for B''').lstrip(),
str(cm.exception))
def test_include_trace_error_raises_error_with_trace(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(dedent('''
1 Exception encountered:
Computing Select(<pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A)
Computing Task(nested_raise(), <pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A, true)
Throw(An exception for B)
Traceback (most recent call last):
File LOCATION-INFO, in call
val = func(*args)
File LOCATION-INFO, in nested_raise
fn_raises(x)
File LOCATION-INFO, in fn_raises
raise Exception(f'An exception for {type(x).__name__}')
Exception: An exception for B
''').lstrip()+'\n',
remove_locations_from_traceback(str(cm.exception)))
def test_fork_context(self):
# A smoketest that confirms that we can successfully enter and exit the fork context, which
# implies acquiring and releasing all relevant Engine resources.
expected = "42"
def fork_context_body():
return expected
res = self.mk_scheduler().with_fork_context(fork_context_body)
self.assertEquals(res, expected)
@unittest.skip('Inherently flaky as described in https://github.com/pantsbuild/pants/issues/6829')
def test_trace_multi(self):
# Tests that when multiple distinct failures occur, they are each rendered.
@rule
def d_from_b_nested_raise(b: B) -> D:
fn_raises(b)
@rule
def c_from_b_nested_raise(b: B) -> C:
fn_raises(b)
@rule
def a_from_c_and_d(c: C, d: D) -> A:
return A()
rules = [
RootRule(B),
d_from_b_nested_raise,
c_from_b_nested_raise,
a_from_c_and_d,
]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(dedent('''
1 Exception encountered:
Computing Select(<pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A)
Computing Task(a_from_c_and_d(), <pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A, true)
Computing Task(d_from_b_nested_raise(), <pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, =D, true)
Throw(An exception for B)
Traceback (most recent call last):
File LOCATION-INFO, in call
val = func(*args)
File LOCATION-INFO, in d_from_b_nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception('An exception for {}'.format(type(x).__name__))
Exception: An exception for B
Computing Select(<pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A)
Computing Task(a_from_c_and_d(), <pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, A, true)
Computing Task(c_from_b_nested_raise(), <pants_test.engine.test_engine.B object at 0xEEEEEEEEE>, =C, true)
Throw(An exception for B)
Traceback (most recent call last):
File LOCATION-INFO, in call
val = func(*args)
File LOCATION-INFO, in c_from_b_nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception('An exception for {}'.format(type(x).__name__))
Exception: An exception for B
''').lstrip()+'\n',
remove_locations_from_traceback(str(cm.exception)))
def test_illegal_root_selection(self):
rules = [
RootRule(B),
]
scheduler = self.scheduler(rules, include_trace_on_error=False)
# No rules are available to compute A.
with self.assertRaises(Exception) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing('No installed @rules can compute A for input Params(B).', str(cm.exception))
def test_non_existing_root_fails_differently(self):
rules = [
upcast,
]
with self.assertRaises(Exception) as cm:
list(self.mk_scheduler(rules=rules, include_trace_on_error=False))
self.assert_equal_with_printing(dedent('''
Rules with errors: 1
(MyFloat, [MyInt], upcast()):
No rule was available to compute MyInt. Maybe declare it as a RootRule(MyInt)?
''').strip(),
str(cm.exception)
)
def test_async_reporting(self):
rules = [ fib, RootRule(int)]
scheduler = self.mk_scheduler(rules, include_trace_on_error=False, should_report_workunits=True)
@dataclass
class Tracker:
workunits: List[dict] = field(default_factory=list)
def add(self, workunits) -> None:
self.workunits.extend(workunits)
tracker = Tracker()
async_reporter = AsyncWorkunitHandler(scheduler, callback=tracker.add, report_interval_seconds=0.01)
with async_reporter.session():
scheduler.product_request(Fib, subjects=[0])
# One workunit should be coming from the `Select` intrinsic, and the other from the single execution
# of the `fib` rule, for two total workunits being appended during the run of this rule.
self.assertEquals(len(tracker.workunits), 2)
tracker.workunits = []
with async_reporter.session():
scheduler.product_request(Fib, subjects=[10])
# Requesting a bigger fibonacci number will result in more rule executions and thus more reported workunits.
# In this case, we expect 10 invocations of the `fib` rule + the one Select for a total of 11.
self.assertEquals(len(tracker.workunits), 11)
| 31.88764
| 116
| 0.685929
|
0fef6c7482ec5330a40fd7fdea2337f9f2d2b406
| 4,093
|
py
|
Python
|
python/wct.py
|
octo-willy/mapping-wetlands
|
a9acab6a55e25bb4ef2c70106389006fc5462fb7
|
[
"MIT"
] | null | null | null |
python/wct.py
|
octo-willy/mapping-wetlands
|
a9acab6a55e25bb4ef2c70106389006fc5462fb7
|
[
"MIT"
] | null | null | null |
python/wct.py
|
octo-willy/mapping-wetlands
|
a9acab6a55e25bb4ef2c70106389006fc5462fb7
|
[
"MIT"
] | 1
|
2021-04-20T01:53:21.000Z
|
2021-04-20T01:53:21.000Z
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
from sklearn.cluster import MiniBatchKMeans
import rasterio as rio
import numpy as np
from skimage.filters import threshold_otsu
from python.misc import compute_index
from rasterio.plot import reshape_as_image
from tqdm import tqdm
def compute_cluster(img,k=3,random_seed=42):
"""
Computes cluster based on input image (stack).
"""
img_float = img.astype(np.float32)/10000
samples = reshape_as_image(img_float).reshape(-1,img_float.shape[0])
kmeans_pred = MiniBatchKMeans(n_clusters=k+1,
random_state=random_seed,
max_iter=10,
batch_size=10000,
reassignment_ratio=0).fit(samples)
return kmeans_pred.labels_.reshape(img.shape[1], img.shape[2]).astype(rio.uint16)
def compute_rws(mndwi_img,mgrn_img,thr=0.3):
"""
Computes Reliable Water Sample (RWS) region after (Chen et al.,2020).
Requires MNDWI and MGRN images.
"""
if thr == 'otsu':
thr = threshold_otsu(mndwi_img[mndwi_img>=0])
return np.where( (mndwi_img>=thr) &((mgrn_img>0) & (mgrn_img<0.15)),1,0)
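# Illustrative note: with the default thr=0.3 a pixel is marked as RWS (value 1)
# only when MNDWI >= 0.3 and 0 < MGRN < 0.15; thr='otsu' instead derives the MNDWI
# threshold from the non-negative MNDWI values via Otsu's method.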
def compute_mnws(img,cluster_img):
"""
Computes Minimum Normalized Water Score (MNWS) image after (Chen et al.,2020).
Requires input band image (stack) and clustered image.
"""
if len(img.shape) > 2 and img.shape[0] > 2:
np.seterr(divide='ignore', invalid='ignore')
mnws = []
max_i = np.argmax(np.unique(cluster_img,return_counts=True)[1])
all_labels = list(range(0,cluster_img.max()+1))
labels = list(set(all_labels)-set([all_labels[max_i]]))
for label in labels:
#calculate band stats
region_img = np.where(cluster_img==label,img,0)
band_means = np.array(list(map(lambda x:np.mean(region_img[x][region_img[x]!=0],dtype=np.float32),
range(img.shape[0])))).reshape(img.shape[0],-1)
band_std = np.array(list(map(lambda x:np.std(region_img[x][region_img[x]!=0],dtype=np.float32),
range(img.shape[0])))).reshape(img.shape[0],-1)
#calculate nws
reshaped_img = img.reshape(img.shape[0],-1)
nws = (((( abs(reshaped_img-band_means) /band_std)**2).sum(0)/img.shape[0])**0.5).reshape(img.shape[1],img.shape[2])
mnws.append(nws)
mnws_img = np.array(mnws).min(0)
return mnws_img
def render_wcf(mnws_files,invalid_files,upland_file,thr=3,dec=2):
"""
Computes Water Coverage Frequency (WCF) image after (Chen et al.,2020).
Requires path of MNWS image files, invalid pixel mask files and terrain (upland) mask file.
"""
with rio.open(upland_file) as src_upland:
upland_mask = src_upland.read(1)
water_rws_detected = []
invalid_pixels = []
for i in tqdm(range(len(mnws_files)),position=0, leave=True):
mnws_file = mnws_files[i]
cl_mask_file = invalid_files[i]
with rio.open(mnws_file) as src_mnws,rio.open(cl_mask_file ) as src_mask:
profile = src_mnws.profile.copy()
mnws_img = src_mnws.read(1)
mnws_img[np.isnan(mnws_img)]=9999
cl_mask = src_mask.read(1)
invalid = np.where(cl_mask==2,1,0)
invalid_pixels.append(invalid)
water_rws = np.where(mnws_img<=thr,1,0)
water_rws = np.where( cl_mask>0,0,water_rws)
water_rws_detected.append(water_rws)
water_rws_detected_sum = np.where(upland_mask==1,0,np.array(water_rws_detected).sum(0))
invalid_pixels_sum = np.array(invalid_pixels).sum(0)
diff_invalid = len(mnws_files)-invalid_pixels_sum
water_freq_img = np.true_divide(water_rws_detected_sum , diff_invalid, where=(diff_invalid!=0),dtype=np.float32)*12
water_freq_img_r = np.round(water_freq_img,dec)
return water_freq_img_r,water_rws_detected_sum,profile
| 34.982906
| 128
| 0.620083
|
6989de50b8cb917f9a0186a4297967d681d036ee
| 454
|
py
|
Python
|
test/read_informs_test.py
|
qiyuangong/ppdpes_core
|
f4c955401559dcf09f62a7ef163e900df01f499b
|
[
"MIT"
] | null | null | null |
test/read_informs_test.py
|
qiyuangong/ppdpes_core
|
f4c955401559dcf09f62a7ef163e900df01f499b
|
[
"MIT"
] | null | null | null |
test/read_informs_test.py
|
qiyuangong/ppdpes_core
|
f4c955401559dcf09f62a7ef163e900df01f499b
|
[
"MIT"
] | null | null | null |
import unittest
from utils.read_informs_data import read_data
from utils.read_informs_data import read_tree
class test_read_informs(unittest.TestCase):
"""
informs dataset contains 58568 records
"""
def test_read_normal(self):
data = read_data()
self.assertEqual(len(data), 58568)
    def test_read_tree(self):
        # smoke test: only checks that read_tree() runs without raising
        ghs = read_tree()
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
| 19.73913
| 45
| 0.685022
|
7055f81cc915d1fe43ccc147082d5447eb4a4a54
| 2,031
|
py
|
Python
|
gdpr/utils.py
|
hronecviktor/django-GDPR
|
fa2a69e6f1d08fe8d2c646074c85fd0dc6ea6b8d
|
[
"MIT"
] | null | null | null |
gdpr/utils.py
|
hronecviktor/django-GDPR
|
fa2a69e6f1d08fe8d2c646074c85fd0dc6ea6b8d
|
[
"MIT"
] | null | null | null |
gdpr/utils.py
|
hronecviktor/django-GDPR
|
fa2a69e6f1d08fe8d2c646074c85fd0dc6ea6b8d
|
[
"MIT"
] | null | null | null |
from typing import Any, Type
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Model, QuerySet
def str_to_class(class_string: str) -> Any:
module_name, class_name = class_string.rsplit('.', 1)
# load the module, will raise ImportError if module cannot be loaded
m = __import__(module_name, globals(), locals(), [str(class_name)])
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, class_name)
return c
def get_number_guess_len(value):
"""
Safety measure against key getting one bigger (overflow) on decrypt e.g. (5)=1 -> 5 + 8 = 13 -> (13)=2
Args:
        value: Number convertible to int to get its length
    Returns:
        The odd length of the whole (integer) part of the number
"""
guess_len = len(str(int(value)))
return guess_len if guess_len % 2 != 0 else (guess_len - 1)
def get_field_or_none(model: Type[Model], field_name: str):
"""
Use django's _meta field api to get field or return None.
Args:
model: The model to get the field on
field_name: The name of the field
Returns:
The field or None
"""
try:
return model._meta.get_field(field_name)
except FieldDoesNotExist:
return None
"""
Enable support for druids reversion fork
"""
def get_reversion_versions(obj: Any) -> QuerySet:
from reversion.models import Version
from django.contrib.contenttypes.models import ContentType
return Version.objects.get_for_object(obj)
def get_reversion_version_model(version) -> Type[Model]:
"""Get object model of the version."""
if hasattr(version, '_model'):
return version._model
return version.content_type.model_class()
def get_reversion_local_field_dict(obj):
if hasattr(obj, '_local_field_dict'):
return obj._local_field_dict
return obj.flat_field_dict
def is_reversion_installed():
try:
import reversion
return True
except ImportError:
return False
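# Illustrative sketch (not from the original module): quick checks of the two
# helpers that need no Django setup. 'collections.OrderedDict' is just an
# arbitrary dotted path chosen for the example.
if __name__ == '__main__':
    assert str_to_class('collections.OrderedDict')().__class__.__name__ == 'OrderedDict'
    assert get_number_guess_len(12345) == 5    # odd length is kept as-is
    assert get_number_guess_len(123456) == 5   # even length is rounded down to the nearest odd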
| 26.038462
| 106
| 0.687346
|
7fecbd18fa02c03d4003972af012cf71c6c34b8e
| 777
|
py
|
Python
|
francoralite/apps/francoralite_api/models/dance.py
|
Francoralite/francoralite
|
f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c
|
[
"BSD-3-Clause"
] | 2
|
2021-07-26T08:29:26.000Z
|
2021-07-26T08:29:27.000Z
|
francoralite/apps/francoralite_api/models/dance.py
|
lluc/telemeta-integration
|
c2fb116471235674eae597abac84a7113e0f7c82
|
[
"BSD-3-Clause"
] | 167
|
2018-10-20T14:34:46.000Z
|
2021-06-01T10:40:55.000Z
|
francoralite/apps/francoralite_api/models/dance.py
|
Francoralite/francoralite
|
f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c
|
[
"BSD-3-Clause"
] | 1
|
2021-06-06T12:16:49.000Z
|
2021-06-06T12:16:49.000Z
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Cooperative Artefacts <artefacts.lle@gmail.com>
from django.db import models
from django.utils.translation import gettext_lazy as _
class Dance(models.Model):
# Description of the table
"Type of dance"
# List of the fields
name = models.CharField(_('Nom'), unique=True, max_length=255)
notes = models.TextField(_('Notes'), null=True, blank=True)
class Meta:
app_label = 'francoralite_api'
db_table = 'api_dance'
verbose_name_plural = _('dances')
ordering = ['name']
def __unicode__(self):
return self.name
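# Usage sketch (illustration only; assumes a configured Django project with the
# 'francoralite_api' app installed, so it is shown as comments rather than run):
#
#   Dance.objects.get_or_create(name='Bourrée', defaults={'notes': 'example'})
#   Dance.objects.order_by('name').values_list('name', flat=True)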
| 27.75
| 74
| 0.673102
|
aee0e8c69d98fb983bce30bf8e64850c6c6c20b7
| 1,644
|
py
|
Python
|
JPMC-tech-task-1/jpm_module_1/client_test.py
|
Abhishek-Doshi/J_P_Morgan_Chase-Virtual-Internship
|
62500dcea4284f11424c2b02f33685830efce6c8
|
[
"MIT"
] | 1
|
2020-07-02T15:11:10.000Z
|
2020-07-02T15:11:10.000Z
|
JPMC-tech-task-1/jpm_module_1/client_test.py
|
Abhishek-Doshi/J_P_Morgan_Chase-Virtual-Internship
|
62500dcea4284f11424c2b02f33685830efce6c8
|
[
"MIT"
] | null | null | null |
JPMC-tech-task-1/jpm_module_1/client_test.py
|
Abhishek-Doshi/J_P_Morgan_Chase-Virtual-Internship
|
62500dcea4284f11424c2b02f33685830efce6c8
|
[
"MIT"
] | null | null | null |
import unittest
from client3 import getDataPoint, getRatio
class ClientTest(unittest.TestCase):
def test_getDataPoint_calculatePrice(self):
quotes = [
{'top_ask': {'price': 121.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},
{'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}
]
""" ------------ Add the assertion below ------------ """
for quote in quotes:
self.assertEqual(getDataPoint(quote), (quote['stock'], quote['top_bid']['price'], quote['top_ask']['price'], (quote['top_bid']['price'] + quote['top_ask']['price'])/2))
def test_getDataPoint_calculatePriceBidGreaterThanAsk(self):
quotes = [
{'top_ask': {'price': 119.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},
{'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}
]
""" ------------ Add the assertion below ------------ """
for quote in quotes:
self.assertEqual(getDataPoint(quote), (quote['stock'], quote['top_bid']['price'], quote['top_ask']['price'], (quote['top_bid']['price'] + quote['top_ask']['price'])/2))
""" ------------ Add more unit tests ------------ """
if __name__ == '__main__':
unittest.main()
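""" ------------ Sketch of the function under test ------------ """
# client3.getDataPoint is not included in this snippet; the function below is
# only a minimal sketch consistent with the assertions above (stock, bid price,
# ask price, mid price) and is not the original client3 implementation.
def _getDataPoint_sketch(quote):
    bid = quote['top_bid']['price']
    ask = quote['top_ask']['price']
    return quote['stock'], bid, ask, (bid + ask) / 2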
| 63.230769
| 180
| 0.559002
|
211d5fdda3ae431350ecddd346f910bb44c8516b
| 1,579
|
py
|
Python
|
tests/commands/new/test_validate_app_name.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T01:22:42.000Z
|
2022-03-29T01:22:42.000Z
|
tests/commands/new/test_validate_app_name.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | null | null | null |
tests/commands/new/test_validate_app_name.py
|
danyeaw/briefcase
|
fd9744e5b8dfc8a4c7606dc63cddfcda2dd00d78
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.mark.parametrize(
'name',
[
'helloworld',
'helloWorld',
'hello42world',
'42helloworld', # ?? Are we sure this is correct?
'hello_world',
'hello-world',
]
)
def test_valid_app_name(new_command, name):
"Test that valid app names are accepted"
assert new_command.validate_app_name(name)
@pytest.mark.parametrize(
'name',
[
'hello world', # contains a space
'helloworld!', # contains punctuation
'_helloworld', # leading underscore
'-helloworld', # leading hyphen
'pass', # python keyword
'Pass', # python keyword, but different case usage
'PASS', # python keyword, but all upper case
'in', # javascript keyword
'In', # javascript keyword, but different case usage
'IN', # javascript keyword, but all upper case
'synchronized', # Java keyword
'Synchronized', # Java keyword, but different case usage
'SYNCHRONIZED', # Java keyword, but all upper case
'false', # Python, Java and Javascript keyword (in different cases)
'False', # Python, Java and Javascript keyword (in different cases)
'FALSE', # Python, Java and Javascript keyword (in different cases)
'existing', # pre-existing directory name
]
)
def test_invalid_app_name(new_command, name, tmp_path):
"Test that invalid app names are rejected"
(tmp_path / 'existing').mkdir()
with pytest.raises(ValueError):
new_command.validate_app_name(name)
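# new_command.validate_app_name is provided by briefcase itself and is not shown
# in this file; the sketch below only illustrates checks consistent with the
# parametrized cases above (name pattern, reserved keywords, pre-existing
# directory). It is an assumption for illustration, not the real implementation.
import keyword
import re
from pathlib import Path

_RESERVED = {kw.lower() for kw in keyword.kwlist} | {'in', 'synchronized', 'false'}


def validate_app_name_sketch(name, base_path=None):
    "Raise ValueError for names the tests above treat as invalid."
    if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$', name):
        raise ValueError("app names may only contain letters, digits, '-' and '_'")
    if name.lower() in _RESERVED:
        raise ValueError("app names may not be reserved keywords")
    if ((base_path or Path.cwd()) / name).exists():
        raise ValueError("a directory with this name already exists")
    return True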
| 31.58
| 76
| 0.626979
|
37577ff41d6ecff098e672c763977cfd4ac2b2f3
| 3,206
|
py
|
Python
|
Lib/importlib/__init__.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
Lib/importlib/__init__.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | null | null | null |
Lib/importlib/__init__.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
"""A pure Python implementation of import.
References on import:
* Language reference
http://docs.python.org/ref/import.html
* __import__ function
http://docs.python.org/lib/built-in-funcs.html
* Packages
http://www.python.org/doc/essays/packages.html
* PEP 235: Import on Case-Insensitive Platforms
http://www.python.org/dev/peps/pep-0235
    * PEP 273: Import Modules from Zip Archives
http://www.python.org/dev/peps/pep-0273
* PEP 302: New Import Hooks
http://www.python.org/dev/peps/pep-0302/
* PEP 328: Imports: Multi-line and Absolute/Relative
http://www.python.org/dev/peps/pep-0328
"""
__all__ = ['__import__', 'import_module']
from . import _bootstrap
import os
import re
import tokenize
# Bootstrap help #####################################################
def _case_ok(directory, check):
"""Check if the directory contains something matching 'check'.
No check is done if the file/directory exists or not.
"""
if 'PYTHONCASEOK' in os.environ:
return True
elif check in os.listdir(directory):
return True
return False
def _w_long(x):
"""Convert a 32-bit integer to little-endian.
XXX Temporary until marshal's long functions are exposed.
"""
x = int(x)
int_bytes = []
int_bytes.append(x & 0xFF)
int_bytes.append((x >> 8) & 0xFF)
int_bytes.append((x >> 16) & 0xFF)
int_bytes.append((x >> 24) & 0xFF)
return bytearray(int_bytes)
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
    XXX Temporary until marshal's long functions are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
# Required built-in modules.
try:
import posix as _os
except ImportError:
try:
import nt as _os
except ImportError:
try:
import os2 as _os
except ImportError:
raise ImportError('posix, nt, or os2 module required for importlib')
_bootstrap._os = _os
import imp, sys, marshal, errno, _io
_bootstrap.imp = imp
_bootstrap.sys = sys
_bootstrap.marshal = marshal
_bootstrap.errno = errno
_bootstrap._io = _io
import _warnings
_bootstrap._warnings = _warnings
from os import sep
# For os.path.join replacement; pull from Include/osdefs.h:SEP .
_bootstrap.path_sep = sep
_bootstrap._case_ok = _case_ok
marshal._w_long = _w_long
marshal._r_long = _r_long
# Public API #########################################################
from ._bootstrap import __import__
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
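# Usage sketch (not part of the original module): the absolute form of
# import_module is runnable as-is; the relative form needs a real anchor
# package, so it is only shown in a comment ('mypkg' is a hypothetical name).
if __name__ == '__main__':
    os_path = import_module('os.path')                    # absolute import
    print(os_path.join('a', 'b'))
    # utils = import_module('.utils', package='mypkg')    # relative import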
| 25.648
| 80
| 0.636619
|
149139c3656900d7a2d576df6b01f25ee9a683ff
| 2,335
|
py
|
Python
|
src/assets/environments/mingw64/lib/gcc/x86_64-w64-mingw32/4.8.1/libstdc++_64-6.dll-gdb.py
|
Sunhr520/ioeditor
|
6790a4384d075d6c61c680e0e30f5cd92b6a5425
|
[
"MIT"
] | null | null | null |
src/assets/environments/mingw64/lib/gcc/x86_64-w64-mingw32/4.8.1/libstdc++_64-6.dll-gdb.py
|
Sunhr520/ioeditor
|
6790a4384d075d6c61c680e0e30f5cd92b6a5425
|
[
"MIT"
] | null | null | null |
src/assets/environments/mingw64/lib/gcc/x86_64-w64-mingw32/4.8.1/libstdc++_64-6.dll-gdb.py
|
Sunhr520/ioeditor
|
6790a4384d075d6c61c680e0e30f5cd92b6a5425
|
[
"MIT"
] | null | null | null |
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/mingw64tdm/share/gcc-4.8.1/python'
libdir = '/mingw64tdm/lib/gcc/x86_64-w64-mingw32/4.8.1'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
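# Worked example (illustration only, assuming a '/' path separator) of the
# relative-path computation above:
#   prefix = os.path.commonprefix([libdir, pythondir]) == '/mingw64tdm/'
#   pythondir -> 'share/gcc-4.8.1/python'
#   libdir -> 'lib/gcc/x86_64-w64-mingw32/4.8.1' (4 components)
#   dotdots -> '../../../../'
# so dir_ becomes <objfile dir>/../../../../share/gcc-4.8.1/python, which is
# inserted at the front of sys.path before the printers are imported.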
| 38.278689
| 72
| 0.719058
|
d06853e47b4bfaf2f6b7da5ab40ff5fff23fc6a8
| 858
|
py
|
Python
|
projects/DA-project-3/problem-1.py
|
metanitesh/Data-Structure-Algorithm-Python
|
48c4e74909700a3b4438f79050c7f974cd95b9a2
|
[
"MIT"
] | null | null | null |
projects/DA-project-3/problem-1.py
|
metanitesh/Data-Structure-Algorithm-Python
|
48c4e74909700a3b4438f79050c7f974cd95b9a2
|
[
"MIT"
] | null | null | null |
projects/DA-project-3/problem-1.py
|
metanitesh/Data-Structure-Algorithm-Python
|
48c4e74909700a3b4438f79050c7f974cd95b9a2
|
[
"MIT"
] | null | null | null |
def findRoot(starting, end, number):
  mid = (end + starting) // 2
  if mid*mid == number:
    return mid
  if (mid*mid < number) and (mid+1)*(mid+1) > number:
    return mid
  if mid*mid > number:
return findRoot(0, mid, number)
else:
return findRoot(mid, end, number)
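# Worked trace (illustration only) of the bisection above for sqrt(16):
#   findRoot(0, 16, 16): mid = 8, 8*8 = 64 > 16 -> recurse on (0, 8)
#   findRoot(0, 8, 16):  mid = 4, 4*4 = 16      -> return 4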
def sqrt(number):
if(number < 0):
return 'negative numbers can not have square roots'
  if number == 0 or number == 1:
    return number
return findRoot(0, number, number)
print("Test-1----------------")
print(sqrt(16))
# 4
print(sqrt(24))
# 4
print("Test-2----------------")
print(sqrt(0))
# 0
print(sqrt(1))
# 1
print("Test-3----------------")
print(sqrt(-100))
# negative numbers can not have square roots
print("Test-4----------------")
print(sqrt(20000000000000))
# 4472135
import math
print(math.sqrt(20000000000000))
# 4472135.954999579
| 18.652174
| 57
| 0.601399
|
897ec9593bd7096aff055d2bb4cdb3e3b353b674
| 3,150
|
py
|
Python
|
lib/src/align/optimization/optimize_parameters.py
|
thormeier-fhnw-repos/ip619bb-i4ds02-audio-text-alignment
|
382800891c8de35c541d726da78f108a5a86b210
|
[
"MIT"
] | null | null | null |
lib/src/align/optimization/optimize_parameters.py
|
thormeier-fhnw-repos/ip619bb-i4ds02-audio-text-alignment
|
382800891c8de35c541d726da78f108a5a86b210
|
[
"MIT"
] | null | null | null |
lib/src/align/optimization/optimize_parameters.py
|
thormeier-fhnw-repos/ip619bb-i4ds02-audio-text-alignment
|
382800891c8de35c541d726da78f108a5a86b210
|
[
"MIT"
] | null | null | null |
from bin._bin import bin_print
from lib.src.align.aligner.google.GoogleFilesAligner import GoogleFilesAligner
from lib.src.align.compare.compare_alignments import compare_alignments
from typing import Dict, Any, List
from GPyOpt.methods import BayesianOptimization
import numpy as np
def optimize_parameters(
input_path: str,
output_path: str,
google_files_aligner: GoogleFilesAligner,
alignment_parameters: Dict[str, Any],
convergence_plot_file: str,
verbosity: int
) -> None:
"""
Tries to find the best parameters for google alignment.
:param input_path: Path to load all alignments from
:param output_path: Path to write the alignments to
    :param google_files_aligner: GoogleFilesAligner to re-align every epoch
:param alignment_parameters: Alignment parameters for comparison
:param convergence_plot_file: Where to save the convergence plot
:param verbosity: Verbosity of the output
:return: None
"""
def optimize_function(params: List) -> float:
"""
Function to optimize against
:param params: Parameters given by BOpt
:return: Calculated score
"""
bin_print(verbosity, 1, "Starting new iteration...")
google_files_aligner.alignment_parameters["algorithm"]["match_reward"] = params[0][0]
google_files_aligner.alignment_parameters["algorithm"]["mismatch_penalty"] = params[0][1]
google_files_aligner.alignment_parameters["algorithm"]["gap_penalty"] = params[0][2]
bin_print(verbosity, 3, "Configured params: ", google_files_aligner.alignment_parameters)
google_files_aligner.align_files(input_path, output_path, 0)
# Not "training_only", because we're using a further boiled down training set.
result = compare_alignments(input_path, 0, "hand", "google", False, alignment_parameters)
# Configurable, see config.example.yml
score = eval(google_files_aligner.alignment_parameters["optimize_params_formula"], {"__builtins__": None}, {
"deviation": result["scores"]["deviation"]["mean"],
"iou": result["ious"]["mean"],
"f1": result["appearance"]["f1_score"],
"precision": result["appearance"]["precision"],
"recall": result["appearance"]["recall"],
})
bin_print(verbosity, 1, "Parameters: ", params)
bin_print(verbosity, 1, "Achieved score (smaller == better): ", score)
return score
domain = [
{"name": "match_reward", "type": "continuous", "domain": (0, 100)},
{"name": "mismatch_penalty", "type": "continuous", "domain": (-100, 0)},
{"name": "gap_penalty", "type": "continuous", "domain": (-100, 0)},
]
bopt = BayesianOptimization(
f=optimize_function,
domain=domain,
model_type="GP",
acquisition_type="EI",
acquisition_jitter=0.05
)
bopt.run_optimization(max_iter=25)
bopt.plot_convergence(filename=convergence_plot_file)
bin_print(verbosity, 0, "Best values:", bopt.x_opt)
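# Illustrative sketch (not part of the original module): how the configurable
# 'optimize_params_formula' above is evaluated with restricted builtins. The
# formula string and metric values below are made-up examples, not values taken
# from config.example.yml.
if __name__ == "__main__":
    formula = "deviation - 10 * iou - 5 * f1"
    metrics = {"deviation": 0.42, "iou": 0.61, "f1": 0.83,
               "precision": 0.80, "recall": 0.86}
    score = eval(formula, {"__builtins__": None}, metrics)
    print(score)  # smaller == better, as in optimize_function above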
| 37.951807
| 116
| 0.659048
|
a5675d7ea8d11072b039e787f858d722f135619e
| 2,218
|
py
|
Python
|
app/location.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
app/location.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
app/location.py
|
PhotoScout/API
|
24c2040b0a2fcb1ea906c7aa095c9e74d3ca4fa9
|
[
"MIT"
] | null | null | null |
from app import app, db, auth, api
from flask import Flask, request, url_for, jsonify, abort, g
from flask_restful import Resource, reqparse, fields, marshal
from app.models import Photo, Guide
import requests
from app.fields.places import *
class Google_Location():
def __init__(self, place):
self.latitude = place['geometry']['location']['lat']
self.longitude = place['geometry']['location']['lng']
self.name = place['name']
if 'photos' in place:
self.image = self.getImage(place['photos'][0]['photo_reference'])
def getImage(self, ref):
url = "https://maps.googleapis.com/maps/api/place/photo?maxwidth=400&photoreference=" + ref + "&key="+app.config['GOOGLE_KEY']
response = requests.get(url)
return response.url
class Location_API(Resource):
decorators = [auth.login_required]
def get(self):
""" Get the location nearby the guide """
parser = reqparse.RequestParser()
# Get the guide ID
parser.add_argument(
'id',
type=int,
required=True,
help="Missing guide id"
)
args = parser.parse_args()
guide = Guide.query.get(args.id)
# Get the guide location
location = Guide.getFeaturedLocation(guide)
print(location)
if location:
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=" + str(location['latitude'])+"," + str(location['longitude']) + "&radius=2000&keyword=point of interest&key="+app.config['GOOGLE_KEY']
response = requests.get(url)
place_list = []
for res in response.json()['results']:
res_full = Google_Location(res)
place_list.append(res_full)
return marshal(place_list, PLACE_FIELDS), 200
# No location available for this guide
else:
return "No location", 404
api.add_resource(
Location_API,
app.config['BASE_URL']+'/guide/near',
endpoint='location'
)
# https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-33.8670522,151.1957362&radius=500&type=restaurant&keyword=cruise&key=YOUR_API_KEY
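# Illustrative sketch (not part of the original module, and only meaningful when
# run inside the project because of the imports at the top of this file): the
# subset of a Places API "nearbysearch" result that Google_Location reads. The
# values are made up; 'photos' is omitted so no network call is made.
if __name__ == '__main__':
    sample_place = {
        'name': 'Example viewpoint',
        'geometry': {'location': {'lat': 48.858, 'lng': 2.294}},
    }
    loc = Google_Location(sample_place)
    print(loc.name, loc.latitude, loc.longitude)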
| 31.685714
| 224
| 0.629847
|
04242b4ce015a4f80890e4dcd107fdd6acddc4c0
| 11,314
|
py
|
Python
|
tests/test_types.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | 25
|
2021-04-03T20:26:57.000Z
|
2022-02-08T15:02:17.000Z
|
tests/test_types.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | 8
|
2022-01-23T10:55:43.000Z
|
2022-03-01T14:03:01.000Z
|
tests/test_types.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | 3
|
2021-09-16T11:23:25.000Z
|
2022-01-23T10:49:26.000Z
|
# -*- coding: utf-8 -*-
"""
test.test_types
~~~~~~~~~~~~~~~
This module contains the Diameter protocol types unittests.
:copyright: (c) 2020 Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import unittest
import os
import sys
testing_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.dirname(testing_dir)
sys.path.insert(0, base_dir)
from bromelia.avps import *
from bromelia.constants import *
from bromelia.exceptions import *
from bromelia.types import *
class TestOctetStringType(unittest.TestCase):
def test_octet_string_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = OctetStringType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class OctetStringType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class OctetStringType with abstract method __init__")
def test_octet_string_type__valid_input_data_for_subclass__str(self):
avp = ProxyStateAVP("CLOSED")
self.assertEqual(type(avp.data), bytes)
def test_octet_string_type__valid_input_data_for_subclass__bytes(self):
data = bytes.fromhex("00000011")
avp = ClassAVP(data)
self.assertEqual(type(avp.data), bytes)
def test_octet_string_type__valid_input_vendor_id_for_subclass__4_bytes_long(self):
class CustomAVP(DiameterAVP, OctetStringType):
code = bytes.fromhex("00000000")
def __init__(self, data):
DiameterAVP.__init__(self, CustomAVP.code)
DiameterAVP.set_vendor_id_bit(self, True)
OctetStringType.__init__(self, data=data, vendor_id=bytes.fromhex("11110000"))
avp = CustomAVP(data=bytes.fromhex("11"))
self.assertEqual(type(avp.data), bytes)
def test_octet_string_type__invalid_input_data_for_subclass__list(self):
data = bytes.fromhex("00000011")
with self.assertRaises(DataTypeError) as cm:
avp = AcctSessionIdAVP([data, data])
self.assertEqual(cm.exception.args[0], "OctetStringType MUST have data argument of 'bytes' or 'str'")
def test_octet_string_type__invalid_input_data_for_subclass__dict(self):
data = bytes.fromhex("00000011")
with self.assertRaises(DataTypeError) as cm:
avp = AcctSessionIdAVP({"data": data})
self.assertEqual(cm.exception.args[0], "OctetStringType MUST have data argument of 'bytes' or 'str'")
def test_octet_string_type__invalid_input_vendor_id_for_subclass__1_byte_long(self):
class CustomAVP(DiameterAVP, OctetStringType):
code = bytes.fromhex("00000000")
def __init__(self, data):
DiameterAVP.__init__(self, CustomAVP.code)
DiameterAVP.set_vendor_id_bit(self, True)
OctetStringType.__init__(self, data=data, vendor_id=bytes.fromhex("11"))
with self.assertRaises(DataTypeError) as cm:
avp = CustomAVP(data=bytes.fromhex("11"))
self.assertEqual(cm.exception.args[0], "Invalid vendor_id format for OctetStringType. It MUST be 4 bytes long")
def test_octet_string_type__invalid_input_vendor_id_for_subclass__2_bytes_long(self):
class CustomAVP(DiameterAVP, OctetStringType):
code = bytes.fromhex("00000000")
def __init__(self, data):
DiameterAVP.__init__(self, CustomAVP.code)
DiameterAVP.set_vendor_id_bit(self, True)
OctetStringType.__init__(self, data=data, vendor_id=bytes.fromhex("1111"))
with self.assertRaises(DataTypeError) as cm:
avp = CustomAVP(data=bytes.fromhex("11"))
self.assertEqual(cm.exception.args[0], "Invalid vendor_id format for OctetStringType. It MUST be 4 bytes long")
def test_octet_string_type__invalid_input_vendor_id_for_subclass__3_bytes_long(self):
class CustomAVP(DiameterAVP, OctetStringType):
code = bytes.fromhex("00000000")
def __init__(self, data):
DiameterAVP.__init__(self, CustomAVP.code)
DiameterAVP.set_vendor_id_bit(self, True)
OctetStringType.__init__(self, data=data, vendor_id=bytes.fromhex("111100"))
with self.assertRaises(DataTypeError) as cm:
avp = CustomAVP(data=bytes.fromhex("11"))
self.assertEqual(cm.exception.args[0], "Invalid vendor_id format for OctetStringType. It MUST be 4 bytes long")
class TestInteger32Type(unittest.TestCase):
def test_integer32_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = Integer32Type(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Integer32Type with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Integer32Type with abstract method __init__")
def test_integer32_type__valid_input_data_for_subclass__4_bytes_long(self):
class CustomAVP(DiameterAVP, Integer32Type):
code = bytes.fromhex("00000000")
def __init__(self, data):
DiameterAVP.__init__(self, CustomAVP.code)
DiameterAVP.set_vendor_id_bit(self, True)
Integer32Type.__init__(self, data=data, vendor_id=bytes.fromhex("11110000"))
avp = CustomAVP(data=bytes.fromhex("11001100"))
self.assertEqual(type(avp.data), bytes)
class TestUnsigned32Type(unittest.TestCase):
def test_unsigned32_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = Unsigned32Type(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Unsigned32Type with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Unsigned32Type with abstract method __init__")
class TestUnsigned64Type(unittest.TestCase):
def test_unsigned64_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = Unsigned64Type(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Unsigned64Type with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class Unsigned64Type with abstract method __init__")
class TestGroupedType(unittest.TestCase):
def test_grouped_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = GroupedType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class GroupedType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class GroupedType with abstract method __init__")
class TestAddressType(unittest.TestCase):
def test_address_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = AddressType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class AddressType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class AddressType with abstract method __init__")
class TestTimeType(unittest.TestCase):
def test_time_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = TimeType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class TimeType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class TimeType with abstract method __init__")
class TestUTF8StringType(unittest.TestCase):
def test_utf8_string_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = UTF8StringType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class UTF8StringType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class UTF8StringType with abstract method __init__")
class TestDiameterIdentityType(unittest.TestCase):
def test_diameter_identity_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = DiameterIdentityType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class DiameterIdentityType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class DiameterIdentityType with abstract method __init__")
class TestDiameterURIType(unittest.TestCase):
def test_diameter_uri_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = DiameterURIType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class DiameterURIType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class DiameterURIType with abstract method __init__")
class TestEnumeratedType(unittest.TestCase):
def test_enumerated_type__unable_to_instantiate_class(self):
data = bytes.fromhex("00000011")
with self.assertRaises(TypeError) as cm:
_type = EnumeratedType(data)
if sys.version_info[1] <= 8:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class EnumeratedType with abstract methods __init__")
elif sys.version_info[1] == 9:
self.assertEqual(cm.exception.args[0], "Can't instantiate abstract class EnumeratedType with abstract method __init__")
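# Standalone illustration (not from bromelia) of the pattern every test above
# relies on: an ABC whose __init__ is abstract cannot be instantiated, and the
# TypeError wording changed between Python 3.8 ("methods") and 3.9 ("method").
if __name__ == "__main__":
    import abc

    class _AbstractExample(abc.ABC):
        @abc.abstractmethod
        def __init__(self, data):
            ...

    try:
        _AbstractExample(b"\x11")
    except TypeError as exc:
        print(exc)  # e.g. "Can't instantiate abstract class _AbstractExample ..."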
| 42.05948
| 138
| 0.680396
|
f9434ad6812b364772f198c34b0d780c1176903a
| 12,108
|
py
|
Python
|
yum/gui.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | 1
|
2018-02-12T15:48:59.000Z
|
2018-02-12T15:48:59.000Z
|
yum/gui.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | 1
|
2018-02-11T09:36:21.000Z
|
2018-02-11T09:36:21.000Z
|
yum/gui.py
|
HalfDeadPie/Yum4FIT
|
e47ae12f9da037c5c56277ae3e58d8d29b9692d0
|
[
"MIT"
] | null | null | null |
import json
import urllib
import webbrowser
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QListView, QListWidgetItem, QListWidget, QGroupBox, QTextBrowser, \
QCheckBox, QFileDialog, QInputDialog, QLineEdit
from PyQt5.uic.Compiler.qtproxies import QtWidgets
from PyQt5 import QtWidgets, uic, QtGui
from yum import CONSTANTS, Parser, account, state, food, yum, connector
class GUI():
"""
    Class that represents the graphical user interface and provides Yum4FIT services through it.
    To open the GUI, run the "yum run" command
"""
def __init__(self, config):
"""
Constructor of the GUI class
:param config: Config file (default=config.cfg)
"""
app = QtWidgets.QApplication([])
window = QtWidgets.QMainWindow()
window.setWindowTitle("Yum4FIT")
with open(CONSTANTS.WINDOW) as f:
uic.loadUi(f, window)
self.app = app
self.w = window
self.config = config
self.profile = None
self.state = None
self.password = None
self.hashtag = None
self.show()
def show(self):
"""
Function to set all the elements and show the main window
:return: running configured GUI application
"""
self.login()
self.loadProfileBox()
self.loadFood()
self.loadRecipe()
self.loadShareButton()
self.loadFriendButton()
self.w.show()
return self.app.exec()
def login(self):
"""
Login to Instagram
"""
username = Parser.getUsername(None, self.config)
password = Parser.getPassword(None, self.config)
account.login(username, password)
self.profile = account.getProfile()
self.password = password
self.hashtag = Parser.get(self.config,'instagram', 'hashtag')
# ------------------------------------------------------------------------------------------------------------------
def loadUsername(self):
"""
Load username to label in profile block
:return:
"""
label = self.w.findChild(QtWidgets.QLabel, 'labUsername')
label.setText(self.profile.username)
label.setAlignment(Qt.AlignCenter)
def loadFullname(self):
"""
Load full name to label in profile block
"""
label = self.w.findChild(QtWidgets.QLabel, 'labFullname')
label.setText(self.profile.full_name)
label.setAlignment(Qt.AlignCenter)
def loadEmail(self):
"""
Load email to label in profile block
"""
label = self.w.findChild(QtWidgets.QLabel,'labEmail')
label.setText(self.profile.email)
label.setAlignment(Qt.AlignCenter)
def loadPicture(self):
"""
Load picture of user to label in profile block
"""
url = self.profile.avatar
data = urllib.request.urlopen(url).read()
image = QtGui.QImage()
image.loadFromData(data)
lbl = self.w.findChild(QtWidgets.QLabel,'labPicture')
lbl.setPixmap(QtGui.QPixmap(image))
lbl.setAlignment(Qt.AlignCenter)
def loadState(self):
"""
Load state of the user and set the level in profile block
:return:
"""
self.state = state.State(0, False)
label = self.w.findChild(QtWidgets.QLabel,'labLevel')
label.setAlignment(Qt.AlignCenter)
label.setText("Level: " + str(self.state.level))
def loadGainButton(self):
"""
Set the gain button
"""
button = self.w.findChild(QtWidgets.QPushButton, 'buttonGain')
button.setText("Gain")
button.clicked.connect(self.gain)
def loadProfileBox(self):
"""
Load profile box
:return:
"""
self.loadUsername()
self.loadFullname()
self.loadEmail()
self.loadPicture()
self.loadState()
self.loadGainButton()
# ------------------------------------------------------------------------------------------------------------------
def loadFood(self):
"""
Load all the user's food from instagram - depending on Yum4FIT hashtag
"""
self.foods = account.getAllFood(self.profile.username, self.password, self.hashtag)
listView = self.w.findChild(QtWidgets.QListWidget, 'listFoods')
listView.setViewMode(QListView.IconMode)
listView.setIconSize(QSize(400, 400))
listView.clear()
for actual in self.foods:
f = actual.split(' ')
url = f[1]
data = urllib.request.urlopen(url).read()
image = QtGui.QImage()
image.loadFromData(data)
map = QtGui.QPixmap(image)
item = QListWidgetItem(f[0])
icon = QtGui.QIcon()
icon.addPixmap(map,QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon)
listView.addItem(item)
# ------------------------------------------------------------------------------------------------------------------
def loadRecipe(self):
"""
Load last food from recipe file
"""
self.lastFood = food.Food(None)
self.lastFood = self.lastFood.load(CONSTANTS.RECIPE_FILE)
self.loadActualName()
self.loadIngredients()
self.loadActualImage()
self.loadGenerateButton()
self.loadGuideButton()
def reloadRecipe(self):
"""
Refresh the last food
"""
self.lastFood = food.Food(None)
self.lastFood = self.lastFood.load(CONSTANTS.RECIPE_FILE)
self.loadActualName()
self.loadIngredients()
self.loadActualImage()
def loadActualName(self):
"""
        Load the name of the current (last generated) food into its group box
"""
box = self.w.findChild(QtWidgets.QGroupBox, 'boxActual')
box.setTitle(self.lastFood.name)
def loadIngredients(self):
"""
Load ingredients of the last recipe to the group box of last food
:return:
"""
text = self.w.findChild(QtWidgets.QTextBrowser, 'textIngredients')
text.setText(self.lastFood.ingredients)
def loadActualImage(self):
"""
        Load the image of the last food from the recipe file into the last-food group box
:return:
"""
url = self.lastFood.picture
data = urllib.request.urlopen(url).read()
image = QtGui.QImage()
image.loadFromData(data)
map = QtGui.QPixmap(image)
imageLabel = self.w.findChild(QtWidgets.QLabel, 'actualImage')
imageLabel.setPixmap(map.scaledToWidth(200))
imageLabel.setAlignment(Qt.AlignCenter)
def loadGenerateButton(self):
"""
        Set the generate button, which generates new recipes. The user can define search
        parameters in the config file and tick the checkbox to use these parameters.
:return:
"""
button = self.w.findChild(QtWidgets.QPushButton, 'buttonGenerate')
button.setText("Generate")
button.clicked.connect(self.recipe)
def loadGuideButton(self):
"""
        Set the step-by-step guide button. This button opens the browser and redirects to the URL of the food.
:return:
"""
button = self.w.findChild(QtWidgets.QPushButton, 'buttonGuide')
button.setText("Guide")
button.clicked.connect(self.redirect)
# ------------------------------------------------------------------------------------------------------------------
def loadShareButton(self):
"""
        Set the button for sharing food
:return:
"""
button = self.w.findChild(QtWidgets.QPushButton, 'buttonShare')
button.setText("Share")
button.clicked.connect(self.share)
def loadFriendButton(self):
"""
        Set the button for adding a friend. The user types the friend's Instagram username into the line edit,
        clicks the food in the list, and then clicks this button
"""
button = self.w.findChild(QtWidgets.QPushButton, 'buttonFriend')
button.setText("Add friend")
button.clicked.connect(self.addFriend)
# ------------------------------------------------------------------------------------------------------------------
def gain(self):
"""
Support function to gain XP from likes
"""
state = account.gain(self.profile.username, self.password, self.hashtag, None, CONSTANTS.FRIENDS_FILE)
data = { 'likes' : str(state.likes),
'level' : str(state.level),
'xp' : str(state.xp )
}
json_data = json.dumps(data)
url = Parser.get(self.config, 'server', 'url')
url += 'gain'
connector.post(url, json_data, self.password)
self.loadState()
def redirect(self):
"""
Support function to open browser and redirect
"""
new = 2
url = self.lastFood.url
webbrowser.open(url, new=new)
def recipe(self):
"""
Support function to generate recipe
"""
api_id = Parser.get(self.config, 'yummly', 'api-id')
api_key = Parser.get(self.config, 'yummly', 'api-key')
server_url = Parser.get(self.config, 'server', 'url')
yummer = yum.Yum(api_id, api_key, server_url)
checkbox = self.w.findChild(QtWidgets.QCheckBox, 'checkConfig')
file = False
if checkbox.isChecked():
file = True
print("choosing recipe...")
result = yummer.recipe(diet=None, allergy=None,
cuisine=None, exclude_cuisine=None,
ingredient=None, exclude_ingredient=None,
holiday=None, exclude_holiday=None,
phrase=None,
file=file, config=self.config)
if result:
print("reloading...")
self.reloadRecipe()
def share(self):
"""
Support function for sharing photos of the food. Includes opening file dialog and setting caption
:return:
"""
file = self.open()
if file != None:
caption = self.setCaption()
if caption == None:
caption = ""
hashtag = Parser.get(self.config, 'instagram', 'hashtag')
server_url = Parser.get(self.config, 'server', 'url')
account.upload(self.profile.username, self.password, file, caption +" "+ hashtag, server_url)
self.loadFood()
def open(self):
"""
Support function to open file dialog with filter of jpeg files
"""
fileName, _ = QFileDialog.getOpenFileName(caption="Choose image for uploading to Instagram",
directory="",filter="Images (*.jpeg, *.jpg)")
if fileName:
return fileName
def setCaption(self):
"""
Open special input dialog for setting the caption
:return:
"""
        text, okPressed = QInputDialog.getText(None, "Enter the caption of the image", "Caption:", QLineEdit.Normal, "")
if okPressed and text != '':
return text
def addFriend(self):
"""
        Support function to add a friend using the item chosen by clicking in the food list widget
"""
line = self.w.findChild(QtWidgets.QLineEdit, 'lineFriend')
friend = line.text()
        if friend is None or friend == "":
return
else:
listView = self.w.findChild(QtWidgets.QListWidget, 'listFoods')
id = listView.currentItem().text()
if id:
data = {"username": friend, "post": id}
json_data = json.dumps(data)
url = Parser.get(self.config,'server','url')
url += 'addfriend'
Parser.updateSection(CONSTANTS.FRIENDS_FILE, friend, id, 'no')
connector.post(url, json_data, self.password)
| 32.991826
| 120
| 0.555913
|
f529ccd3f630381a47bdf178b6d5f04901f084a8
| 24,142
|
py
|
Python
|
core/shell.py
|
happysalada/oil
|
11aa7b4abf93022fceada06abf23922c1c60787a
|
[
"Apache-2.0"
] | null | null | null |
core/shell.py
|
happysalada/oil
|
11aa7b4abf93022fceada06abf23922c1c60787a
|
[
"Apache-2.0"
] | null | null | null |
core/shell.py
|
happysalada/oil
|
11aa7b4abf93022fceada06abf23922c1c60787a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
"""
core/shell.py -- Entry point for the shell interpreter.
"""
from __future__ import print_function
import errno
import time
from _devbuild.gen import arg_types
from _devbuild.gen.option_asdl import option_i, builtin_i
from _devbuild.gen.syntax_asdl import source
from asdl import runtime
from core import alloc
from core import comp_ui
from core import dev
from core import error
from core import executor
from core import completion
from core import main_loop
from core import pyos
from core import process
from core import shell_native
from core import pyutil
from core.pyutil import stderr_line
from core import state
from core import ui
from core import util
from core.pyerror import log
unused = log
from core import vm
from frontend import args
from frontend import flag_def # side effect: flags are defined!
_ = flag_def
from frontend import flag_spec
from frontend import reader
from frontend import py_reader
from frontend import parse_lib
from oil_lang import expr_eval
from oil_lang import builtin_oil
from oil_lang import funcs_builtin
from osh import builtin_assign
from osh import builtin_comp
from osh import builtin_meta
from osh import builtin_misc
from osh import builtin_lib
from osh import builtin_printf
from osh import builtin_process
from osh import builtin_pure
from osh import cmd_eval
from osh import glob_
from osh import history
from osh import prompt
from osh import sh_expr_eval
from osh import split
from osh import word_eval
from mycpp import mylib
from pylib import os_path
import libc
import posix_ as posix
from typing import List, Dict, Optional, Any, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import Proc
def _InitDefaultCompletions(cmd_ev, complete_builtin, comp_lookup):
# type: (cmd_eval.CommandEvaluator, builtin_comp.Complete, completion.Lookup) -> None
# register builtins and words
complete_builtin.Run(shell_native.MakeBuiltinArgv(['-E', '-A', 'command']))
# register path completion
# Add -o filenames? Or should that be automatic?
complete_builtin.Run(shell_native.MakeBuiltinArgv(['-D', '-A', 'file']))
# TODO: Move this into demo/slow-completion.sh
if 1:
# Something for fun, to show off. Also: test that you don't repeatedly hit
# the file system / network / coprocess.
A1 = completion.TestAction(['foo.py', 'foo', 'bar.py'])
A2 = completion.TestAction(['m%d' % i for i in xrange(5)], delay=0.1)
C1 = completion.UserSpec([A1, A2], [], [], lambda candidate: True)
comp_lookup.RegisterName('slowc', {}, C1)
def SourceStartupFile(fd_state, rc_path, lang, parse_ctx, cmd_ev):
# type: (process.FdState, str, str, parse_lib.ParseContext, cmd_eval.CommandEvaluator) -> None
# Right now this is called when the shell is interactive. (Maybe it should
  # be called on login_shell too.)
#
# Terms:
# - interactive shell: Roughly speaking, no args or -c, and isatty() is true
# for stdin and stdout.
# - login shell: Started from the top level, e.g. from init or ssh.
#
# We're not going to copy everything bash does because it's too complex, but
# for reference:
# https://www.gnu.org/software/bash/manual/bash.html#Bash-Startup-Files
# Bash also has --login.
try:
f = fd_state.Open(rc_path)
except OSError as e:
# TODO: Could warn about nonexistent explicit --rcfile?
if e.errno != errno.ENOENT:
raise # Goes to top level. Handle this better?
return
arena = parse_ctx.arena
rc_line_reader = reader.FileLineReader(f, arena)
rc_c_parser = parse_ctx.MakeOshParser(rc_line_reader)
with alloc.ctx_Location(arena, source.SourcedFile(rc_path)):
# TODO: handle status, e.g. 2 for ParseError
status = main_loop.Batch(cmd_ev, rc_c_parser, arena)
f.close()
class ShellOptHook(state.OptHook):
def __init__(self, line_input):
# type: (Any) -> None
self.line_input = line_input
def OnChange(self, opt0_array, opt_name, b):
# type: (List[bool], str, bool) -> bool
"""This method is called whenever an option is changed.
Returns success or failure.
"""
if opt_name == 'vi' or opt_name == 'emacs':
# TODO: Replace with a hook? Just like setting LANG= can have a hook.
if self.line_input:
        self.line_input.parse_and_bind("set editing-mode " + opt_name)
else:
stderr_line(
"Warning: Can't set option %r because Oil wasn't built with line editing (e.g. GNU readline)", opt_name)
return False
# Invert: they are mutually exclusive!
if opt_name == 'vi':
opt0_array[option_i.emacs] = not b
elif opt_name == 'emacs':
opt0_array[option_i.vi] = not b
return True
def AddProcess(
b, # type: Dict[int, vm._Builtin]
mem, # type: state.Mem
shell_ex, # type: vm._Executor
ext_prog, # type: process.ExternalProgram
fd_state, # type: process.FdState
job_state, # type: process.JobState
waiter, # type: process.Waiter
tracer, # type: dev.Tracer
search_path, # type: state.SearchPath
errfmt # type: ui.ErrorFormatter
):
# type: (...) -> None
# Process
b[builtin_i.exec_] = builtin_process.Exec(mem, ext_prog, fd_state,
search_path, errfmt)
b[builtin_i.wait] = builtin_process.Wait(waiter, job_state, mem, tracer,
errfmt)
b[builtin_i.jobs] = builtin_process.Jobs(job_state)
b[builtin_i.fg] = builtin_process.Fg(job_state, waiter)
b[builtin_i.bg] = builtin_process.Bg(job_state)
b[builtin_i.umask] = builtin_process.Umask()
b[builtin_i.fork] = builtin_process.Fork(shell_ex)
b[builtin_i.forkwait] = builtin_process.ForkWait(shell_ex)
def AddOil(b, mem, cmd_ev, errfmt, procs, arena):
# type: (Dict[int, vm._Builtin], state.Mem, cmd_eval.CommandEvaluator, ui.ErrorFormatter, Dict[str, Proc], alloc.Arena) -> None
b[builtin_i.append] = builtin_oil.Append(mem, errfmt)
b[builtin_i.shvar] = builtin_pure.Shvar(mem, cmd_ev)
b[builtin_i.push_registers] = builtin_pure.PushRegisters(mem, cmd_ev)
b[builtin_i.write] = builtin_oil.Write(mem, errfmt)
b[builtin_i.pp] = builtin_oil.Pp(mem, errfmt, procs, arena)
b[builtin_i.use] = builtin_pure.Use(mem, errfmt)
b[builtin_i.argparse] = builtin_oil.ArgParse(mem, errfmt)
def Main(lang, arg_r, environ, login_shell, loader, line_input):
# type: (str, args.Reader, Dict[str, str], bool, pyutil._ResourceLoader, Any) -> int
"""The full shell lifecycle. Used by bin/osh and bin/oil.
Args:
lang: 'osh' or 'oil'
argv0, arg_r: command line arguments
environ: environment
login_shell: Was - on the front?
loader: to get help, version, grammar, etc.
line_input: optional GNU readline
"""
# Differences between osh and oil:
# - --help? I guess Oil has a SUPERSET of OSH options.
# - oshrc vs oilrc
# - shopt -s oil:all
# - Change the prompt in the interactive shell?
# osh-pure:
# - no oil grammar
# - no expression evaluator
# - no interactive shell, or line_input
# - no process.*
# process.{ExternalProgram,Waiter,FdState,JobState,SignalState} -- we want
# to evaluate config files without any of these
# Modules not translated yet: completion, comp_ui, builtin_comp, process
# - word evaluator
# - shouldn't glob? set -o noglob? or hard failure?
# - ~ shouldn't read from the file system
# - I guess it can just be the HOME=HOME?
# Builtin:
# shellvm -c 'echo hi'
# shellvm <<< 'echo hi'
argv0 = arg_r.Peek()
assert argv0 is not None
arg_r.Next()
assert lang in ('osh', 'oil'), lang
try:
attrs = flag_spec.ParseMore('main', arg_r)
except error.Usage as e:
stderr_line('osh usage error: %s', e.msg)
return 2
flag = arg_types.main(attrs.attrs)
arena = alloc.Arena()
errfmt = ui.ErrorFormatter(arena)
help_builtin = builtin_misc.Help(loader, errfmt)
if flag.help:
help_builtin.Run(shell_native.MakeBuiltinArgv(['%s-usage' % lang]))
return 0
if flag.version:
# OSH version is the only binary in Oil right now, so it's all one version.
pyutil.ShowAppVersion('Oil', loader)
return 0
no_str = None # type: str
debug_stack = [] # type: List[state.DebugFrame]
if arg_r.AtEnd():
dollar0 = argv0
else:
dollar0 = arg_r.Peek() # the script name, or the arg after -c
# Copy quirky bash behavior.
frame0 = state.DebugFrame(dollar0, 'main', no_str, state.LINE_ZERO, 0, 0)
debug_stack.append(frame0)
# Copy quirky bash behavior.
frame1 = state.DebugFrame(no_str, no_str, no_str, runtime.NO_SPID, 0, 0)
debug_stack.append(frame1)
script_name = arg_r.Peek() # type: Optional[str]
arg_r.Next()
mem = state.Mem(dollar0, arg_r.Rest(), arena, debug_stack)
opt_hook = ShellOptHook(line_input)
# Note: only MutableOpts needs mem, so it's not a true circular dep.
parse_opts, exec_opts, mutable_opts = state.MakeOpts(mem, opt_hook)
mem.exec_opts = exec_opts # circular dep
mutable_opts.Init()
version_str = pyutil.GetVersion(loader)
state.InitMem(mem, environ, version_str)
funcs_builtin.Init(mem)
procs = {} # type: Dict[str, Proc]
if attrs.show_options: # special case: sh -o
mutable_opts.ShowOptions([])
return 0
# Set these BEFORE processing flags, so they can be overridden.
if lang == 'oil':
mutable_opts.SetShoptOption('oil:all', True)
builtin_pure.SetShellOpts(mutable_opts, attrs.opt_changes, attrs.shopt_changes)
# feedback between runtime and parser
aliases = {} # type: Dict[str, str]
oil_grammar = pyutil.LoadOilGrammar(loader)
if flag.one_pass_parse and not exec_opts.noexec():
raise error.Usage('--one-pass-parse requires noexec (-n)')
parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar)
parse_ctx.Init_OnePassParse(flag.one_pass_parse)
# Three ParseContext instances SHARE aliases.
comp_arena = alloc.Arena()
comp_arena.PushSource(source.Unused('completion'))
trail1 = parse_lib.Trail()
# one_pass_parse needs to be turned on to complete inside backticks. TODO:
# fix the issue where ` gets erased because it's not part of
# set_completer_delims().
comp_ctx = parse_lib.ParseContext(comp_arena, parse_opts, aliases,
oil_grammar)
comp_ctx.Init_Trail(trail1)
comp_ctx.Init_OnePassParse(True)
hist_arena = alloc.Arena()
hist_arena.PushSource(source.Unused('history'))
trail2 = parse_lib.Trail()
hist_ctx = parse_lib.ParseContext(hist_arena, parse_opts, aliases,
oil_grammar)
hist_ctx.Init_Trail(trail2)
# Deps helps manages dependencies. These dependencies are circular:
# - cmd_ev and word_ev, arith_ev -- for command sub, arith sub
# - arith_ev and word_ev -- for $(( ${a} )) and $x$(( 1 ))
# - cmd_ev and builtins (which execute code, like eval)
# - prompt_ev needs word_ev for $PS1, which needs prompt_ev for @P
cmd_deps = cmd_eval.Deps()
cmd_deps.mutable_opts = mutable_opts
# TODO: In general, cmd_deps are shared between the mutually recursive
# evaluators. Some of the four below are only shared between a builtin and
# the CommandEvaluator, so we could put them somewhere else.
cmd_deps.traps = {}
cmd_deps.trap_nodes = [] # TODO: Clear on fork() to avoid duplicates
job_state = process.JobState()
fd_state = process.FdState(errfmt, job_state, mem, None, None)
my_pid = posix.getpid()
debug_path = ''
debug_dir = environ.get('OSH_DEBUG_DIR')
if flag.debug_file is not None:
# --debug-file takes precedence over OSH_DEBUG_DIR
debug_path = flag.debug_file
elif debug_dir is not None:
debug_path = os_path.join(debug_dir, '%d-osh.log' % my_pid)
if len(debug_path):
# This will be created as an empty file if it doesn't exist, or it could be
# a pipe.
try:
debug_f = util.DebugFile(fd_state.OpenForWrite(debug_path)) # type: util._DebugFile
except OSError as e:
stderr_line("osh: Couldn't open %r: %s", debug_path,
posix.strerror(e.errno))
return 2
else:
debug_f = util.NullDebugFile()
if flag.xtrace_to_debug_file:
trace_f = debug_f
else:
trace_f = util.DebugFile(mylib.Stderr())
tracer = dev.Tracer(parse_ctx, exec_opts, mutable_opts, mem, trace_f)
fd_state.tracer = tracer # circular dep
sig_state = pyos.SignalState()
sig_state.InitShell()
waiter = process.Waiter(job_state, exec_opts, sig_state, tracer)
fd_state.waiter = waiter
cmd_deps.debug_f = debug_f
# Not using datetime for dependency reasons. TODO: maybe show the date at
# the beginning of the log, and then only show time afterward? To save
# space, and make space for microseconds. (datetime supports microseconds
# but time.strftime doesn't).
if mylib.PYTHON:
iso_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
debug_f.log('%s [%d] OSH started with argv %s', iso_stamp, my_pid, arg_r.argv)
if len(debug_path):
debug_f.log('Writing logs to %r', debug_path)
interp = environ.get('OSH_HIJACK_SHEBANG', '')
search_path = state.SearchPath(mem)
ext_prog = process.ExternalProgram(interp, fd_state, errfmt, debug_f)
splitter = split.SplitContext(mem)
# split() builtin
# TODO: Accept IFS as a named arg? split('a b', IFS=' ')
funcs_builtin.SetGlobalFunc(
mem, 'split', lambda s, ifs=None: splitter.SplitForWordEval(s, ifs=ifs))
# glob() builtin
  # TODO: This instantiation is duplicated in osh/word_eval.py
globber = glob_.Globber(exec_opts)
funcs_builtin.SetGlobalFunc(
mem, 'glob', lambda s: globber.OilFuncCall(s))
# This could just be OSH_DEBUG_STREAMS='debug crash' ? That might be
# stuffing too much into one, since a .json crash dump isn't a stream.
crash_dump_dir = environ.get('OSH_CRASH_DUMP_DIR', '')
cmd_deps.dumper = dev.CrashDumper(crash_dump_dir)
comp_lookup = completion.Lookup()
# Various Global State objects to work around readline interfaces
compopt_state = completion.OptionState()
comp_ui_state = comp_ui.State()
prompt_state = comp_ui.PromptState()
dir_stack = state.DirStack()
#
# Initialize builtins that don't depend on evaluators
#
builtins = {} # type: Dict[int, vm._Builtin]
modules = {} # type: Dict[str, bool]
shell_ex = executor.ShellExecutor(
mem, exec_opts, mutable_opts, procs, builtins, search_path,
ext_prog, waiter, tracer, job_state, fd_state, errfmt)
shell_native.AddPure(builtins, mem, procs, modules, mutable_opts, aliases,
search_path, errfmt)
shell_native.AddIO(builtins, mem, dir_stack, exec_opts, splitter, parse_ctx,
errfmt)
AddProcess(builtins, mem, shell_ex, ext_prog, fd_state, job_state, waiter,
tracer, search_path, errfmt)
builtins[builtin_i.help] = help_builtin
# Interactive, depend on line_input
builtins[builtin_i.bind] = builtin_lib.Bind(line_input, errfmt)
builtins[builtin_i.history] = builtin_lib.History(line_input, mylib.Stdout())
#
# Initialize Evaluators
#
arith_ev = sh_expr_eval.ArithEvaluator(mem, exec_opts, parse_ctx, errfmt)
bool_ev = sh_expr_eval.BoolEvaluator(mem, exec_opts, parse_ctx, errfmt)
expr_ev = expr_eval.OilEvaluator(mem, procs, splitter, errfmt)
word_ev = word_eval.NormalWordEvaluator(mem, exec_opts, mutable_opts,
splitter, errfmt)
assign_b = shell_native.InitAssignmentBuiltins(mem, procs, errfmt)
cmd_ev = cmd_eval.CommandEvaluator(mem, exec_opts, errfmt, procs,
assign_b, arena, cmd_deps)
AddOil(builtins, mem, cmd_ev, errfmt, procs, arena)
# PromptEvaluator rendering is needed in non-interactive shells for @P.
prompt_ev = prompt.Evaluator(lang, parse_ctx, mem)
# Wire up circular dependencies.
vm.InitCircularDeps(arith_ev, bool_ev, expr_ev, word_ev, cmd_ev, shell_ex,
prompt_ev, tracer)
#
# Initialize builtins that depend on evaluators
#
# note: 'printf -v a[i]' and 'unset a[i]' require same deps
builtins[builtin_i.printf] = builtin_printf.Printf(mem, exec_opts, parse_ctx,
arith_ev, errfmt)
builtins[builtin_i.unset] = builtin_assign.Unset(mem, exec_opts, procs,
parse_ctx, arith_ev, errfmt)
builtins[builtin_i.eval] = builtin_meta.Eval(parse_ctx, exec_opts, cmd_ev,
tracer)
source_builtin = builtin_meta.Source(parse_ctx, search_path, cmd_ev,
fd_state, tracer, errfmt)
builtins[builtin_i.source] = source_builtin
builtins[builtin_i.dot] = source_builtin
shell_native.AddMeta(builtins, shell_ex, mutable_opts, mem, procs, aliases,
search_path, errfmt)
shell_native.AddBlock(builtins, mem, mutable_opts, dir_stack, cmd_ev,
errfmt)
# Another block builtin
builtins[builtin_i.json] = builtin_oil.Json(mem, cmd_ev, errfmt)
spec_builder = builtin_comp.SpecBuilder(cmd_ev, parse_ctx, word_ev, splitter,
comp_lookup)
complete_builtin = builtin_comp.Complete(spec_builder, comp_lookup)
builtins[builtin_i.complete] = complete_builtin
builtins[builtin_i.compgen] = builtin_comp.CompGen(spec_builder)
builtins[builtin_i.compopt] = builtin_comp.CompOpt(compopt_state, errfmt)
builtins[builtin_i.compadjust] = builtin_comp.CompAdjust(mem)
builtins[builtin_i.trap] = builtin_process.Trap(sig_state, cmd_deps.traps,
cmd_deps.trap_nodes,
parse_ctx, tracer, errfmt)
# History evaluation is a no-op if line_input is None.
hist_ev = history.Evaluator(line_input, hist_ctx, debug_f)
if flag.c is not None:
arena.PushSource(source.CFlag())
line_reader = reader.StringLineReader(flag.c, arena) # type: reader._Reader
if flag.i: # -c and -i can be combined
mutable_opts.set_interactive()
elif flag.i: # force interactive
arena.PushSource(source.Stdin(' -i'))
line_reader = py_reader.InteractiveLineReader(
arena, prompt_ev, hist_ev, line_input, prompt_state)
mutable_opts.set_interactive()
else:
if script_name is None:
if flag.headless:
arena.PushSource(source.Headless())
line_reader = None # unused!
# Not setting '-i' flag for now. Some people's bashrc may want it?
else:
stdin = mylib.Stdin()
if stdin.isatty():
arena.PushSource(source.Interactive())
line_reader = py_reader.InteractiveLineReader(
arena, prompt_ev, hist_ev, line_input, prompt_state)
mutable_opts.set_interactive()
else:
arena.PushSource(source.Stdin(''))
line_reader = reader.FileLineReader(stdin, arena)
else:
arena.PushSource(source.MainFile(script_name))
try:
f = fd_state.Open(script_name)
except OSError as e:
stderr_line("osh: Couldn't open %r: %s", script_name,
posix.strerror(e.errno))
return 1
line_reader = reader.FileLineReader(f, arena)
# TODO: assert arena.NumSourcePaths() == 1
# TODO: .rc file needs its own arena.
c_parser = parse_ctx.MakeOshParser(line_reader)
# Calculate ~/.config/oil/oshrc or oilrc. Used for both -i and --headless
# We avoid cluttering the user's home directory. Some users may want to ln
# -s ~/.config/oil/oshrc ~/oshrc or ~/.oshrc.
# https://unix.stackexchange.com/questions/24347/why-do-some-applications-use-config-appname-for-their-config-data-while-other
home_dir = pyos.GetMyHomeDir()
assert home_dir is not None
rc_path = flag.rcfile or os_path.join(home_dir, '.config/oil/%src' % lang)
if flag.headless:
mutable_opts.set_redefine_proc()
mutable_opts.set_redefine_module()
# This is like an interactive shell, so we copy some initialization from
# below. Note: this may need to be tweaked.
_InitDefaultCompletions(cmd_ev, complete_builtin, comp_lookup)
# NOTE: called AFTER _InitDefaultCompletions.
try:
SourceStartupFile(fd_state, rc_path, lang, parse_ctx, cmd_ev)
except util.UserExit as e:
return e.status
loop = main_loop.Headless(cmd_ev, parse_ctx, errfmt)
try:
# TODO: What other exceptions happen here?
status = loop.Loop()
except util.UserExit as e:
status = e.status
# Same logic as interactive shell
box = [status]
cmd_ev.MaybeRunExitTrap(box)
status = box[0]
return status
if exec_opts.interactive():
# bash: 'set -o emacs' is the default only in the interactive shell
mutable_opts.set_emacs()
mutable_opts.set_redefine_proc()
mutable_opts.set_redefine_module()
if line_input:
# NOTE: We're using a different WordEvaluator here.
ev = word_eval.CompletionWordEvaluator(mem, exec_opts, mutable_opts,
splitter, errfmt)
ev.arith_ev = arith_ev
ev.expr_ev = expr_ev
ev.prompt_ev = prompt_ev
ev.CheckCircularDeps()
root_comp = completion.RootCompleter(ev, mem, comp_lookup, compopt_state,
comp_ui_state, comp_ctx, debug_f)
term_width = 0
if flag.completion_display == 'nice':
try:
term_width = libc.get_terminal_width()
except IOError: # stdin not a terminal
pass
if term_width != 0:
display = comp_ui.NiceDisplay(term_width, comp_ui_state, prompt_state,
debug_f, line_input) # type: comp_ui._IDisplay
else:
display = comp_ui.MinimalDisplay(comp_ui_state, prompt_state, debug_f)
history_filename = os_path.join(home_dir, '.config/oil/history_%s' % lang)
comp_ui.InitReadline(line_input, history_filename, root_comp, display,
debug_f)
_InitDefaultCompletions(cmd_ev, complete_builtin, comp_lookup)
else: # Without readline module
display = comp_ui.MinimalDisplay(comp_ui_state, prompt_state, debug_f)
sig_state.InitInteractiveShell(display)
# NOTE: called AFTER _InitDefaultCompletions.
try:
SourceStartupFile(fd_state, rc_path, lang, parse_ctx, cmd_ev)
except util.UserExit as e:
return e.status
line_reader.Reset() # After sourcing startup file, render $PS1
prompt_plugin = prompt.UserPlugin(mem, parse_ctx, cmd_ev)
try:
status = main_loop.Interactive(flag, cmd_ev, c_parser, display,
prompt_plugin, errfmt)
except util.UserExit as e:
status = e.status
box = [status]
cmd_ev.MaybeRunExitTrap(box)
status = box[0]
return status
if flag.rcfile: # bash doesn't have this warning, but it's useful
stderr_line('osh warning: --rcfile ignored in non-interactive shell')
if exec_opts.noexec():
status = 0
try:
node = main_loop.ParseWholeFile(c_parser)
except error.Parse as e:
ui.PrettyPrintError(e, arena)
status = 2
    if status == 0:
if flag.parser_mem_dump is not None: # only valid in -n mode
input_path = '/proc/%d/status' % posix.getpid()
pyutil.CopyFile(input_path, flag.parser_mem_dump)
ui.PrintAst(node, flag)
else:
if flag.parser_mem_dump is not None:
raise error.Usage('--parser-mem-dump can only be used with -n')
try:
status = main_loop.Batch(cmd_ev, c_parser, arena,
cmd_flags=cmd_eval.IsMainProgram)
except util.UserExit as e:
status = e.status
box = [status]
cmd_ev.MaybeRunExitTrap(box)
status = box[0]
# NOTE: 'exit 1' is ControlFlow and gets here, but subshell/commandsub
# don't because they call sys.exit().
if flag.runtime_mem_dump is not None:
input_path = '/proc/%d/status' % posix.getpid()
pyutil.CopyFile(input_path, flag.runtime_mem_dump)
# NOTE: We haven't closed the file opened with fd_state.Open
return status
| 34.887283
| 129
| 0.68619
|
cc8b5733e7c2490bd66cd2444e296424fd581101
| 1,813
|
py
|
Python
|
venv/lib/python3.8/site-packages/pyqtgraph/exporters/HDF5Exporter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 23
|
2017-09-04T13:20:38.000Z
|
2022-03-08T08:15:17.000Z
|
venv/lib/python3.8/site-packages/pyqtgraph/exporters/HDF5Exporter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 102
|
2021-01-20T11:14:21.000Z
|
2021-12-12T17:34:42.000Z
|
venv/lib/python3.8/site-packages/pyqtgraph/exporters/HDF5Exporter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 5
|
2017-11-26T19:40:46.000Z
|
2021-03-11T17:25:23.000Z
|
from ..Qt import QtGui, QtCore
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
import numpy
try:
import h5py
HAVE_HDF5 = True
except ImportError:
HAVE_HDF5 = False
__all__ = ['HDF5Exporter']
class HDF5Exporter(Exporter):
Name = "HDF5 Export: plot (x,y)"
windows = []
allowCopy = False
def __init__(self, item):
Exporter.__init__(self, item)
self.params = Parameter(name='params', type='group', children=[
{'name': 'Name', 'type': 'str', 'value': 'Export',},
{'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},
])
def parameters(self):
return self.params
def export(self, fileName=None):
if not HAVE_HDF5:
raise RuntimeError("This exporter requires the h5py package, "
"but it was not importable.")
if not isinstance(self.item, PlotItem):
raise Exception("Must have a PlotItem selected for HDF5 export.")
if fileName is None:
self.fileSaveDialog(filter=["*.h5", "*.hdf", "*.hd5"])
return
dsname = self.params['Name']
fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to "delete/overwrite"
data = []
appendAllX = self.params['columnMode'] == '(x,y) per plot'
for i,c in enumerate(self.item.curves):
d = c.getData()
if appendAllX or i == 0:
data.append(d[0])
data.append(d[1])
fdata = numpy.array(data).astype('double')
dset = fd.create_dataset(dsname, data=fdata)
fd.close()
if HAVE_HDF5:
HDF5Exporter.register()
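# A minimal usage sketch, assuming a working Qt environment plus h5py; the
# output path 'curves.h5' and dataset name are purely illustrative. It simply
# drives the exporter registered above on a tiny throwaway plot.
def _demo_hdf5_export():
    import pyqtgraph as pg
    app = pg.mkQApp()                                     # keep a reference to the Qt app
    plot_window = pg.plot([0, 1, 2, 3], [1, 4, 9, 16])    # a toy curve to export
    exporter = HDF5Exporter(plot_window.getPlotItem())
    exporter.params['Name'] = 'demo_curve'                # name of the HDF5 dataset
    exporter.export('curves.h5')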
| 30.728814
| 108
| 0.557639
|
e5f270ec96878a308026b83752557eb51b15879e
| 3,873
|
py
|
Python
|
tests/sa/test_sa_default.py
|
ghostebony/aiomysql
|
6df06f3bdd6fb7363bf3aafae8cf7fc4f2a8fbc6
|
[
"MIT"
] | null | null | null |
tests/sa/test_sa_default.py
|
ghostebony/aiomysql
|
6df06f3bdd6fb7363bf3aafae8cf7fc4f2a8fbc6
|
[
"MIT"
] | 11
|
2022-01-17T01:47:38.000Z
|
2022-03-31T01:42:22.000Z
|
tests/sa/test_sa_default.py
|
ghostebony/aiomysql
|
6df06f3bdd6fb7363bf3aafae8cf7fc4f2a8fbc6
|
[
"MIT"
] | null | null | null |
import datetime
import pytest
from sqlalchemy import MetaData, Table, Column, Integer, String
from sqlalchemy import func, DateTime, Boolean
from aiomysql import sa
meta = MetaData()
table = Table('sa_tbl_default_test', meta,
Column('id', Integer, nullable=False, primary_key=True),
Column('string_length', Integer,
default=func.length('qwerty')),
Column('number', Integer, default=100, nullable=False),
Column('description', String(255), nullable=False,
default='default test'),
Column('created_at', DateTime,
default=datetime.datetime.now),
Column('enabled', Boolean, default=True))
@pytest.fixture()
def make_engine(mysql_params, connection):
async def _make_engine(**kwargs):
if "unix_socket" in mysql_params:
conn_args = {"unix_socket": mysql_params["unix_socket"]}
else:
conn_args = {
"host": mysql_params['host'],
"port": mysql_params['port'],
}
return (await sa.create_engine(db=mysql_params['db'],
user=mysql_params['user'],
password=mysql_params['password'],
minsize=10,
**conn_args,
**kwargs))
return _make_engine
async def start(engine):
async with engine.acquire() as conn:
await conn.execute("DROP TABLE IF EXISTS sa_tbl_default_test")
await conn.execute("CREATE TABLE sa_tbl_default_test "
"(id integer,"
" string_length integer, "
"number integer,"
" description VARCHAR(255), "
"created_at DATETIME(6), "
"enabled TINYINT)")
@pytest.mark.run_loop
async def test_default_fields(make_engine):
engine = await make_engine()
await start(engine)
async with engine.acquire() as conn:
await conn.execute(table.insert().values())
res = await conn.execute(table.select())
row = await res.fetchone()
assert row.string_length == 6
assert row.number == 100
assert row.description == 'default test'
assert row.enabled is True
assert type(row.created_at) == datetime.datetime
@pytest.mark.run_loop
async def test_default_fields_isnull(make_engine):
engine = await make_engine()
await start(engine)
async with engine.acquire() as conn:
created_at = None
enabled = False
await conn.execute(table.insert().values(
enabled=enabled,
created_at=created_at,
))
res = await conn.execute(table.select())
row = await res.fetchone()
assert row.number == 100
assert row.string_length == 6
assert row.description == 'default test'
assert row.enabled == enabled
assert row.created_at == created_at
@pytest.mark.run_loop
async def test_default_fields_edit(make_engine):
engine = await make_engine()
await start(engine)
async with engine.acquire() as conn:
created_at = datetime.datetime.now()
description = 'new descr'
enabled = False
number = 111
await conn.execute(table.insert().values(
description=description,
enabled=enabled,
created_at=created_at,
number=number,
))
res = await conn.execute(table.select())
row = await res.fetchone()
assert row.number == number
assert row.string_length == 6
assert row.description == description
assert row.enabled == enabled
assert row.created_at == created_at
| 34.274336
| 73
| 0.571133
|
98e731fb2b350b366de1c540835480bba5858d71
| 831
|
py
|
Python
|
GPSInfo/tests/test_example.py
|
jgraber/Python_Scripts
|
ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7
|
[
"MIT"
] | null | null | null |
GPSInfo/tests/test_example.py
|
jgraber/Python_Scripts
|
ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7
|
[
"MIT"
] | 3
|
2021-07-01T19:36:30.000Z
|
2021-08-30T19:59:00.000Z
|
GPSInfo/tests/test_example.py
|
jgraber/Python_Scripts
|
ffdbf17d28521cf5c2a3f7aadfb817e7811f86b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: test_example
.. moduleauthor:: Johnny Graber <JG@JGraber.ch>
This is a sample test module.
"""
import pytest
"""
This is just an example test suite. It will check the current project version
numbers against the original version numbers and will start failing as soon as
the current version numbers change.
"""
def test_import_getVersions_originalVersions():
"""
Arrange: Load the primary module.
Act: Retrieve the versions.
Assert: Versions match the version numbers at the time of project creation.
"""
assert (
# fmt: off
# '0.0.1' == gpsinfo.__version__,
# fmt: on
"This test is expected to fail when the version increments. "
"It is here only as an example and you can remove it."
)
| 25.181818
| 79
| 0.672684
|
d82a1e37cb637c76b0b08ec14fbd60c86127cb25
| 5,778
|
py
|
Python
|
action-baseline/nets/anchor_target_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 24
|
2019-07-10T15:13:27.000Z
|
2021-07-08T12:12:40.000Z
|
action-baseline/nets/anchor_target_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 7
|
2019-10-06T12:22:04.000Z
|
2020-04-15T13:14:10.000Z
|
action-baseline/nets/anchor_target_layer.py
|
leaderj1001/Action-Localization
|
04d972e6dc3c07d347c70893723d91487c1c8cbd
|
[
"MIT"
] | 4
|
2019-10-31T09:01:15.000Z
|
2021-03-26T04:20:21.000Z
|
import os
from utils.config import cfg
import numpy as np
import numpy.random as npr
from nets.bbox_transform import bbox_overlaps, bbox_transform
import torch
def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):
"""Same as the anchor target layer in original Fast/er RCNN """
A = num_anchors
total_anchors = all_anchors.shape[0]
K = total_anchors / num_anchors
# allow boxes to sit over the edge by a small amount
_allowed_border = 0
    # keep only the anchors that fit inside the given image
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -_allowed_border) &
(all_anchors[:, 1] >= -_allowed_border) &
(all_anchors[:, 2] < im_info[1] + _allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + _allowed_border) # height
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside), ), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes overlaps (ex, gt)
overlaps = bbox_overlaps(np.ascontiguousarray(anchors, dtype=np.float), np.ascontiguousarray(gt_boxes, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
# first set the negatives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
# only the positive ones have regression targets
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) & (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT / np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) / np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
# map of shape (..., H, W)
height, width = rpn_cls_score.shape[1:3]
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
rpn_labels = labels
# bbox_targets
bbox_targets = bbox_targets.reshape((1, height, width, A * 4))
rpn_bbox_targets = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights.reshape((1, height, width, A * 4))
rpn_bbox_inside_weights = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights.reshape((1, height, width, A * 4))
rpn_bbox_outside_weights = bbox_outside_weights
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of size count) """
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def _compute_targets(ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
# assert gt_rois.shape[1] == 5
return bbox_transform(torch.from_numpy(ex_rois), torch.from_numpy(gt_rois[:, :])).numpy()
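# A small illustrative sketch of what _unmap does: values computed for the
# subset of anchors inside the image are scattered back into an array covering
# every anchor, with `fill` everywhere else. The numbers below are made up.
def _demo_unmap():
    labels_inside = np.array([1, 0, -1], dtype=np.float32)  # labels for the 3 kept anchors
    inds_inside = np.array([0, 2, 5])                        # their positions among 6 total anchors
    full_labels = _unmap(labels_inside, count=6, inds=inds_inside, fill=-1)
    # full_labels == [ 1., -1.,  0., -1., -1., -1.]
    return full_labels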
| 40.978723
| 123
| 0.690897
|
4c7b06aae28834cf55e0ad57f40cf8c1394f6c17
| 3,091
|
py
|
Python
|
beers/models/drinks.py
|
nvembar/onehundredbeers
|
96af46ad946e63736f9924cb3b966b0d597254f5
|
[
"CC0-1.0"
] | 1
|
2017-07-23T22:44:29.000Z
|
2017-07-23T22:44:29.000Z
|
beers/models/drinks.py
|
nvembar/onehundredbeers
|
96af46ad946e63736f9924cb3b966b0d597254f5
|
[
"CC0-1.0"
] | 25
|
2017-07-23T21:04:17.000Z
|
2021-06-10T18:06:50.000Z
|
beers/models/drinks.py
|
nvembar/onehundredbeers
|
96af46ad946e63736f9924cb3b966b0d597254f5
|
[
"CC0-1.0"
] | 1
|
2019-02-24T01:16:08.000Z
|
2019-02-24T01:16:08.000Z
|
"""The objects that can get a player points"""
import datetime
import logging
from django.db import models
from django.utils import timezone
class BeerManager(models.Manager):
"""Manages beer data"""
def create_beer(self, name, brewery, untappd_url='',
style='', description='', brewery_url='',
brewery_city='', brewery_state=''):
"""Creates a contest with defaults on active status, creation date,
update date, beer count, and user count"""
beer = self.create(name=name, brewery=brewery,
style=style, description=description,
untappd_url=untappd_url,
brewery_city=brewery_city,
brewery_state=brewery_state,
brewery_url=brewery_url,
last_updated=timezone.now())
return beer
# Create your models here.
class Beer(models.Model):
"Represents a common beer - can be shared across contests"
name = models.CharField(max_length=250)
brewery = models.CharField(max_length=250)
style = models.CharField(max_length=250, null=True, blank=True, default='')
description = models.CharField(max_length=250, null=True, blank=True, default='')
brewery_city = models.CharField(max_length=250, null=True, blank=True, default='')
brewery_state = models.CharField(max_length=250, null=True, blank=True, default='')
brewery_country = models.CharField(max_length=250, null=True, blank=True, default='')
brewery_lat = models.FloatField(null=True, blank=True)
brewery_lon = models.FloatField(null=True, blank=True)
untappd_id = models.CharField(max_length=25, null=True, blank=True)
untappd_url = models.URLField(null=True, blank=True)
brewery_url = models.URLField(null=True, blank=True)
last_updated = models.DateTimeField()
objects = BeerManager()
def __str__(self):
return "Beer[{}<{}> / {}<{}>]".format(self.name,
self.untappd_url,
self.brewery, self.brewery_url)
class Brewery_Manager(models.Manager):
def create_brewery(self, name, untappd_url, location=None,):
return self.create(name=name,
untappd_url=untappd_url,
location=location,
last_updated=timezone.now())
class Brewery(models.Model):
name = models.CharField(max_length=250)
untappd_id = models.CharField(max_length=25, null=True, blank=True,)
untappd_url = models.URLField(null=True, blank=True,)
state = models.CharField(max_length=250)
location = models.CharField(max_length=250, null=True, blank=True, default=None)
last_updated = models.DateTimeField()
objects = Brewery_Manager()
def __str__(self):
return "Brewery[name={}, url={}, location={}]".format(self.name,
self.untappd_url,
self.location)
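# A minimal usage sketch of the manager above, assuming a configured Django
# project with this app installed and its migrations applied; the beer and
# brewery names are placeholders.
def _demo_create_beer():
    beer = Beer.objects.create_beer(
        name='Hypothetical IPA',
        brewery='Example Brewing Co.',
        style='IPA',
        description='illustrative record only')
    return str(beer)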
| 43.535211
| 89
| 0.615011
|
7a343a6529e8dd24e9cc9c56ef68eebf2705efb8
| 11,233
|
py
|
Python
|
xarray/backends/rasterio_.py
|
visr/xarray
|
9e8707d2041cfa038c31fc2284c1fe40bc3368e9
|
[
"Apache-2.0"
] | null | null | null |
xarray/backends/rasterio_.py
|
visr/xarray
|
9e8707d2041cfa038c31fc2284c1fe40bc3368e9
|
[
"Apache-2.0"
] | 1
|
2018-12-05T09:21:17.000Z
|
2018-12-05T09:21:17.000Z
|
xarray/backends/rasterio_.py
|
visr/xarray
|
9e8707d2041cfa038c31fc2284c1fe40bc3368e9
|
[
"Apache-2.0"
] | 1
|
2020-05-29T16:17:35.000Z
|
2020-05-29T16:17:35.000Z
|
import os
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy as np
from .. import DataArray
from ..core import indexing
from ..core.utils import is_scalar
from .common import BackendArray
from .file_manager import CachingFileManager
from .locks import SerializableLock
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
_ERROR_MSG = ('The kind of indexing operation you are trying to do is not '
              'valid on rasterio files. Try to load your data with ds.load() '
'first.')
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
def __init__(self, manager):
self.manager = manager
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
self._shape = (riods.count, riods.height, riods.width)
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError('All bands should have the same dtype')
self._dtype = np.dtype(dtypes[0])
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
def _get_indexer(self, key):
""" Get indexer for rasterio array.
Parameter
---------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
assert len(key) == 3, 'rasterio datasets should always be 3D'
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(k, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = k.indices(n)
np_inds.append(slice(None, None, step))
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(- (2 - i))
start = k
stop = k + 1
else:
start, stop = np.min(k), np.max(k) + 1
np_inds.append(k - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
def _getitem(self, key):
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(
stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
riods = self.manager.acquire()
out = riods.read(band_key, window=window)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip('{}'), dtype='float', sep=',')
def default(s):
return s.strip('{}')
parse = {'wavelength': parsevec,
'fwhm': parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
return parsed_meta
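# A small illustrative sketch of _parse_envi on made-up ENVI tag values:
# vector-valued keys become float arrays, everything else just loses its braces.
def _demo_parse_envi():
    meta = {'wavelength': '{450.0, 550.0, 650.0}', 'description': '{example header}'}
    parsed = _parse_envi(meta)
    # parsed['wavelength'] -> array([450., 550., 650.])
    # parsed['description'] -> 'example header'
    return parsed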
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None,
lock=None):
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
You can generate 2D coordinates from the file's attributes with::
from affine import Affine
da = xr.open_rasterio('path_to_file.tif')
transform = Affine.from_gdal(*da.attrs['transform'])
nx, ny = da.sizes['x'], da.sizes['y']
x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform
Parameters
----------
filename : str
Path to the file to open.
parse_coordinates : bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used to avoid issues with concurrent access to the same file when using
dask's multithreaded backend.
Returns
-------
data : DataArray
The newly created DataArray.
"""
import rasterio
manager = CachingFileManager(rasterio.open, filename, mode='r')
riods = manager.acquire()
if cache is None:
cache = chunks is None
coords = OrderedDict()
# Get bands
if riods.count < 1:
raise ValueError('Unknown dims')
coords['band'] = np.asarray(riods.indexes)
# Get coordinates
if LooseVersion(rasterio.__version__) < '1.0':
transform = riods.affine
else:
transform = riods.transform
if transform.is_rectilinear:
# 1d coordinates
parse = True if parse_coordinates is None else parse_coordinates
if parse:
nx, ny = riods.width, riods.height
# xarray coordinates are pixel centered
x, _ = (np.arange(nx) + 0.5, np.zeros(nx) + 0.5) * transform
_, y = (np.zeros(ny) + 0.5, np.arange(ny) + 0.5) * transform
coords['y'] = y
coords['x'] = x
else:
# 2d coordinates
parse = False if (parse_coordinates is None) else parse_coordinates
if parse:
warnings.warn(
"The file coordinates' transformation isn't "
"rectilinear: xarray won't parse the coordinates "
"in this case. Set `parse_coordinates=False` to "
"suppress this warning.",
RuntimeWarning, stacklevel=3)
# Attributes
attrs = dict()
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
attrs['transform'] = tuple(transform)[:6]
if hasattr(riods, 'crs') and riods.crs:
# CRS is a dict-like object specific to rasterio
# If CRS is not None, we convert it back to a PROJ4 string using
# rasterio itself
attrs['crs'] = riods.crs.to_string()
if hasattr(riods, 'res'):
# (width, height) tuple of pixels in units of CRS
attrs['res'] = riods.res
if hasattr(riods, 'is_tiled'):
# Is the TIF tiled? (bool)
# We cast it to an int for netCDF compatibility
attrs['is_tiled'] = np.uint8(riods.is_tiled)
if hasattr(riods, 'nodatavals'):
# The nodata values for the raster bands
attrs['nodatavals'] = tuple(
np.nan if nodataval is None else nodataval
for nodataval in riods.nodatavals)
# Parse extra metadata from tags, if supported
parsers = {'ENVI': _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for k, v in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if (isinstance(v, (list, np.ndarray)) and
len(v) == riods.count):
coords[k] = ('band', np.asarray(v))
else:
attrs[k] = v
data = indexing.LazilyOuterIndexedArray(RasterioArrayWrapper(manager))
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=('band', 'y', 'x'),
coords=coords, attrs=attrs)
if chunks is not None:
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
token = tokenize(filename, mtime, chunks)
name_prefix = 'open_rasterio-%s' % token
if lock is None:
lock = RASTERIO_LOCK
result = result.chunk(chunks, name_prefix=name_prefix, token=token,
lock=lock)
# Make the file closeable
result._file_obj = manager
return result
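# A minimal usage sketch; 'example.tif' stands in for any GeoTIFF readable by
# rasterio, and the chunk sizes are arbitrary illustrative values.
def _demo_open_rasterio(path='example.tif'):
    da = open_rasterio(path, chunks={'x': 512, 'y': 512})
    return da.sizes, da.attrs.get('crs')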
| 35.103125
| 109
| 0.611947
|
12444977af2f3f16a18597bc7a7a79535fe7283e
| 4,330
|
py
|
Python
|
iot/sway/Experiments/spl_sway.py
|
moar82/jMetalPy
|
6c5c0fa50a1e16e2b477e321341c12481ab81515
|
[
"MIT"
] | null | null | null |
iot/sway/Experiments/spl_sway.py
|
moar82/jMetalPy
|
6c5c0fa50a1e16e2b477e321341c12481ab81515
|
[
"MIT"
] | null | null | null |
iot/sway/Experiments/spl_sway.py
|
moar82/jMetalPy
|
6c5c0fa50a1e16e2b477e321341c12481ab81515
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Jianfeng Chen <jchen37@ncsu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
from Benchmarks.SPL import DimacsModel
from Algorithms.sway_sampler import sway, cont_dominate
from gmpy2 import popcount, mpz
from functools import partial
from repeats import request_new_file
import random
import time
import copy
import pycosat
import pdb
def count1(decint):
return popcount(mpz(decint))
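# count1 is just a population count over the integer's binary representation;
# a quick illustrative check with arbitrary values:
def _demo_count1():
    assert count1(0b10110) == 3       # three bits set
    return count1(2 ** 20 - 1)        # a 20-bit all-ones mask -> 20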
def split_products(pop, groupC=5):
rand = random.choice(pop)
center = count1(int(rand, 2))
workloads = list()
dists = list()
for p in pop:
wl = count1(int(p, 2))
dist = count1(wl ^ center)
workloads.append(wl)
dists.append(dist)
poptuple = [(p, i, j) for p, i, j in zip(pop, workloads, dists)]
# sort by the workloads
poptuple = sorted(poptuple, key=lambda i: i[1])
n = int(len(poptuple) / groupC)
groups = [poptuple[i * n:i * n + n] for i in range(groupC)]
west, east, westItems, eastItems = list(), list(), list(), list()
for g in groups:
k = sorted(g, key=lambda i: i[2])
# filling the answers
west.append(k[0][0])
east.append(k[-1][0])
westItems.extend(map(lambda i: i[0], k[:len(k) // 2]))
eastItems.extend(map(lambda i: i[0], k[len(k) // 2:]))
return west, east, westItems, eastItems
def comparing(part1, part2):
onewin = 0
twowin = 0
for i, j in zip(part1, part2):
if cont_dominate(i, j) > 0:
onewin += 1
else:
twowin += 1
return onewin >= twowin
def sat_gen_valid_pop(fm, n):
pops = list()
cnf = copy.deepcopy(fm.cnfs)
while len(pops) < n:
for index, sol in enumerate(pycosat.itersolve(cnf)):
new_ind = fm.Individual(''.join(['1' if i > 0 else '0' for i in sol]))
pops.append(new_ind)
if index > 20:
break
for x in cnf:
random.shuffle(x)
random.shuffle(cnf)
random.shuffle(pops)
return pops
def get_sway_res(model):
# load the 10k sat solutions
# with open('./tse_rs/' + model.name + '.txt', 'r') as f:
# candidates = list()
# for l in f:
# can = model.Individual(l.strip('\n'))
# candidates.append(can)
candidates = sat_gen_valid_pop(model, 10000)
res = sway(candidates, model.eval, partial(split_products, groupC=min(15, model.featureNum // 7)), comparing)
return res
if __name__ == '__main__':
# models = ['eshop']
#models = [ 'fiasco', 'freebsd', 'linux']
models = ['webportal']
for repeat in range(1):
for name in models:
print(name)
model = DimacsModel(name)
start_time = time.time()
res = get_sway_res(model)
finish_time = time.time()
# save the results
with open(request_new_file('./tse_rs/sway', name), 'w') as f:
f.write('T:' + str(start_time) + '\n~~~\n')
f.write('T:' + str(finish_time) + '\n')
for i in res:
f.write(' '.join(map(str, i.fitness.values)))
f.write('\n')
| 30.928571
| 113
| 0.621478
|
3d538d76800ef8093488e7f84c640148d36af63f
| 5,015
|
py
|
Python
|
pyvisa/resources/usb.py
|
jpsecher/pyvisa
|
bb8fd9d21b1efa1f311e12402e21292a656a0e6a
|
[
"MIT"
] | null | null | null |
pyvisa/resources/usb.py
|
jpsecher/pyvisa
|
bb8fd9d21b1efa1f311e12402e21292a656a0e6a
|
[
"MIT"
] | null | null | null |
pyvisa/resources/usb.py
|
jpsecher/pyvisa
|
bb8fd9d21b1efa1f311e12402e21292a656a0e6a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""High level wrapper for USB resources.
This file is part of PyVISA.
:copyright: 2014-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .. import attributes, constants
from ..attributes import Attribute
from .messagebased import ControlRenMixin, MessageBasedResource
class USBCommon(MessageBasedResource):
"""Common class for USB resources."""
#: USB interface number used by the given session.
interface_number: Attribute[int] = attributes.AttrVI_ATTR_USB_INTFC_NUM()
#: USB serial number of this device.
serial_number: Attribute[str] = attributes.AttrVI_ATTR_USB_SERIAL_NUM()
#: USB protocol used by this USB interface.
usb_protocol: Attribute[int] = attributes.AttrVI_ATTR_USB_PROTOCOL()
#: Maximum size of data that will be stored by any given USB interrupt.
maximum_interrupt_size: Attribute[int] = attributes.AttrVI_ATTR_USB_MAX_INTR_SIZE()
#: Manufacturer name.
manufacturer_name: Attribute[str] = attributes.AttrVI_ATTR_MANF_NAME()
#: Manufacturer identification number of the device.
manufacturer_id: Attribute[int] = attributes.AttrVI_ATTR_MANF_ID()
#: Model name of the device.
model_name: Attribute[str] = attributes.AttrVI_ATTR_MODEL_NAME()
#: Model code for the device.
model_code: Attribute[int] = attributes.AttrVI_ATTR_MODEL_CODE()
@MessageBasedResource.register(constants.InterfaceType.usb, "INSTR")
class USBInstrument(ControlRenMixin, USBCommon):
"""USB INSTR resources USB::manufacturer ID::model code::serial number
More complex resource names can be specified with the following grammar:
USB[board]::manufacturer ID::model code::serial number[::USB interface number][::INSTR]
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Whether the device is 488.2 compliant.
is_4882_compliant: Attribute[bool] = attributes.AttrVI_ATTR_4882_COMPLIANT()
def control_in(
self,
request_type_bitmap_field: int,
request_id: int,
request_value: int,
index: int,
length: int = 0,
) -> bytes:
"""Performs a USB control pipe transfer from the device.
Parameters
----------
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
length : int
wLength parameter of the setup stage of a USB control transfer.
This value also specifies the size of the data buffer to receive
the data from the optional data stage of the control transfer.
Returns
-------
bytes
The data buffer that receives the data from the optional data stage
of the control transfer.
"""
return self.visalib.usb_control_in(
self.session,
request_type_bitmap_field,
request_id,
request_value,
index,
length,
)[0]
def control_out(
self,
request_type_bitmap_field: int,
request_id: int,
request_value: int,
index: int,
data: bytes = b"",
):
"""Performs a USB control pipe transfer to the device.
Parameters
----------
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
        data : bytes
The data buffer that sends the data in the optional data stage of
the control transfer.
"""
return self.visalib.usb_control_out(
self.session,
request_type_bitmap_field,
request_id,
request_value,
index,
data,
)
@MessageBasedResource.register(constants.InterfaceType.usb, "RAW")
class USBRaw(USBCommon):
"""USB RAW resources: USB::manufacturer ID::model code::serial number::RAW
More complex resource names can be specified with the following grammar:
USB[board]::manufacturer ID::model code::serial number[::USB interface number]::RAW
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
| 34.115646
| 95
| 0.662014
|
c3805ccaacc75e2dc1da23d2fbe3e6e59a1d26aa
| 7,656
|
py
|
Python
|
src/encoders/encoder.py
|
ligerfotis/semantic-code-search_gpt2_tf
|
5eff2ceda0789ee265ad08b742f0d728884e3df8
|
[
"MIT"
] | 1
|
2020-09-12T04:03:12.000Z
|
2020-09-12T04:03:12.000Z
|
src/encoders/encoder.py
|
ligerfotis/semantic-code-search_gpt2_tf
|
5eff2ceda0789ee265ad08b742f0d728884e3df8
|
[
"MIT"
] | 1
|
2022-03-03T17:41:25.000Z
|
2022-03-16T01:11:02.000Z
|
src/encoders/encoder.py
|
ligerfotis/semantic-code-search_gpt2_tf
|
5eff2ceda0789ee265ad08b742f0d728884e3df8
|
[
"MIT"
] | 1
|
2021-12-07T06:55:41.000Z
|
2021-12-07T06:55:41.000Z
|
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Dict, Any, Optional, Tuple
import tensorflow as tf
class QueryType(Enum):
DOCSTRING = 'docstring_as_query'
FUNCTION_NAME = 'func_name_as_query'
class Encoder(ABC):
@classmethod
@abstractmethod
def get_default_hyperparameters(cls) -> Dict[str, Any]:
"""
Returns:
Default set of hyperparameters for encoder.
            Note that, at use, the hyperparameter names will be prefixed with '${label}_' for the
chosen encoder label.
"""
return {}
def __init__(self, label: str, hyperparameters: Dict[str, Any], metadata: Dict[str, Any]):
"""
Args:
label: Label for the encoder, used in names of hyperparameters.
hyperparameters: Hyperparameters used.
metadata: Dictionary with metadata (e.g., vocabularies) used by this encoder.
"""
self.__label = label
self.__hyperparameters = hyperparameters
self.__metadata = metadata
self.__placeholders = {}
@property
def label(self):
return self.__label
@property
def hyperparameters(self):
return self.__hyperparameters
@property
def metadata(self):
return self.__metadata
@property
def placeholders(self):
return self.__placeholders
@property
@abstractmethod
def output_representation_size(self) -> int:
raise Exception("Encoder.output_representation_size not implemented!")
def get_hyper(self, hyper_name: str) -> Any:
"""
Retrieve hyper parameter, prefixing the given name with the label of the encoder.
Args:
hyper_name: Some hyperparameter name.
Returns:
self.hyperparameters['%s_%s' % (self.label, hyper_name)]
"""
return self.hyperparameters['%s_%s' % (self.label, hyper_name)]
def _make_placeholders(self):
"""
Creates placeholders for encoders.
"""
self.__placeholders['dropout_keep_rate'] = \
tf.compat.v1.placeholder(tf.float32,
shape=(),
name='dropout_keep_rate')
@abstractmethod
def make_model(self, is_train: bool=False) -> tf.Tensor:
"""
Create the actual encoder model, including necessary placeholders and parameters.
Args:
is_train: Bool flag indicating if the model is used for training or inference.
Returns:
A tensor encoding the passed data.
"""
pass
@classmethod
@abstractmethod
def init_metadata(cls) -> Dict[str, Any]:
"""
Called to initialise the metadata before looking at actual data (i.e., set up Counters, lists, sets, ...)
Returns:
A dictionary that will be used to collect the raw metadata (token counts, ...).
"""
return {}
@classmethod
@abstractmethod
def load_metadata_from_sample(cls, data_to_load: Any, raw_metadata: Dict[str, Any],
use_subtokens: bool=False, mark_subtoken_end: bool=False) -> None:
"""
Called to load metadata from a single sample.
Args:
            data_to_load: Raw data to load; type depends on the encoder. Usually comes from a data parser such as
tokenize_python_from_string or tokenize_docstring_from_string.
raw_metadata: A dictionary that will be used to collect the raw metadata (token counts, ...).
use_subtokens: subtokenize identifiers
mark_subtoken_end: add a special marker for subtoken ends. Used only if use_subtokens=True
"""
pass
@classmethod
@abstractmethod
def finalise_metadata(cls, encoder_label: str, hyperparameters: Dict[str, Any], raw_metadata_list: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
Called to finalise the metadata after looking at actual data (i.e., compute vocabularies, ...)
Args:
encoder_label: Label used for this encoder.
hyperparameters: Hyperparameters used.
raw_metadata_list: List of dictionaries used to collect the raw metadata (token counts, ...) (one per file).
Returns:
Finalised metadata (vocabs, ...).
"""
return {}
@classmethod
@abstractmethod
def load_data_from_sample(cls,
encoder_label: str,
hyperparameters: Dict[str, Any],
metadata: Dict[str, Any],
data_to_load: Any,
function_name: Optional[str],
result_holder: Dict[str, Any],
is_test: bool=True) -> bool:
"""
Called to convert a raw sample into the internal format, allowing for preprocessing.
Result will eventually be fed again into the split_data_into_minibatches pipeline.
Args:
encoder_label: Label used for this encoder.
hyperparameters: Hyperparameters used to load data.
metadata: Computed metadata (e.g. vocabularies).
            data_to_load: Raw data to load; type depends on the encoder. Usually comes from a data parser such as
tokenize_python_from_string or tokenize_docstring_from_string.
function_name: The name of the function.
result_holder: Dictionary used to hold the prepared data.
is_test: Flag marking if we are handling training data or not.
Returns:
Flag indicating if the example should be used (True) or dropped (False)
"""
return True
@abstractmethod
def init_minibatch(self, batch_data: Dict[str, Any]) -> None:
"""
Initialise a minibatch that will be constructed.
Args:
batch_data: The minibatch data.
"""
pass
@abstractmethod
def extend_minibatch_by_sample(self, batch_data: Dict[str, Any], sample: Dict[str, Any], is_train: bool=False,
query_type: QueryType=QueryType.DOCSTRING.value) -> bool:
"""
Extend a minibatch under construction by one sample. This is where the data may be randomly perturbed in each
epoch for data augmentation.
Args:
batch_data: The minibatch data.
sample: The sample to add.
is_train: Flag indicating if we are in train mode (which causes data augmentation)
query_type: Indicates what should be used as the query, the docstring or the function name.
Returns:
True iff the minibatch is full after this sample.
"""
return True
@abstractmethod
def minibatch_to_feed_dict(self, batch_data: Dict[str, Any], feed_dict: Dict[tf.Tensor, Any], is_train: bool) -> None:
"""
Take a collected minibatch and add it to a feed dict that can be fed directly to the constructed model.
Args:
batch_data: The minibatch data.
feed_dict: The feed dictionary that we will send to tensorflow.
is_train: Flag indicating if we are in training mode.
"""
feed_dict[self.placeholders['dropout_keep_rate']] = self.hyperparameters['dropout_keep_rate'] if is_train else 1.0
@abstractmethod
def get_token_embeddings(self) -> Tuple[tf.Tensor, List[str]]:
"""Returns the tensorflow embeddings tensor (VxD) along with a list (of size V) of the names of the
embedded elements."""
pass
| 36.631579
| 143
| 0.617947
|
079672191f91780a7588313a2b0d673b8ffd20d2
| 5,622
|
py
|
Python
|
src/pip/_internal/vcs/mercurial.py
|
jameshfisher/pip
|
8365bc3dcc21809f2fb86c4db5e40aaf2384c897
|
[
"MIT"
] | 1
|
2020-11-29T23:59:03.000Z
|
2020-11-29T23:59:03.000Z
|
src/pip/_internal/vcs/mercurial.py
|
jameshfisher/pip
|
8365bc3dcc21809f2fb86c4db5e40aaf2384c897
|
[
"MIT"
] | null | null | null |
src/pip/_internal/vcs/mercurial.py
|
jameshfisher/pip
|
8365bc3dcc21809f2fb86c4db5e40aaf2384c897
|
[
"MIT"
] | null | null | null |
import configparser
import logging
import os
from typing import TYPE_CHECKING
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs.versioncontrol import (
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if TYPE_CHECKING:
from typing import List, Optional
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import RevOptions
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = (
'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http',
)
@staticmethod
def get_base_rev_args(rev):
# type: (str) -> List[str]
return [rev]
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Hg repository at the url to the destination location"""
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir.path
)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(make_command('clone', '--noupdate', '-q', url, dest))
self.run_command(
make_command('update', '-q', rev_options.to_args()),
cwd=dest,
)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url.secret)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(['pull', '-q'], cwd=dest)
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_remote_url(cls, location):
# type: (str) -> str
url = cls.run_command(
['showconfig', 'paths.default'],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
if cls._is_local_repository(url):
url = path_to_url(url)
return url.strip()
@classmethod
def get_revision(cls, location):
# type: (str) -> str
"""
Return the repository-local changeset revision number, as an integer.
"""
current_revision = cls.run_command(
['parents', '--template={rev}'],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
return current_revision
@classmethod
def get_requirement_revision(cls, location):
# type: (str) -> str
"""
Return the changeset identification hash, as a 40-character
hexadecimal string
"""
current_rev_hash = cls.run_command(
['parents', '--template={node}'],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
return current_rev_hash
@classmethod
def is_commit_id_equal(cls, dest, name):
# type: (str, Optional[str]) -> bool
"""Always assume the versions don't match"""
return False
@classmethod
def get_subdirectory(cls, location):
# type: (str) -> Optional[str]
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
repo_root = cls.run_command(
['root'], show_stdout=False, stdout_only=True, cwd=location
).strip()
if not os.path.isabs(repo_root):
repo_root = os.path.abspath(os.path.join(location, repo_root))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def get_repository_root(cls, location):
# type: (str) -> Optional[str]
loc = super().get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
['root'],
cwd=location,
show_stdout=False,
stdout_only=True,
on_returncode='raise',
log_failed_cmd=False,
)
except BadCommand:
logger.debug("could not determine if %s is under hg control "
"because hg is not available", location)
return None
except InstallationError:
return None
return os.path.normpath(r.rstrip('\r\n'))
vcs.register(Mercurial)
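# A minimal sketch of how the registration above is consumed elsewhere in pip
# (pip internals, not a public API; the revision 'tip' is only illustrative):
def _demo_hg_backend():
    backend = vcs.get_backend('hg')                          # the Mercurial() instance registered above
    return backend.schemes, Mercurial.get_base_rev_args('tip')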
| 31.943182
| 78
| 0.588581
|
ba5ece0136a21fe86c92336f4506e2736a0d985f
| 4,572
|
py
|
Python
|
clase_3/mini_desafios.py
|
ivigilante/curso-python-itba
|
57ce47a5202c3d4fe3783fea52f550cfec957331
|
[
"MIT"
] | null | null | null |
clase_3/mini_desafios.py
|
ivigilante/curso-python-itba
|
57ce47a5202c3d4fe3783fea52f550cfec957331
|
[
"MIT"
] | null | null | null |
clase_3/mini_desafios.py
|
ivigilante/curso-python-itba
|
57ce47a5202c3d4fe3783fea52f550cfec957331
|
[
"MIT"
] | null | null | null |
# Read the file Tabla1.xlsx, which contains the standings of a championship.
# The file has two columns, Equipo and Puntos.
# For each team, determine the goal difference
# (goals for - goals against) and show every goal difference with print
import pandas
def dif_goles():
archivo = pandas.read_excel("Tabla1.xlsx", index_col = "Equipo")
archivo = archivo.to_dict("index")
for equipo, puntos in archivo.items():
print("{}: {}".format(equipo, puntos["Goles a favor"] - puntos["Goles en contra"]))
#dif_goles()
# Read the file Tabla1.xlsx, which contains the standings of a championship, and determine which team is the champion (1st) and which is the loser (last). The file has two columns, Equipo and Puntos
def campeon():
archivo = pandas.read_excel("Tabla1.xlsx", index_col = "Puntos")
archivo = archivo.to_dict("index")
print(archivo)
print(archivo.keys())
print("Ganador:",archivo[max(archivo.keys())]["Equipo"])
print("Perdedor:",archivo[min(archivo.keys())]["Equipo"])
# campeon()
# Compute the average chemistry grade of all the students in the file Datos.xlsx.
def prom_materia(materia):
archivo = pandas.read_excel("Datos.xlsx")
archivo = archivo.to_dict("list")
promedio = round(sum(archivo[materia]) / len(archivo[materia]),2)
print(promedio)
# prom_materia("Quimica")
# Write a function that receives as parameters: a DataFrame variable (the table of students) and the index of a student. It must then return the average of that student's grades across the different subjects.
def notas_alumno(dataframe, idx): # using records and lists
datos = dataframe.to_dict("records")
print("Notas de: {} {}".format(datos[idx]["Nombre"], datos[idx]["Apellido"]))
for materia, nota in list(datos[idx].items())[3::]:
print(materia,": " ,nota)
    # Average over all of the student's subjects
prom = round(sum([nota for materia, nota in list(datos[idx].items())[3::]]) / len (list(datos[idx].keys())[3::]),2)
print(prom)
def notas(dataframe, idx): # using the DataFrame directly
print("Notas de {} {}".format(dataframe.loc[idx]["Nombre"],dataframe.loc[idx]["Apellido"]))
for materia, nota in dataframe.loc[idx].drop(index = ["Nombre", "Apellido", "Legajo"]).items():
print(materia,": ",nota)
notas = [nota for nota in dataframe.loc[idx][3::]]
promedio = round(sum(notas) / len(notas), 2)
print("Promedio total:",promedio)
# archivo = pandas.read_excel("Datos.xlsx")
# notas(archivo, 3)
def prom_general(dataframe, idx):
print("Notas de {} {}".format(dataframe.loc[idx]["Nombre"],dataframe.loc[idx]["Apellido"]))
notas = [nota for nota in dataframe.loc[idx][3::]]
promedio = round(sum(notas) / len(notas), 2)
print("Promedio total:",promedio)
# Get the overall average only for those students who passed Matematica.
def prom_si_aprobaron(materia):
datos = pandas.read_excel("Datos.xlsx")
datos = datos[datos[materia] >= 4]
for i in datos.index:
prom_general(datos,i)
# prom_si_aprobaron("Matematica")
# Find the number of occurrences of the words "Trump" and "the" in the text of the news article.
def encontrar_palabras(archivo, palabras_buscadas):
char_no_deseados = ('\ufeff', '—', '\n', '’s', ',', '.')
file = open(archivo,"r")
palabras = []
contenido = file.readlines()
for linea in contenido:
for char in char_no_deseados:
linea = linea.replace(char, "")
palabras += [palabra.upper() for palabra in linea.split()]
for palabra_buscada in palabras_buscadas:
print("Se encontraron {} ocurrencias de la palabra \"{}\"".format(palabras.count(palabra_buscada.upper()), palabra_buscada))
# encontrar_palabras("noticia.txt", ["trump","the"])
# Find the word with the highest number of occurrences in the text of the news article.
def encontrar_mayor_repeticiones(archivo):
char_no_deseados = ('\ufeff', '—', '\n', '’s', ',', '.')
file = open(archivo,"r")
palabras = []
contenido = file.readlines()
for linea in contenido:
for char in char_no_deseados:
linea = linea.replace(char, "")
palabras += [palabra.upper() for palabra in linea.split()]
palabras_contadas = []
mayor_repeticion = {"palabra":"", "repeticiones":0}
for palabra in palabras:
if palabra not in palabras_contadas:
repeticiones = palabras.count(palabra)
palabras_contadas.append(palabra)
if repeticiones > mayor_repeticion["repeticiones"]:
mayor_repeticion["palabra"] = palabra
mayor_repeticion["repeticiones"] = repeticiones
print("La palabra con mayor repeticiones es \"{}\" con {} repeticiones".format(mayor_repeticion["palabra"],mayor_repeticion["repeticiones"]))
# encontrar_mayor_repeticiones("noticia.txt")
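# An equivalent approach using collections.Counter, added as an illustrative sketch (not
# part of the original exercise); it applies the same cleanup rules as the functions above
# and avoids the repeated palabras.count() scans.
def encontrar_mayor_repeticiones_counter(archivo):
    from collections import Counter
    char_no_deseados = ('\ufeff', '—', '\n', '’s', ',', '.')
    with open(archivo, "r") as file:
        palabras = []
        for linea in file:
            for char in char_no_deseados:
                linea = linea.replace(char, "")
            palabras += [palabra.upper() for palabra in linea.split()]
    palabra, repeticiones = Counter(palabras).most_common(1)[0]
    print("La palabra con mayor repeticiones es \"{}\" con {} repeticiones".format(palabra, repeticiones))
# encontrar_mayor_repeticiones_counter("noticia.txt")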
| 43.132075
| 213
| 0.716754
|
95879bb07bc8e4e23ae9b627ef673d90ef8aa25d
| 5,417
|
py
|
Python
|
qiskit_machine_learning/neural_networks/two_layer_qnn.py
|
Zoufalc/qiskit-machine-learning
|
aae3941214cd9667a53b643f229d11d0bff32c60
|
[
"Apache-2.0"
] | 1
|
2021-07-07T21:23:38.000Z
|
2021-07-07T21:23:38.000Z
|
qiskit_machine_learning/neural_networks/two_layer_qnn.py
|
Zoufalc/qiskit-machine-learning
|
aae3941214cd9667a53b643f229d11d0bff32c60
|
[
"Apache-2.0"
] | null | null | null |
qiskit_machine_learning/neural_networks/two_layer_qnn.py
|
Zoufalc/qiskit-machine-learning
|
aae3941214cd9667a53b643f229d11d0bff32c60
|
[
"Apache-2.0"
] | 1
|
2021-04-11T14:30:32.000Z
|
2021-04-11T14:30:32.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A Two Layer Neural Network consisting of a first parametrized circuit representing a feature map
to map the input data to a quantum states and a second one representing a ansatz that can
be trained to solve a particular tasks."""
from typing import Optional, Union
from qiskit import QuantumCircuit
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.opflow import PauliSumOp, StateFn, OperatorBase, ExpectationBase
from qiskit.providers import BaseBackend, Backend
from qiskit.utils import QuantumInstance
from .opflow_qnn import OpflowQNN
from ..exceptions import QiskitMachineLearningError
class TwoLayerQNN(OpflowQNN):
"""Two Layer Quantum Neural Network consisting of a feature map, a ansatz,
and an observable.
"""
def __init__(self, num_qubits: int = None,
feature_map: QuantumCircuit = None,
ansatz: QuantumCircuit = None,
observable: Optional[OperatorBase] = None,
exp_val: Optional[ExpectationBase] = None,
quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None):
r"""Initializes the Two Layer Quantum Neural Network.
Args:
num_qubits: The number of qubits to represent the network. If None and neither the
feature_map nor the ansatz is given, an exception is raised.
feature_map: The (parametrized) circuit to be used as feature map. If None is given,
the `ZZFeatureMap` is used.
ansatz: The (parametrized) circuit to be used as ansatz. If None is given,
the `RealAmplitudes` circuit is used.
observable: The observable to be measured to determine the output of the network. If None
is given, the `Z^{\otimes num_qubits}` observable is used.
exp_val: The expectation converter used to evaluate the expectation value of the observable.
quantum_instance: The quantum instance or backend used to evaluate the network.
Raises:
QiskitMachineLearningError: In case of inconsistent num_qubits, feature_map, ansatz.
"""
# check num_qubits, feature_map, and ansatz
if num_qubits is None and feature_map is None and ansatz is None:
raise QiskitMachineLearningError(
'Need at least one of num_qubits, feature_map, or ansatz!')
num_qubits_: int = None
feature_map_: QuantumCircuit = None
ansatz_: QuantumCircuit = None
if num_qubits:
num_qubits_ = num_qubits
if feature_map:
if feature_map.num_qubits != num_qubits:
raise QiskitMachineLearningError('Incompatible num_qubits and feature_map!')
feature_map_ = feature_map
else:
feature_map_ = ZZFeatureMap(num_qubits)
if ansatz:
if ansatz.num_qubits != num_qubits:
raise QiskitMachineLearningError('Incompatible num_qubits and ansatz!')
ansatz_ = ansatz
else:
ansatz_ = RealAmplitudes(num_qubits)
else:
if feature_map and ansatz:
if feature_map.num_qubits != ansatz.num_qubits:
raise QiskitMachineLearningError('Incompatible feature_map and ansatz!')
feature_map_ = feature_map
ansatz_ = ansatz
num_qubits_ = feature_map.num_qubits
elif feature_map:
num_qubits_ = feature_map.num_qubits
feature_map_ = feature_map
ansatz_ = RealAmplitudes(num_qubits_)
elif ansatz:
num_qubits_ = ansatz.num_qubits
ansatz_ = ansatz
feature_map_ = ZZFeatureMap(num_qubits_)
self._feature_map = feature_map_
input_params = list(self._feature_map.parameters)
self._ansatz = ansatz_
weight_params = list(self._ansatz.parameters)
# construct circuit
self._circuit = QuantumCircuit(num_qubits_)
self._circuit.append(self._feature_map, range(num_qubits_))
self._circuit.append(self._ansatz, range(num_qubits_))
# construct observable
self.observable = observable if observable else PauliSumOp.from_list([('Z'*num_qubits_, 1)])
# combine all to operator
operator = ~StateFn(self.observable) @ StateFn(self._circuit)
super().__init__(operator, input_params, weight_params, quantum_instance=quantum_instance)
@property
def feature_map(self) -> QuantumCircuit:
""" Returns the used feature map."""
return self._feature_map
@property
def ansatz(self) -> QuantumCircuit:
""" Returns the used ansatz."""
return self._ansatz
@property
def circuit(self) -> QuantumCircuit:
""" Returns the underlying quantum circuit."""
return self._circuit
@property
def num_qubits(self) -> int:
""" Returns the number of qubits used by ansatz and feature map."""
return self._circuit.num_qubits
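# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how TwoLayerQNN might be evaluated, assuming a qiskit
# installation contemporary with this file (Aer simulator plus the opflow-based
# QNN API). The parameter values below are arbitrary placeholders.
#
#   import numpy as np
#   from qiskit import Aer
#   from qiskit.utils import QuantumInstance
#
#   qi = QuantumInstance(Aer.get_backend('statevector_simulator'))
#   qnn = TwoLayerQNN(num_qubits=2, quantum_instance=qi)
#   x = np.random.rand(qnn.num_inputs)    # one input sample (feature map parameters)
#   w = np.random.rand(qnn.num_weights)   # one weight vector (ansatz parameters)
#   print(qnn.forward(x, w))              # expectation value of the observable
#   print(qnn.backward(x, w))             # gradients with respect to inputs and weights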
| 41.669231
| 100
| 0.657929
|
7f8dfdc9f5694fe9f05d537c4bf1f600d45a8d4d
| 9,067
|
py
|
Python
|
one_fm/api/mobile/authentication.py
|
mohsinalimat/One-FM
|
ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3
|
[
"MIT"
] | null | null | null |
one_fm/api/mobile/authentication.py
|
mohsinalimat/One-FM
|
ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3
|
[
"MIT"
] | null | null | null |
one_fm/api/mobile/authentication.py
|
mohsinalimat/One-FM
|
ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3
|
[
"MIT"
] | null | null | null |
import frappe
import pyotp
from frappe.twofactor import get_otpsecret_for_, process_2fa_for_sms, confirm_otp_token,get_email_subject_for_2fa,get_email_body_for_2fa
from frappe.integrations.oauth2 import get_token
from frappe.utils.background_jobs import enqueue
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from frappe.frappeclient import FrappeClient
from six import iteritems
from frappe import _
import requests, json
from one_fm.api.mobile.roster import get_current_user_details
from frappe.utils.password import update_password as _update_password
from twilio.rest import Client
@frappe.whitelist(allow_guest=True)
def login(client_id, grant_type, employee_id, password):
"""
Params:
Client Id: Can be found in Social Login Key doctype.
Grant_type: implicit
Employee Id: Employee Id of user
Password: Erpnext Password
Returns:
Access token, refresh token, Enrollment status for checkin, Employee Id, Employee name, Employee image, Employee/Supervisor flag.
"""
try:
site = "https://"+frappe.utils.cstr(frappe.local.site)
# username = frappe.get_value("Employee", employee_id, "user_id")
username = frappe.get_value("Employee", {'employee_id':employee_id}, 'user_id')
if not username:
return {'error': _('Employee ID is incorrect. Please check again.')}
args = {
'client_id': client_id,
'grant_type': grant_type,
'username': username,
'password': password
}
headers = {'Accept': 'application/json'}
session = requests.Session()
# Login
auth_api = site + "/api/method/frappe.integrations.oauth2.get_token"
response = session.post(
auth_api,
data=args, headers=headers
)
if response.status_code == 200:
conn = FrappeClient(site,username=username, password=password)
user, user_roles, user_employee = conn.get_api("one_fm.api.mobile.roster.get_current_user_details")
res = response.json()
res.update(user_employee)
res.update({"roles": user_roles})
if "Operations Manager" in user_roles or "Projects Manager" in user_roles or "Site Supervisor" in user_roles:
res.update({"supervisor": 1})
else:
res.update({"supervisor": 0})
return res
else:
frappe.throw(_(response.text))
except Exception as e:
return frappe.utils.response.report_error(e.http_status_code)
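# Illustrative sketch (not part of the original module): how a mobile client might call
# this whitelisted endpoint over HTTP. The site URL, client id and credentials below are
# hypothetical placeholders.
#
#   import requests
#   resp = requests.post(
#       "https://example.erpnext.site/api/method/one_fm.api.mobile.authentication.login",
#       data={
#           "client_id": "<client id from Social Login Key>",
#           "grant_type": "implicit",
#           "employee_id": "EMP-0001",
#           "password": "<erpnext password>",
#       },
#       headers={"Accept": "application/json"},
#   )
#   tokens = resp.json()   # access/refresh tokens plus employee details and supervisor flag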
def change_enroll_status(employee):
doc = frappe.get_doc("Employee", employee)
doc.enrolled = 0
doc.save(ignore_permissions=True)
frappe.db.commit()
@frappe.whitelist(allow_guest=True)
def forgot_password(employee_id,OTP_source):
"""
Params:
employee_id: employee ID
OTP_source: SMS, Email or WhatsApp
Returns:
Temp Id: To be used in next api call for verifying the SMS/Email OTP.
Sends an OTP to the mobile number associated with the User.
"""
try:
# employee_user_id = frappe.get_value("Employee", employee_id, "user_id")
employee_user_id = frappe.get_value("Employee", {'employee_id':employee_id}, 'user_id')
otp_secret = get_otpsecret_for_(employee_user_id)
token = int(pyotp.TOTP(otp_secret).now())
tmp_id = frappe.generate_hash(length=8)
cache_2fa_data(employee_user_id, token, otp_secret, tmp_id)
if OTP_source=="SMS":
verification_obj = process_2fa_for_sms(employee_user_id, token, otp_secret)
return {
'message': _('Password reset instruction sms has been sent to your registered mobile number.'),
'temp_id': tmp_id
}
elif OTP_source=="Email":
verification_obj = process_2fa_for_email(employee_user_id, token, otp_secret)
return {
'message': _('Password reset instruction email has been sent to your Email Address.'),
'temp_id': tmp_id
}
elif OTP_source=="WhatsApp":
verification_obj = process_2fa_for_whatsapp(employee_user_id, token, otp_secret)
return {
'message': _('Password reset instruction message has been sent to your WhatsApp.'),
'temp_id': tmp_id
}
else:
return ('Please Select where you want your OTP to be sent.')
# Save data in local
# frappe.local.response['verification'] = verification_obj
# frappe.local.response['tmp_id'] = tmp_id
except Exception as e:
return frappe.utils.response.report_error(e.http_status_code)
@frappe.whitelist(allow_guest=True)
def update_password(otp, id, employee_id, new_password):
"""
Params:
otp: OTP received via SMS
id: Temp Id returned in forgot_password call response
employee_id
new_password : new password to update
"""
try:
login_manager = frappe.local.login_manager
if confirm_otp_token(login_manager, otp, id):
user_id = frappe.get_value("Employee", {'employee_id':employee_id}, ["user_id"])
_update_password(user_id, new_password)
return {
'message': _('Password Updated!')
}
except Exception as e:
return frappe.utils.response.report_error(e.http_status_code)
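# Illustrative sketch (not part of the original module) of the intended two-step password
# reset flow using the two whitelisted functions above; the employee id, OTP and new
# password are hypothetical placeholders.
#
#   step_one = forgot_password("EMP-0001", "SMS")          # sends the OTP, returns temp_id
#   # ... the user reads the OTP from the SMS ...
#   step_two = update_password(otp="123456",
#                              id=step_one["temp_id"],
#                              employee_id="EMP-0001",
#                              new_password="<new password>")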
def cache_2fa_data(user, token, otp_secret, tmp_id):
'''Cache and set expiry for data.'''
# pwd = frappe.form_dict.get('pwd')
# Hardcode the pwd for the time being.
pwd = '12345'
# set increased expiry time for SMS and Email
expiry_time = 1800
frappe.cache().set(tmp_id + '_token', token)
frappe.cache().expire(tmp_id + '_token', expiry_time)
for k, v in iteritems({'_usr': user, '_pwd': pwd, '_otp_secret': otp_secret}):
frappe.cache().set("{0}{1}".format(tmp_id, k), v)
frappe.cache().expire("{0}{1}".format(tmp_id, k), expiry_time)
#Not needed or being used
def signup(employee_id):
try:
user = frappe.get_value("Employee", {'employee_id':employee_id}, 'user_id')
if user=="Administrator":
return 'not allowed'
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
reset_password(user)
return {
'message': _('Password reset instruction sms has been sent to your registered mobile number.')
}
except frappe.DoesNotExistError as e:
frappe.clear_messages()
return frappe.utils.response.report_error(e.http_status_code)
# Not needed or being used
def reset_password(user, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
user.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
msg = """Dear {username},
Please click on the following link to reset your password:
{link}
""".format(username=user.full_name, link=link)
send_sms([user.mobile_no], msg)
def process_2fa_for_whatsapp(user, token, otp_secret):
'''Process sms method for 2fa.'''
phone = frappe.db.get_value('User', user, ['phone', 'mobile_no'], as_dict=1)
phone = phone.mobile_no or phone.phone
status = send_token_via_whatsapp(otp_secret, token=token, phone_no=phone)
verification_obj = {
'token_delivery': status,
'prompt': status and 'Enter verification code sent to {}'.format(phone[:4] + '******' + phone[-3:]),
'method': 'SMS',
'setup': status
}
return verification_obj
def send_token_via_whatsapp(otpsecret, token=None, phone_no=None):
# frappe.db.get_value returns the requested fields in order: sid, token, t_number.
Twilio = frappe.db.get_value('Twilio Setting', filters=None, fieldname=['sid','token','t_number'])
account_sid = Twilio[0]
auth_token = Twilio[1]
client = Client(account_sid, auth_token)
From = 'whatsapp:' + Twilio[2]
to = 'whatsapp:+' + phone_no
hotp = pyotp.HOTP(otpsecret)
body= 'Your verification code {}.'.format(hotp.at(int(token)))
message = client.messages.create(
from_=From,
body=body,
to=to
)
print(message.sid)
return True
def process_2fa_for_email(user, token, otp_secret, method='Email'):
'''Process Email method for 2fa.'''
otp_issuer = frappe.db.get_value('System Settings', 'System Settings', 'otp_issuer_name')
subject = None
message = None
status = True
prompt = ''
'''Sending email verification'''
prompt = _('Verification code has been sent to your registered email address.')
status = send_token_via_email(user, token, otp_secret, otp_issuer, subject=subject, message=message)
verification_obj = {
'token_delivery': status,
'prompt': status and prompt,
'method': 'Email',
'setup': status
}
return verification_obj
def send_token_via_email(user, token, otp_secret, otp_issuer, subject=None, message=None):
'''Send token to user as email.'''
user_email = frappe.db.get_value('Employee', {"user_id":user}, 'personal_email')
if not user_email:
return False
hotp = pyotp.HOTP(otp_secret)
otp = hotp.at(int(token))
template_args = {'otp': otp, 'otp_issuer': otp_issuer}
if not subject:
subject = get_email_subject_for_2fa(template_args)
if not message:
message = get_email_body_for_2fa(template_args)
email_args = {
'recipients': user_email,
'sender': None,
'subject': subject,
'message': message,
'header': [_('Verification Code'), 'blue'],
'delayed': False,
'retry':3
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, event=None,
is_async=True, job_name=None, now=False, **email_args)
return True
| 32.732852
| 136
| 0.717437
|
95822f9ebf6e0d2d116705a87dcc095aa3b4b992
| 22
|
py
|
Python
|
btd6_memory_info/generated/NinjaKiwi/LiNK/DataModels/data_models.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/NinjaKiwi/LiNK/DataModels/data_models.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/NinjaKiwi/LiNK/DataModels/data_models.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class DataModels: pass
| 22
| 22
| 0.863636
|
62a93f25a7f92ece47e53bc59f627688995310c4
| 482
|
py
|
Python
|
recipes/Python/577699_Linux_cat_command_backward/recipe-577699.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/577699_Linux_cat_command_backward/recipe-577699.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/577699_Linux_cat_command_backward/recipe-577699.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
#!/usr/bin/python
import sys
import os.path
import os
if len(sys.argv) < 2:
print('Usage: %s file_name' % sys.argv[0])
sys.exit(1)
fp = open(sys.argv[1],'r')
#size = os.path.getsize(sys.argv[1])
fp.seek(-1, 2)
# Walk backwards from the end of the file one character at a time,
# printing each complete line as soon as its leading newline is reached.
try:
data = ''
data = fp.read(1)
while True:
fp.seek(-2, 1)
t = fp.read(1)
if t == '\n':
print data
data = ''
else:
data = t+data
except Exception, e:
print data
sys.exit(0)
print data
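# A Python 3 re-telling of the same idea, included as an illustrative sketch (not part of
# the original recipe): read all lines and emit them in reverse order. For very large files
# the seek-based approach above keeps memory usage constant, whereas this one does not.
#
#   #!/usr/bin/env python3
#   import sys
#
#   def cat_backward(path):
#       with open(path) as fp:
#           for line in reversed(fp.readlines()):
#               sys.stdout.write(line)
#
#   if __name__ == '__main__':
#       if len(sys.argv) < 2:
#           sys.exit('Usage: %s file_name' % sys.argv[0])
#       cat_backward(sys.argv[1])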
| 16.62069
| 46
| 0.508299
|
9bcbae400db34cdde23edc19918025358558a309
| 70,327
|
py
|
Python
|
test/install_and_test_tool_shed_repositories/base/util.py
|
gmauro/galaxy-central
|
52d50d1b6bbd7db821fffff89607b7c483779f3e
|
[
"CC-BY-3.0"
] | null | null | null |
test/install_and_test_tool_shed_repositories/base/util.py
|
gmauro/galaxy-central
|
52d50d1b6bbd7db821fffff89607b7c483779f3e
|
[
"CC-BY-3.0"
] | 1
|
2015-02-21T18:48:19.000Z
|
2015-02-27T15:50:32.000Z
|
test/install_and_test_tool_shed_repositories/base/util.py
|
gmauro/galaxy-central
|
52d50d1b6bbd7db821fffff89607b7c483779f3e
|
[
"CC-BY-3.0"
] | 3
|
2015-02-22T13:34:16.000Z
|
2020-10-01T01:28:04.000Z
|
import os
import sys
cwd = os.getcwd()
sys.path.append( cwd )
new_path = [ os.path.join( cwd, "lib" ),
os.path.join( cwd, 'test' ),
os.path.join( cwd, 'lib', 'tool_shed', 'scripts', 'api' ) ]
new_path.extend( sys.path )
sys.path = new_path
from galaxy import eggs
eggs.require( 'mercurial' )
eggs.require( "nose" )
import json
import logging
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import nose
import platform
import string
import time
import tool_shed.repository_types.util as rt_util
import tool_shed.util.shed_util_common as suc
import urllib
from datetime import datetime
from datetime import timedelta
from common import get_api_url
from common import get_latest_downloadable_changeset_revision_via_api
from common import get_repository_dict
from common import json_from_url
from common import submit
from common import update
from galaxy.util import asbool
from galaxy.util import listify
from galaxy.util import unicodify
import galaxy.webapps.tool_shed.model.mapping
from nose.plugins import Plugin
from tool_shed.util import common_util
from tool_shed.util import hg_util
from tool_shed.util import tool_dependency_util
from tool_shed.util.xml_util import parse_xml
from mercurial import hg
from mercurial import ui
log = logging.getLogger(__name__)
# Set up a job_conf.xml that explicitly limits jobs to 10 minutes.
job_conf_xml = '''<?xml version="1.0"?>
<!-- A test job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->
<job_conf>
<plugins>
<plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
</plugins>
<handlers>
<handler id="main"/>
</handlers>
<destinations>
<destination id="local" runner="local"/>
</destinations>
<limits>
<limit type="walltime">00:10:00</limit>
</limits>
</job_conf>
'''
# Create a blank shed_tool_conf.xml to define the installed repositories.
shed_tool_conf_xml_template = '''<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
'''
# Since we will be running functional tests we'll need the upload tool, but the rest can be omitted.
tool_conf_xml = '''<?xml version="1.0"?>
<toolbox>
<section name="Get Data" id="getext">
<tool file="data_source/upload.xml"/>
</section>
</toolbox>
'''
# Set up an empty shed_tool_data_table_conf.xml.
tool_data_table_conf_xml_template = '''<?xml version="1.0"?>
<tables>
</tables>
'''
# Optionally set the environment variable GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF to the location of a
# tool shed's configuration file that includes the tool shed from which repositories will be installed.
tool_sheds_conf_xml = '''<?xml version="1.0"?>
<tool_sheds>
<tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/>
<tool_shed name="Galaxy test tool shed" url="http://testtoolshed.g2.bx.psu.edu/"/>
</tool_sheds>
'''
# Should this serve static resources (scripts, images, styles, etc.)?
STATIC_ENABLED = True
# If we have a tool_data_table_conf.test.xml, set it up to be loaded when the UniverseApplication is started.
# This allows one to specify a set of tool data that is used exclusively for testing, and not loaded into any
# Galaxy instance. By default, this will be in the test-data-repo/location directory generated by buildbot_setup.sh.
if os.path.exists( 'tool_data_table_conf.test.xml' ):
additional_tool_data_tables = 'tool_data_table_conf.test.xml'
additional_tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_EXTRA_TOOL_DATA_PATH',
os.path.join( 'test-data-repo', 'location' ) )
else:
additional_tool_data_tables = None
additional_tool_data_path = None
tool_data_table_conf = None
# Set up default tool data tables. If a non-sample version is in config/, use that. Otherwise iterate through lower
# priority versions.
for conf in [ 'tool_data_table_conf.test.xml',
'config/tool_data_table_conf.xml',
'config/tool_data_table_conf.xml.sample',
'tool_data_table_conf.xml',
'tool_data_table_conf.xml.sample' ]:
if os.path.exists( conf ):
tool_data_table_conf = conf
break
# The GALAXY_INSTALL_TEST_TOOL_SHED_URL and GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY environment variables must be
# set for this script to work correctly. If the value of GALAXY_INSTALL_TEST_TOOL_SHED_URL does not refer to one
# of the defaults, the GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF must refer to a tool shed configuration file that contains
# a definition for that tool shed.
galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', None )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
galaxy_encode_secret = 'changethisinproductiontoo'
os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ] = galaxy_encode_secret
else:
galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
default_galaxy_test_port_min = 10000
default_galaxy_test_port_max = 10999
default_galaxy_test_host = '127.0.0.1'
# The following should be an actual value (not None). If developers manually specify their
# tests to use the API it will not work unless a master API key is specified.
default_galaxy_master_api_key = '123456'
testing_single_repository_dict = {}
if 'repository_name' in os.environ and 'repository_owner' in os.environ:
testing_single_repository_dict[ 'name' ] = str( os.environ[ 'repository_name' ] )
testing_single_repository_dict[ 'owner' ] = str( os.environ[ 'repository_owner' ] )
if 'repository_revision' in os.environ:
testing_single_repository_dict[ 'changeset_revision' ] = str( os.environ[ 'repository_revision' ] )
else:
testing_single_repository_dict[ 'changeset_revision' ] = None
# Test frameworks that use this utility module.
REPOSITORIES_WITH_TOOLS = 'repositories_with_tools'
TOOL_DEPENDENCY_DEFINITIONS = 'tool_dependency_definitions'
class ReportResults( Plugin ):
'''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
name = "reportresults"
passed = {}
def options( self, parser, env=os.environ ):
super( ReportResults, self ).options( parser, env=env )
def configure(self, options, conf):
super( ReportResults, self ).configure( options, conf )
if not self.enabled:
return
def addSuccess( self, test ):
'''Only record test IDs that correspond to tool functional tests.'''
if 'TestForTool' in test.id():
test_id = test.id()
# Rearrange the test ID to match the format that is produced in test_results.failures
test_id_parts = test_id.split( '.' )
fixed_test_id = '%s (%s)' % ( test_id_parts[ -1 ], '.'.join( test_id_parts[ :-1 ] ) )
test_parts = fixed_test_id.split( '/' )
owner = test_parts[ -4 ]
name = test_parts[ -3 ]
test_identifier = '%s/%s' % ( owner, name )
if test_identifier not in self.passed:
self.passed[ test_identifier ] = []
self.passed[ test_identifier ].append( fixed_test_id )
def getTestStatus( self, test_identifier ):
if test_identifier in self.passed:
passed_tests = self.passed[ test_identifier ]
del self.passed[ test_identifier ]
return passed_tests
return []
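# Illustrative sketch (not part of the original module): how this plugin is typically
# attached to a nose run so that the ids of passed tool tests can be collected afterwards.
# The argv values and the repository identifier below are hypothetical placeholders.
#
#   plugin = ReportResults()
#   nose.core.TestProgram( argv=[ 'nosetests', '-v', '--with-reportresults' ],
#                          addplugins=[ plugin ],
#                          exit=False )
#   passed_test_ids = plugin.getTestStatus( 'some_owner/some_repository' )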
class RepositoryMetadataApplication( object ):
"""Application that enables updating repository_metadata table records in the Tool Shed."""
def __init__( self, config ):
self.config = config
if self.config.database_connection is False:
self.config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % str( config.database )
log.debug( 'Using database connection: %s' % str( self.config.database_connection ) )
# Setup the database engine and ORM
self.model = galaxy.webapps.tool_shed.model.mapping.init( self.config.file_path,
self.config.database_connection,
engine_options={},
create_tables=False )
self.hgweb_config_manager = self.model.hgweb_config_manager
self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
log.debug( 'Using hgweb.config file: %s' % str( self.hgweb_config_manager.hgweb_config ) )
@property
def sa_session( self ):
"""Returns a SQLAlchemy session."""
return self.model.context.current
def shutdown( self ):
pass
def display_repositories_by_owner( repository_tups ):
"""Group summary display by repository owner."""
repository_tups_by_owner = {}
for repository_tup in repository_tups:
name, owner, changeset_revision = repository_tup
if owner:
if owner in repository_tups_by_owner:
processed_repository_tups_by_owner = repository_tups_by_owner.get( owner, [] )
if repository_tup not in processed_repository_tups_by_owner:
repository_tups_by_owner[ owner ].append( repository_tup )
else:
repository_tups_by_owner[ owner ] = [ repository_tup ]
# Display grouped summary.
for owner, repository_tups in repository_tups_by_owner.items():
print "# "
for repository_tup in repository_tups:
name, owner, changeset_revision = repository_tup
print "# Revision %s of repository %s owned by %s" % ( changeset_revision, name, owner )
def display_tool_dependencies_by_name( tool_dependency_tups ):
"""Group summary display by repository owner."""
tool_dependency_tups_by_name = {}
for tool_dependency_tup in tool_dependency_tups:
name, type, version = tool_dependency_tup
if name:
if name in tool_dependency_tups_by_name:
processed_tool_dependency_tups_by_name = tool_dependency_tups_by_name.get( name, [] )
if tool_dependency_tup not in processed_tool_dependency_tups_by_name:
tool_dependency_tups_by_name[ name ].append( tool_dependency_tup )
else:
tool_dependency_tups_by_name[ name ] = [ tool_dependency_tup ]
# Display grouped summary.
for name, tool_dependency_tups in tool_dependency_tups_by_name.items():
print "# "
for tool_dependency_tup in tool_dependency_tups:
name, type, version = tool_dependency_tup
print "# %s %s version %s" % ( type, name, version )
def get_database_version( app ):
'''
This method returns the value of the version column from the
migrate_version table, using the provided app's SQLAlchemy session to
determine which table to get that from. This way, it's provided with an
instance of a Galaxy UniverseApplication, it will return the Galaxy
instance's database migration version. If a tool shed UniverseApplication
is provided, it returns the tool shed's database migration version.
'''
sa_session = app.model.context.current
result = sa_session.execute( 'SELECT version FROM migrate_version LIMIT 1' )
# This query will return the following structure:
# row = [ column 0, column 1, ..., column n ]
# rows = [ row 0, row 1, ..., row n ]
# The first column in the first row is the version number we want.
for row in result:
version = row[ 0 ]
break
return version
def get_missing_repository_dependencies( repository, all_missing_repository_dependencies=None ):
"""
Return the entire list of missing repository dependencies for the received repository. The entire
dependency tree will be inspected.
"""
if all_missing_repository_dependencies is None:
all_missing_repository_dependencies = []
print 'Checking revision %s of repository %s owned by %s for missing repository dependencies.' % \
( repository.changeset_revision, repository.name, repository.owner )
all_missing_repository_dependencies.extend( repository.missing_repository_dependencies )
for missing_required_repository in repository.missing_repository_dependencies:
print 'Revision %s of required repository %s owned by %s has status %s.' % \
( missing_required_repository.changeset_revision,
missing_required_repository.name,
missing_required_repository.owner,
missing_required_repository.status )
for repository_dependency in repository.repository_dependencies:
if repository_dependency.missing_repository_dependencies:
all_missing_repository_dependencies.extend( get_missing_repository_dependencies( repository_dependency,
all_missing_repository_dependencies ) )
return all_missing_repository_dependencies
def get_missing_tool_dependencies( repository, all_missing_tool_dependencies=None ):
"""
Return the entire list of missing tool dependencies for the received repository. The entire
dependency tree will be inspected.
"""
if all_missing_tool_dependencies is None:
all_missing_tool_dependencies = []
print 'Checking revision %s of repository %s owned by %s for missing tool dependencies.' % \
( repository.changeset_revision, repository.name, repository.owner )
all_missing_tool_dependencies.extend( repository.missing_tool_dependencies )
for missing_tool_dependency in repository.missing_tool_dependencies:
print 'Tool dependency %s version %s has status %s.' % \
( missing_tool_dependency.name, missing_tool_dependency.version, missing_tool_dependency.status )
for repository_dependency in repository.repository_dependencies:
if repository_dependency.missing_tool_dependencies:
all_missing_tool_dependencies.extend( get_missing_tool_dependencies( repository_dependency,
all_missing_tool_dependencies ) )
return all_missing_tool_dependencies
def get_repositories_to_install( tool_shed_url, test_framework ):
"""
Get a list of repository info dicts to install. This method expects a json list of dicts with the following structure:
[{ "changeset_revision": <revision>,
"encoded_repository_id": <encoded repository id from the tool shed>,
"name": <name>,
"owner": <owner>,
"tool_shed_url": <url> }]
"""
error_message = ''
latest_revision_only = '-check_all_revisions' not in sys.argv
if latest_revision_only:
print 'Testing is restricted to the latest downloadable revision in this test run.'
repository_dicts = []
parts = [ 'repository_revisions' ]
# We'll filter out deprecated repositories from testing since testing them is necessary only if reproducibility
# is guaranteed and we currently do not guarantee reproducibility.
if test_framework == REPOSITORIES_WITH_TOOLS:
params = dict( do_not_test='false',
downloadable='true',
includes_tools='true',
malicious='false',
missing_test_components='false',
skip_tool_test='false' )
elif test_framework == TOOL_DEPENDENCY_DEFINITIONS:
params = dict( do_not_test='false',
downloadable='true',
malicious='false',
skip_tool_test='false' )
api_url = get_api_url( base=tool_shed_url, parts=parts, params=params )
baseline_repository_dicts, error_message = json_from_url( api_url )
if error_message:
return None, error_message
for baseline_repository_dict in baseline_repository_dicts:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
repository_dict, error_message = get_repository_dict( galaxy_tool_shed_url, baseline_repository_dict )
if error_message:
print 'Error getting additional details from the API: %s' % error_message
else:
deprecated = asbool( repository_dict.get( 'deprecated', False ) )
# Filter deprecated repositories in the initial query. Repositories included in the query may have
# repository dependencies that are deprecated though.
if not deprecated:
changeset_revision = baseline_repository_dict.get( 'changeset_revision', hg_util.INITIAL_CHANGELOG_HASH )
if changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
# If testing repositories of type tool_dependency_definition, filter accordingly.
if test_framework == TOOL_DEPENDENCY_DEFINITIONS and \
repository_dict.get( 'type', None ) != rt_util.TOOL_DEPENDENCY_DEFINITION:
continue
# Merge the dictionary returned from /api/repository_revisions with the detailed repository_dict and
# append it to the list of repository_dicts to install and test.
if latest_revision_only:
latest_revision = repository_dict.get( 'latest_revision', None )
if changeset_revision == latest_revision:
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
else:
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
if testing_single_repository_dict:
tsr_name = testing_single_repository_dict.get( 'name', None )
tsr_owner = testing_single_repository_dict.get( 'owner', None )
tsr_changeset_revision = testing_single_repository_dict.get( 'changeset_revision', None )
print 'Testing single repository with name %s and owner %s.' % ( tsr_name, tsr_owner )
for repository_to_install in repository_dicts:
rti_name = repository_to_install.get( 'name', None )
rti_owner = repository_to_install.get( 'owner', None )
rti_changeset_revision = repository_to_install.get( 'changeset_revision', None )
if rti_name == tsr_name and rti_owner == tsr_owner:
if tsr_changeset_revision is None:
return [ repository_to_install ], error_message
else:
if tsr_changeset_revision == rti_changeset_revision:
return repository_dicts, error_message
return repository_dicts, error_message
# Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL
# environment variable.
print "The Tool Shed's API url...\n%s" % api_url
print "...retrieved %d repository revisions for installation and possible testing." % len( repository_dicts )
print "Repository revisions for testing:"
for repository_dict in repository_dicts:
repository_id = str( repository_dict.get( 'repository_id', None ) )
repository_metadata_id = str( repository_dict.get( 'id', None ) )
name = str( repository_dict.get( 'name', None ) )
owner = str( repository_dict.get( 'owner', None ) )
changeset_revision = str( repository_dict.get( 'changeset_revision', None ) )
print "Revision %s of repository %s owned by %s with repository_id %s, (repository_metadata) id %s" % \
( changeset_revision, name, owner, repository_id, repository_metadata_id )
return repository_dicts, error_message
def get_repository( name, owner, changeset_revision ):
"""Return a repository record associated with the received name, owner, changeset_revision if one exists."""
repository = None
try:
repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
except:
# The repository may not have been installed in a previous test.
pass
return repository
def get_repository_current_revision( repo_path ):
"""This method uses the python mercurial API to get the current working directory's mercurial changeset hash."""
# Initialize a mercurial repo object from the provided path.
repo = hg.repository( ui.ui(), repo_path )
# Get the working directory's change context.
ctx = repo[ None ]
# Extract the changeset hash of the first parent of that change context (the most recent changeset to which the
# working directory was updated).
changectx = ctx.parents()[ 0 ]
# Also get the numeric revision, so we can return the customary id:hash changeset identifiers.
ctx_rev = changectx.rev()
hg_id = '%d:%s' % ( ctx_rev, str( changectx ) )
return hg_id
def get_repository_dependencies_dicts( tool_shed_url, encoded_repository_metadata_id ):
"""
Return the list of dictionaries that define all repository dependencies of the repository_metadata
record associated with the received encoded_repository_metadata_id via the Tool Shed API.
"""
error_message = ''
parts = [ 'api', 'repository_revisions', encoded_repository_metadata_id, 'repository_dependencies' ]
api_url = get_api_url( base=tool_shed_url, parts=parts )
repository_dependencies_dicts, error_message = json_from_url( api_url )
if error_message:
return None, error_message
return repository_dependencies_dicts, error_message
def get_repository_tuple_from_elem( elem ):
attributes = elem.attrib
name = attributes.get( 'name', None )
owner = attributes.get( 'owner', None )
changeset_revision = attributes.get( 'changeset_revision', None )
return ( name, owner, changeset_revision )
def get_static_settings():
"""
Return a dictionary of the settings necessary for a Galaxy application to be wrapped in the static
middleware. This mainly consists of the file system locations of url-mapped static resources.
"""
cwd = os.getcwd()
static_dir = os.path.join( cwd, 'static' )
#TODO: these should be copied from galaxy.ini
#TODO: static_enabled needed here?
return dict( static_enabled = True,
static_cache_time = 360,
static_dir = static_dir,
static_images_dir = os.path.join( static_dir, 'images', '' ),
static_favicon_dir = os.path.join( static_dir, 'favicon.ico' ),
static_scripts_dir = os.path.join( static_dir, 'scripts', '' ),
static_style_dir = os.path.join( static_dir, 'june_2007_style', 'blue' ),
static_robots_txt = os.path.join( static_dir, 'robots.txt' ) )
def get_time_last_tested( tool_shed_url, encoded_repository_metadata_id ):
"""
Return the datetime value stored in the Tool Shed's repository_metadata.time_last_tested column
via the Tool Shed API.
"""
error_message = ''
parts = [ 'api', 'repository_revisions', encoded_repository_metadata_id ]
api_url = get_api_url( base=tool_shed_url, parts=parts )
repository_metadata_dict, error_message = json_from_url( api_url )
if error_message:
return None, error_message
if isinstance( repository_metadata_dict, dict ):
# The tool_test_results used to be stored as a single dictionary rather than a list, but we currently
# return a list.
time_last_tested = repository_metadata_dict.get( 'time_last_tested', None )
return time_last_tested, error_message
else:
error_message = 'The url %s returned the invalid repository_metadata_dict %s' % ( str( api_url ), str( repository_metadata_dict ) )
return None, error_message
def get_tool_test_results_dict( tool_test_results_dicts ):
if tool_test_results_dicts:
# Inspect the tool_test_results_dict for the last test run to make sure it contains only a test_environment
# entry. If it contains more entries, then the script ~/tool_shed/api/check_repositories_for_functional_tests.py
# was not executed in preparation for this script's execution, so we'll just create an empty dictionary.
tool_test_results_dict = tool_test_results_dicts[ 0 ]
if len( tool_test_results_dict ) <= 1:
# We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
# a test_environment entry. If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
# since it will be re-inserted later.
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
elif len( tool_test_results_dict ) == 2 and \
'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict:
# We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
# In this case, some tools are missing tests components while others are not.
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
else:
# The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
tool_test_results_dict = {}
else:
# Create a new dictionary for this test run.
tool_test_results_dict = {}
return tool_test_results_dict
def get_tool_test_results_dicts( tool_shed_url, encoded_repository_metadata_id ):
"""
Return the list of dictionaries contained in the Tool Shed's repository_metadata.tool_test_results
column via the Tool Shed API.
"""
error_message = ''
parts = [ 'api', 'repository_revisions', encoded_repository_metadata_id ]
api_url = get_api_url( base=tool_shed_url, parts=parts )
repository_metadata_dict, error_message = json_from_url( api_url )
if error_message:
return None, error_message
if isinstance( repository_metadata_dict, dict ):
# The tool_test_results used to be stored as a single dictionary rather than a list, but we currently
# return a list.
tool_test_results = listify( repository_metadata_dict.get( 'tool_test_results', [] ) )
return tool_test_results, error_message
else:
error_message = 'The url %s returned the invalid repository_metadata_dict %s' % ( str( api_url ), str( repository_metadata_dict ) )
return None, error_message
def get_webapp_global_conf():
"""Return the global_conf dictionary sent as the first argument to app_factory."""
global_conf = {}
if STATIC_ENABLED:
global_conf.update( get_static_settings() )
return global_conf
def initialize_install_and_test_statistics_dict():
# Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = {}
install_and_test_statistics_dict[ 'total_repositories_processed' ] = 0
install_and_test_statistics_dict[ 'successful_repository_installations' ] = []
install_and_test_statistics_dict[ 'successful_tool_dependency_installations' ] = []
install_and_test_statistics_dict[ 'repositories_with_installation_error' ] = []
install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ] = []
install_and_test_statistics_dict[ 'all_tests_passed' ] = []
install_and_test_statistics_dict[ 'at_least_one_test_failed' ] = []
return install_and_test_statistics_dict
def initialize_tool_tests_results_dict( app, tool_test_results_dict ):
test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
if len( test_environment_dict ) == 0:
# Set information about the tool shed to nothing since we cannot currently determine it from here.
# We could eventually add an API method...
test_environment_dict = dict( tool_shed_database_version='',
tool_shed_mercurial_version='',
tool_shed_revision='' )
# Add the current time as the approximate time that this test run occurs. A similar value will also be
# set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
# may be configured to store multiple test run results, so each must be associated with a time stamp.
now = time.strftime( "%Y-%m-%d %H:%M:%S" )
# Add information about the current platform.
test_environment_dict[ 'time_tested' ] = now
test_environment_dict[ 'python_version' ] = platform.python_version()
test_environment_dict[ 'architecture' ] = platform.machine()
operating_system, hostname, operating_system_version, uname, arch, processor = platform.uname()
test_environment_dict[ 'system' ] = '%s %s' % ( operating_system, operating_system_version )
# Add information about the current Galaxy environment.
test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
# Initialize and populate the tool_test_results_dict.
tool_test_results_dict[ 'test_environment' ] = test_environment_dict
tool_test_results_dict[ 'passed_tests' ] = []
tool_test_results_dict[ 'failed_tests' ] = []
tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[],
repository_dependencies=[],
tool_dependencies=[] )
tool_test_results_dict[ 'successful_installations' ] = dict( current_repository=[],
repository_dependencies=[],
tool_dependencies=[] )
return tool_test_results_dict
def install_repository( app, repository_dict ):
"""Install a repository defined by the received repository_dict from the tool shed into Galaxy."""
name = str( repository_dict.get( 'name', None ) )
owner = str( repository_dict.get( 'owner', None ) )
changeset_revision = str( repository_dict.get( 'changeset_revision', None ) )
error_message = ''
repository = None
print "Installing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner )
# Use the repository information dictionary to generate an install method that will install the repository into the
# embedded Galaxy application, with tool dependencies and repository dependencies, if any.
test_install_repositories.generate_install_method( repository_dict )
# Configure nose to run the install method as a test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to
# install the specified repository with tool and repository dependencies also selected for installation.
result, _ = run_tests( test_config )
# Get the repository record now that the tests that install it have completed.
repository = get_repository( name, owner, changeset_revision )
if repository is None:
error_message = 'Error getting revision %s of repository %s owned by %s: An entry for the repository was not found in the database.' % ( changeset_revision, name, owner )
log.error( error_message )
return repository, error_message
def is_excluded( exclude_list_dicts, name, owner, changeset_revision, encoded_repository_metadata_id ):
"""
Return True if the repository defined by the received name, owner, changeset_revision should
be excluded from testing for any reason.
"""
for exclude_dict in exclude_list_dicts:
reason = exclude_dict.get( 'reason', '' )
exclude_repositories = exclude_dict.get( 'repositories', None )
# 'repositories':
# [( name, owner, changeset_revision if changeset_revision else None ),
# ( name, owner, changeset_revision if changeset_revision else None )]
if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
print 'Revision %s of repository %s owned by %s is excluded from testing because:\n%s' % \
( changeset_revision, name, owner, reason )
return True, reason
# Skip this repository if it has a repository dependency that is in the exclude list.
repository_dependency_dicts, error_message = \
get_repository_dependencies_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
if error_message:
print 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
( changeset_revision, name, owner )
print error_message
else:
for repository_dependency_dict in repository_dependency_dicts:
rd_name = repository_dependency_dict.get( 'name', '' )
rd_owner = repository_dependency_dict.get( 'owner', '' )
rd_changeset_revision = repository_dependency_dict.get( 'changeset_revision', '' )
if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
( rd_name, rd_owner, None ) in exclude_repositories:
print 'Revision %s of repository %s owned by %s is excluded from testing because ' % \
( changeset_revision, name, owner )
print 'it requires revision %s of repository %s owned by %s (which is excluded from testing).' % \
( rd_changeset_revision, rd_name, rd_owner )
reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
( rd_changeset_revision, rd_name, rd_owner )
return True, reason
break
return False, None
def is_latest_downloadable_revision( url, repository_dict ):
"""
Return True if the changeset_revision defined in the received repository_dict is the latest
installable revision for the repository.
"""
error_message = ''
name = repository_dict.get( 'name', None )
owner = repository_dict.get( 'owner', None )
changeset_revision = repository_dict.get( 'changeset_revision', None )
if name is not None and owner is not None and changeset_revision is not None:
name = str( name )
owner = str( owner )
changeset_revision = str( changeset_revision )
latest_revision, error_message = get_latest_downloadable_changeset_revision_via_api( url, name=name, owner=owner )
if latest_revision is None or error_message:
return None, error_message
is_latest_downloadable = changeset_revision == str( latest_revision )
return is_latest_downloadable, error_message
def parse_exclude_list( xml_filename ):
"""Return a list of repositories to exclude from testing."""
# This method expects an xml document that looks something like this:
# <?xml version="1.0"?>
# <blacklist>
# <repositories tool_shed="http://testtoolshed.g2.bx.psu.edu">
# <reason>
# <text>Some reason</text>
# <repository name="some_name" owner="some_owner" />
# </reason>
# </repositories>
# </blacklist>
# A list is returned with the following structure:
# [{ 'reason': The default reason or the reason specified in this section,
# 'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
# ( name, owner, changeset_revision if changeset_revision else None )]}]
exclude_list = []
exclude_tups = []
xml_tree, error_message = parse_xml( xml_filename )
if error_message:
print 'The exclude file %s is invalid, so no repositories will be excluded from testing: %s' % ( xml_filename, error_message )
return exclude_list
tool_sheds = xml_tree.findall( 'repositories' )
xml_element = []
exclude_count = 0
for tool_shed in tool_sheds:
if galaxy_tool_shed_url != tool_shed.attrib[ 'tool_shed' ]:
continue
else:
xml_element = tool_shed
for reason_section in xml_element:
reason_text = reason_section.find( 'text', None )
if reason_text is not None:
reason = str( reason_text.text )
else:
reason = 'No reason provided.'
repositories = reason_section.findall( 'repository' )
exclude_dict = dict( reason=reason, repositories=[] )
for repository in repositories:
repository_tuple = get_repository_tuple_from_elem( repository )
if repository_tuple not in exclude_dict[ 'repositories' ]:
exclude_tups.append( repository_tuple )
exclude_count += 1
exclude_dict[ 'repositories' ].append( repository_tuple )
exclude_list.append( exclude_dict )
if exclude_tups:
print 'The exclude file %s defines the following %d repositories to be excluded from testing:' % ( xml_filename, exclude_count )
for name, owner, changeset_revision in exclude_tups:
if changeset_revision:
print 'Revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
print 'All revisions of repository %s owned by %s.' % ( name, owner )
else:
print 'The exclude file %s defines no repositories to be excluded from testing.' % xml_filename
return exclude_list
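# Illustrative example (not part of the original module): given an exclude file such as
#   <?xml version="1.0"?>
#   <blacklist>
#     <repositories tool_shed="http://testtoolshed.g2.bx.psu.edu">
#       <reason>
#         <text>Fails to build on the test hosts</text>
#         <repository name="package_foo_1_0" owner="devteam" />
#       </reason>
#     </repositories>
#   </blacklist>
# and assuming GALAXY_INSTALL_TEST_TOOL_SHED_URL points at that tool shed, parse_exclude_list()
# would return the following structure (the repository name and owner are hypothetical):
#   [ { 'reason': 'Fails to build on the test hosts',
#       'repositories': [ ( 'package_foo_1_0', 'devteam', None ) ] } ]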
def populate_dependency_install_containers( app, repository, repository_identifier_tup, install_and_test_statistics_dict,
tool_test_results_dict ):
"""
Populate the installation containers (successful or errors) for the received repository's (which
itself was successfully installed) immediate repository and tool dependencies. The entire dependency
tree is not handled here.
"""
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
print 'Populating dependency install containers for revision %s of repository %s owned by %s.' % \
( repository_changeset_revision, repository_name, repository_owner )
processed_successful_repository_installations = install_and_test_statistics_dict.get( 'successful_repository_installations', [] )
if repository_identifier_tup not in processed_successful_repository_installations:
install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_tup )
repository_identifier_dict = dict( name=repository_name, owner=repository_owner, changeset_revision=repository_changeset_revision )
tool_test_results_dict[ 'successful_installations' ][ 'current_repository' ].append( repository_identifier_dict )
params = dict( test_install_error=False,
do_not_test=False )
if repository.missing_repository_dependencies:
print 'The following repository dependencies for revision %s of repository %s owned by %s have installation errors:' % \
( repository_changeset_revision, repository_name, repository_owner )
params[ 'test_install_error' ] = True
# Keep statistics for this repository's repository dependencies that resulted in installation errors.
for missing_repository_dependency in repository.missing_repository_dependencies:
tool_shed = str( missing_repository_dependency.tool_shed )
name = str( missing_repository_dependency.name )
owner = str( missing_repository_dependency.owner )
changeset_revision = str( missing_repository_dependency.changeset_revision )
error_message = unicodify( missing_repository_dependency.error_message )
print 'Revision %s of repository %s owned by %s has the following installation error:' % ( changeset_revision, name, owner )
# Use log.debug here instead of print because print will throw UnicodeEncodeError exceptions.
log.debug( '%s' % error_message )
identity_tup = ( name, owner, changeset_revision )
processed_repositories_with_installation_error = \
install_and_test_statistics_dict.get( 'repositories_with_installation_error', [] )
if identity_tup not in processed_repositories_with_installation_error:
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( identity_tup )
missing_repository_dependency_info_dict = dict( tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=changeset_revision,
error_message=error_message )
tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( missing_repository_dependency_info_dict )
if repository.missing_tool_dependencies:
print 'The following tool dependencies for revision %s of repository %s owned by %s have installation errors:' % \
( repository_changeset_revision, repository_name, repository_owner )
params[ 'test_install_error' ] = True
# Keep statistics for this repository's tool dependencies that resulted in installation errors.
for missing_tool_dependency in repository.missing_tool_dependencies:
name = str( missing_tool_dependency.name )
type = str( missing_tool_dependency.type )
version = str( missing_tool_dependency.version )
error_message = unicodify( missing_tool_dependency.error_message )
print 'Version %s of tool dependency %s %s has the following installation error:' % ( version, type, name )
# Use log.debug here instead of print because print will throw UnicodeEncodeError exceptions.
log.debug( '%s' % error_message )
identity_tup = ( name, type, version )
processed_tool_dependencies_with_installation_error = \
install_and_test_statistics_dict.get( 'tool_dependencies_with_installation_error', [] )
if identity_tup not in processed_tool_dependencies_with_installation_error:
install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ].append( identity_tup )
missing_tool_dependency_info_dict = dict( type=type,
name=name,
version=version,
error_message=error_message )
tool_test_results_dict[ 'installation_errors' ][ 'tool_dependencies' ].append( missing_tool_dependency_info_dict )
if repository.installed_repository_dependencies:
print 'The following repository dependencies for revision %s of repository %s owned by %s are installed:' % \
( repository_changeset_revision, repository_name, repository_owner )
# Keep statistics for this repository's tool dependencies that resulted in successful installations.
for repository_dependency in repository.installed_repository_dependencies:
tool_shed = str( repository_dependency.tool_shed )
name = str( repository_dependency.name )
owner = str( repository_dependency.owner )
changeset_revision = str( repository_dependency.changeset_revision )
print 'Revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
identifier_tup = ( name, owner, changeset_revision )
processed_successful_repository_installations = install_and_test_statistics_dict.get( 'successful_repository_installations', [] )
if identifier_tup not in processed_successful_repository_installations:
install_and_test_statistics_dict[ 'successful_repository_installations' ].append( identifier_tup )
repository_dependency_info_dict = dict( tool_shed=tool_shed,
name=name,
owner=owner,
changeset_revision=changeset_revision )
tool_test_results_dict[ 'successful_installations' ][ 'repository_dependencies' ].append( repository_dependency_info_dict )
if repository.installed_tool_dependencies:
print 'The following tool dependencies for revision %s of repository %s owned by %s are installed:' % \
( repository_changeset_revision, repository_name, repository_owner )
# Keep statistics for this repository's tool dependencies that resulted in successful installations.
for tool_dependency in repository.installed_tool_dependencies:
name = str( tool_dependency.name )
type = str( tool_dependency.type )
version = str( tool_dependency.version )
installation_directory = tool_dependency.installation_directory( app )
print 'Version %s of tool dependency %s %s is installed in: %s' % ( version, type, name, installation_directory )
identity_tup = ( name, type, version )
processed_successful_tool_dependency_installations = \
install_and_test_statistics_dict.get( 'successful_tool_dependency_installations', [] )
if identity_tup not in processed_successful_tool_dependency_installations:
install_and_test_statistics_dict[ 'successful_tool_dependency_installations' ].append( identity_tup )
tool_dependency_info_dict = dict( type=type,
name=name,
version=version,
installation_directory=installation_directory )
tool_test_results_dict[ 'successful_installations' ][ 'tool_dependencies' ].append( tool_dependency_info_dict )
return params, install_and_test_statistics_dict, tool_test_results_dict
def populate_install_containers_for_repository_dependencies( app, repository, repository_metadata_id, install_and_test_statistics_dict,
can_update_tool_shed ):
"""
The handle_repository_dependencies check box is always checked when a repository is installed, so the
tool_test_results dictionary must be inspected for each dependency to make sure installation containers
(success or errors) have been populated. Since multiple repositories can depend on the same repository,
some of the containers may have been populated during a previous installation.
"""
# Get the list of dictionaries that define the received repository's repository dependencies
# via the Tool Shed API.
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
print 'Potentially populating install containers for repository dependencies of revision %s of repository %s owned by %s.' % \
( repository_changeset_revision, repository_name, repository_owner )
repository_dependencies_dicts, error_message = get_repository_dependencies_dicts( galaxy_tool_shed_url, repository_metadata_id )
if error_message:
print 'Cannot check or populate repository dependency install containers for revision %s of repository %s owned by %s ' % \
( repository_changeset_revision, repository_name, repository_owner )
print 'due to the following error getting repository_dependencies_dicts:\n%s' % str( error_message )
else:
if not repository_dependencies_dicts:
print 'Revision %s of repository %s owned by %s has no repository dependencies.' % \
( repository_changeset_revision, repository_name, repository_owner )
for repository_dependencies_dict in repository_dependencies_dicts:
if not isinstance( repository_dependencies_dict, dict ):
print 'Skipping invalid repository_dependencies_dict: %s' % str( repository_dependencies_dict )
continue
name = repository_dependencies_dict.get( 'name', None )
owner = repository_dependencies_dict.get( 'owner', None )
changeset_revision = repository_dependencies_dict.get( 'changeset_revision', None )
if name is None or owner is None or changeset_revision is None:
print 'Skipping invalid repository_dependencies_dict due to missing name, owner or changeset_revision: %s' % \
str( repository_dependencies_dict )
continue
name = str( name )
owner = str( owner )
changeset_revision = str( changeset_revision )
print 'Checking installation containers for revision %s of repository dependency %s owned by %s' % \
( changeset_revision, name, owner )
required_repository_metadata_id = repository_dependencies_dict[ 'id' ]
# Get the current list of tool_test_results dictionaries associated with the repository_metadata
# record in the tool shed.
tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url,
required_repository_metadata_id )
if error_message:
print 'Cannot check install container for version %s of repository dependency %s owned by %s ' % \
( changeset_revision, name, owner )
print 'due to the following error getting tool_test_results:\n%s' % str( error_message )
else:
                # The assumption is that the Tool Shed's install and test framework is executed no more than once per 24-hour
                # period, so check the required repository's time_last_tested value to see if its tool_test_results column
                # has been updated within the past 20 hours, allowing for differing test run times (some may be slower than
                # others).  The RepositoryMetadata class's to_dict() method returns the value of time_last_tested in
                # datetime.isoformat().
time_last_tested, error_message = get_time_last_tested( galaxy_tool_shed_url, required_repository_metadata_id )
print 'Value of time_last_tested: %s' % str( time_last_tested )
if time_last_tested is None:
print 'The time_last_tested column value is None for version %s of repository dependency %s owned by %s.' % \
( changeset_revision, name, owner )
else:
twenty_hours_ago = ( datetime.utcnow() - timedelta( hours=20 ) ).isoformat()
print 'Value of twenty_hours_ago: %s' % str( twenty_hours_ago )
                    # This is counterintuitive because the following check compares ISO format strings like '2014-01-21T19:46:06.953741',
                    # so if "time_last_tested > twenty_hours_ago" is True, it implies that the time_last_tested column
                    # was actually updated less than 20 hours ago and should not be updated again, because we're likely processing
                    # another dependent repository, many of which can have the same repository dependency.
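                    # For example (hypothetical timestamps): with twenty_hours_ago = '2014-01-22T00:00:00' and
                    # time_last_tested = '2014-01-22T08:00:00', the string comparison is True because ISO format
                    # timestamps sort lexicographically in chronological order, i.e. the last test ran within the
                    # past 20 hours.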
try:
# Be very conservative here. Our default behavior will be to assume containers have not been populated
# during the current test run.
already_populated = time_last_tested > twenty_hours_ago
except Exception, e:
log.exception( 'Error attempting to set already_populated: %s' % str( e ) )
already_populated = False
print 'Value of already_populated: %s' % str( already_populated )
if already_populated:
print 'The install containers for version %s of repository dependency %s owned by %s have been ' % \
( changeset_revision, name, owner )
print 'populated within the past 20 hours (likely in this test run), so skipping this check.'
continue
else:
print 'Version %s of repository dependency %s owned by %s was last tested more than 20 hours ago.' % \
( changeset_revision, name, owner )
# Inspect the tool_test_results_dict for the last test run to see if it has not yet been populated.
if len( tool_test_results_dicts ) == 0:
tool_test_results_dict = {}
else:
tool_test_results_dict = tool_test_results_dicts[ 0 ]
if len( tool_test_results_dict ) <= 1:
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
elif len( tool_test_results_dict ) == 2 and \
'test_environment' in tool_test_results_dict and \
'missing_test_components' in tool_test_results_dict:
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
else:
tool_test_results_dict = {}
# Make sure all expected entries are available in the tool_test_results_dict.
tool_test_results_dict = initialize_tool_tests_results_dict( app, tool_test_results_dict )
# Get the installed repository record from the Galaxy database.
cleaned_tool_shed_url = remove_protocol_from_tool_shed_url( galaxy_tool_shed_url )
required_repository = \
suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( app,
cleaned_tool_shed_url,
name,
owner,
changeset_revision )
if required_repository is not None:
repository_identifier_tup = ( name, owner, changeset_revision )
if required_repository.is_installed:
# The required_repository was successfully installed, so populate the installation
# containers (success and error) for the repository's immediate dependencies.
params, install_and_test_statistics_dict, tool_test_results_dict = \
populate_dependency_install_containers( app,
required_repository,
repository_identifier_tup,
install_and_test_statistics_dict,
tool_test_results_dict )
save_test_results_for_changeset_revision( galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dependencies_dict,
params,
can_update_tool_shed )
else:
# The required repository's installation failed.
required_repository_installation_error_dict = dict( tool_shed=galaxy_tool_shed_url,
name=name,
owner=owner,
changeset_revision=changeset_revision,
error_message=required_repository.error_message )
tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( required_repository_installation_error_dict )
params = dict( test_install_error=True,
do_not_test=False )
save_test_results_for_changeset_revision( galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dependencies_dict,
params,
can_update_tool_shed )
else:
print 'Cannot retrieve revision %s of required repository %s owned by %s from the database ' % \
( changeset_revision, name, owner )
print 'so tool_test_results cannot be saved at this time.'
print 'The attributes used to retrieve the record are:'
print 'tool_shed: %s name: %s owner: %s changeset_revision: %s' % \
( cleaned_tool_shed_url, name, owner, changeset_revision )
def populate_shed_conf_file( shed_conf_file, tool_path, xml_elems=None ):
"""Populate the file defined by shed_conf_file with xml_elems or initialize it with a template string."""
if xml_elems is None:
tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
xml_elems = tool_conf_template_parser.safe_substitute( shed_tool_path=tool_path )
file( shed_conf_file, 'w' ).write( xml_elems )
def populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ):
"""
Populate the file defined by galaxy_shed_tools_dict_file with the contents of the shed_tools_dict
dictionary.
"""
if shed_tools_dict is None:
shed_tools_dict = {}
file( galaxy_shed_tools_dict_file, 'w' ).write( json.dumps( shed_tools_dict ) )
def print_install_and_test_results( install_stage_type, install_and_test_statistics_dict, error_message ):
"Print statistics for the current test run."
if error_message:
print "Error returned from install_and_test_repositories:"
print error_message
    elif isinstance( install_and_test_statistics_dict, dict ):
all_tests_passed = install_and_test_statistics_dict.get( 'all_tests_passed', None )
at_least_one_test_failed = install_and_test_statistics_dict.get( 'at_least_one_test_failed', None )
repositories_with_installation_error = \
install_and_test_statistics_dict.get( 'repositories_with_installation_error', None )
successful_repository_installations = \
install_and_test_statistics_dict.get( 'successful_repository_installations', None )
successful_tool_dependency_installations = \
install_and_test_statistics_dict.get( 'successful_tool_dependency_installations', None )
tool_dependencies_with_installation_error = \
install_and_test_statistics_dict.get( 'tool_dependencies_with_installation_error', None )
total_repositories_processed = install_and_test_statistics_dict.get( 'total_repositories_processed', None )
now = time.strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - installation script for %s completed." % ( now, install_stage_type )
print "# Repository revisions processed: %s" % str( total_repositories_processed )
if successful_repository_installations:
print "# ----------------------------------------------------------------------------------"
print "# The following %d revisions were successfully installed:" % len( successful_repository_installations )
display_repositories_by_owner( successful_repository_installations )
if repositories_with_installation_error:
print "# ----------------------------------------------------------------------------------"
print "# The following %d revisions have installation errors:" % len( repositories_with_installation_error )
display_repositories_by_owner( repositories_with_installation_error )
if successful_tool_dependency_installations:
print "# ----------------------------------------------------------------------------------"
print "# The following %d tool dependencies were successfully installed:" % len( successful_tool_dependency_installations )
display_tool_dependencies_by_name( successful_tool_dependency_installations )
if tool_dependencies_with_installation_error:
print "# ----------------------------------------------------------------------------------"
print "# The following %d tool dependencies have installation errors:" % len( tool_dependencies_with_installation_error )
display_tool_dependencies_by_name( tool_dependencies_with_installation_error )
if all_tests_passed:
print '# ----------------------------------------------------------------------------------'
print "# The following %d revisions successfully passed all functional tests:" % len( all_tests_passed )
display_repositories_by_owner( all_tests_passed )
if at_least_one_test_failed:
print '# ----------------------------------------------------------------------------------'
print "# The following %d revisions failed at least 1 functional test:" % len( at_least_one_test_failed )
display_repositories_by_owner( at_least_one_test_failed )
print "####################################################################################"
def remove_protocol_from_tool_shed_url( base_url ):
"""Eliminate the protocol from the received base_url and return the possibly altered url."""
# The tool_shed value stored in the tool_shed_repository record does not include the protocol, but does
# include the port if one exists.
if base_url:
if base_url.find( '://' ) > -1:
try:
protocol, base = base_url.split( '://' )
except ValueError, e:
# The received base_url must be an invalid url.
log.debug( "Returning unchanged invalid base_url from remove_protocol_from_tool_shed_url: %s" % str( base_url ) )
return base_url
return base.rstrip( '/' )
return base_url.rstrip( '/' )
log.debug( "Returning base_url from remove_protocol_from_tool_shed_url: %s" % str( base_url ) )
return base_url
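# Illustrative behavior of remove_protocol_from_tool_shed_url() (example URLs only):
#   'https://toolshed.g2.bx.psu.edu/' -> 'toolshed.g2.bx.psu.edu'
#   'localhost:9009/'                 -> 'localhost:9009'
#   None or ''                        -> returned unchanged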
def run_tests( test_config ):
## TODO: replace whole method with...
# from base import nose_util
    # result = nose_util.run( test_config, plugins=[ ReportResults() ] )
# return result, test_config.plugins._plugins
loader = nose.loader.TestLoader( config=test_config )
test_config.plugins.addPlugin( ReportResults() )
plug_loader = test_config.plugins.prepareTestLoader( loader )
if plug_loader is not None:
loader = plug_loader
tests = loader.loadTestsFromNames( test_config.testNames )
test_runner = nose.core.TextTestRunner( stream=test_config.stream,
verbosity=test_config.verbosity,
config=test_config )
plug_runner = test_config.plugins.prepareTestRunner( test_runner )
if plug_runner is not None:
test_runner = plug_runner
result = test_runner.run( tests )
return result, test_config.plugins._plugins
def save_test_results_for_changeset_revision( url, tool_test_results_dicts, tool_test_results_dict, repository_dict,
params, can_update_tool_shed ):
"""
Update the repository metadata tool_test_results and appropriate flags using the Tool Shed API. This method
    updates tool_test_results with the received tool_test_results_dict, sets the do_not_test and
    tools_functionally_correct flags to the appropriate values, and updates the time_last_tested field.
"""
if can_update_tool_shed:
metadata_revision_id = repository_dict.get( 'id', None )
if metadata_revision_id is not None:
name = repository_dict.get( 'name', None )
owner = repository_dict.get( 'owner', None )
changeset_revision = repository_dict.get( 'changeset_revision', None )
if name is None or owner is None or changeset_revision is None:
print 'Entries for name: ', name, ' owner: ', owner, ' or changeset_revision: ', changeset_revision, \
                    ' missing from repository_dict: %s' % str( repository_dict )
else:
name = str( name )
owner = str( owner )
changeset_revision = str( changeset_revision )
# With regard to certification level one, the status of this repository may or may not have changed between
# this install and test run and the previous install and test run. Rather than attempting to determine if
# anything has changed here, we'll let the Tool Shed's repository registry handle the process of proper
# categorization. To enable this, we'll just remove entries from the Tool Shed's repository registry and
# then add them back. This will ensure proper categorization for this repository.
registry_params = dict( tool_shed_url=galaxy_tool_shed_url, name=name, owner=owner )
print "Removing entries for repository ", name, " owned by ", owner, "from the Tool Shed's repository registry."
url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repositories', 'remove_repository_registry_entry' ) )
response_dict = submit( url, registry_params, api_key=tool_shed_api_key, return_formatted=False )
status = response_dict.get( 'status', 'ok' )
if status == 'error':
default_message = 'An unknown error occurred attempting to remove entries from the repository registry.'
error_message = response_dict.get( 'message', default_message )
print error_message
print "Adding entries for repository ", name, " owned by ", owner, "into the Tool Shed's repository registry."
url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repositories', 'add_repository_registry_entry' ) )
response_dict = submit( url, registry_params, api_key=tool_shed_api_key, return_formatted=False )
status = response_dict.get( 'status', 'ok' )
if status == 'error':
default_message = 'An unknown error occurred attempting to add entries into the repository registry.'
error_message = response_dict.get( 'message', default_message )
print error_message
print '\n=============================================================\n'
print 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( tool_test_results_dict ) )
print 'Updating tool_test_results for repository_metadata id %s.' % metadata_revision_id
tool_test_results_dicts.insert( 0, tool_test_results_dict )
params[ 'tool_test_results' ] = tool_test_results_dicts
# Set the time_last_tested entry so that the repository_metadata.time_last_tested will be set in the tool shed.
params[ 'time_last_tested' ] = 'This entry will result in this value being set via the Tool Shed API.'
url = '%s' % ( common_util.url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', str( metadata_revision_id ) ) )
print 'url: ', url
print 'params: ', params
try:
response_from_update = update( tool_shed_api_key, url, params, return_formatted=False )
print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( response_from_update ) )
print '\n=============================================================\n'
except Exception, e:
log.exception( 'Error updating tool_test_results for repository_metadata id %s:\n%s' % \
( str( metadata_revision_id ), str( e ) ) )
| 61.207137
| 178
| 0.656846
|
ebcefe9914a7337dd41cbae7c274e6f6b49bc68d
| 341
|
py
|
Python
|
schedules/templatetags/calendar_month.py
|
dvek/scyp
|
0f70d6a9071edbf9e26b7cb3cfaea38905e192d1
|
[
"MIT"
] | null | null | null |
schedules/templatetags/calendar_month.py
|
dvek/scyp
|
0f70d6a9071edbf9e26b7cb3cfaea38905e192d1
|
[
"MIT"
] | 4
|
2020-06-05T18:27:22.000Z
|
2021-09-07T23:55:27.000Z
|
schedules/templatetags/calendar_month.py
|
dvek/scyp
|
0f70d6a9071edbf9e26b7cb3cfaea38905e192d1
|
[
"MIT"
] | null | null | null |
from django import template
from django.utils import timezone
from schedules.services import get_times_from_day
from schedules.models import TimeOfDay
register = template.Library()
@register.inclusion_tag('templatetags/calendar_month.html')
def calendar_month():
variable = None
print(">>>>>>")
return {'variable': variable}
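# Illustrative template usage for the inclusion tag registered above (template content is an assumption):
#   {% load calendar_month %}
#   {% calendar_month %}
# renders templatetags/calendar_month.html with the context returned by calendar_month().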
| 22.733333
| 59
| 0.768328
|
ed0183a4a8a374589658494b7e71585e5c52685b
| 1,896
|
py
|
Python
|
TimeFreeze/utils.py
|
Ashish013/Image-Filters
|
c49d98232d0b44c0a77a11693658d689dcf89457
|
[
"MIT"
] | null | null | null |
TimeFreeze/utils.py
|
Ashish013/Image-Filters
|
c49d98232d0b44c0a77a11693658d689dcf89457
|
[
"MIT"
] | null | null | null |
TimeFreeze/utils.py
|
Ashish013/Image-Filters
|
c49d98232d0b44c0a77a11693658d689dcf89457
|
[
"MIT"
] | null | null | null |
import os,requests,cv2,shutil
def files_downloader(filename = "rcnn_files"):
print("Downloading files for Mask-Rcnn.....")
file_url = "http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz"
response = requests.get(file_url,stream = True)
if (os.path.exists("file.tar.gz") == False and os.path.exists(f"{filename}") == False):
with open("file.tar.gz","wb") as file:
for chunk in response.iter_content(chunk_size = 1024):
if chunk:
file.write(chunk)
shutil.unpack_archive(os.getcwd() + "\\file.tar.gz")
os.rename("mask_rcnn_inception_v2_coco_2018_01_28",f"{filename}")
os.remove('file.tar.gz')
text_file_url = "https://raw.githubusercontent.com/amikelive/coco-labels/master/coco-labels-2014_2017.txt"
response = requests.get(text_file_url,stream = True)
if(os.path.exists(f"{filename}/mscoco_labels_names.txt") == False):
with open(f"{filename}/mscoco_labels_names.txt","wb") as file:
for chunk in response.iter_content(chunk_size = 128):
if chunk:
file.write(chunk)
file_url = "https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
response = requests.get(file_url)
if(os.path.exists(f"{filename}/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt") == False):
with open(f"{filename}/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt","wb") as file:
for chunk in response.iter_content(chunk_size = 10):
if chunk:
file.write(chunk)
print("Download Completed !")
return filename
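# Illustrative usage (directory name is an assumption): files_downloader( "rcnn_files" ) downloads and unpacks the
# Mask R-CNN model files into ./rcnn_files if they are not already present and returns the directory name.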
def bgr2rgb(frame):
return cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
def bgr2gray(frame):
return cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
| 46.243902
| 135
| 0.666139
|
ee7aecf1758c14298a1c9f085484f971507cbee1
| 1,189
|
py
|
Python
|
ERAutomation/steps/manager_form_steps.py
|
dboudreau4/ReimbursementSystemAutomation
|
57d8c322ac80dd26679000f8d9d67e16fd865804
|
[
"MIT"
] | null | null | null |
ERAutomation/steps/manager_form_steps.py
|
dboudreau4/ReimbursementSystemAutomation
|
57d8c322ac80dd26679000f8d9d67e16fd865804
|
[
"MIT"
] | null | null | null |
ERAutomation/steps/manager_form_steps.py
|
dboudreau4/ReimbursementSystemAutomation
|
57d8c322ac80dd26679000f8d9d67e16fd865804
|
[
"MIT"
] | null | null | null |
from behave import given, when, then
from selenium.webdriver.support.ui import Select
@given('The Manager is on the View Request page')
def open_view_request(context):
context.driver.get("C:\\Users\\david\\Documents\\ExpenseReimbursementFrontend\\viewrequest.html")
@given('The Manager has clicked the view request button')
def view_click(context):
context.manager_portal.view_request_button().click()
@when('The Manager selects {status} from the status selector')
def select_status(context, status: str):
sel: Select = context.manager_form.select()
sel.select_by_value(status)
@when('The Manager types {manager_id} into the reviewed by box')
def manager_input(context, manager_id: str):
context.manager_form.manager_id().send_keys(int(manager_id))
@when('The Manager types {message} into the message box')
def message_input(context, message: str):
context.manager_form.message().send_keys(message)
@when('The Manager clicks the submit button')
def submit_edited(context):
context.manager_form.save().click()
@when('The Manager clicks the return to home page button')
def back_to_home(context):
context.manager_form.return_to_portal().click()
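# Illustrative feature file snippet matched by the steps above (scenario values are assumptions):
#   Given The Manager is on the View Request page
#   And The Manager has clicked the view request button
#   When The Manager selects Approved from the status selector
#   And The Manager types 42 into the reviewed by box
#   And The Manager types Looks good into the message box
#   And The Manager clicks the submit button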
| 30.487179
| 101
| 0.764508
|
39957cdbca8b485f2049f76cecf9118988ad09cd
| 2,750
|
py
|
Python
|
homeassistant/components/epson_monitor/config_flow.py
|
samsunga3888/core
|
e2785d4807bdd216982c452ceabe2e6f038cc321
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/epson_monitor/config_flow.py
|
samsunga3888/core
|
e2785d4807bdd216982c452ceabe2e6f038cc321
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/epson_monitor/config_flow.py
|
samsunga3888/core
|
e2785d4807bdd216982c452ceabe2e6f038cc321
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow for epson integration."""
import logging
import random
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_PASSWORD
from . import validate_projector
from .const import DOMAIN
from .exceptions import CannotConnect, PoweredOff, PasswordInvaid, AuthenticationRequired
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_NAME, default=DOMAIN): str,
vol.Required(CONF_PASSWORD): str,
}
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for epson."""
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
_LOGGER.debug(f"validate_projector -> {user_input}")
if user_input is not None:
_LOGGER.debug(f"validate_projector1 -> {user_input}")
try:
projector = await validate_projector(
hass=self.hass,
host=user_input[CONF_HOST],
password=user_input[CONF_PASSWORD]
)
_LOGGER.debug(f"validate_projector2 -> {projector}")
except PasswordInvaid:
_LOGGER.debug(f"validate_projectorPasswordInvaid -> {PasswordInvaid}")
_LOGGER.warning(
"You need to check projector password for initial configuration"
)
errors["base"] = "password_invaid"
except AuthenticationRequired:
_LOGGER.debug(f"validate_projectorAuthenticateInvaid -> {AuthenticationRequired}")
_LOGGER.warning(
"You need to enable projector Authenticate for initial configuration"
)
errors["base"] = "authenticate_invaid"
except CannotConnect:
errors["base"] = "cannot_connect"
except PoweredOff:
_LOGGER.debug(f"validate_projectorPoweredOff -> {PoweredOff}")
_LOGGER.warning(
"You need to turn ON projector power for initial configuration"
)
errors["base"] = "powered_off"
else:
serial_no = random.random()
await self.async_set_unique_id(serial_no)
self._abort_if_unique_id_configured()
user_input.pop(CONF_PORT, None)
return self.async_create_entry(
title=user_input.pop(CONF_NAME), data=user_input
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
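# Illustrative user_input handled by async_step_user, following DATA_SCHEMA above (values are assumptions):
#   {CONF_HOST: "192.168.1.50", CONF_NAME: "epson_monitor", CONF_PASSWORD: "projector-web-password"}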
| 38.732394
| 98
| 0.605455
|
97331a116d060312acbc78e63b3298ea7c54bca5
| 290
|
py
|
Python
|
api/user/urls.py
|
depromeet/octo-palm-server
|
95028764cda0dbecdda2d24f4e38de853e67e634
|
[
"MIT"
] | null | null | null |
api/user/urls.py
|
depromeet/octo-palm-server
|
95028764cda0dbecdda2d24f4e38de853e67e634
|
[
"MIT"
] | null | null | null |
api/user/urls.py
|
depromeet/octo-palm-server
|
95028764cda0dbecdda2d24f4e38de853e67e634
|
[
"MIT"
] | 2
|
2019-01-12T07:14:34.000Z
|
2019-08-29T02:55:23.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('social/login/',
views.SocialUserLoginAPIView.as_view(),
name='social_login'
),
path('guest/login/',
views.GuestUserLoginAPIView.as_view(),
name='guest_login'
),
]
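# Illustrative resolved routes, assuming this module is included under an "api/user/" prefix elsewhere:
#   POST api/user/social/login/ -> views.SocialUserLoginAPIView (name='social_login')
#   POST api/user/guest/login/  -> views.GuestUserLoginAPIView (name='guest_login')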
| 19.333333
| 49
| 0.610345
|
5dcbee4c5eeb1173fa44fc3112a7dc6123b59169
| 77,422
|
py
|
Python
|
tests/components/zwave_js/test_config_flow.py
|
DoctorU/core
|
5b218d7e1c4164e32d41473977459cbaf23adf42
|
[
"Apache-2.0"
] | 1
|
2021-10-01T10:38:31.000Z
|
2021-10-01T10:38:31.000Z
|
tests/components/zwave_js/test_config_flow.py
|
DoctorU/core
|
5b218d7e1c4164e32d41473977459cbaf23adf42
|
[
"Apache-2.0"
] | 66
|
2020-10-15T06:45:11.000Z
|
2022-03-30T23:05:28.000Z
|
tests/components/zwave_js/test_config_flow.py
|
DoctorU/core
|
5b218d7e1c4164e32d41473977459cbaf23adf42
|
[
"Apache-2.0"
] | 1
|
2022-02-01T13:19:32.000Z
|
2022-02-01T13:19:32.000Z
|
"""Test the Z-Wave JS config flow."""
import asyncio
from unittest.mock import DEFAULT, call, patch
import aiohttp
import pytest
from zwave_js_server.version import VersionInfo
from homeassistant import config_entries, setup
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.components.zwave_js.config_flow import SERVER_VERSION_TIMEOUT, TITLE
from homeassistant.components.zwave_js.const import DOMAIN
from tests.common import MockConfigEntry
ADDON_DISCOVERY_INFO = {
"addon": "Z-Wave JS",
"host": "host1",
"port": 3001,
}
USB_DISCOVERY_INFO = {
"device": "/dev/zwave",
"pid": "AAAA",
"vid": "AAAA",
"serial_number": "1234",
"description": "zwave radio",
"manufacturer": "test",
}
NORTEK_ZIGBEE_DISCOVERY_INFO = {
"device": "/dev/zigbee",
"pid": "8A2A",
"vid": "10C4",
"serial_number": "1234",
"description": "nortek zigbee radio",
"manufacturer": "nortek",
}
CP2652_ZIGBEE_DISCOVERY_INFO = {
"device": "/dev/zigbee",
"pid": "EA60",
"vid": "10C4",
"serial_number": "",
"description": "cp2652",
"manufacturer": "generic",
}
@pytest.fixture(name="persistent_notification", autouse=True)
async def setup_persistent_notification(hass):
"""Set up persistent notification integration."""
await setup.async_setup_component(hass, "persistent_notification", {})
@pytest.fixture(name="setup_entry")
def setup_entry_fixture():
"""Mock entry setup."""
with patch(
"homeassistant.components.zwave_js.async_setup_entry", return_value=True
) as mock_setup_entry:
yield mock_setup_entry
@pytest.fixture(name="supervisor")
def mock_supervisor_fixture():
"""Mock Supervisor."""
with patch(
"homeassistant.components.zwave_js.config_flow.is_hassio", return_value=True
):
yield
@pytest.fixture(name="discovery_info")
def discovery_info_fixture():
"""Return the discovery info from the supervisor."""
return DEFAULT
@pytest.fixture(name="discovery_info_side_effect")
def discovery_info_side_effect_fixture():
"""Return the discovery info from the supervisor."""
return None
@pytest.fixture(name="get_addon_discovery_info")
def mock_get_addon_discovery_info(discovery_info, discovery_info_side_effect):
"""Mock get add-on discovery info."""
with patch(
"homeassistant.components.zwave_js.addon.async_get_addon_discovery_info",
side_effect=discovery_info_side_effect,
return_value=discovery_info,
) as get_addon_discovery_info:
yield get_addon_discovery_info
@pytest.fixture(name="server_version_side_effect")
def server_version_side_effect_fixture():
"""Return the server version side effect."""
return None
@pytest.fixture(name="get_server_version", autouse=True)
def mock_get_server_version(server_version_side_effect, server_version_timeout):
"""Mock server version."""
version_info = VersionInfo(
driver_version="mock-driver-version",
server_version="mock-server-version",
home_id=1234,
min_schema_version=0,
max_schema_version=1,
)
with patch(
"homeassistant.components.zwave_js.config_flow.get_server_version",
side_effect=server_version_side_effect,
return_value=version_info,
) as mock_version, patch(
"homeassistant.components.zwave_js.config_flow.SERVER_VERSION_TIMEOUT",
new=server_version_timeout,
):
yield mock_version
@pytest.fixture(name="server_version_timeout")
def mock_server_version_timeout():
"""Patch the timeout for getting server version."""
return SERVER_VERSION_TIMEOUT
@pytest.fixture(name="addon_setup_time", autouse=True)
def mock_addon_setup_time():
"""Mock add-on setup sleep time."""
with patch(
"homeassistant.components.zwave_js.config_flow.ADDON_SETUP_TIMEOUT", new=0
) as addon_setup_time:
yield addon_setup_time
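# Note on the fixture pattern used in the tests below (a description, not new test code): several tests override
# fixture values such as discovery_info, server_version_side_effect and server_version_timeout by applying
# @pytest.mark.parametrize with the fixture's name, which takes precedence over the defaults defined above.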
async def test_manual(hass):
"""Test we create an entry with manual step."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"url": "ws://localhost:3000",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Z-Wave JS"
assert result2["data"] == {
"url": "ws://localhost:3000",
"usb_path": None,
"s0_legacy_key": None,
"s2_access_control_key": None,
"s2_authenticated_key": None,
"s2_unauthenticated_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result2["result"].unique_id == 1234
async def slow_server_version(*args):
"""Simulate a slow server version."""
await asyncio.sleep(0.1)
@pytest.mark.parametrize(
"flow, flow_params",
[
(
"flow",
lambda entry: {
"handler": DOMAIN,
"context": {"source": config_entries.SOURCE_USER},
},
),
("options", lambda entry: {"handler": entry.entry_id}),
],
)
@pytest.mark.parametrize(
"url, server_version_side_effect, server_version_timeout, error",
[
(
"not-ws-url",
None,
SERVER_VERSION_TIMEOUT,
"invalid_ws_url",
),
(
"ws://localhost:3000",
slow_server_version,
0,
"cannot_connect",
),
(
"ws://localhost:3000",
Exception("Boom"),
SERVER_VERSION_TIMEOUT,
"unknown",
),
],
)
async def test_manual_errors(hass, integration, url, error, flow, flow_params):
"""Test all errors with a manual set up."""
entry = integration
result = await getattr(hass.config_entries, flow).async_init(**flow_params(entry))
assert result["type"] == "form"
assert result["step_id"] == "manual"
result = await getattr(hass.config_entries, flow).async_configure(
result["flow_id"],
{
"url": url,
},
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
assert result["errors"] == {"base": error}
async def test_manual_already_configured(hass):
"""Test that only one unique instance is allowed."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
"url": "ws://localhost:3000",
"use_addon": True,
"integration_created_addon": True,
},
title=TITLE,
unique_id=1234,
)
entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"url": "ws://1.1.1.1:3001",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["url"] == "ws://1.1.1.1:3001"
assert entry.data["use_addon"] is False
assert entry.data["integration_created_addon"] is False
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_supervisor_discovery(
hass, supervisor, addon_running, addon_options, get_addon_discovery_info
):
"""Test flow started from Supervisor discovery."""
await setup.async_setup_component(hass, "persistent_notification", {})
addon_options["device"] = "/test"
addon_options["s0_legacy_key"] = "new123"
addon_options["s2_access_control_key"] = "new456"
addon_options["s2_authenticated_key"] = "new789"
addon_options["s2_unauthenticated_key"] = "new987"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"discovery_info, server_version_side_effect",
[({"config": ADDON_DISCOVERY_INFO}, asyncio.TimeoutError())],
)
async def test_supervisor_discovery_cannot_connect(
hass, supervisor, get_addon_discovery_info
):
"""Test Supervisor discovery and cannot connect."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_clean_discovery_on_user_create(
hass, supervisor, addon_running, addon_options, get_addon_discovery_info
):
"""Test discovery flow is cleaned up when a user flow is finished."""
await setup.async_setup_component(hass, "persistent_notification", {})
addon_options["device"] = "/test"
addon_options["s0_legacy_key"] = "new123"
addon_options["s2_access_control_key"] = "new456"
addon_options["s2_authenticated_key"] = "new789"
addon_options["s2_unauthenticated_key"] = "new987"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": False}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"url": "ws://localhost:3000",
},
)
await hass.async_block_till_done()
assert len(hass.config_entries.flow.async_progress()) == 0
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://localhost:3000",
"usb_path": None,
"s0_legacy_key": None,
"s2_access_control_key": None,
"s2_authenticated_key": None,
"s2_unauthenticated_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_abort_discovery_with_existing_entry(
hass, supervisor, addon_running, addon_options
):
"""Test discovery flow is aborted if an entry already exists."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=DOMAIN, data={"url": "ws://localhost:3000"}, title=TITLE, unique_id=1234
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
# Assert that the entry data is updated with discovery info.
assert entry.data["url"] == "ws://host1:3001"
async def test_abort_hassio_discovery_with_existing_flow(
hass, supervisor, addon_options
):
"""Test hassio discovery flow is aborted when another discovery has happened."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "form"
assert result["step_id"] == "usb_confirm"
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_in_progress"
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_usb_discovery(
hass,
supervisor,
install_addon,
addon_options,
get_addon_discovery_info,
set_addon_options,
start_addon,
):
"""Test usb discovery success path."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "form"
assert result["step_id"] == "usb_confirm"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "progress"
assert result["step_id"] == "install_addon"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert install_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": True,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_usb_discovery_addon_not_running(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test usb discovery when add-on is installed but not running."""
addon_options["device"] = "/dev/incorrect_device"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "form"
assert result["step_id"] == "usb_confirm"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
# Make sure the discovered usb device is preferred.
data_schema = result["data_schema"]
assert data_schema({}) == {
"usb_path": USB_DISCOVERY_INFO["device"],
"s0_legacy_key": "",
"s2_access_control_key": "",
"s2_authenticated_key": "",
"s2_unauthenticated_key": "",
}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": USB_DISCOVERY_INFO["device"],
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": USB_DISCOVERY_INFO["device"],
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": USB_DISCOVERY_INFO["device"],
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_addon_not_running(
hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon
):
"""Test discovery with add-on already installed but not running."""
addon_options["device"] = None
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["step_id"] == "hassio_confirm"
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_addon_not_installed(
hass,
supervisor,
addon_installed,
install_addon,
addon_options,
set_addon_options,
start_addon,
):
"""Test discovery with add-on not installed."""
addon_installed.return_value["version"] = None
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["step_id"] == "hassio_confirm"
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["step_id"] == "install_addon"
assert result["type"] == "progress"
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert install_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": True,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_abort_usb_discovery_with_existing_flow(hass, supervisor, addon_options):
"""Test usb discovery flow is aborted when another discovery has happened."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HASSIO},
data=ADDON_DISCOVERY_INFO,
)
assert result["type"] == "form"
assert result["step_id"] == "hassio_confirm"
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_in_progress"
async def test_abort_usb_discovery_already_configured(hass, supervisor, addon_options):
"""Test usb discovery flow is aborted when there is an existing entry."""
entry = MockConfigEntry(
domain=DOMAIN, data={"url": "ws://localhost:3000"}, title=TITLE, unique_id=1234
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_usb_discovery_requires_supervisor(hass):
"""Test usb discovery flow is aborted when there is no supervisor."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "discovery_requires_supervisor"
async def test_usb_discovery_already_running(hass, supervisor, addon_running):
"""Test usb discovery flow is aborted when the addon is running."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=USB_DISCOVERY_INFO,
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
"discovery_info",
[CP2652_ZIGBEE_DISCOVERY_INFO],
)
async def test_abort_usb_discovery_aborts_specific_devices(
hass, supervisor, addon_options, discovery_info
):
"""Test usb discovery flow is aborted on specific devices."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USB},
data=discovery_info,
)
assert result["type"] == "abort"
assert result["reason"] == "not_zwave_device"
async def test_not_addon(hass, supervisor):
"""Test opting out of add-on on Supervisor."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": False}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"url": "ws://localhost:3000",
},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://localhost:3000",
"usb_path": None,
"s0_legacy_key": None,
"s2_access_control_key": None,
"s2_authenticated_key": None,
"s2_unauthenticated_key": None,
"use_addon": False,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_addon_running(
hass,
supervisor,
addon_running,
addon_options,
get_addon_discovery_info,
):
"""Test add-on already running on Supervisor."""
addon_options["device"] = "/test"
addon_options["s0_legacy_key"] = "new123"
addon_options["s2_access_control_key"] = "new456"
addon_options["s2_authenticated_key"] = "new789"
addon_options["s2_unauthenticated_key"] = "new987"
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"discovery_info, discovery_info_side_effect, server_version_side_effect, "
"addon_info_side_effect, abort_reason",
[
(
{"config": ADDON_DISCOVERY_INFO},
HassioAPIError(),
None,
None,
"addon_get_discovery_info_failed",
),
(
{"config": ADDON_DISCOVERY_INFO},
None,
asyncio.TimeoutError,
None,
"cannot_connect",
),
(
None,
None,
None,
None,
"addon_get_discovery_info_failed",
),
(
{"config": ADDON_DISCOVERY_INFO},
None,
None,
HassioAPIError(),
"addon_info_failed",
),
],
)
async def test_addon_running_failures(
hass,
supervisor,
addon_running,
addon_options,
get_addon_discovery_info,
abort_reason,
):
"""Test all failures when add-on is running."""
addon_options["device"] = "/test"
addon_options["network_key"] = "abc123"
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "abort"
assert result["reason"] == abort_reason
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_addon_running_already_configured(
hass, supervisor, addon_running, addon_options, get_addon_discovery_info
):
"""Test that only one unique instance is allowed when add-on is running."""
addon_options["device"] = "/test_new"
addon_options["s0_legacy_key"] = "new123"
addon_options["s2_access_control_key"] = "new456"
addon_options["s2_authenticated_key"] = "new789"
addon_options["s2_unauthenticated_key"] = "new987"
entry = MockConfigEntry(
domain=DOMAIN,
data={
"url": "ws://localhost:3000",
"usb_path": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
title=TITLE,
unique_id=1234,
)
entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["url"] == "ws://host1:3001"
assert entry.data["usb_path"] == "/test_new"
assert entry.data["s0_legacy_key"] == "new123"
assert entry.data["s2_access_control_key"] == "new456"
assert entry.data["s2_authenticated_key"] == "new789"
assert entry.data["s2_unauthenticated_key"] == "new987"
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_addon_installed(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test add-on already installed but not running on Supervisor."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"discovery_info, start_addon_side_effect",
[({"config": ADDON_DISCOVERY_INFO}, HassioAPIError())],
)
async def test_addon_installed_start_failure(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test add-on start failure when add-on is installed."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "abort"
assert result["reason"] == "addon_start_failed"
@pytest.mark.parametrize(
"discovery_info, server_version_side_effect",
[
(
{"config": ADDON_DISCOVERY_INFO},
asyncio.TimeoutError,
),
(
None,
None,
),
],
)
async def test_addon_installed_failures(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test all failures when add-on is installed."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "abort"
assert result["reason"] == "addon_start_failed"
@pytest.mark.parametrize(
"set_addon_options_side_effect, discovery_info",
[(HassioAPIError(), {"config": ADDON_DISCOVERY_INFO})],
)
async def test_addon_installed_set_options_failure(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test all failures when add-on is installed."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "abort"
assert result["reason"] == "addon_set_config_failed"
assert start_addon.call_count == 0
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_addon_installed_already_configured(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test that only one unique instance is allowed when add-on is installed."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
"url": "ws://localhost:3000",
"usb_path": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
title=TITLE,
unique_id=1234,
)
entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test_new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test_new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["url"] == "ws://host1:3001"
assert entry.data["usb_path"] == "/test_new"
assert entry.data["s0_legacy_key"] == "new123"
assert entry.data["s2_access_control_key"] == "new456"
assert entry.data["s2_authenticated_key"] == "new789"
assert entry.data["s2_unauthenticated_key"] == "new987"
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_addon_not_installed(
hass,
supervisor,
addon_installed,
install_addon,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test add-on not installed."""
addon_installed.return_value["version"] = None
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "progress"
assert result["step_id"] == "install_addon"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert install_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
},
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"use_addon": True,
"integration_created_addon": True,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_install_addon_failure(hass, supervisor, addon_installed, install_addon):
"""Test add-on install failure."""
addon_installed.return_value["version"] = None
install_addon.side_effect = HassioAPIError()
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "progress"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert install_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "abort"
assert result["reason"] == "addon_install_failed"
async def test_options_manual(hass, client, integration):
"""Test manual settings in options flow."""
entry = integration
entry.unique_id = 1234
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "manual"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"url": "ws://1.1.1.1:3001"}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert entry.data["url"] == "ws://1.1.1.1:3001"
assert entry.data["use_addon"] is False
assert entry.data["integration_created_addon"] is False
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
async def test_options_manual_different_device(hass, integration):
"""Test options flow manual step connecting to different device."""
entry = integration
entry.unique_id = 5678
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "manual"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"url": "ws://1.1.1.1:3001"}
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "different_device"
async def test_options_not_addon(hass, client, supervisor, integration):
"""Test options flow and opting out of add-on on Supervisor."""
entry = integration
entry.unique_id = 1234
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": False}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
{
"url": "ws://localhost:3000",
},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert entry.data["url"] == "ws://localhost:3000"
assert entry.data["use_addon"] is False
assert entry.data["integration_created_addon"] is False
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options, disconnect_calls",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
0,
),
(
{"config": ADDON_DISCOVERY_INFO},
{"use_addon": True},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
1,
),
],
)
async def test_options_addon_running(
hass,
client,
supervisor,
integration,
addon_running,
addon_options,
set_addon_options,
restart_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
disconnect_calls,
):
"""Test options flow and add-on already running on Supervisor."""
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
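    # The add-on config schema uses "device" where the flow form uses "usb_path",
    # so mirror that rename before comparing against set_addon_options.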
new_addon_options["device"] = new_addon_options.pop("usb_path")
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": new_addon_options},
)
assert client.disconnect.call_count == disconnect_calls
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert restart_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert entry.data["url"] == "ws://host1:3001"
assert entry.data["usb_path"] == new_addon_options["device"]
assert entry.data["s0_legacy_key"] == new_addon_options["s0_legacy_key"]
assert (
entry.data["s2_access_control_key"]
== new_addon_options["s2_access_control_key"]
)
assert (
entry.data["s2_authenticated_key"] == new_addon_options["s2_authenticated_key"]
)
assert (
entry.data["s2_unauthenticated_key"]
== new_addon_options["s2_unauthenticated_key"]
)
assert entry.data["use_addon"] is True
assert entry.data["integration_created_addon"] is False
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
{
"usb_path": "/test",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
),
],
)
async def test_options_addon_running_no_changes(
hass,
client,
supervisor,
integration,
addon_running,
addon_options,
set_addon_options,
restart_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
):
"""Test options flow without changes, and add-on already running on Supervisor."""
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
await hass.async_block_till_done()
new_addon_options["device"] = new_addon_options.pop("usb_path")
assert set_addon_options.call_count == 0
assert restart_addon.call_count == 0
assert result["type"] == "create_entry"
assert entry.data["url"] == "ws://host1:3001"
assert entry.data["usb_path"] == new_addon_options["device"]
assert entry.data["s0_legacy_key"] == new_addon_options["s0_legacy_key"]
assert (
entry.data["s2_access_control_key"]
== new_addon_options["s2_access_control_key"]
)
assert (
entry.data["s2_authenticated_key"] == new_addon_options["s2_authenticated_key"]
)
assert (
entry.data["s2_unauthenticated_key"]
== new_addon_options["s2_unauthenticated_key"]
)
assert entry.data["use_addon"] is True
assert entry.data["integration_created_addon"] is False
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
async def different_device_server_version(*args):
"""Return server version for a device with different home id."""
return VersionInfo(
driver_version="mock-driver-version",
server_version="mock-server-version",
home_id=5678,
min_schema_version=0,
max_schema_version=1,
)
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options, disconnect_calls, server_version_side_effect",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
0,
different_device_server_version,
),
],
)
async def test_options_different_device(
hass,
client,
supervisor,
integration,
addon_running,
addon_options,
set_addon_options,
restart_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
disconnect_calls,
server_version_side_effect,
):
"""Test options flow and configuring a different device."""
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
assert set_addon_options.call_count == 1
new_addon_options["device"] = new_addon_options.pop("usb_path")
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": new_addon_options},
)
assert client.disconnect.call_count == disconnect_calls
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
assert restart_addon.call_count == 1
assert restart_addon.call_args == call(hass, "core_zwave_js")
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
# Legacy network key is not reset.
old_addon_options.pop("network_key")
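    # After detecting a different device, the flow reverts the add-on to the old
    # options and restarts it a second time.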
assert set_addon_options.call_count == 2
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": old_addon_options},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
assert restart_addon.call_count == 2
assert restart_addon.call_args == call(hass, "core_zwave_js")
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "different_device"
assert entry.data == data
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options, disconnect_calls, restart_addon_side_effect",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
0,
[HassioAPIError(), None],
),
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "old123",
"s0_legacy_key": "old123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
0,
[
HassioAPIError(),
HassioAPIError(),
],
),
],
)
async def test_options_addon_restart_failed(
hass,
client,
supervisor,
integration,
addon_running,
addon_options,
set_addon_options,
restart_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
disconnect_calls,
restart_addon_side_effect,
):
"""Test options flow and add-on restart failure."""
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
assert set_addon_options.call_count == 1
new_addon_options["device"] = new_addon_options.pop("usb_path")
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": new_addon_options},
)
assert client.disconnect.call_count == disconnect_calls
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
assert restart_addon.call_count == 1
assert restart_addon.call_args == call(hass, "core_zwave_js")
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
# The legacy network key should not be reset.
old_addon_options.pop("network_key")
assert set_addon_options.call_count == 2
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": old_addon_options},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
assert restart_addon.call_count == 2
assert restart_addon.call_args == call(hass, "core_zwave_js")
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "addon_start_failed"
assert entry.data == data
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options, disconnect_calls, server_version_side_effect",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "abc123",
"s0_legacy_key": "abc123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
{
"usb_path": "/test",
"s0_legacy_key": "abc123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
"log_level": "info",
"emulate_hardware": False,
},
0,
aiohttp.ClientError("Boom"),
),
],
)
async def test_options_addon_running_server_info_failure(
hass,
client,
supervisor,
integration,
addon_running,
addon_options,
set_addon_options,
restart_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
disconnect_calls,
server_version_side_effect,
):
"""Test options flow and add-on already running with server info failure."""
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
assert entry.data == data
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize(
"discovery_info, entry_data, old_addon_options, new_addon_options, disconnect_calls",
[
(
{"config": ADDON_DISCOVERY_INFO},
{},
{
"device": "/test",
"network_key": "abc123",
"s0_legacy_key": "abc123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
0,
),
(
{"config": ADDON_DISCOVERY_INFO},
{"use_addon": True},
{
"device": "/test",
"network_key": "abc123",
"s0_legacy_key": "abc123",
"s2_access_control_key": "old456",
"s2_authenticated_key": "old789",
"s2_unauthenticated_key": "old987",
},
{
"usb_path": "/new",
"s0_legacy_key": "new123",
"s2_access_control_key": "new456",
"s2_authenticated_key": "new789",
"s2_unauthenticated_key": "new987",
"log_level": "info",
"emulate_hardware": False,
},
1,
),
],
)
async def test_options_addon_not_installed(
hass,
client,
supervisor,
addon_installed,
install_addon,
integration,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
discovery_info,
entry_data,
old_addon_options,
new_addon_options,
disconnect_calls,
):
"""Test options flow and add-on not installed on Supervisor."""
addon_installed.return_value["version"] = None
addon_options.update(old_addon_options)
entry = integration
entry.unique_id = 1234
data = {**entry.data, **entry_data}
hass.config_entries.async_update_entry(entry, data=data)
assert entry.data["url"] == "ws://test.org"
assert client.connect.call_count == 1
assert client.disconnect.call_count == 0
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.options.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "progress"
assert result["step_id"] == "install_addon"
# Make sure the flow continues when the progress task is done.
await hass.async_block_till_done()
result = await hass.config_entries.options.async_configure(result["flow_id"])
assert install_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
new_addon_options,
)
new_addon_options["device"] = new_addon_options.pop("usb_path")
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{"options": new_addon_options},
)
assert client.disconnect.call_count == disconnect_calls
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
await hass.async_block_till_done()
assert start_addon.call_count == 1
assert start_addon.call_args == call(hass, "core_zwave_js")
result = await hass.config_entries.options.async_configure(result["flow_id"])
await hass.async_block_till_done()
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert entry.data["url"] == "ws://host1:3001"
assert entry.data["usb_path"] == new_addon_options["device"]
assert entry.data["s0_legacy_key"] == new_addon_options["s0_legacy_key"]
assert entry.data["use_addon"] is True
assert entry.data["integration_created_addon"] is True
assert client.connect.call_count == 2
assert client.disconnect.call_count == 1
@pytest.mark.parametrize("discovery_info", [{"config": ADDON_DISCOVERY_INFO}])
async def test_import_addon_installed(
hass,
supervisor,
addon_installed,
addon_options,
set_addon_options,
start_addon,
get_addon_discovery_info,
):
"""Test import step while add-on already installed on Supervisor."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"usb_path": "/test/imported", "network_key": "imported123"},
)
assert result["type"] == "form"
assert result["step_id"] == "on_supervisor"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"use_addon": True}
)
assert result["type"] == "form"
assert result["step_id"] == "configure_addon"
# the default input should be the imported data
default_input = result["data_schema"]({})
assert default_input == {
"usb_path": "/test/imported",
"s0_legacy_key": "imported123",
"s2_access_control_key": "",
"s2_authenticated_key": "",
"s2_unauthenticated_key": "",
}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], default_input
)
assert set_addon_options.call_args == call(
hass,
"core_zwave_js",
{
"options": {
"device": "/test/imported",
"s0_legacy_key": "imported123",
"s2_access_control_key": "",
"s2_authenticated_key": "",
"s2_unauthenticated_key": "",
}
},
)
assert result["type"] == "progress"
assert result["step_id"] == "start_addon"
with patch(
"homeassistant.components.zwave_js.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zwave_js.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert start_addon.call_args == call(hass, "core_zwave_js")
assert result["type"] == "create_entry"
assert result["title"] == TITLE
assert result["data"] == {
"url": "ws://host1:3001",
"usb_path": "/test/imported",
"s0_legacy_key": "imported123",
"s2_access_control_key": "",
"s2_authenticated_key": "",
"s2_unauthenticated_key": "",
"use_addon": True,
"integration_created_addon": False,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
| 31.218548
| 117
| 0.626127
|
e43ba1e7353c26535170cef8c1aa351a3a1b4bb8
| 6,901
|
py
|
Python
|
nncf/torch/sparsity/rb/algo.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 136
|
2020-06-01T14:03:31.000Z
|
2020-10-28T06:10:50.000Z
|
nncf/torch/sparsity/rb/algo.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 133
|
2020-05-26T13:48:04.000Z
|
2020-10-28T05:25:55.000Z
|
nncf/torch/sparsity/rb/algo.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 36
|
2020-05-28T08:18:39.000Z
|
2020-10-27T14:46:58.000Z
|
"""
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from typing import List
import torch
import torch.distributed as dist
from nncf import NNCFConfig
from nncf.config.extractors import extract_algo_specific_config
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.api.compression import CompressionStage
from nncf.common.graph import NNCFNode
from nncf.torch.compression_method_api import PTCompressionAlgorithmController
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.sparsity.base_algo import BaseSparsityAlgoBuilder, BaseSparsityAlgoController, SparseModuleInfo
from nncf.torch.sparsity.rb.layers import RBSparsifyingWeight
from nncf.torch.sparsity.rb.loss import SparseLoss, SparseLossForPerLayerSparsity
from nncf.torch.utils import get_model_device
from nncf.torch.utils import get_world_size
from nncf.common.accuracy_aware_training.training_loop import ADAPTIVE_COMPRESSION_CONTROLLERS
from nncf.torch.sparsity.collector import PTSparseModelStatisticsCollector
from nncf.common.sparsity.schedulers import SPARSITY_SCHEDULERS
from nncf.common.schedulers import StubCompressionScheduler
from nncf.common.sparsity.statistics import RBSparsityStatistics
from nncf.common.statistics import NNCFStatistics
@PT_COMPRESSION_ALGORITHMS.register('rb_sparsity')
class RBSparsityBuilder(BaseSparsityAlgoBuilder):
def create_weight_sparsifying_operation(self, target_module_node: NNCFNode, compression_lr_multiplier: float):
return RBSparsifyingWeight(target_module_node.layer_attributes.get_weight_shape(), frozen=False,
compression_lr_multiplier=compression_lr_multiplier)
def _build_controller(self, model: NNCFNetwork) -> PTCompressionAlgorithmController:
return RBSparsityController(model, self._sparsified_module_info, self.config)
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('pt_rb_sparsity')
class RBSparsityController(BaseSparsityAlgoController):
def __init__(self, target_model: NNCFNetwork, sparsified_module_info: List[SparseModuleInfo],
config: NNCFConfig):
super().__init__(target_model, sparsified_module_info)
algo_config = extract_algo_specific_config(config, 'rb_sparsity')
params = deepcopy(algo_config.get('params', {}))
self._distributed = False
self._mode = params.get('sparsity_level_setting_mode', 'global')
self._check_sparsity_masks = params.get('check_sparsity_masks', False)
sparsify_operations = [m.operand for m in self.sparsified_module_info]
if self._mode == 'local':
self._loss = SparseLossForPerLayerSparsity(sparsify_operations)
self._scheduler = StubCompressionScheduler()
else:
self._loss = SparseLoss(sparsify_operations)
sparsity_init = algo_config.get('sparsity_init', 0)
params['sparsity_init'] = sparsity_init
scheduler_cls = SPARSITY_SCHEDULERS.get(params.get('schedule', 'exponential'))
self._scheduler = scheduler_cls(self, params)
self.set_sparsity_level(sparsity_init)
def set_sparsity_level(self, sparsity_level, target_sparsified_module_info: SparseModuleInfo = None):
if target_sparsified_module_info is None:
#pylint:disable=no-value-for-parameter
self._loss.set_target_sparsity_loss(sparsity_level)
else:
sparse_op = target_sparsified_module_info.operand
self._loss.set_target_sparsity_loss(sparsity_level, sparse_op)
def compression_stage(self) -> CompressionStage:
if self._mode == 'local':
return CompressionStage.FULLY_COMPRESSED
if self.scheduler.current_sparsity_level == 0:
return CompressionStage.UNCOMPRESSED
if self.scheduler.current_sparsity_level >= self.scheduler.target_level:
return CompressionStage.FULLY_COMPRESSED
return CompressionStage.PARTIALLY_COMPRESSED
def freeze(self):
self._loss.disable()
def distributed(self):
if not dist.is_initialized():
raise KeyError('Could not set distributed mode for the compression algorithm '
'because the default process group has not been initialized.')
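        # Broadcast the RNG state from rank 0 so every worker draws identical random
        # numbers, which keeps the stochastic sparsity masks in sync across processes
        # (see _check_distributed_masks).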
if 'cuda' in get_model_device(self._model).type:
state = torch.cuda.get_rng_state()
if dist.get_backend() == dist.Backend.NCCL:
state = state.cuda()
torch.distributed.broadcast(state, src=0)
torch.cuda.set_rng_state(state.cpu())
else:
state = torch.get_rng_state()
torch.distributed.broadcast(state, src=0)
torch.set_rng_state(state)
self._distributed = True
def _check_distributed_masks(self):
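        # Returns the fraction of mask entries that agree across workers;
        # 1 means the sparsity masks are identical on every rank.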
if not self._distributed or get_world_size() == 1:
return 1
nvalues = 0
ncor_values = 0
eps = 1e-4
for minfo in self.sparsified_module_info:
mask = minfo.operand.mask
mask_list = [torch.empty_like(mask) for _ in range(get_world_size())]
# nccl does not support gather, send, recv operations
dist.all_gather(mask_list, mask)
for i in range(1, len(mask_list)):
rel_error = (mask_list[0] - mask_list[i]) / mask_list[0]
ncor_values = ncor_values + (rel_error.abs() < eps).sum(dtype=mask.dtype)
nvalues = nvalues + mask_list[i].numel()
return ncor_values / nvalues
def statistics(self, quickly_collected_only=False) -> NNCFStatistics:
collector = PTSparseModelStatisticsCollector(self.model, self.sparsified_module_info)
model_statistics = collector.collect()
target_sparsity_level = self.scheduler.current_sparsity_level if self._mode == 'global' else None
mean_sparse_prob = 1.0 - self.loss.mean_sparse_prob
stats = RBSparsityStatistics(model_statistics, target_sparsity_level, mean_sparse_prob)
nncf_stats = NNCFStatistics()
nncf_stats.register('rb_sparsity', stats)
return nncf_stats
@property
def compression_rate(self):
return self._loss.target_sparsity_rate
@compression_rate.setter
def compression_rate(self, sparsity_level: float):
self.set_sparsity_level(sparsity_level)
| 44.522581
| 114
| 0.7283
|
a72e1bb9491da0eb1b00b821f46182c9de7ec2fc
| 1,106
|
py
|
Python
|
backend/us_covid_stats/etl/load.py
|
dashmug/us-covid-stats
|
321f015cb763f0e8d62bddd866020676da732ef8
|
[
"MIT"
] | 2
|
2020-11-18T19:01:01.000Z
|
2020-11-25T02:07:22.000Z
|
backend/us_covid_stats/etl/load.py
|
dashmug/us-covid-stats
|
321f015cb763f0e8d62bddd866020676da732ef8
|
[
"MIT"
] | 565
|
2020-10-10T20:57:05.000Z
|
2021-08-04T18:05:26.000Z
|
backend/us_covid_stats/etl/load.py
|
dashmug/us-covid-stats
|
321f015cb763f0e8d62bddd866020676da732ef8
|
[
"MIT"
] | 1
|
2021-03-24T16:08:23.000Z
|
2021-03-24T16:08:23.000Z
|
from pandas import DataFrame
from us_covid_stats.infrastructure.s3 import MissingFileError
from us_covid_stats.repositories.cases import CaseData, save_cases
from us_covid_stats.repositories.csv import s3_csv_to_dataframe, dataframe_to_s3_csv
def load_data_to_database(latest_data: DataFrame) -> str:
try:
old_data: DataFrame = s3_csv_to_dataframe()
except MissingFileError:
df = latest_data
else:
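        # Anti-join: keep only the rows that appear in latest_data but not in
        # old_data, using the merge indicator column.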
df = (
old_data.merge(
latest_data,
how="outer",
on=["date", "cases", "deaths", "recoveries"],
indicator=True,
)
.loc[lambda x: x["_merge"] == "right_only"]
.drop("_merge", axis=1)
)
new_cases = (
CaseData(
date=row.Index.strftime("%Y-%m-%d"),
cases=row.cases,
deaths=row.deaths,
recoveries=row.recoveries,
)
for row in df.itertuples()
)
save_cases(new_cases)
dataframe_to_s3_csv(latest_data)
return f"Update successful. {len(df.index)} row(s) updated."
| 28.358974
| 84
| 0.598553
|
97c0eb90b923781478cd44a80b76f32947b4dd26
| 13,730
|
py
|
Python
|
src/paramiko-master/tests/test_sftp_big.py
|
zhanggen3714/zhanggen_audit
|
3913d4c0179472b5a0903ecff57ddd45a682ac20
|
[
"Apache-2.0"
] | 1
|
2020-04-19T14:13:35.000Z
|
2020-04-19T14:13:35.000Z
|
src/paramiko-master/tests/test_sftp_big.py
|
zhanggen3714/zhanggen_audit
|
3913d4c0179472b5a0903ecff57ddd45a682ac20
|
[
"Apache-2.0"
] | null | null | null |
src/paramiko-master/tests/test_sftp_big.py
|
zhanggen3714/zhanggen_audit
|
3913d4c0179472b5a0903ecff57ddd45a682ac20
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
some unit tests to make sure sftp works well with large files.
a real actual sftp server is contacted, and a new folder is created there to
do test file operations in (so no existing files will be harmed).
"""
import os
import random
import struct
import sys
import time
import unittest
from paramiko.common import o660
from .util import slow
@slow
class TestBigSFTP(object):
def test_1_lots_of_files(self, sftp):
"""
create a bunch of files over the same session.
"""
numfiles = 100
try:
for i in range(numfiles):
with sftp.open(
"%s/file%d.txt" % (sftp.FOLDER, i), "w", 1
) as f:
f.write("this is file #%d.\n" % i)
sftp.chmod("%s/file%d.txt" % (sftp.FOLDER, i), o660)
            # now make sure every file is there, by creating a list of filenames
# and reading them in random order.
numlist = list(range(numfiles))
while len(numlist) > 0:
r = numlist[random.randint(0, len(numlist) - 1)]
with sftp.open("%s/file%d.txt" % (sftp.FOLDER, r)) as f:
assert f.readline() == "this is file #%d.\n" % r
numlist.remove(r)
finally:
for i in range(numfiles):
try:
sftp.remove("%s/file%d.txt" % (sftp.FOLDER, i))
except:
pass
def test_2_big_file(self, sftp):
"""
write a 1MB file with no buffering.
"""
kblob = 1024 * b"x"
start = time.time()
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "w") as f:
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
end = time.time()
sys.stderr.write("%ds " % round(end - start))
start = time.time()
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "r") as f:
for n in range(1024):
data = f.read(1024)
assert data == kblob
end = time.time()
sys.stderr.write("%ds " % round(end - start))
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_3_big_file_pipelined(self, sftp):
"""
write a 1MB file, with no linefeeds, using pipelining.
"""
kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
start = time.time()
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "wb") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
end = time.time()
sys.stderr.write("%ds " % round(end - start))
start = time.time()
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "rb") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
# read on odd boundaries to make sure the bytes aren't getting scrambled
n = 0
k2blob = kblob + kblob
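                # The file is kblob repeated, so doubling kblob lets any chunk that
                # starts at (offset % 1024) be sliced out without wrapping around.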
chunk = 629
size = 1024 * 1024
while n < size:
if n + chunk > size:
chunk = size - n
data = f.read(chunk)
offset = n % 1024
assert data == k2blob[offset : offset + chunk]
n += chunk
end = time.time()
sys.stderr.write("%ds " % round(end - start))
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_4_prefetch_seek(self, sftp):
kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "wb") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "rb") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
base_offset = (512 * 1024) + 17 * random.randint(
1000, 2000
)
offsets = [base_offset + j * chunk for j in range(100)]
# randomly seek around and read them out
for j in range(100):
offset = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(offset)
f.seek(offset)
data = f.read(chunk)
n_offset = offset % 1024
assert data == k2blob[n_offset : n_offset + chunk]
offset += chunk
end = time.time()
sys.stderr.write("%ds " % round(end - start))
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_5_readv_seek(self, sftp):
kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "wb") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "rb") as f:
base_offset = (512 * 1024) + 17 * random.randint(
1000, 2000
)
# make a bunch of offsets and put them in random order
offsets = [base_offset + j * chunk for j in range(100)]
readv_list = []
for j in range(100):
o = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(o)
readv_list.append((o, chunk))
ret = f.readv(readv_list)
for i in range(len(readv_list)):
offset = readv_list[i][0]
n_offset = offset % 1024
assert next(ret) == k2blob[n_offset : n_offset + chunk]
end = time.time()
sys.stderr.write("%ds " % round(end - start))
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_6_lots_of_prefetching(self, sftp):
"""
prefetch a 1MB file a bunch of times, discarding the file object
without using it, to verify that paramiko doesn't get confused.
"""
kblob = 1024 * b"x"
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "w") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
for i in range(10):
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "r") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "r") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
for n in range(1024):
data = f.read(1024)
assert data == kblob
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_7_prefetch_readv(self, sftp):
"""
verify that prefetch and readv don't conflict with each other.
"""
kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "wb") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "rb") as f:
file_size = f.stat().st_size
f.prefetch(file_size)
data = f.read(1024)
assert data == kblob
chunk_size = 793
base_offset = 512 * 1024
k2blob = kblob + kblob
chunks = [
(base_offset + (chunk_size * i), chunk_size)
for i in range(20)
]
for data in f.readv(chunks):
offset = base_offset % 1024
assert chunk_size == len(data)
assert k2blob[offset : offset + chunk_size] == data
base_offset += chunk_size
sys.stderr.write(" ")
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_8_large_readv(self, sftp):
"""
verify that a very large readv is broken up correctly and still
returned as a single blob.
"""
kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
try:
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "wb") as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write(".")
sys.stderr.write(" ")
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
with sftp.open("%s/hongry.txt" % sftp.FOLDER, "rb") as f:
data = list(f.readv([(23 * 1024, 128 * 1024)]))
assert len(data) == 1
data = data[0]
assert len(data) == 128 * 1024
sys.stderr.write(" ")
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_9_big_file_big_buffer(self, sftp):
"""
write a 1MB file, with no linefeeds, and a big buffer.
"""
mblob = 1024 * 1024 * "x"
try:
with sftp.open(
"%s/hongry.txt" % sftp.FOLDER, "w", 128 * 1024
) as f:
f.write(mblob)
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
def test_A_big_file_renegotiate(self, sftp):
"""
write a 1MB file, forcing key renegotiation in the middle.
"""
t = sftp.sock.get_transport()
t.packetizer.REKEY_BYTES = 512 * 1024
k32blob = 32 * 1024 * "x"
try:
with sftp.open(
"%s/hongry.txt" % sftp.FOLDER, "w", 128 * 1024
) as f:
for i in range(32):
f.write(k32blob)
assert (
sftp.stat("%s/hongry.txt" % sftp.FOLDER).st_size == 1024 * 1024
)
assert t.H != t.session_id
# try to read it too.
with sftp.open(
"%s/hongry.txt" % sftp.FOLDER, "r", 128 * 1024
) as f:
file_size = f.stat().st_size
f.prefetch(file_size)
total = 0
while total < 1024 * 1024:
total += len(f.read(32 * 1024))
finally:
sftp.remove("%s/hongry.txt" % sftp.FOLDER)
t.packetizer.REKEY_BYTES = pow(2, 30)
| 36.419098
| 88
| 0.46992
|
c70f29ef60251f0c0efd6ef145b03ad87d0b39f1
| 1,704
|
py
|
Python
|
enterprise_extensions/frequentist/chi_squared.py
|
achalumeau/enterprise_extensions
|
1b12f97c1be0edbc4c8431e6fa4d893970145cbf
|
[
"MIT"
] | null | null | null |
enterprise_extensions/frequentist/chi_squared.py
|
achalumeau/enterprise_extensions
|
1b12f97c1be0edbc4c8431e6fa4d893970145cbf
|
[
"MIT"
] | null | null | null |
enterprise_extensions/frequentist/chi_squared.py
|
achalumeau/enterprise_extensions
|
1b12f97c1be0edbc4c8431e6fa4d893970145cbf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import scipy.linalg as sl
def get_chi2(pta, xs):
"""Compute generalize chisq for pta:
chisq = y^T (N + F phi F^T)^-1 y
= y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
"""
params = xs if isinstance(xs, dict) else pta.map_params(xs)
# chisq = y^T (N + F phi F^T)^-1 y
# = y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
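    # The second line follows from the Woodbury matrix identity,
    #   (N + F phi F^T)^-1 = N^-1 - N^-1 F (phi^-1 + F^T N^-1 F)^-1 F^T N^-1,
    # so only the small system Sigma = F^T N^-1 F + phi^-1 has to be factorized below.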
TNrs = pta.get_TNr(params)
TNTs = pta.get_TNT(params)
phiinvs = pta.get_phiinv(params, logdet=True, method='cliques')
    chi2 = sum(ell[0] for ell in pta.get_rNr_logdet(params))
if pta._commonsignals:
raise NotImplementedError("get_chi2 does not support correlated signals")
else:
for TNr, TNT, pl in zip(TNrs, TNTs, phiinvs):
if TNr is None:
continue
phiinv, _ = pl
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
cf = sl.cho_factor(Sigma)
expval = sl.cho_solve(cf, TNr)
except sl.LinAlgError: # pragma: no cover
return -np.inf
chi2 = chi2 - np.dot(TNr, expval)
return chi2
def get_reduced_chi2(pta, xs):
"""
    Compute the generalized reduced chi-square for the PTA using degrees of
    freedom (DOF), defined by dof = NTOAs - N timing parameters - N model params.
"""
keys = [ky for ky in pta._signal_dict.keys() if 'timing_model' in ky]
chi2 = get_chi2(pta, xs)
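    # Each timing-model basis has shape (n_TOAs, n_timing_params), so the column
    # difference below gives n_TOAs - n_timing_params before the model-parameter cut.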
degs = np.array([pta._signal_dict[ky].get_basis().shape for ky in keys])
dof = np.sum(degs[:, 0]) - np.sum(degs[:, 1])
dof -= len(pta.param_names)
return chi2/dof
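# Example (illustrative; assumes a fully constructed `pta` whose free parameters
# expose `.sample()`, as enterprise Parameter objects do):
#     x0 = np.hstack([p.sample() for p in pta.params])
#     print(get_reduced_chi2(pta, x0))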
| 30.428571
| 81
| 0.567488
|
59f45df39cb05ca14445af9767a5be7c4128e067
| 5,544
|
py
|
Python
|
whirlybird/server/devices/battery_monitor.py
|
levisaya/whirlybird
|
9d65933f0e978aa9c907bc6dfa338afd28c4b972
|
[
"MIT"
] | null | null | null |
whirlybird/server/devices/battery_monitor.py
|
levisaya/whirlybird
|
9d65933f0e978aa9c907bc6dfa338afd28c4b972
|
[
"MIT"
] | null | null | null |
whirlybird/server/devices/battery_monitor.py
|
levisaya/whirlybird
|
9d65933f0e978aa9c907bc6dfa338afd28c4b972
|
[
"MIT"
] | null | null | null |
import time
import RPi.GPIO as GPIO
class BatteryMonitor(object):
"""
Device to read the voltage off an ADC.
Specifically used to monitor the lipo voltage.
"""
def __init__(self,
vin_max,
vin_min,
r1,
r2,
chip_select_pin=25,
clock_pin=8,
mosi_pin=24,
miso_pin=23):
"""
:param vin_max: The maximum input voltage. This voltage will be interpreted as 100%.
:param vin_min: The minimum input voltage. This voltage will be interpreted as 0%.
        :param r1: The value of the resistor from vout to ground (the resistor vout is read across), in ohms.
        :param r2: The value of the resistor from vin to vout, in ohms.
:param chip_select_pin: Chip enable pin number on the Pi (25 or 26)
:param clock_pin: Spi clock pin number on the Pi.
:param mosi_pin: MOSI pin number on the Pi.
:param miso_pin: MISO pin number on the Pi.
:return:
"""
self.chip_select_pin = chip_select_pin
self.clock_pin = clock_pin
self.mosi_pin = mosi_pin
self.miso_pin = miso_pin
self.r1 = r1
self.r2 = r2
# Setup the SPI GPIO pins.
GPIO.setmode(GPIO.BCM)
GPIO.setup(mosi_pin, GPIO.OUT)
GPIO.setup(miso_pin, GPIO.IN)
GPIO.setup(clock_pin, GPIO.OUT)
GPIO.setup(chip_select_pin, GPIO.OUT)
# Calculate the max and min voltages we should be seeing over the voltage drop.
# These values become 100% charge and 0% charge.
self.vout_max = (vin_max * r1) / (r1 + r2)
self.vout_min = (vin_min * r1) / (r1 + r2)
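        # Worked example (illustrative values only): with vin_max=8.4, vin_min=6.0,
        # r1=10000 and r2=18000, the formulas above give
        # vout_max = 8.4 * 10000 / 28000 = 3.0 V and
        # vout_min = 6.0 * 10000 / 28000 = about 2.14 V,
        # keeping the whole battery range below the ADC's 3.3 V reference.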
def _read_adc_bits(self, num_bits=12):
"""
Generator to pulse the clock and read a bit.
:param num_bits: The number of bits to read.
:return: Generator over the bits read.
"""
for _ in range(num_bits):
GPIO.output(self.clock_pin, True)
GPIO.output(self.clock_pin, False)
yield GPIO.input(self.miso_pin)
def read_adc(self, percentage=True):
"""
:param percentage: If true, the battery percentage is also returned.
:return: If percentage was False, a float representing the voltage in volts. If True, a tuple containing the
voltage and the percentage.
"""
# Initialize the clock to low, set the chip select low to select the ADC.
GPIO.output(self.chip_select_pin, True)
GPIO.output(self.clock_pin, False)
GPIO.output(self.chip_select_pin, False)
adc_command = ([1, 1, 0] if self.chip_select_pin == 25 else [1, 1, 1])
for bit in adc_command:
GPIO.output(self.mosi_pin, bit)
GPIO.output(self.clock_pin, True)
GPIO.output(self.clock_pin, False)
# Read 12 bits off the ADC.
adc_bits = [bit for bit in self._read_adc_bits()]
# Sum the bit values to get the read voltage.
# Note that we iterate up until the last bit, which is the null bit that isn't included in the value.
# The exponent is adjusted to match.
adc_voltage = sum([2**(len(adc_bits) - i - 2) * bit for i, bit in enumerate(adc_bits[:-1])])
# All done reading, flip chip select back high.
GPIO.output(self.chip_select_pin, True)
        # Right now adc_voltage is an integer ranging from 0 to 1023, mapping to the 0 to 3.3V output of the
# voltage divider. Convert back to a voltage.
read_voltage = adc_voltage * (3.3 / 1024.0)
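        # e.g. a raw count of 512 corresponds to 512 * (3.3 / 1024.0) = 1.65 V at the divider output.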
# Reverse the voltage drop to determine vin from vout.
real_voltage = read_voltage * (self.r1 + self.r2) / self.r1
if percentage:
# Calculate the percentage with respect to the range vout_min to vout_max.
percentage = (read_voltage - self.vout_min) / (self.vout_max - self.vout_min) * 100
# Trim the percentage to 100 if we were over slightly.
if percentage > 100:
percentage = 100.0
return real_voltage, percentage
else:
return real_voltage
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Battery monitoring loop')
parser.add_argument('--vin-max', dest='vin_max', type=float, help='Maximum input voltage in volts.', required=True)
parser.add_argument('--vin-min', dest='vin_min', type=float, help='Minimum input voltage in volts.', required=True)
parser.add_argument('--r1', dest='r1', type=int, help='Resistor R1 value in ohms.', required=True)
parser.add_argument('--r2', dest='r2', type=int, help='Resistor R2 value in ohms.', required=True)
parser.add_argument('--chip-select-pin', dest='chip_select_pin', type=int,
help='Pi chip select pin.', required=True, choices=[25, 26])
parser.add_argument('--polling-period', dest='polling_period', type=float,
help='Time between readings, in seconds.', default=2.0)
args = parser.parse_args()
monitor = BatteryMonitor(args.vin_max,
args.vin_min,
args.r1,
args.r2,
args.chip_select_pin)
try:
while True:
# read the analog pin
read_adc = monitor.read_adc()
print(read_adc)
time.sleep(args.polling_period)
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
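# A minimal sketch of the conversion math used above, assuming the same divider
# convention as the class (r1 between vout and ground, r2 between vin and vout),
# a 3.3 V ADC reference and a 10-bit reading. The helper and the resistor values
# below are illustrative assumptions, not part of the BatteryMonitor API.
def adc_count_to_battery_voltage(count, r1=10_000, r2=20_000, vref=3.3, full_scale=1024.0):
    vout = count * (vref / full_scale)    # voltage actually seen at the ADC pin
    return vout * (r1 + r2) / r1          # undo the voltage divider to recover vin

# e.g. a count of 869 is ~2.80 V at the ADC, which maps back to roughly 8.4 V of battery.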
| 39.6
| 119
| 0.602633
|
d128a2f96f1afa225c64d5a14414ef590a7b2500
| 6,220
|
py
|
Python
|
pbt/members.py
|
SamuelMarks/pbt-keras
|
86506e9b7f01c73636cdc0cf442d6c2c1141dae1
|
[
"MIT"
] | 31
|
2019-07-29T09:20:41.000Z
|
2022-02-16T19:10:24.000Z
|
pbt/members.py
|
SamuelMarks/pbt-keras
|
86506e9b7f01c73636cdc0cf442d6c2c1141dae1
|
[
"MIT"
] | 7
|
2019-09-20T07:43:47.000Z
|
2022-02-10T00:33:12.000Z
|
pbt/members.py
|
SamuelMarks/pbt-keras
|
86506e9b7f01c73636cdc0cf442d6c2c1141dae1
|
[
"MIT"
] | 4
|
2019-10-05T17:27:58.000Z
|
2020-05-11T08:34:55.000Z
|
from collections import deque
import numpy as np
from pbt.hyperparameters import find_hyperparameters_layer, \
find_hyperparameters_model, Hyperparameter, FloatHyperparameter
class Member:
"""Population member.
Each member wraps an instance of a Keras model to tune. The member holds
references to the hyperparameters of this model, allowing it to change them.
Members of the same population are characterized by the behaviour of the
following methods: step, eval, ready, exploit and explore. If you think
the current implementation of these methods does not work for your problem,
just create a subclass and override them.
"""
def __init__(self, build_fn, steps_ready=None, tune_lr=False):
"""Creates a new population member.
Args:
build_fn (callable): a function that should construct, compile and
return a Keras model. At least one layer of the model should
hold a reference to a pbt.hyperparameters.Hyperparameter.
            steps_ready: number of steps before the member is considered ready
                to go through the exploit-and-explore process, or 'None' if the
                member should not explore new hyperparameters.
            tune_lr: if True, the optimizer learning rate is also registered as a
                tunable hyperparameter.
Raises:
ValueError: if the given model does not have at least one layer
holding a reference to a pbt.hyperparameters.Hyperparameter.
"""
self.model = build_fn()
self.steps_cycle = 0
self.step_ready = steps_ready
self.eval_loss = np.Inf
self.eval_metrics = [(metric.name, 0) for metric in self.model.metrics]
self.recent_losses = deque(maxlen=10)
self.hyperparameters = find_hyperparameters_model(self.model)
if tune_lr:
lr = FloatHyperparameter('lr', self.model.optimizer.lr)
self.hyperparameters.append(lr)
if not self.hyperparameters:
raise ValueError('The model has no hyperparameters to tune')
def loss_smoothed(self):
return sum(self.recent_losses) / len(self.recent_losses)
def step_on_batch(self, x, y):
"""Gradient descent update on a single batch of data.
Args:
x (numpy.ndarray): numpy array of training data.
y (numpy.ndarray): numpy array of target data.
Returns:
double: scalar train loss.
"""
scalars = self.model.train_on_batch(x, y)
train_loss, _ = scalars[0], scalars[1:]
self.steps_cycle += 1
return train_loss
def eval_on_batch(self, x, y):
"""Evaluates the model on a single batch of samples.
Args:
x (numpy.ndarray): numpy array of evaluation data.
y (numpy.ndarray): numpy array of target data.
Returns:
double: scalar evaluation loss.
"""
scalars = self.model.test_on_batch(x, y)
eval_loss, eval_metrics = scalars[0], scalars[1:]
self.eval_loss = eval_loss
for i, (metric, _) in enumerate(self.eval_metrics):
self.eval_metrics[i] = (metric, eval_metrics[i])
self.recent_losses.append(self.eval_loss)
self.steps_cycle += 1
return self.eval_loss
def test_on_batch(self, x, y):
return self.model.test_on_batch(x, y)
def ready(self):
"""Returns if the member of the population is considered ready to
go through the exploit-and-explore process.
Returns:
bool: True if this member is ready, False otherwise.
"""
if not self.step_ready or self.steps_cycle < self.step_ready:
return False
else:
self.steps_cycle = 0
return True
def explore(self):
"""Randomly perturbs hyperparameters by a factor of 0.8 or 1.2.
"""
for h in self.hyperparameters:
h.perturb(None)
def exploit(self, population):
"""Truncation selection.
Ranks all the agents in the population by loss. If the current agent is
in the bottom 20% of the population, it samples another agent uniformly
from the top 20% of the population, and copies its weights and
hyperparameters.
Args:
population (List[Member]): entire population.
Returns:
True if the member was altered, False otherwise.
"""
losses = np.array([m.eval_loss for m in population])
# Lower is better. Top 20% means percentile 20 in losses
threshold_best, threshold_worst = np.percentile(losses, (20, 80))
if self.eval_loss > threshold_worst:
top_performers = [m for m in population
if m.eval_loss < threshold_best]
if top_performers:
self.replace_with(np.random.choice(top_performers))
return True
else:
return False
def replace_with(self, member):
"""Replaces the hyperparameters and weights of this member with the
hyperparameters and the weights of the given member.
Args:
member (Member): member to copy.
"""
assert len(self.hyperparameters) == len(member.hyperparameters), \
'Members do not belong to the same population!'
self.model.set_weights(member.model.get_weights())
for i, hyperparameter in enumerate(self.hyperparameters):
hyperparameter.replace_with(member.hyperparameters[i])
def get_hyperparameter_config(self):
config = {}
for idx, layer in enumerate(self.model.layers):
# layer_name = layer.get_config().get('name')
if isinstance(layer, Hyperparameter):
hyperparameters = [layer]
else:
hyperparameters = find_hyperparameters_layer(layer)
for h in hyperparameters:
for k, v in h.get_config().items():
config['{}:{}'.format(k, idx)] = v
for h in self.hyperparameters:
if isinstance(h, FloatHyperparameter):
config.update(h.get_config())
return config
def __str__(self):
return str(id(self))
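# A small, self-contained sketch of the truncation selection that Member.exploit
# describes, using plain loss values instead of Member objects: agents in the worst
# 20% by evaluation loss copy an agent sampled uniformly from the best 20%. The
# helper function and the loss values below are illustrative assumptions, not part
# of the pbt package.
import numpy as np

def truncation_select(losses, seed=0):
    rng = np.random.default_rng(seed)
    losses = np.asarray(losses, dtype=float)
    best, worst = np.percentile(losses, (20, 80))
    top = np.flatnonzero(losses < best)            # indices of the top performers
    new_losses = losses.copy()
    for i in np.flatnonzero(losses > worst):       # indices of the bottom performers
        if top.size:
            new_losses[i] = losses[rng.choice(top)]    # "copy" a top performer
    return new_losses

if __name__ == '__main__':
    print(truncation_select([0.9, 0.2, 0.5, 0.7, 1.3, 0.25, 0.6, 0.95, 0.4, 1.1]))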
| 34.555556
| 80
| 0.623151
|
19dbe065aba5f910940f9497d317b24c35de905f
| 1,588
|
py
|
Python
|
api/pokeme/commands.py
|
bahattincinic/pokeme
|
ec8b63842fbe6aa4512704f304af91cd16c97ea8
|
[
"MIT"
] | 1
|
2020-10-12T07:14:28.000Z
|
2020-10-12T07:14:28.000Z
|
api/pokeme/commands.py
|
bahattincinic/pokeme
|
ec8b63842fbe6aa4512704f304af91cd16c97ea8
|
[
"MIT"
] | null | null | null |
api/pokeme/commands.py
|
bahattincinic/pokeme
|
ec8b63842fbe6aa4512704f304af91cd16c97ea8
|
[
"MIT"
] | null | null | null |
import datetime
import schedule
import time
import pytz
from functools import partial
from apistar import Command
from apistar.backends.sqlalchemy_backend import (
create_tables, drop_tables, SQLAlchemyBackend
)
from .models import Note, User
from .utils import send_push_notification
from .settings import settings
def check_notification(session):
print("Check Notification Schedule...")
now = datetime.datetime.now(
pytz.timezone(settings['TIMEZONE'])
).strftime('%Y-%m-%d %H:%M:%S')
notes = session.query(Note).join(User, User.id == Note.user_id).filter(
        Note.is_notification_send.is_(False),
Note.reminder_date.isnot(None),
Note.reminder_date < now
)
for note in notes.all():
status = send_push_notification(
title=note.title,
text=f"Pokeme {note.title} notification",
device_token=note.device_token,
credential=settings['FIREBASE_TOKEN']
)
if status:
note.is_notification_send = True
session.commit()
print("%s note notification was sent" % note.id)
def check_schedule(backend: SQLAlchemyBackend):
session = backend.Session()
try:
schedule.every(1).minutes.do(partial(
check_notification, session=session
))
while True:
schedule.run_pending()
time.sleep(1)
finally:
session.close()
commands = [
Command('schedule', check_schedule),
Command('create_tables', create_tables),
Command('drop_tables', drop_tables)
]
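# A minimal, self-contained illustration of the boolean-column filter used in
# check_notification above, assuming SQLAlchemy 1.4+ with an in-memory SQLite
# database. A plain Python `column is False` comparison never reaches the database;
# `.is_(False)` (or `== False`) does. The Reminder model here is a made-up stand-in,
# not the Note model of this project.
from sqlalchemy import Boolean, Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Reminder(Base):
    __tablename__ = 'reminder'
    id = Column(Integer, primary_key=True)
    sent = Column(Boolean, default=False)

if __name__ == '__main__':
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([Reminder(sent=False), Reminder(sent=True)])
        session.commit()
        pending = session.query(Reminder).filter(Reminder.sent.is_(False)).all()
        print(len(pending))  # -> 1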
| 24.8125
| 75
| 0.653652
|
9df5a0d44f0918c075c982605d430047f4b9a6ca
| 25,881
|
py
|
Python
|
trackintel_modified.py
|
dlaumer/ipa2020
|
ca71bc5c38d4a08543b12715968b31eb067948e9
|
[
"MIT"
] | null | null | null |
trackintel_modified.py
|
dlaumer/ipa2020
|
ca71bc5c38d4a08543b12715968b31eb067948e9
|
[
"MIT"
] | null | null | null |
trackintel_modified.py
|
dlaumer/ipa2020
|
ca71bc5c38d4a08543b12715968b31eb067948e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Trackintel
This file is mostly taken from the trackintel package of the MIE Lab, except that
1 - a maximum time threshold is added when detecting staypoints in extract_staypoints_ipa
2 - the trailing sequence of position fixes is treated as a staypoint if it meets the minimum time threshold in extract_staypoints_ipa
"""
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import LineString
from shapely.geometry import Point
from sklearn.cluster import DBSCAN
from trackintel.geogr.distances import haversine_dist
def extract_staypoints_ipa(positionfixes, method='sliding',
dist_threshold=50, time_threshold=5 * 60, timemax_threshold=12 * 3600,
epsilon=100, dist_func=haversine_dist, eps=None, num_samples=None):
"""Extract staypoints from positionfixes.
This function modifies the positionfixes and adds staypoint_ids.
Parameters
----------
    num_samples : int
        The minimum number of samples in a neighbourhood for the 'dbscan' method
        (passed to DBSCAN as ``min_samples``).
    eps : float
        Currently unused; the 'dbscan' method uses ``epsilon`` instead.
positionfixes : GeoDataFrame
The positionfixes have to follow the standard definition for positionfixes DataFrames.
method : {'sliding' or 'dbscan'}
The following methods are available to extract staypoints from positionfixes:
'sliding' : Applies a sliding window over the data.
'dbscan' : Uses the DBSCAN algorithm to find clusters of staypoints.
dist_threshold : float
The distance threshold for the 'sliding' method, i.e., how far someone has to travel to
generate a new staypoint.
    time_threshold : float
        The time threshold for the 'sliding' method in seconds, i.e., how long someone has to
        stay within an area for it to be considered a staypoint.
    timemax_threshold : float
        The maximum duration of a single staypoint in seconds; longer stays are split
        into several consecutive staypoints.
epsilon : float
The epsilon for the 'dbscan' method.
dist_func : function
A function that expects (lon_1, lat_1, lon_2, lat_2) and computes a distance in meters.
Returns
-------
GeoDataFrame
A new GeoDataFrame containing points where a person spent some time.
Examples
--------
>>> psfs.as_positionfixes.extract_staypoints('sliding', dist_threshold=100)
References
----------
Zheng, Y. (2015). Trajectory data mining: an overview. ACM Transactions on Intelligent Systems
and Technology (TIST), 6(3), 29.
Li, Q., Zheng, Y., Xie, X., Chen, Y., Liu, W., & Ma, W. Y. (2008, November). Mining user
similarity based on location history. In Proceedings of the 16th ACM SIGSPATIAL international
conference on Advances in geographic information systems (p. 34). ACM.
"""
if 'id' not in positionfixes.columns:
positionfixes['id'] = positionfixes.index
ret_staypoints = pd.DataFrame(columns=['started_at', 'finished_at', 'geom', 'id'])
if method == 'sliding':
# Algorithm from Li et al. (2008). For details, please refer to the paper.
staypoint_id_counter = 0
positionfixes['staypoint_id'] = -1 # this marks all that are not part of a SP
for user_id_this in positionfixes['user_id'].unique():
positionfixes_user_this = positionfixes.loc[
positionfixes['user_id'] == user_id_this] # this is no copy
pfs = positionfixes_user_this.sort_values('tracked_at').to_dict('records')
num_pfs = len(pfs)
posfix_staypoint_matching = {}
i = 0
j = 0 # is zero because it gets incremented in the beginning
while i < num_pfs:
if j == num_pfs:
# We're at the end, this can happen if in the last "bin",
# the dist_threshold is never crossed anymore.
break
else:
j = i + 1
while j < num_pfs:
dist = haversine_dist(pfs[i]['geom'].x, pfs[i]['geom'].y,
pfs[j]['geom'].x, pfs[j]['geom'].y)
if (dist > dist_threshold):
delta_t = (pfs[j]['tracked_at'] - pfs[i]['tracked_at']).total_seconds()
                        # Compare against the minimum time threshold (and, below, the maximum)
if (delta_t > time_threshold):
if (delta_t > timemax_threshold):
hrdiff = []
hrsum = 0
for x in range(i,j):
hrdiff.append((pfs[x+1]['tracked_at']-pfs[x]['tracked_at']).total_seconds())
i0 = i
for mid in range(0,j-i0):
hrsum += hrdiff[mid]
if(hrsum > timemax_threshold or mid == j-i0-1):
staypoint = {}
staypoint['user_id'] = pfs[i]['user_id']
staypoint['geom'] = Point(np.mean([pfs[k]['geom'].x for k in range(i, i0+mid+1)]),
np.mean([pfs[k]['geom'].y for k in range(i, i0+mid+1)]))
if 'elevation' in pfs[i].keys():
staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, i0+mid+1)])
if 'velocity' in pfs[i].keys():
staypoint['velocity'] = np.mean([pfs[k]['velocity'] for k in range(i, i0+mid+1)])
staypoint['started_at'] = pfs[i]['tracked_at']
staypoint['finished_at'] = pfs[i0+mid+1]['tracked_at'] # TODO: should this not be j-1? because j is not part of the staypoint. DB: Changed
staypoint['id'] = staypoint_id_counter
# store matching
posfix_staypoint_matching[staypoint_id_counter] = [pfs[k]['id'] for k in range(i, i0+mid+1)]
staypoint_id_counter += 1
# add staypoint
ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)
i = i0+mid+1
hrsum = 0
else:
staypoint = {}
staypoint['user_id'] = pfs[i]['user_id']
staypoint['geom'] = Point(np.mean([pfs[k]['geom'].x for k in range(i, j)]),
np.mean([pfs[k]['geom'].y for k in range(i, j)]))
if 'elevation' in pfs[i].keys():
staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, j)])
if 'velocity' in pfs[i].keys():
staypoint['velocity'] = np.mean([pfs[k]['velocity'] for k in range(i, j)])
staypoint['started_at'] = pfs[i]['tracked_at']
staypoint['finished_at'] = pfs[j]['tracked_at'] # TODO: should this not be j-1? because j is not part of the staypoint. DB: Changed
staypoint['id'] = staypoint_id_counter
# store matching
posfix_staypoint_matching[staypoint_id_counter] = [pfs[k]['id'] for k in range(i, j)]
staypoint_id_counter += 1
# add staypoint
ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)
# TODO Discussion: Is this last point really a staypoint? As we don't know if the
# person "moves on" afterwards...
i = j
break
# If the last point meets the minimum time threshold, then it is added to the stay point
if (j == num_pfs - 1):
delta_t = (pfs[j]['tracked_at'] - pfs[i]['tracked_at']).total_seconds()
if (delta_t > time_threshold):
if (delta_t > timemax_threshold):
hrdiff = []
hrsum = 0
for x in range(i,j):
hrdiff.append((pfs[x+1]['tracked_at']-pfs[x]['tracked_at']).total_seconds())
i0 = i
for mid in range(0,j-i0):
hrsum += hrdiff[mid]
if(hrsum > timemax_threshold or mid == j-i0-1):
staypoint = {}
staypoint['user_id'] = pfs[i]['user_id']
staypoint['geom'] = Point(np.mean([pfs[k]['geom'].x for k in range(i, i0+mid+1)]),
np.mean([pfs[k]['geom'].y for k in range(i, i0+mid+1)]))
if 'elevation' in pfs[i].keys():
staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, i0+mid+1)])
if 'velocity' in pfs[i].keys():
staypoint['velocity'] = np.mean([pfs[k]['velocity'] for k in range(i, i0+mid+1)])
staypoint['started_at'] = pfs[i]['tracked_at']
staypoint['finished_at'] = pfs[i0+mid+1]['tracked_at'] # TODO: should this not be j-1? because j is not part of the staypoint. DB: Changed
staypoint['id'] = staypoint_id_counter
# store matching
posfix_staypoint_matching[staypoint_id_counter] = [pfs[k]['id'] for k in range(i, i0+mid+1)]
staypoint_id_counter += 1
# add staypoint
ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)
i = i0+mid+1
hrsum = 0
else:
staypoint = {}
staypoint['user_id'] = pfs[i]['user_id']
staypoint['geom'] = Point(np.mean([pfs[k]['geom'].x for k in range(i, j+1)]),
np.mean([pfs[k]['geom'].y for k in range(i, j+1)]))
if 'elevation' in pfs[i].keys():
staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, j+1)])
if 'velocity' in pfs[i].keys():
staypoint['velocity'] = np.mean([pfs[k]['velocity'] for k in range(i, j+1)])
staypoint['started_at'] = pfs[i]['tracked_at']
staypoint['finished_at'] = pfs[j]['tracked_at'] # TODO: should this not be j-1? because j is not part of the staypoint. DB: Changed.
staypoint['id'] = staypoint_id_counter
# store matching
posfix_staypoint_matching[staypoint_id_counter] = [pfs[k]['id'] for k in range(i, j+1)]
# posfix_staypoint_matching[staypoint_id_counter] = [
# j] # rather [k for k in range(i, j)]?
staypoint_id_counter += 1
ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)
j = j + 1
# add matching to original positionfixes (for every user)
for staypoints_id, posfix_idlist in posfix_staypoint_matching.items():
                # note that we use .loc because above we saved the id
                # of the positionfixes, not their absolute position
positionfixes.loc[posfix_idlist, 'staypoint_id'] = staypoints_id
elif method == 'dbscan':
# TODO: Make sure time information is included in the clustering!
# time information is in the column 'started at', however the user should be able to
        # adjust the distance metric, e.g. Chebyshev
db = DBSCAN(eps=epsilon, min_samples=num_samples)
for user_id_this in positionfixes['user_id'].unique():
user_positionfixes = positionfixes[positionfixes['user_id'] == user_id_this] # this is not a copy!
# TODO: enable transformations to temporary (metric) system
transform_crs = None
if transform_crs is not None:
pass
# get staypoint matching
coordinates = np.array([[g.x, g.y] for g in user_positionfixes['geom']])
labels = db.fit_predict(coordinates)
# add positionfixes - staypoint matching to original positionfixes
positionfixes.loc[user_positionfixes.index, 'staypoint_id'] = labels
# create staypoints as the center of the grouped positionfixes
grouped_df = positionfixes.groupby(['user_id', 'staypoint_id'])
for combined_id, group in grouped_df:
user_id, staypoint_id = combined_id
if int(staypoint_id) != -1:
staypoint = {}
staypoint['user_id'] = user_id
staypoint['id'] = staypoint_id
# point geometry of staypoint
staypoint['geom'] = Point(group.geometry.x.mean(),
group.geometry.y.mean())
ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)
ret_staypoints = gpd.GeoDataFrame(ret_staypoints, geometry='geom',
crs=positionfixes.crs)
ret_staypoints['id'] = ret_staypoints['id'].astype('int')
return ret_staypoints
def extract_triplegs_ipa(positionfixes, staypoints=None, *args, **kwargs):
"""Extract triplegs from positionfixes. A tripleg is (for now) defined as anything
that happens between two consecutive staypoints.
**Attention**: This function requires either a column ``staypoint_id`` on the
positionfixes or passing some staypoints that correspond to the positionfixes!
This means you usually should call ``extract_staypoints()`` first.
This function modifies the positionfixes and adds a ``tripleg_id``.
Parameters
----------
positionfixes : GeoDataFrame
The positionfixes have to follow the standard definition for positionfixes DataFrames.
staypoints : GeoDataFrame, optional
The staypoints (corresponding to the positionfixes). If this is not passed, the
positionfixes need staypoint_ids associated with them.
Returns
-------
GeoDataFrame
A new GeoDataFrame containing triplegs.
Examples
--------
>>> psfs.as_positionfixes.extract_triplegs(staypoints)
"""
# Check that data adheres to contract.
if staypoints is None and len(positionfixes['staypoint_id'].unique()) < 2:
raise ValueError("If staypoints is not defined, positionfixes must have more than 1 staypoint_id.")
# if staypoints is not None:
# raise NotImplementedError("Splitting up positionfixes by timestamp is not available yet. " + \
# "Use extract_staypoints and the thus generated staypoint_ids.")
ret_triplegs = pd.DataFrame(columns=['id', 'user_id', 'started_at', 'finished_at', 'geom'])
curr_tripleg_id = 0
# Do this for each user.
for user_id_this in positionfixes['user_id'].unique():
positionfixes_user_this = positionfixes.loc[
positionfixes['user_id'] == user_id_this] # this is no copy
pfs = positionfixes_user_this.sort_values('tracked_at')
generated_triplegs = []
# Case 1: Staypoints exist and are connected to positionfixes by user id
if staypoints is not None and "staypoint_id" in pfs:
stps = staypoints.loc[staypoints['user_id'] == user_id_this].sort_values('started_at')
stps = stps.to_dict('records')
for stp1, stp2 in zip(list(stps), list(stps)[1:]):
# Get all positionfixes that lie between these two staypoints.
# get the last posfix of the first staypoint
index_first_posfix_tl = pfs[pfs.staypoint_id == stp1['id']].index[-1]
position_first_posfix_tl = pfs.index.get_loc(index_first_posfix_tl)
# get first posfix of the second staypoint
index_last_posfix_tl = pfs[pfs.staypoint_id == stp2['id']].index[0]
position_last_posfix_tl = pfs.index.get_loc(index_last_posfix_tl)
pfs_tripleg = pfs.iloc[position_first_posfix_tl:position_last_posfix_tl + 1]
# include every positionfix that brings you closer to the center
# of the staypoint
posfix_before, started_at = propagate_tripleg(pfs, stp1, position_first_posfix_tl, direction=-1)
posfix_before = posfix_before[::-1]
# add geometry of staypoint and correct the direction
posfix_after, finished_at = propagate_tripleg(pfs, stp2, position_last_posfix_tl, direction=1)
coords = list(pfs_tripleg['geom'].apply(lambda r: (r.x, r.y)))
coords = posfix_before + coords + posfix_after
if len(coords) > 1:
generated_triplegs.append({
'id': curr_tripleg_id,
'user_id': user_id_this,
'started_at': started_at, # pfs_tripleg['tracked_at'].iloc[0],
'finished_at': finished_at, # pfs_tripleg['tracked_at'].iloc[-1],
'geom': LineString(coords),
'start_stp': stp1['id'],
'end_stp': stp2['id']
})
curr_tripleg_id += 1
# Case 2: Staypoints exist but there is no user_id given
# TODO Not so efficient, always matching on the time (as things are sorted anyways).
elif staypoints is not None:
stps = staypoints.loc[staypoints['user_id'] == user_id_this].sort_values('started_at')
stps = stps.to_dict('records')
for stp1, stp2 in zip(list(stps), list(stps)[1:]):
# Get all positionfixes that lie between these two staypoints.
pfs_tripleg = pfs[(stp1['finished_at'] <= pfs['tracked_at']) & \
(pfs['tracked_at'] <= stp2['started_at'])].sort_values('tracked_at')
coords = list(pfs_tripleg['geom'].apply(lambda r: (r.x, r.y)))
if len(coords) > 1:
generated_triplegs.append({
'id': curr_tripleg_id,
'user_id': user_id_this,
'started_at': pfs_tripleg['tracked_at'].iloc[0],
'finished_at': pfs_tripleg['tracked_at'].iloc[-1],
'geom': LineString(list(pfs_tripleg['geom'].apply(lambda r: (r.x, r.y))))
})
curr_tripleg_id += 1
# case 3: Only positionfixes with staypoint id for tripleg generation
else:
prev_pf = None
curr_tripleg = {
'id': curr_tripleg_id,
'user_id': user_id_this,
'started_at': pfs['tracked_at'].iloc[0],
'finished_at': None,
'coords': []
}
for idx, pf in pfs.iterrows():
if prev_pf is not None and prev_pf['staypoint_id'] == -1 and pf['staypoint_id'] != -1:
# This tripleg ends.
pfs.loc[idx, 'tripleg_id'] = curr_tripleg_id
curr_tripleg['finished_at'] = pf['tracked_at']
curr_tripleg['coords'].append((pf['geom'].x, pf['geom'].y))
elif (prev_pf is not None and prev_pf['staypoint_id'] != -1 and pf['staypoint_id'] == -1):
# A new tripleg starts (due to a staypoint_id switch from -1 to x).
if len(curr_tripleg['coords']) > 1:
curr_tripleg['geom'] = LineString(curr_tripleg['coords'])
del curr_tripleg['coords']
generated_triplegs.append(curr_tripleg)
curr_tripleg_id += 1
curr_tripleg = {'id': curr_tripleg_id, 'user_id': user_id_this, 'started_at': None,
'finished_at': None, 'coords': []}
prev_pf['tripleg_id'] = curr_tripleg_id
pfs.loc[idx, 'tripleg_id'] = curr_tripleg_id
curr_tripleg['started_at'] = pf['tracked_at']
curr_tripleg['coords'].append((pf['geom'].x, pf['geom'].y))
elif prev_pf is not None and prev_pf['staypoint_id'] != -1 and \
pf['staypoint_id'] != -1 and prev_pf['staypoint_id'] != pf['staypoint_id']:
# A new tripleg starts (due to a staypoint_id switch from x to y).
pfs.loc[idx, 'tripleg_id'] = curr_tripleg_id
curr_tripleg['finished_at'] = pf['tracked_at']
curr_tripleg['coords'].append((pf['geom'].x, pf['geom'].y))
if len(curr_tripleg['coords']) > 1:
curr_tripleg['geom'] = LineString(curr_tripleg['coords'])
del curr_tripleg['coords']
generated_triplegs.append(curr_tripleg)
curr_tripleg_id += 1
curr_tripleg = {
'id': curr_tripleg_id,
'user_id': user_id_this,
'started_at': None,
'finished_at': None,
'coords': []
}
prev_pf['tripleg_id'] = curr_tripleg_id
pfs.loc[idx, 'tripleg_id'] = curr_tripleg_id
curr_tripleg['started_at'] = pf['tracked_at']
curr_tripleg['coords'].append((pf['geom'].x, pf['geom'].y))
elif prev_pf is not None and prev_pf['staypoint_id'] != -1 and \
prev_pf['staypoint_id'] == pf['staypoint_id']:
# This is still "at the same staypoint". Do nothing.
pass
else:
pfs.loc[idx, 'tripleg_id'] = curr_tripleg_id
curr_tripleg['coords'].append((pf['geom'].x, pf['geom'].y))
prev_pf = pf
ret_triplegs = ret_triplegs.append(generated_triplegs)
ret_triplegs = gpd.GeoDataFrame(ret_triplegs, geometry='geom', crs=positionfixes.crs)
ret_triplegs['id'] = ret_triplegs['id'].astype('int')
return ret_triplegs
def propagate_tripleg(pfs, stp, position_edge_posfix_tl, direction=1):
# propagate backwards at start
posfix_to_add = []
i = direction
if (position_edge_posfix_tl + i) >= len(pfs) or (position_edge_posfix_tl + i) < 0:
return posfix_to_add, pfs.iloc[position_edge_posfix_tl].tracked_at
geom_stp = stp['geom']
geom_edge_posfix_tl = pfs.iloc[position_edge_posfix_tl].geom
geom_candidate_posfix = pfs.iloc[position_edge_posfix_tl + i].geom
dist_edge_psf_stp = geom_stp.distance(geom_edge_posfix_tl)
    # the new posfix must be closer to the center of the staypoint than the current one
cond1 = (geom_stp.distance(geom_candidate_posfix) < dist_edge_psf_stp)
    # the new posfix must also lie closer to the current edge posfix than the staypoint center does
cond2 = (geom_edge_posfix_tl.distance(geom_candidate_posfix) < dist_edge_psf_stp)
closer = cond1 and cond2
while (closer):
        if (position_edge_posfix_tl + i + direction) >= len(pfs) or (position_edge_posfix_tl + i + direction) < 0:
break
# insert new posfix
posfix_to_add.append((geom_candidate_posfix.x, geom_candidate_posfix.y))
# update variables
geom_edge_posfix_tl = pfs.iloc[position_edge_posfix_tl + i,:].geom
i = i + direction
geom_candidate_posfix = pfs.iloc[position_edge_posfix_tl + i,:].geom
# update closer
dist_edge_psf_stp = geom_stp.distance(geom_edge_posfix_tl)
cond1 = (geom_stp.distance(geom_candidate_posfix) < dist_edge_psf_stp)
cond2 = (geom_edge_posfix_tl.distance(geom_candidate_posfix) < dist_edge_psf_stp)
closer = cond1 and cond2
tracked_at = pfs.iloc[position_edge_posfix_tl + i].tracked_at
# posfix_to_add.append((geom_stp.x, geom_stp.y))
return posfix_to_add, tracked_at
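# A self-contained toy sketch of the sliding-window rule from Li et al. (2008) that
# extract_staypoints_ipa implements: advance j until the distance from the anchor
# fix i exceeds dist_threshold, and keep the span i..j-1 as a staypoint if it lasted
# at least time_threshold seconds. The coordinates and timestamps are invented, the
# haversine helper is local, and the maximum-duration splitting and end-of-track
# handling of the modified function above are deliberately left out.
import math

def haversine_m(lon1, lat1, lon2, lat2):
    r = 6371000.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp, dl = math.radians(lat2 - lat1), math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

def sliding_window_staypoints(fixes, dist_threshold=50, time_threshold=300):
    # fixes: list of (t_seconds, lon, lat), sorted by time
    staypoints, i = [], 0
    while i < len(fixes) - 1:
        j = i + 1
        while j < len(fixes):
            if haversine_m(fixes[i][1], fixes[i][2], fixes[j][1], fixes[j][2]) > dist_threshold:
                if fixes[j][0] - fixes[i][0] > time_threshold:
                    lons = [f[1] for f in fixes[i:j]]
                    lats = [f[2] for f in fixes[i:j]]
                    staypoints.append((fixes[i][0], fixes[j][0],
                                       sum(lons) / len(lons), sum(lats) / len(lats)))
                i = j
                break
            j += 1
        else:
            break   # ran out of fixes without leaving the dist_threshold radius
    return staypoints

if __name__ == '__main__':
    toy_fixes = ([(t * 60, 8.5417, 47.3769) for t in range(10)]
                 + [(600 + t * 60, 8.5500, 47.3800) for t in range(3)])
    print(sliding_window_staypoints(toy_fixes))   # one staypoint covering the first 10 fixes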
| 53.583851
| 198
| 0.520575
|
abd466032857caa3c62309be4570b75b23f3e595
| 1,829
|
py
|
Python
|
test/data/statshelp.py
|
jackscott/python-statlib
|
168d5b9c879cf64ce93862aadff4c9e0253f007c
|
[
"MIT"
] | null | null | null |
test/data/statshelp.py
|
jackscott/python-statlib
|
168d5b9c879cf64ce93862aadff4c9e0253f007c
|
[
"MIT"
] | null | null | null |
test/data/statshelp.py
|
jackscott/python-statlib
|
168d5b9c879cf64ce93862aadff4c9e0253f007c
|
[
"MIT"
] | 1
|
2019-12-19T14:31:52.000Z
|
2019-12-19T14:31:52.000Z
|
from statlib.stats import *
from statlib import pstat
def dopaired(x,y):
"""\nRuns all paired stats that are possible to run on the data provided.
Assumes 2 columns to the input data.
Format: dopaired(x,y)
Returns: appropriate statistics\n"""
t,p = ttest_ind(x,y)
print '\nAssuming 2 independent samples ...'
print 'Independent samples t-test: ', round(t,3),round(p,3)
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
print '\nAssuming 2 independent samples, NONPARAMETRIC, n>20 ...'
print 'Rank Sums test: ', round(z,3),round(p,3)
else:
u,p = mannwhitneyu(x,y)
print '\nAssuming 2 independent samples, NONPARAMETRIC, ns<20 ...'
print 'Mann-Whitney U-test: ', round(u,3),round(p,3)
if len(pstat.unique(x))==2:
r,p = pointbiserialr(x,y)
print '\nIf x contains a dichotomous variable ...'
print 'Point Biserial r: ',round(r,3),round(p,3)
if len(x) == len(y):
t, p = ttest_rel(x,y)
print '\nAssuming 2 related samples ...'
print 'Related samples t-test: ', round(t,3),round(p,3)
t, p = wilcoxont(x,y)
print '\nAssuming 2 related samples, NONPARAMETRIC ...'
print 'Wilcoxon T-test: ', round(t,3),round(p,3)
m,b,r,p,see = linregress(x,y)
print '\nLinear regression for continuous variables ...'
print 'Slope,intercept,r,prob,stderr estimate: ',round(m,3),round(b,3),round(r,3),round(p,3),round(see,3)
r,p = pearsonr(x,y)
print '\nCorrelation for continuous variables ...'
print "Pearson's r: ",round(r,3),round(p,3)
r,p = spearmanr(x,y)
print '\nCorrelation for ranked variables ...'
print "Spearman's r: ", round(r,3), round(p,3)
| 36.58
| 114
| 0.581739
|
03b34cf4e36df6fa5c2fba716aff62516f5928a2
| 2,618
|
py
|
Python
|
Tools/LyTestTools/tests/unit/test_launcher_mac.py
|
BreakerOfThings/o3de
|
f4c59f868c726470ec910623facd836047d059c3
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-03-28T08:06:58.000Z
|
2022-03-28T08:06:58.000Z
|
Tools/LyTestTools/tests/unit/test_launcher_mac.py
|
BreakerOfThings/o3de
|
f4c59f868c726470ec910623facd836047d059c3
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
Tools/LyTestTools/tests/unit/test_launcher_mac.py
|
BreakerOfThings/o3de
|
f4c59f868c726470ec910623facd836047d059c3
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Unit Tests for mac launcher-wrappers: all are sanity code-path tests, since no interprocess actions should be taken
"""
import os
import pytest
import unittest.mock as mock
import ly_test_tools.launchers
pytestmark = pytest.mark.SUITE_smoke
class TestMacLauncher(object):
def test_Construct_TestDoubles_MacLauncherCreated(self):
under_test = ly_test_tools.launchers.MacLauncher(mock.MagicMock(), ["some_args"])
assert isinstance(under_test, ly_test_tools.launchers.Launcher)
assert isinstance(under_test, ly_test_tools.launchers.MacLauncher)
def test_BinaryPath_DummyPath_AddPathToApp(self):
dummy_path = "dummy_workspace_path"
dummy_project = "dummy_project"
mock_workspace = mock.MagicMock()
mock_workspace.paths.build_directory.return_value = dummy_path
mock_workspace.project = dummy_project
launcher = ly_test_tools.launchers.MacLauncher(mock_workspace, ["some_args"])
under_test = launcher.binary_path()
expected = os.path.join(f'{dummy_path}',
f"{dummy_project}.GameLauncher.app",
"Contents",
"MacOS",
f"{dummy_project}.GameLauncher")
assert under_test == expected
@mock.patch('ly_test_tools.launchers.MacLauncher.binary_path', mock.MagicMock)
@mock.patch('subprocess.Popen')
def test_Launch_DummyArgs_ArgsPassedToPopen(self, mock_subprocess):
dummy_args = ["some_args"]
launcher = ly_test_tools.launchers.MacLauncher(mock.MagicMock(), dummy_args)
launcher.launch()
mock_subprocess.assert_called_once()
name, args, kwargs = mock_subprocess.mock_calls[0]
unpacked_args = args[0] # args is a list inside a tuple
assert len(dummy_args) > 0, "accidentally removed dummy_args"
for expected_arg in dummy_args:
assert expected_arg in unpacked_args
@mock.patch('ly_test_tools.launchers.MacLauncher.is_alive')
def test_Kill_MockAliveFalse_SilentSuccess(self, mock_alive):
mock_alive.return_value = False
mock_proc = mock.MagicMock()
launcher = ly_test_tools.launchers.MacLauncher(mock.MagicMock(), ["dummy"])
launcher._proc = mock_proc
launcher.stop()
mock_proc.kill.assert_called_once()
mock_alive.assert_called()
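# A self-contained sketch of the mock pattern these tests rely on: patch
# subprocess.Popen, exercise code that launches a process, then inspect the argument
# list the mock received. launch_game below is a made-up helper, not part of
# ly_test_tools.
import subprocess
import unittest.mock as mock

def launch_game(binary, args):
    return subprocess.Popen([binary] + list(args))

def test_launch_passes_args():
    with mock.patch('subprocess.Popen') as mock_popen:
        launch_game('dummy.GameLauncher', ['--console-command-file=run.cfg'])
        mock_popen.assert_called_once()
        (argv,), _kwargs = mock_popen.call_args
        assert argv[0] == 'dummy.GameLauncher'
        assert '--console-command-file=run.cfg' in argv

if __name__ == '__main__':
    test_launch_passes_args()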
| 37.942029
| 115
| 0.695569
|
aa00cf234d9ee69b9ec95ab6020b59348f174aa4
| 94,602
|
py
|
Python
|
pandas/tests/frame/test_constructors.py
|
big-o/pandas
|
a2178065fd5eecc2562f8c3725ac09573b687022
|
[
"BSD-3-Clause"
] | 2
|
2019-11-13T18:20:29.000Z
|
2020-04-18T02:58:39.000Z
|
pandas/tests/frame/test_constructors.py
|
big-o/pandas
|
a2178065fd5eecc2562f8c3725ac09573b687022
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/frame/test_constructors.py
|
big-o/pandas
|
a2178065fd5eecc2562f8c3725ac09573b687022
|
[
"BSD-3-Clause"
] | 1
|
2020-01-31T14:26:04.000Z
|
2020-01-31T14:26:04.000Z
|
from collections import OrderedDict, abc
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pytest
from pandas.compat import PY36, is_platform_little_endian
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
RangeIndex,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameConstructors:
@pytest.mark.parametrize(
"constructor",
[
lambda: DataFrame(),
lambda: DataFrame(None),
lambda: DataFrame({}),
lambda: DataFrame(()),
lambda: DataFrame([]),
lambda: DataFrame((_ for _ in [])),
lambda: DataFrame(range(0)),
lambda: DataFrame(data=None),
lambda: DataFrame(data={}),
lambda: DataFrame(data=()),
lambda: DataFrame(data=[]),
lambda: DataFrame(data=(_ for _ in [])),
lambda: DataFrame(data=range(0)),
],
)
def test_empty_constructor(self, constructor):
expected = DataFrame()
result = constructor()
assert len(result.index) == 0
assert len(result.columns) == 0
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"emptylike,expected_index,expected_columns",
[
([[]], RangeIndex(1), RangeIndex(0)),
([[], []], RangeIndex(2), RangeIndex(0)),
([(_ for _ in [])], RangeIndex(1), RangeIndex(0)),
],
)
def test_emptylike_constructor(self, emptylike, expected_index, expected_columns):
expected = DataFrame(index=expected_index, columns=expected_columns)
result = DataFrame(emptylike)
tm.assert_frame_equal(result, expected)
def test_constructor_mixed(self, float_string_frame):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert float_string_frame["foo"].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64)
assert foo["a"].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df["foo"] = np.ones((4, 2)).tolist()
# this is not ok
msg = "Wrong number of items passed 2, placement implies 1"
with pytest.raises(ValueError, match=msg):
df["test"] = np.ones((4, 2))
# this is ok
df["foo2"] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({"col1": [1.0], "col2": [2.0], "col3": [3.0]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df["col1"] = 200.0
assert orig_df["col1"][0] == 1.0
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, "2"], [None, "a"]], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == "2"
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame()])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=np.arange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == "int":
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d) for d in dtypes]
elif typ == "float":
dtypes = MIXED_FLOAT_DTYPES
arrays = [
np.array(np.random.randint(10, size=10), dtype=d) for d in dtypes
]
for d, a in zip(dtypes, arrays):
assert a.dtype == d
if ad is None:
ad = dict()
ad.update({d: a for d, a in zip(dtypes, arrays)})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert df.dtypes[d] == d
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df("float")
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df("float", dict(A=1, B="foo", C="bar"))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df("int")
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({"a": a, "b": b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({"A": ["x", None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({"A": ["x", np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self, float_frame):
rec = float_frame.to_records(index=False)
rec.dtype.names = list(rec.dtype.names)[::-1]
index = float_frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=["C", "B"])
expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64)
result = DataFrame({"a": values})
assert result["a"].dtype == np.uint64
# see gh-2355
data_scores = [
(6311132704823138710, 273),
(2685045978526272070, 23),
(8921811264899370420, 45),
(17019687244989530680, 270),
(9930107427299601010, 273),
]
dtype = [("uid", "u8"), ("score", "u8")]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls["uid"].dtype == np.uint64
@pytest.mark.parametrize(
"values",
[
np.array([2 ** 64], dtype=object),
np.array([2 ** 65]),
[2 ** 64 + 1],
np.array([-2 ** 63 - 4], dtype=object),
np.array([-2 ** 64 - 1]),
[-2 ** 65 - 2],
],
)
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = list(range(nitems))
random.shuffle(nums)
expected = ["A{i:d}".format(i=i) for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
datetime_series = tm.makeTimeSeries(nper=30)
# test expects index shifted by 5
datetime_series_short = tm.makeTimeSeries(nper=30)[5:]
frame = DataFrame({"col1": datetime_series, "col2": datetime_series_short})
# col2 is padded with NaN
assert len(datetime_series) == 30
assert len(datetime_series_short) == 25
tm.assert_series_equal(frame["col1"], datetime_series.rename("col1"))
exp = pd.Series(
np.concatenate([[np.nan] * 5, datetime_series_short.values]),
index=datetime_series.index,
name="col2",
)
tm.assert_series_equal(exp, frame["col2"])
frame = DataFrame(
{"col1": datetime_series, "col2": datetime_series_short},
columns=["col2", "col3", "col4"],
)
assert len(frame) == len(datetime_series_short)
assert "col1" not in frame
assert isna(frame["col3"]).all()
# Corner cases
assert len(DataFrame()) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})
# Length-one dict micro-optimization
frame = DataFrame({"A": {"1": 1, "2": 2}})
tm.assert_index_equal(frame.index, pd.Index(["1", "2"]))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty dict with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({"A": [], "B": []}, columns=["A", "B"])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
assert frame_none._get_value(0, "a") is None
assert frame_none_list._get_value(0, "a") is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame({"a": 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({"a": 0.7}, columns=["a"])
@pytest.mark.parametrize("scalar", [2, np.nan, None, "D"])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({"a": scalar}, columns=["b"])
expected = DataFrame(columns=["b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ["a", value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values("a", axis=1)
expected = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values("a", axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float("nan")])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([("a", value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values((11, 21)).sort_values(("a", value), axis=1)
expected = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols
)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(("a", value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason="Insertion order for Python>=3.6")
def test_constructor_dict_order_insertion(self):
datetime_series = tm.makeTimeSeries(nper=30)
datetime_series_short = tm.makeTimeSeries(nper=25)
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {"b": datetime_series_short, "a": datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list("ba"))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason="order by value for Python<3.6")
def test_constructor_dict_order_by_values(self):
datetime_series = tm.makeTimeSeries(nper=30)
datetime_series_short = tm.makeTimeSeries(nper=25)
# GH19018
# initialization ordering: by value if python<3.6
d = {"b": datetime_series_short, "a": datetime_series}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list("ab"))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_2d_index(self):
# GH 25416
# handling of 2d index in construction
df = pd.DataFrame([[1]], columns=[[1]], index=[1, 2])
expected = pd.DataFrame(
[1, 1],
index=pd.Int64Index([1, 2], dtype="int64"),
columns=pd.MultiIndex(levels=[[1]], codes=[[0]]),
)
tm.assert_frame_equal(df, expected)
df = pd.DataFrame([[1]], columns=[[1]], index=[[1, 2]])
expected = pd.DataFrame(
[1, 1],
index=pd.MultiIndex(levels=[[1, 2]], codes=[[0, 1]]),
columns=pd.MultiIndex(levels=[[1]], codes=[[0]]),
)
tm.assert_frame_equal(df, expected)
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list("abc"))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(
np.arange(12).reshape((4, 3)),
columns=["foo", "bar", "baz"],
index=pd.date_range("2000-01-01", periods=3),
)
arr = np.array([[4, 5, 6]])
msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)"
with pytest.raises(ValueError, match=msg):
DataFrame(index=[0], columns=range(0, 4), data=arr)
arr = np.array([4, 5, 6])
msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)"
with pytest.raises(ValueError, match=msg):
DataFrame(index=[0], columns=range(0, 4), data=arr)
# higher dim raise exception
with pytest.raises(ValueError, match="Must pass 2-d input"):
DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1])
# wrong size axis labels
msg = "Shape of passed values " r"is \(2, 3\), indices " r"imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=["A", "B", "C"], index=[1])
msg = "Shape of passed values " r"is \(2, 3\), indices " r"imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=["A", "B"], index=[1, 2])
# gh-26429
msg = "2 columns passed, passed data had 10 columns"
with pytest.raises(ValueError, match=msg):
DataFrame((range(10), range(10, 20)), columns=("ones", "twos"))
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame({"a": False, "b": True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self, float_frame):
# Test for passing dict subclass to constructor
data = {
"col1": tm.TestSubDict((x, 10.0 * x) for x in range(10)),
"col2": tm.TestSubDict((x, 20.0 * x) for x in range(10)),
}
df = DataFrame(data)
refdf = DataFrame({col: dict(val.items()) for col, val in data.items()})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(data.items())
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
float_frame["B"][:10] = np.nan
for k, v in float_frame.items():
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
expected = frame.reindex(index=float_frame.index)
tm.assert_frame_equal(float_frame, expected)
def test_constructor_dict_block(self):
expected = np.array([[4.0, 3.0, 2.0, 1.0]])
df = DataFrame(
{"d": [4.0], "c": [3.0], "b": [2.0], "a": [1.0]},
columns=["d", "c", "b", "a"],
)
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame["B"].dtype == np.float64
assert frame["A"].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame["B"].dtype == np.object_
assert frame["A"].dtype == np.float64
# can't cast to float
test_data = {
"A": dict(zip(range(20), tm.makeStringIndex(20))),
"B": dict(zip(range(15), np.random.randn(15))),
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame["A"].dtype == np.object_
assert frame["B"].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {"Col1": {"Row1": "A String", "Row2": np.nan}}
df = DataFrame(d)
assert isinstance(df["Col1"]["Row2"], float)
dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {"a": (1, 2, 3), "b": (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in data.items()})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_of_ranges(self):
# GH 26356
data = {"a": range(3), "b": range(3, 6)}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_iterators(self):
# GH 26349
data = {"a": iter(range(3)), "b": reversed(range(3))}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_of_generators(self):
# GH 26349
data = {"a": (i for i in (range(3))), "b": (i for i in reversed(range(3)))}
result = DataFrame(data)
expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]})
tm.assert_frame_equal(result, expected)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(
result,
expected,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True,
)
d = {
("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2},
("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4},
("b", "c"): {("i", "i"): 7, ("i", "j"): 8, ("j", "i"): 9},
}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])
).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111}
_d.insert(0, ("z", d["z"]))
expected = DataFrame(
[x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)
).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
def create_data(constructor):
return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = DataFrame(
[
{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6},
],
index=[Timestamp(dt) for dt in dates_as_str],
)
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i} for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, "D"))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, "D"))
expected = DataFrame(
[
{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6},
],
index=[Timedelta(td, "D") for td in td_as_int],
)
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M")
b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D")
df = pd.DataFrame({"a": a, "b": b})
assert df["a"].dtype == a.dtype
assert df["b"].dtype == b.dtype
# list of periods
df = pd.DataFrame(
{"a": a.astype(object).tolist(), "b": b.astype(object).tolist()}
)
assert df["a"].dtype == a.dtype
assert df["b"].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df._get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df._get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=["A"], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=["A", "B", "C"], index=[1])
msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=["A", "B"], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match="Must pass 2-d input"):
DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))
tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))
frame = DataFrame(mat, columns=["A", "B", "C"])
tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(["foo", "bar"], index=[0, 1], columns=["A"])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert 1.0 == frame["A"][1]
assert 2.0 == frame["C"][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
assert 1 == frame["A"][1]
assert 2 == frame["C"][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype="M8[ns]")
# 2-D input
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
assert 1 == frame["A"].view("i8")[1]
assert 2 == frame["C"].view("i8")[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2])
assert frame["A"][1] is True
assert frame["C"][2] is False
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()
result = pd.DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = pd.DataFrame(
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
# Check case where mask is hard but no data are masked
mat_hard = ma.ones((2, 2), dtype=float).harden_mask()
result = pd.DataFrame(mat_hard, columns=["A", "B"], index=[1, 2])
expected = pd.DataFrame(
{"A": [1.0, 1.0], "B": [1.0, 1.0]},
columns=["A", "B"],
index=[1, 2],
dtype=float,
)
tm.assert_frame_equal(result, expected)
def test_constructor_maskedrecarray_dtype(self):
# Ensure constructor honors dtype
data = np.ma.array(
np.ma.zeros(5, dtype=[("date", "<f8"), ("price", "<f8")]), mask=[False] * 5
)
data = data.view(mrecords.mrecarray)
result = pd.DataFrame(data, dtype=int)
expected = pd.DataFrame(np.zeros((5, 2), dtype=int), columns=["date", "price"])
tm.assert_frame_equal(result, expected)
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(
tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
)
arrays = [
("float", np.array([1.5, 2.0])),
("int", np.array([1, 2])),
("str", np.array(["abc", "def"])),
]
for name, arr in arrays[:]:
arrays.append(
("masked1_" + name, np.ma.masked_array(arr, mask=[False, True]))
)
arrays.append(("masked_all", np.ma.masked_all((2,))))
arrays.append(("masked_none", np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, "filled") else v) for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize(
"data, index, columns, dtype, expected",
[
(None, list(range(10)), ["a", "b"], object, np.object_),
(None, None, ["a", "b"], "int64", np.dtype("int64")),
(None, list(range(10)), ["a", "b"], int, np.dtype("float64")),
({}, None, ["foo", "bar"], None, np.object_),
({"b": 1}, list(range(10)), list("abc"), int, np.dtype("float64")),
],
)
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {"int": 1, "bool": True, "float": 3.0, "complex": 4j, "object": "foo"}
df = DataFrame(data, index=np.arange(10))
assert df["int"].dtype == np.int64
assert df["bool"].dtype == np.bool_
assert df["float"].dtype == np.float64
assert df["complex"].dtype == np.complex128
assert df["object"].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({"a": np.random.randn(10), "b": True})
exp = DataFrame({"a": df["a"].values, "b": [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match="must pass an index"):
DataFrame({"a": False, "b": True})
def test_constructor_DataFrame(self, float_frame):
df = DataFrame(float_frame)
tm.assert_frame_equal(df, float_frame)
df_casted = DataFrame(float_frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self, float_frame):
# used to be in test_matrix.py
arr = np.random.randn(10)
dm = DataFrame(arr, columns=["A"], index=np.arange(10))
assert dm.values.ndim == 2
arr = np.random.randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=["A", "B"], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=["A", "B"])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(["foo", "bar"], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match="cast"):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(float_frame._series))
tm.assert_frame_equal(dm, float_frame)
# int cast
dm = DataFrame(
{"A": np.ones(10, dtype=int), "B": np.ones(10, dtype=np.float64)},
index=np.arange(10),
)
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=["A", "B"])
expected = DataFrame({}, columns=["A", "B"])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=["A", "B"])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, "a"], [2, "b"]], columns=["num", "str"])
assert is_integer_dtype(df["num"])
assert df["str"].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.abc.Sequence-like
class DummyContainer(abc.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, "a"], [2, "b"]], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({"A": array.array("i", range(10))})
expected = DataFrame({"A": list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array("i", range(10)), array.array("i", range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_range(self):
# GH26342
result = DataFrame(range(10))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_ranges(self):
result = DataFrame([range(10), range(10)])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
result = DataFrame(iter(range(10)))
expected = DataFrame(list(range(10)))
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_iterators(self):
result = DataFrame([iter(range(10)), iter(range(10))])
expected = DataFrame([list(range(10)), list(range(10))])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, "a"] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: "a"})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
result = DataFrame(data)
expected = DataFrame.from_dict(
dict(zip(range(len(data)), data)), orient="index"
)
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=["b", "a"])
data = OrderedDict()
data["b"] = [2]
data["a"] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data["b"] = 2
data["a"] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one["b"] = 2
row_one["a"] = 1
row_two = OrderedDict()
row_two["a"] = 1
row_two["b"] = 2
row_three = {"b": 2, "a": 1}
expected = DataFrame([[2, 1], [2, 1]], columns=["b", "a"])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=["b", "a"])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(["x", "y"], data))
idx = Index(["a", "b", "c"])
# all named
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx, name="y"),
]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx),
]
result = DataFrame(data2)
sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# none named
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series()])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(["a", "b", "c"])
data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame(
{"b": [0, 1, 2], "a": [0, 1, 2], "c": [0, 1, 2]},
columns=["b", "a", "c"],
index=["0", "1", "2"],
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {"a": 1.5, "b": 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {"A": np.random.randn(10), "B": np.random.randn(8)}
with pytest.raises(ValueError, match="arrays must all be same length"):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(range(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self, float_frame):
df = DataFrame(float_frame["A"], index=float_frame.index, columns=["A"])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data["A"] = {"foo": 1, "bar": 2, "baz": 3}
data["B"] = Series([4, 3, 2, 1], index=["bar", "qux", "baz", "foo"])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match="ambiguous ordering"):
DataFrame({"A": ["a", "b"], "B": {"a": "a", "b": "b"}})
# this is OK though
result = DataFrame({"A": ["a", "b"], "B": Series(["a", "b"], index=["a", "b"])})
expected = DataFrame({"A": ["a", "b"], "B": ["a", "b"]}, index=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_constructor_mixed_type_rows(self):
# Issue 25075
data = [[1, 2], (3, 4)]
result = DataFrame(data)
expected = DataFrame([[1, 2], [3, 4]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"tuples,lists",
[
((), []),
((()), []),
(((), ()), [(), ()]),
(((), ()), [[], []]),
(([], []), [[], []]),
(([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]),
],
)
def test_constructor_tuple(self, tuples, lists):
# GH 25691
result = DataFrame(tuples)
expected = DataFrame(lists)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_tuples(self):
result = DataFrame({"A": [(1, 2), (3, 4)]})
expected = DataFrame({"A": Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list("ab"))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({"y": [1, 2], "z": [3, 4]})
result = DataFrame(tuples, columns=["y", "z"])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_dict_order(self):
# GH10056
data = [
{"First": 1, "Second": 4, "Third": 7, "Fourth": 10},
{"Second": 5, "First": 2, "Fourth": 11, "Third": 8},
{"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13},
]
expected = DataFrame(
{
"First": [1, 2, 3],
"Second": [4, 5, 6],
"Third": [7, 8, 9],
"Fourth": [10, 11, 12],
"YYY": [None, None, 14],
"XXX": [None, None, 13],
}
)
result = DataFrame(data)
tm.assert_frame_equal(result, expected, check_like=not PY36)
def test_constructor_orient(self, float_string_frame):
data_dict = float_string_frame.T._series
recons = DataFrame.from_dict(data_dict, orient="index")
expected = float_string_frame.reindex(index=recons.index)
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
rs = DataFrame.from_dict(a, orient="index")
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_constructor_from_ordered_dict(self):
# GH8425
a = OrderedDict(
[
("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
]
)
expected = DataFrame.from_dict(a, orient="columns").T
result = DataFrame.from_dict(a, orient="index")
tm.assert_frame_equal(result, expected)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(
OrderedDict([("A", [1, 2]), ("B", [4, 5])]),
orient="index",
columns=["one", "two"],
)
expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(
dict([("A", [1, 2]), ("B", [4, 5])]),
orient="columns",
columns=["one", "two"],
)
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(
dict([("A", [1, 2]), ("B", [4, 5])]), columns=["one", "two"]
)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=["a", "b", "c"], name="x")
df = DataFrame(a)
assert df.columns[0] == "x"
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name="x")
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame(s, columns=[1, 2])
# #2234
a = Series([], name="x")
df = DataFrame(a)
assert df.columns[0] == "x"
# series with name and w/o
s1 = Series(arr, name="x")
df = DataFrame([s1, arr]).T
expected = DataFrame({"x": s1, "Unnamed 0": arr}, columns=["x", "Unnamed 0"])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]), s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]), s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=["a", "b", "c"], name="x")
# no name
s2 = Series([1, 2, 3], index=["a", "b", "c"])
other_index = Index(["a", "b"])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == "x"
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self, float_frame):
index = list(float_frame.index[:5])
columns = list(float_frame.columns[:3])
result = DataFrame(float_frame._data, index=index, columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self, float_frame, float_string_frame):
items = [(c, float_frame[c]) for c in float_frame.columns]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, float_frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items, columns=["C", "B", "A"])
tm.assert_frame_equal(recons, float_frame.loc[:, ["C", "B", "A"]])
# orient='index'
row_items = [
(idx, float_string_frame.xs(idx)) for idx in float_string_frame.index
]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(
row_items, columns=float_string_frame.columns, orient="index"
)
tm.assert_frame_equal(recons, float_string_frame)
assert recons["A"].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items(row_items, orient="index")
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[("bar", "baz")] * len(float_string_frame)
)
float_string_frame["foo"] = arr
row_items = [
(idx, list(float_string_frame.xs(idx))) for idx in float_string_frame.index
]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(
row_items, columns=float_string_frame.columns, orient="index"
)
tm.assert_frame_equal(recons, float_string_frame)
assert isinstance(recons["foo"][0], tuple)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = DataFrame.from_items(
[("A", [1, 2, 3]), ("B", [4, 5, 6])],
orient="index",
columns=["one", "two", "three"],
)
xp = DataFrame(
[[1, 2, 3], [4, 5, 6]], index=["A", "B"], columns=["one", "two", "three"]
)
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (
r"The value in each \(key, value\) "
"pair must be an array, Series, or dict"
)
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items([("A", 1), ("B", 4)])
msg = (
r"The value in each \(key, value\) "
"pair must be an array, Series, or dict"
)
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items(
[("A", 1), ("B", 2)], columns=["col1"], orient="index"
)
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items([("A", [1, 2, 3]), ("B", [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items(
[("A", [1, 2, 3]), ("B", [4, 5, 6])],
columns=["col1", "col2", "col3"],
orient="index",
)
def test_constructor_mix_series_nonseries(self, float_frame):
df = DataFrame(
{"A": float_frame["A"], "B": list(float_frame["B"])}, columns=["A", "B"]
)
tm.assert_frame_equal(df, float_frame.loc[:, ["A", "B"]])
msg = "does not match index length"
with pytest.raises(ValueError, match=msg):
DataFrame({"A": float_frame["A"], "B": list(float_frame["B"])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=["a", "a"])
edf = DataFrame([[8, 5]])
edf.columns = ["a", "a"]
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)], columns=["a", "a"])
tm.assert_frame_equal(idf, edf)
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5")
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"])
tm.assert_frame_equal(
df, DataFrame(np.zeros(df.shape).astype("float64"), df.index, df.columns)
)
df = DataFrame(0, index=[1, 2, 3], columns=["a", "b", "c"])
tm.assert_frame_equal(
df, DataFrame(np.zeros(df.shape).astype("int64"), df.index, df.columns)
)
df = DataFrame("a", index=[1, 2], columns=["a", "c"])
tm.assert_frame_equal(
df,
DataFrame(
np.array([["a", "a"], ["a", "a"]], dtype=object),
index=[1, 2],
columns=["a", "c"],
),
)
msg = "DataFrame constructor not properly called!"
with pytest.raises(ValueError, match=msg):
DataFrame("a", [1, 2])
with pytest.raises(ValueError, match=msg):
DataFrame("a", columns=["a", "c"])
msg = "incompatible data and dtype"
with pytest.raises(TypeError, match=msg):
DataFrame("a", [1, 2], ["a", "c"], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame(
{
"A": 1,
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime(2001, 1, 2, 0, 0),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("int64")]
+ [np.dtype(objectname)] * 2
+ [np.dtype(datetime64name)] * 2,
index=list("ABCDE"),
)
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (i.e. we are passing an
# ndim-0 ndarray with a dtype specified)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
floatname: np.array(1.0, dtype=floatname),
intname: np.array(1, dtype=intname),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("object")]
+ [np.dtype("float64")]
+ [np.dtype(intname)],
index=["a", "b", "c", floatname, intname],
)
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
floatname: np.array([1.0] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname),
},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("object")]
+ [np.dtype("float64")]
+ [np.dtype(intname)],
index=["a", "b", "c", floatname, intname],
)
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == "M8[ns]"
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame(datetimes, columns=["datetimes"])
df["dates"] = dates
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("object")],
index=["datetimes", "dates"],
)
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone("US/Eastern")
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({"End Date": dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(
df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
)
df = DataFrame([{"End Date": dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(
df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"})
)
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
df = DataFrame({"value": dr})
assert df.iat[0, 0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
df = DataFrame({"value": dr})
assert str(df.iat[0, 0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
df = DataFrame({"value": dr})
assert str(df.iat[0, 0].tz) == "US/Eastern"
# GH 7822
# preserve an index with a tz on dict construction
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
expected = DataFrame({"a": i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df["a"] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range("1/1/2011", periods=5, freq="10s")
df = DataFrame({"a": i, "b": i_no_tz})
expected = DataFrame(
{"a": i.to_series(keep_tz=True).reset_index(drop=True), "b": i_no_tz}
)
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = DataFrame(arr).dtypes
expected = Series([np.dtype("datetime64[ns]")])
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.dtypes
expected = Series([np.dtype("int64")] * 5)
tm.assert_series_equal(result, expected)
df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)])
result = df.dtypes
expected = Series([np.dtype("int64")] * 5)
tm.assert_series_equal(result, expected)
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({"a": [2 ** 31, 2 ** 31 + 1]})
assert df.dtypes.iloc[0] == np.dtype("int64")
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame([1.0, 2.0])
assert df.dtypes.iloc[0] == np.dtype("float64")
df = DataFrame({"a": [1, 2]})
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame({"a": [1.0, 2.0]})
assert df.dtypes.iloc[0] == np.dtype("float64")
df = DataFrame({"a": 1}, index=range(3))
assert df.dtypes.iloc[0] == np.dtype("int64")
df = DataFrame({"a": 1.0}, index=range(3))
assert df.dtypes.iloc[0] == np.dtype("float64")
# with object list
df = DataFrame(
{
"a": [1, 2, 4, 7],
"b": [1.2, 2.3, 5.1, 6.3],
"c": list("abcd"),
"d": [datetime(2000, 1, 1) for i in range(4)],
"e": [1.0, 2, 4.0, 7],
}
)
result = df.dtypes
expected = Series(
[
np.dtype("int64"),
np.dtype("float64"),
np.dtype("object"),
np.dtype("datetime64[ns]"),
np.dtype("float64"),
],
index=list("abcde"),
)
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self, float_frame):
cop = DataFrame(float_frame, copy=True)
cop["A"] = 5
assert (cop["A"] == 5).all()
assert not (float_frame["A"] == 5).all()
def test_constructor_ndarray_copy(self, float_frame):
df = DataFrame(float_frame.values)
float_frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(float_frame.values, copy=True)
float_frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self, float_frame):
series = float_frame._series
df = DataFrame({"A": series["A"]})
df["A"][:] = 5
assert not (series["A"] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
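# helper: every column must be reachable positionally; label-based lookup of
# NaN column labels should raise when absent, give a Series for a single NaN
# label, and a DataFrame when several NaN labels exist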
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
msg = (
"cannot do label indexing on"
r" <class 'pandas\.core\.indexes\.range\.RangeIndex'>"
r" with these indexers \[nan\] of <class 'float'>"
)
with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame(
[[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]
)
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({"a": [np.nan, False]})
assert d["a"].dtype == np.object_
assert not d["a"][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({"A": list("abc")}, dtype="category")
expected = Series(list("abc"), dtype="category", name="A")
tm.assert_series_equal(df["A"], expected)
# to_frame
s = Series(list("abc"), dtype="category")
result = s.to_frame()
expected = Series(list("abc"), dtype="category", name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name="foo")
expected = Series(list("abc"), dtype="category", name="foo")
tm.assert_series_equal(result["foo"], expected)
# list-like creation
df = DataFrame(list("abc"), dtype="category")
expected = Series(list("abc"), dtype="category", name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list("abc"))])
expected = DataFrame({0: Series(list("abc"), dtype="category")})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list("abc")), Categorical(list("abd"))])
expected = DataFrame(
{
0: Series(list("abc"), dtype="category"),
1: Series(list("abd"), dtype="category"),
},
columns=[0, 1],
)
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list("abc")), list("def")])
expected = DataFrame(
{0: Series(list("abc"), dtype="category"), 1: list("def")}, columns=[0, 1]
)
tm.assert_frame_equal(df, expected)
# invalid (shape)
msg = r"Shape of passed values is \(6, 2\), indices imply \(3, 2\)"
with pytest.raises(ValueError, match=msg):
DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))])
# ndim > 1
msg = "> 1 ndim Categorical are not supported at this time"
with pytest.raises(NotImplementedError, match=msg):
Categorical(np.array([list("abcd")]))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype("category")
res = Series(items, dtype="category")
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype("category")
res = Series(items, dtype="category")
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range("20000101", periods=3)
expected = Series(
Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"])
)
expected.index = index
expected = DataFrame({"x": expected})
df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index="f1")
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert "index" not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
assert np.isnan(df["c"][0])
def test_from_records_iterator(self):
arr = np.array(
[(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
dtype=[
("x", np.float64),
("u", np.float32),
("y", np.int64),
("z", np.int32),
],
)
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame(
{
"x": np.array([1.0, 3.0], dtype=np.float64),
"u": np.array([1.0, 3.0], dtype=np.float32),
"y": np.array([2, 4], dtype=np.int64),
"z": np.array([2, 4], dtype=np.int32),
}
)
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield (i, letters[i % len(letters)], i / length)
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield [i, letters[i % len(letters)], i / length]
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in list_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
columns = ["a", "b", "c"]
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]
df = DataFrame.from_records(tuples, columns=["a"])
assert df["a"].dtype == object
df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
assert df["a"].dtype == np.float64
assert np.isnan(df["a"].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {
"order_id": order_id,
"quantity": np.random.randint(1, 10),
"price": np.random.randint(1, 10),
}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({"order_id": 10, "quantity": 5})
result = DataFrame.from_records(documents, index="order_id")
assert result.index.name == "order_id"
# MultiIndex
result = DataFrame.from_records(documents, index=["order_id", "quantity"])
assert result.index.names == ("order_id", "quantity")
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ["foo"], 2: ["bar"]}
result = DataFrame.from_records(data, columns=["a", "b"])
exp = DataFrame(data, columns=["a", "b"])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {"a": [1, 2, 3], "b": [4, 5, 6]}
result = DataFrame.from_records(data, index=["a", "b", "c"])
exp = DataFrame(data, index=["a", "b", "c"])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
result = df2_obj.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
)
tm.assert_series_equal(result, expected)
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
result = df2_obj.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
)
tm.assert_series_equal(result, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=["a", "b", "c"])
expected = DataFrame(columns=["a", "b", "c"])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=["a", "b", "b"])
expected = DataFrame(columns=["a", "b", "b"])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
df = DataFrame.from_records(a, index="id")
tm.assert_index_equal(df.index, Index([1], name="id"))
assert df.index.name == "id"
tm.assert_index_equal(df.columns, Index(["value"]))
b = np.array([], dtype=[("id", np.int64), ("value", np.int64)])
df = DataFrame.from_records(b, index="id")
tm.assert_index_equal(df.index, Index([], name="id"))
assert df.index.name == "id"
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except ValueError:
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# it is tricky to create the record-like arrays here and
# keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
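# view the typed tuple array as a recarray so from_records sees named, typed fields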
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
# list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, pd.Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
tm.assert_series_equal(result, expected)
result = DataFrame(Series(name=0)).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "uint8", "category"])
def test_constructor_range_dtype(self, dtype):
expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64")
# GH 26342
result = DataFrame(range(5), columns=["A"], dtype=dtype)
tm.assert_frame_equal(result, expected)
# GH 16804
result = DataFrame({"A": range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
# construction
df = DataFrame({"A": idx, "B": dr})
assert df["A"].dtype == "M8[ns, US/Eastern]"
assert df["A"].name == "A"
tm.assert_series_equal(df["A"], Series(idx, name="A"))
tm.assert_series_equal(df["B"], Series(dr, name="B"))
def test_from_index(self):
# from index
idx2 = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
idx2 = date_range("20130101", periods=3, tz="US/Eastern")
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range("1/1/2012", periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({"a": "foo", "b": s}, index=dr)
DataFrame({"a": "foo", "b": s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
ts = Series(dr)
# it works!
d = DataFrame({"A": "foo", "B": ts}, index=dr)
assert d["B"].isna().all()
def test_frame_timeseries_to_records(self):
index = date_range("1/1/2000", periods=10)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
result = df.to_records()
assert result["index"].dtype == "M8[ns]"
result = df.to_records(index=False)
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start="20130101T10:00:00", periods=3, freq="T", tz="US/Eastern")
result = DataFrame(dr, columns=["timestamps"])
expected = DataFrame(
{
"timestamps": [
Timestamp("20130101T10:00:00", tz="US/Eastern"),
Timestamp("20130101T10:01:00", tz="US/Eastern"),
Timestamp("20130101T10:02:00", tz="US/Eastern"),
]
}
)
tm.assert_frame_equal(result, expected)
def test_nested_dict_construction(self):
# GH22227
columns = ["Nevada", "Ohio"]
pop = {
"Nevada": {2001: 2.4, 2002: 2.9},
"Ohio": {2000: 1.5, 2001: 1.7, 2002: 3.6},
}
result = pd.DataFrame(pop, index=[2001, 2002, 2003], columns=columns)
expected = pd.DataFrame(
[(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)],
columns=columns,
index=pd.Index([2001, 2002, 2003]),
)
tm.assert_frame_equal(result, expected)
def test_from_tzaware_object_array(self):
# GH#26825 2D object array of tzaware timestamps should not raise
dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
data = dti._data.astype(object).reshape(1, -1)
df = pd.DataFrame(data)
assert df.shape == (1, 3)
assert (df.dtypes == dti.dtype).all()
assert (df == dti).all().all()
def test_from_tzaware_mixed_object_array(self):
# GH#26825
arr = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
],
dtype=object,
).T
res = DataFrame(arr, columns=["A", "B", "C"])
expected_dtypes = [
"datetime64[ns]",
"datetime64[ns, US/Eastern]",
"datetime64[ns, CET]",
]
assert (res.dtypes == expected_dtypes).all()
| 36.413395
| 88
| 0.552155
|
7036966b1cecbdc9588c3a671250a530156ad61c
| 117
|
py
|
Python
|
addons/payment_stripe_sca/controllers/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/payment_stripe_sca/controllers/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/payment_stripe_sca/controllers/__init__.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import main
| 39
| 74
| 0.709402
|
a1101850dd613fc30ec3216ba7c389f21ed8a565
| 1,420
|
py
|
Python
|
src/sentry/api/endpoints/internal_mail.py
|
xiaotian45123/sentry
|
93428feeaa495cc36c5dcab90f3bb2bb967dd311
|
[
"BSD-3-Clause"
] | 1
|
2020-02-27T02:46:25.000Z
|
2020-02-27T02:46:25.000Z
|
src/sentry/api/endpoints/internal_mail.py
|
xiaotian45123/sentry
|
93428feeaa495cc36c5dcab90f3bb2bb967dd311
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T14:54:44.000Z
|
2020-11-19T21:54:19.000Z
|
src/sentry/api/endpoints/internal_mail.py
|
xiaotian45123/sentry
|
93428feeaa495cc36c5dcab90f3bb2bb967dd311
|
[
"BSD-3-Clause"
] | 1
|
2017-02-09T06:36:57.000Z
|
2017-02-09T06:36:57.000Z
|
from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry import options
from sentry.utils.email import send_mail
from sentry.api.base import Endpoint
from sentry.api.permissions import SuperuserPermission
class InternalMailEndpoint(Endpoint):
permission_classes = (SuperuserPermission, )
def get(self, request):
data = {
'mailHost': options.get('mail.host'),
'mailPassword': bool(options.get('mail.password')),
'mailUsername': options.get('mail.username'),
'mailPort': options.get('mail.port'),
'mailUseTls': options.get('mail.use-tls'),
'mailFrom': options.get('mail.from'),
'mailListNamespace': options.get('mail.list-namespace'),
'testMailEmail': request.user.email,
}
return Response(data)
def post(self, request):
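# send a one-off test email to the requesting superuser using the configured
# outbound mail settings; any exception is reported back as the error payload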
error = None
body = """This email was sent as a request to test the Sentry outbound email configuration."""
try:
send_mail(
'%s Test Email' % (options.get('mail.subject-prefix'), ),
body,
options.get('mail.from'), [request.user.email],
fail_silently=False
)
except Exception as e:
error = six.text_type(e)
return Response({'error': error}, status=500 if error else 200)
| 32.272727
| 102
| 0.612676
|
fa1ad641893aed158a086c101410888a31cc2fa2
| 9,904
|
py
|
Python
|
train.py
|
winterest/mot-transform
|
92cd7d55b273d603572a62173c6b5dc75156734f
|
[
"MIT"
] | null | null | null |
train.py
|
winterest/mot-transform
|
92cd7d55b273d603572a62173c6b5dc75156734f
|
[
"MIT"
] | null | null | null |
train.py
|
winterest/mot-transform
|
92cd7d55b273d603572a62173c6b5dc75156734f
|
[
"MIT"
] | null | null | null |
import argparse
import json
import time
from time import gmtime, strftime
import test
import torch
from models import Darknet, load_darknet_weights
from shutil import copyfile
from utils.datasets import JointDataset, collate_fn
from utils.utils import mkdir_if_missing, init_seeds
from utils.log import logger
from torchvision.transforms import transforms as T
import os.path as osp
from collections import defaultdict
from detr_models import build_model
def build_detr(args):
#from config import
#### args:
#### dataset_file, device, num_queries, aux_loss, masks, frozen_weights,
#### bbox_loss_coef, giou_loss_coef, mask_loss_coef, dice_loss_coef,
#### dec_layers, eos_coef,
model, criterion, postprocessors = build_model(args)
return model, criterion, postprocessors
def train(
cfg,
data_cfg,
weights_from="",
weights_to="",
save_every=10,
img_size=(1088, 608),
resume=False,
epochs=100,
batch_size=16,
accumulated_batches=1,
freeze_backbone=False,
opt=None,
):
# The function starts
NUM_WORKERS = opt.num_workers
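# build a filesystem-safe timestamp (dashes, spaces and colons replaced with
# underscores) and use it to name this run's weights directory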
timme = strftime("%Y-%d-%m %H:%M:%S", gmtime())
timme = timme[5:-3].replace('-', '_')
timme = timme.replace(' ', '_')
timme = timme.replace(':', '_')
weights_to = osp.join(weights_to, 'run' + timme)
mkdir_if_missing(weights_to)
mkdir_if_missing(weights_to + '/cfg/')
if resume:
latest_resume = osp.join(weights_from, 'latest.pt')
torch.backends.cudnn.benchmark = True # unsuitable for multiscale
# Configure run
f = open(data_cfg)
data_config = json.load(f)
trainset_paths = data_config['train']
dataset_root = data_config['root']
f.close()
transforms = T.Compose([T.ToTensor()])
# Get dataloader
dataset = JointDataset(dataset_root, trainset_paths, img_size, augment=True, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
num_workers=NUM_WORKERS, pin_memory=True, drop_last=True, collate_fn=collate_fn)
# Initialize model
model = Darknet(cfg, dataset.nID)
cutoff = -1 # backbone reaches to cutoff layer
start_epoch = 0
if resume:
checkpoint = torch.load(latest_resume, map_location='cpu')
# Load weights to resume from
model.load_state_dict(checkpoint['model'])
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9)
start_epoch = checkpoint['epoch'] + 1
if checkpoint['optimizer'] is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
del checkpoint # current, saved
else:
# Initialize model with backbone (optional)
if cfg.endswith('yolov3.cfg'):
load_darknet_weights(model, osp.join(weights_from, 'darknet53.conv.74'))
cutoff = 75
elif cfg.endswith('yolov3-tiny.cfg'):
load_darknet_weights(model, osp.join(weights_from, 'yolov3-tiny.conv.15'))
cutoff = 15
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9,
weight_decay=1e-4)
model = torch.nn.DataParallel(model)
# Set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[int(0.5 * opt.epochs), int(0.75 * opt.epochs)],
gamma=0.1)
# An important trick for detection: freeze bn during fine-tuning
if not opt.unfreeze_bn:
for i, (name, p) in enumerate(model.named_parameters()):
p.requires_grad = False if 'batch_norm' in name else True
# model_info(model)
t0 = time.time()
for epoch in range(epochs):
epoch += start_epoch
logger.info(('%8s%12s' + '%10s' * 6) % (
'Epoch', 'Batch', 'box', 'conf', 'id', 'total', 'nTargets', 'time'))
# Freeze darknet53.conv.74 for first epoch
if freeze_backbone and (epoch < 2):
for i, (name, p) in enumerate(model.named_parameters()):
if int(name.split('.')[2]) < cutoff: # if layer < 75
p.requires_grad = False if (epoch == 0) else True
ui = -1
rloss = defaultdict(float) # running loss
## training schedule
optimizer.zero_grad()
for i, (imgs, targets, _, _, targets_len) in enumerate(dataloader):
if sum([len(x) for x in targets]) < 1: # if no targets continue
continue
# SGD burn-in
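            # Quartic warm-up: during epoch 0 the learning rate ramps from ~0 to
            # opt.lr as (i / burnin) ** 4, with burnin = min(1000, len(dataloader)).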
burnin = min(1000, len(dataloader))
if (epoch == 0) & (i <= burnin):
lr = opt.lr * (i / burnin) ** 4
for g in optimizer.param_groups:
g['lr'] = lr
# Compute loss, compute gradient, update parameters
loss, components = model(imgs.cuda(), targets.cuda(), targets_len.cuda())
components = torch.mean(components.view(-1, 5), dim=0)
loss = torch.mean(loss)
loss.backward()
# accumulate gradient for x batches before optimizing
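            # Effective batch size is batch_size * accumulated_batches: the optimizer
            # only steps (and gradients are only cleared) every `accumulated_batches`
            # iterations or on the last batch of the epoch.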
if ((i + 1) % accumulated_batches == 0) or (i == len(dataloader) - 1):
optimizer.step()
optimizer.zero_grad()
# Running epoch-means of tracked metrics
ui += 1
for ii, key in enumerate(model.module.loss_names):
rloss[key] = (rloss[key] * ui + components[ii]) / (ui + 1)
            # rloss holds the running mean of each loss component, updated every
            # batch and reset at the start of each epoch
s = ('%8s%12s' + '%10.3g' * 6) % (
'%g/%g' % (epoch, epochs - 1),
'%g/%g' % (i, len(dataloader) - 1),
rloss['box'], rloss['conf'],
rloss['id'], rloss['loss'],
rloss['nT'], time.time() - t0)
t0 = time.time()
if i % opt.print_interval == 0:
logger.info(s)
# Save latest checkpoint
checkpoint = {'epoch': epoch,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
copyfile(cfg, weights_to + '/cfg/yolo3.cfg')
copyfile(data_cfg, weights_to + '/cfg/ccmcpe.json')
latest = osp.join(weights_to, 'latest.pt')
torch.save(checkpoint, latest)
if epoch % save_every == 0 and epoch != 0:
# making the checkpoint lite
checkpoint["optimizer"] = []
torch.save(checkpoint, osp.join(weights_to, "weights_epoch_" + str(epoch) + ".pt"))
# Calculate mAP
'''
if epoch % opt.test_interval == 0:
with torch.no_grad():
mAP, R, P = test.test(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size,
print_interval=40, nID=dataset.nID)
test.test_emb(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size,
print_interval=40, nID=dataset.nID)
'''
        # Call scheduler.step() after optimizer.step() with PyTorch >= 1.1.0
scheduler.step()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=30, help='number of epochs')
parser.add_argument('--num-workers', type=int, default=0, help='number of workers')
parser.add_argument('--batch-size', type=int, default=1, help='size of each image batch')
parser.add_argument('--accumulated-batches', type=int, default=1, help='number of batches before optimizer step')
parser.add_argument('--cfg', type=str, default='cfg/yolov3_576x320.cfg', help='cfg file path')
parser.add_argument('--weights-from', type=str, default='weights/',
help='Path for getting the trained model for resuming training (Should only be used with '
'--resume)')
parser.add_argument('--weights-to', type=str, default='weights/',
help='Store the trained weights after resuming training session. It will create a new folder '
'with timestamp in the given path')
parser.add_argument('--save-model-after', type=int, default=10,
help='Save a checkpoint of model at given interval of epochs')
parser.add_argument('--data-cfg', type=str, default='cfg/ccmcpe.json', help='coco.data file path')
parser.add_argument('--img-size', type=int, default=[576, 320], nargs='+', help='pixels')
parser.add_argument('--resume', action='store_true', help='resume training flag')
parser.add_argument('--print-interval', type=int, default=40, help='print interval')
parser.add_argument('--test-interval', type=int, default=9, help='test interval')
parser.add_argument('--lr', type=float, default=1e-3, help='init lr')
parser.add_argument('--unfreeze-bn', action='store_true', help='unfreeze bn')
opt = parser.parse_args()
init_seeds()
train(
opt.cfg,
opt.data_cfg,
weights_from=opt.weights_from,
weights_to=opt.weights_to,
save_every=opt.save_model_after,
img_size=opt.img_size,
resume=opt.resume,
epochs=opt.epochs,
batch_size=opt.batch_size,
accumulated_batches=opt.accumulated_batches,
opt=opt,
)
| 40.757202
| 126
| 0.5833
|
ac1a8ac3619065a311acab1a40daf00638474ee1
| 1,838
|
py
|
Python
|
leetcode/100.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
leetcode/100.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
leetcode/100.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
"""
File: 100.py
Title: Same Tree
Difficulty: Easy
URL: https://leetcode.com/problems/same-tree/
"""
import unittest
from typing import List
class TreeNode:
def __init__(self,
val: int = 0,
left: "TreeNode" = None,
right: "TreeNode" = None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
return self.get_nodes(p) == self.get_nodes(q)
def get_nodes(self, root: TreeNode) -> List[int]:
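        """Preorder traversal with explicit None markers for absent children, so
        trees holding the same values but shaped differently serialize differently."""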
if root is None:
return []
nodes = [root.val]
if root.left is not None:
nodes += self.get_nodes(root.left)
else:
nodes.append(None)
if root.right is not None:
nodes += self.get_nodes(root.right)
else:
nodes.append(None)
return nodes
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
p = TreeNode(1, TreeNode(2), TreeNode(3))
q = TreeNode(1, TreeNode(2), TreeNode(3))
# Output
output = True
solution = Solution()
self.assertEqual(solution.isSameTree(p, q), output)
def test_example2(self):
# Input
p = TreeNode(1, TreeNode(2), None)
q = TreeNode(1, None, TreeNode(2))
# Output
output = False
solution = Solution()
self.assertEqual(solution.isSameTree(p, q), output)
def test_example3(self):
# Input
p = TreeNode(1, TreeNode(2), TreeNode(1))
q = TreeNode(1, TreeNode(1), TreeNode(2))
# Output
output = False
solution = Solution()
self.assertEqual(solution.isSameTree(p, q), output)
if __name__ == "__main__":
unittest.main()
| 23.87013
| 59
| 0.556039
|
73511231acaa6a4ba905680b1a0bc2ec7cbc3701
| 1,240
|
py
|
Python
|
userbot/plugins/weebify.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
userbot/plugins/weebify.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
userbot/plugins/weebify.py
|
justteen/BUZZ-USERBOT
|
55651cce150e1d04d2c61efb2565ef9f46b42933
|
[
"BSL-1.0"
] | null | null | null |
""" Weebify a text,
Ported from Saitama Bot.
By :- @PhycoNinja13b
Modified by :- @kirito6969
.weeb <text> """
from uniborg.util import lightning_cmd
normiefont = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
]
weebyfont = [
"卂",
"乃",
"匚",
"刀",
"乇",
"下",
"厶",
"卄",
"工",
"丁",
"长",
"乚",
"从",
"𠘨",
"口",
"尸",
"㔿",
"尺",
"丂",
"丅",
"凵",
"リ",
"山",
"乂",
"丫",
"乙",
]
@borg.on(lightning_cmd(pattern="weeb ?(.*)"))
async def weebify(event):
args = event.pattern_match.group(1)
if not args:
get = await event.get_reply_message()
args = get.text
if not args:
await event.edit("`What I am Supposed to Weebify U Dumb`")
return
string = " ".join(args).lower()
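    # " ".join over a string iterates it character by character, so the input is
    # lower-cased and space-separated before each Latin letter is swapped for the
    # glyph at the same index in weebyfont.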
for normiecharacter in string:
if normiecharacter in normiefont:
weebycharacter = weebyfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, weebycharacter)
await event.edit(string)
| 14.939759
| 73
| 0.452419
|
a862ec5679723325232f56b567b5f351b126f391
| 3,628
|
py
|
Python
|
mesonbuild/scripts/meson_exe.py
|
ueno/meson
|
5a0fec13b6463f45f88860d67e8fb50f34c8d739
|
[
"Apache-2.0"
] | 1
|
2019-05-02T17:44:19.000Z
|
2019-05-02T17:44:19.000Z
|
mesonbuild/scripts/meson_exe.py
|
ueno/meson
|
5a0fec13b6463f45f88860d67e8fb50f34c8d739
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/scripts/meson_exe.py
|
ueno/meson
|
5a0fec13b6463f45f88860d67e8fb50f34c8d739
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import pickle
import platform
import subprocess
from .. import mesonlib
options = None
def buildparser():
parser = argparse.ArgumentParser()
parser.add_argument('args', nargs='+')
return parser
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin():
platname = platform.system().lower()
return 'cygwin' in platname
def run_with_mono(fname):
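    # A .exe on a non-Windows, non-Cygwin host is assumed to be a CLR binary
    # that must be launched through Mono.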
if fname.endswith('.exe') and not (is_windows() or is_cygwin()):
return True
return False
def run_exe(exe):
if exe.fname[0].endswith('.jar'):
cmd = ['java', '-jar'] + exe.fname
elif not exe.is_cross and run_with_mono(exe.fname[0]):
cmd = ['mono'] + exe.fname
else:
if exe.is_cross:
if exe.exe_runner is None:
                raise AssertionError('BUG: Can\'t run cross-compiled exe {!r} '
                                     'with no wrapper'.format(exe.name))
            elif not exe.exe_runner.found():
                raise AssertionError('BUG: Can\'t run cross-compiled exe {!r} with not-found '
                                     'wrapper {!r}'.format(exe.name, exe.exe_runner.get_path()))
else:
cmd = exe.exe_runner.get_command() + exe.fname
else:
cmd = exe.fname
child_env = os.environ.copy()
child_env.update(exe.env)
if exe.extra_paths:
child_env['PATH'] = (os.pathsep.join(exe.extra_paths + ['']) +
child_env['PATH'])
if exe.exe_runner and mesonlib.substring_is_in_list('wine', exe.exe_runner.get_command()):
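        # Wine maps the host filesystem root to the Z: drive by default, so each
        # extra path is prefixed with 'Z:' and the paths are joined with ';' to
        # build WINEPATH for the child process.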
wine_paths = ['Z:' + p for p in exe.extra_paths]
wine_path = ';'.join(wine_paths)
# Don't accidentally end with an `;` because that will add the
# current directory and might cause unexpected behaviour
if 'WINEPATH' in child_env:
child_env['WINEPATH'] = wine_path + ';' + child_env['WINEPATH']
else:
child_env['WINEPATH'] = wine_path
p = subprocess.Popen(cmd + exe.cmd_args, env=child_env, cwd=exe.workdir,
close_fds=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if exe.capture and p.returncode == 0:
with open(exe.capture, 'wb') as output:
output.write(stdout)
else:
sys.stdout.buffer.write(stdout)
if stderr:
sys.stderr.buffer.write(stderr)
return p.returncode
def run(args):
global options
options = buildparser().parse_args(args)
if len(options.args) != 1:
print('Test runner for Meson. Do not run on your own, mmm\'kay?')
print(sys.argv[0] + ' [data file]')
exe_data_file = options.args[0]
with open(exe_data_file, 'rb') as f:
exe = pickle.load(f)
return run_exe(exe)
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
| 35.223301
| 98
| 0.616318
|
2ebee1fad349339bc3b68c3e66c6e95cfc49b165
| 1,247
|
py
|
Python
|
fn_sentinelone/fn_sentinelone/components/funct_sentinelone_abort_disk_scan.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | null | null | null |
fn_sentinelone/fn_sentinelone/components/funct_sentinelone_abort_disk_scan.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | null | null | null |
fn_sentinelone/fn_sentinelone/components/funct_sentinelone_abort_disk_scan.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2022. All Rights Reserved.
"""AppFunction implementation"""
from resilient_circuits import AppFunctionComponent, app_function, FunctionResult
from fn_sentinelone.lib.sentinelone_common import SentinelOneClient
PACKAGE_NAME = "fn_sentinelone"
FN_NAME = "sentinelone_abort_disk_scan"
class FunctionComponent(AppFunctionComponent):
"""Component that implements function 'sentinelone_abort_disk_scan'"""
def __init__(self, opts):
super(FunctionComponent, self).__init__(opts, PACKAGE_NAME)
@app_function(FN_NAME)
def _app_function(self, fn_inputs):
"""
        Function: Abort a running Full Disk Scan on an agent managed by SentinelOne.
Inputs:
- fn_inputs.sentinelone_agent_id
"""
yield self.status_message("Starting App Function: '{0}'".format(FN_NAME))
sentinelone_client = SentinelOneClient(self.opts, self.options)
agent_id = fn_inputs.sentinelone_agent_id
results = sentinelone_client.abort_scan(agent_id)
yield self.status_message("Finished running App Function: '{0}'".format(FN_NAME))
yield FunctionResult(results)
| 33.702703
| 89
| 0.72494
|
9d686239047a7c9f88df45568ba70a7aec7f9be7
| 9,725
|
py
|
Python
|
octoprint_octolabel/__init__.py
|
LowieGoossens/octolabel
|
5de688da7f32e73fa9daeffb0e1f12257a61d779
|
[
"MIT"
] | 7
|
2021-03-22T09:27:50.000Z
|
2021-10-03T16:08:02.000Z
|
octoprint_octolabel/__init__.py
|
LowieGoossens/octolabel
|
5de688da7f32e73fa9daeffb0e1f12257a61d779
|
[
"MIT"
] | 4
|
2021-03-21T18:09:34.000Z
|
2021-10-06T10:16:20.000Z
|
octoprint_octolabel/__init__.py
|
LowieGoossens/octolabel
|
5de688da7f32e73fa9daeffb0e1f12257a61d779
|
[
"MIT"
] | 2
|
2021-03-21T04:19:54.000Z
|
2021-03-21T19:56:24.000Z
|
# coding=utf-8
from __future__ import absolute_import
import os
import subprocess
from datetime import timedelta
import octoprint.filemanager
# from .discord import Hook
import octoprint.plugin
import octoprint.settings
import requests
class OctolabelPlugin(octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.ProgressPlugin):
def __init__(self):
# Events definition here (better for intellisense in IDE)
# referenced in the settings too.
super().__init__()
self.events = {
"startup": {
"name": "Octoprint Startup",
"enabled": False,
"message": "{name}"
},
"shutdown": {
"name": "Octoprint Shutdown",
"enabled": False,
"message": "{name}"
},
"printer_state_operational": {
"name": "Printer state : operational",
"enabled": False,
"message": "{name}"
},
"printer_state_error": {
"name": "Printer state : error",
"enabled": False,
"message": "{name}"
},
"printer_state_unknown": {
"name": "Printer state : unknown",
"enabled": False,
"message": "{name}"
},
"printing_started": {
"name": "Printing process : started",
"enabled": False,
"message": "{name}"
},
"printing_paused": {
"name": "Printing process : paused",
"enabled": False,
"message": "{name}"
},
"printing_resumed": {
"name": "Printing process : resumed",
"enabled": False,
"message": "{name}"
},
"printing_cancelled": {
"name": "Printing process : cancelled",
"enabled": False,
"message": "{name}"
},
"printing_done": {
"name": "Printing process : done",
"enabled": True,
"message": "{name}"
},
"printing_failed": {
"name": "Printing process : failed",
"enabled": False,
"message": "{name}"
},
"printing_progress": {
"name": "Printing progress",
"enabled": False,
"message": "{name}",
"step": 10
},
"test": { # Not a real message, but we will treat it as one
"enabled": False,
"message": "{name}"
},
}
def on_after_startup(self):
self._logger.info("Octolabel is started !")
# ~~ SettingsPlugin mixin
def get_settings_defaults(self):
return {
'consumer_key': "",
'consumer_secret': "",
'access_token': "",
'access_token_secret': "",
'username': "",
'events': self.events,
'allow_scripts': False,
'script_before': '',
'script_after': ''
}
# Restricts some paths to some roles only
def get_settings_restricted_paths(self):
        # settings.events.test is a fake event, so it should never be user-configurable.
        # The API credentials and the pre/post script paths are admin-only.
return dict(never=[["events", "test"]],
admin=[["consumer_key"], ["consumer_secret"], ["access_token"], ["access_token_secret"],
['script_before'], ['script_after']])
# ~~ AssetPlugin mixin
def get_assets(self):
# Define your plugin's asset files to automatically include in the
# core UI here.
return dict(
js=["js/octolabel.js"],
css=["css/octolabel.css"]
)
# ~~ TemplatePlugin mixin
def get_template_configs(self):
return [
dict(type="settings", custom_bindings=False)
]
# ~~ Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
octolabel=dict(
displayName="Octolabel plugin",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="LowieGoossens",
repo="octolabel",
current=self._plugin_version,
# update method: pip
pip="https://github.com/LowieGoossens/octolabel/archive/{target_version}.zip"
)
)
# ~~ EventHandlerPlugin hook
def on_event(self, event, payload):
if event == "Startup":
return self.notify_event("startup")
if event == "Shutdown":
return self.notify_event("shutdown")
if event == "PrinterStateChanged":
if payload["state_id"] == "OPERATIONAL":
return self.notify_event("printer_state_operational")
elif payload["state_id"] == "ERROR":
return self.notify_event("printer_state_error")
elif payload["state_id"] == "UNKNOWN":
return self.notify_event("printer_state_unknown")
if event == "PrintStarted":
return self.notify_event("printing_started", payload)
if event == "PrintPaused":
return self.notify_event("printing_paused", payload)
if event == "PrintResumed":
return self.notify_event("printing_resumed", payload)
if event == "PrintCancelled":
return self.notify_event("printing_cancelled", payload)
if event == "PrintDone":
payload['time_formatted'] = str(
timedelta(seconds=int(payload["time"])))
return self.notify_event("printing_done", payload)
return True
def on_print_progress(self, location, path, progress):
self.notify_event("printing_progress", {"progress": progress})
def on_settings_save(self, data):
old_bot_settings = '{}{}{}'.format(
self._settings.get(['url'], merged=True),
self._settings.get(['avatar'], merged=True),
self._settings.get(['username'], merged=True)
)
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
new_bot_settings = '{}{}{}'.format(
self._settings.get(['url'], merged=True),
self._settings.get(['avatar'], merged=True),
self._settings.get(['username'], merged=True)
)
if (old_bot_settings != new_bot_settings):
self._logger.info("Settings have changed. Send a test message...")
self.notify_event("test")
def notify_event(self, eventID, data=None):
if data is None:
data = {}
if (eventID not in self.events):
            self._logger.error(
                "Tried to notify on nonexistent eventID: %s", eventID)
return False
tmpConfig = self._settings.get(["events", eventID], merged=True)
if tmpConfig["enabled"] != True:
self._logger.debug(
"Event {} is not enabled. Returning gracefully".format(eventID))
return False
# Special case for progress eventID : we check for progress and steps
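        # Only fire when progress is a non-zero multiple of the configured step and
        # not the final 100% (completion is covered by the "printing_done" event).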
if eventID == 'printing_progress' and (
int(tmpConfig["step"]) == 0
or int(data["progress"]) == 0
or int(data["progress"]) % int(tmpConfig["step"]) != 0
or (int(data["progress"]) == 100)
):
return False
tmpDataFromPrinter = self._printer.get_current_data()
if tmpDataFromPrinter["progress"] is not None and tmpDataFromPrinter["progress"]["printTimeLeft"] is not None:
data["remaining"] = int(
tmpDataFromPrinter["progress"]["printTimeLeft"])
data["remaining_formatted"] = str(
timedelta(seconds=data["remaining"]))
if tmpDataFromPrinter["progress"] is not None and tmpDataFromPrinter["progress"]["printTime"] is not None:
data["spent"] = int(tmpDataFromPrinter["progress"]["printTime"])
data["spent_formatted"] = str(timedelta(seconds=data["spent"]))
self._logger.debug("Available variables for event " +
eventID + ": " + ", ".join(list(data)))
message = ''
try:
message = tmpConfig["message"].format(**data)
except KeyError as error:
message = tmpConfig["message"] + \
"""\r\n:sos: **Octotweet Warning**""" + \
"""\r\n The variable `{""" + error.args[0] + """}` is invalid for this message: """ + \
"""\r\n Available variables: `{""" + \
'}`, `{'.join(list(data)) + "}`"
finally:
return self.send_message(eventID, message)
def exec_script(self, eventName, which=""):
# I want to be sure that the scripts are allowed by the special configuration flag
scripts_allowed = self._settings.get(["allow_scripts"], merged=True)
if scripts_allowed is None or scripts_allowed == False:
return ""
# Finding which one should be used.
script_to_exec = None
if which == "before":
script_to_exec = self._settings.get(["script_before"], merged=True)
elif which == "after":
script_to_exec = self._settings.get(["script_after"], merged=True)
# Finally exec the script
out = ""
self._logger.debug("{}:{} File to start: '{}'".format(
eventName, which, script_to_exec))
try:
if script_to_exec is not None and len(script_to_exec) > 0 and os.path.exists(script_to_exec):
out = subprocess.check_output(script_to_exec)
except (OSError, subprocess.CalledProcessError) as err:
out = err
finally:
self._logger.debug(
"{}:{} > Output: '{}'".format(eventName, which, out))
return out
def send_message(self, eventID, message):
# return false if no URL is provided
# if "http" not in self._settings.get(["url"],merged=True):
# return False
# exec "before" script if any
self.exec_script(eventID, "before")
        post_result = "ok"
        url = 'http://' + self._settings.get(["printerip"], merged=True) + ':8765/api/print/namebadge'
        myobj = {'first': self._settings.get(["username"], merged=True), 'last': message, 'company': ''}
        # send the print job to the label printer API
        requests.get(url, data=myobj)
return post_result
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "octolabel"
__plugin_pythoncompat__ = ">=2.7,<4"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = OctolabelPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
| 29.64939
| 117
| 0.673111
|