Dataset schema:

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
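The avg_line_length, max_line_length, and alphanum_fraction columns are simple statistics derived from the content field. Below is a minimal sketch of how such values can be recomputed from a single row, assuming each row is available as a plain Python dict keyed by the column names above; the exact newline and rounding conventions of the original pipeline are an assumption, so small deviations from the stored values are possible.

def content_stats(row: dict) -> dict:
    """Recompute the content-derived columns for one dataset row."""
    text = row["content"]
    lines = text.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in text)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / max(len(text), 1),
    }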
hexsha: f03d9eaee97208cafc1e4372dee477f66cf18ca4 | size: 1,403 | ext: py | lang: Python
repo path: tests/explorers_tests/test_boltzmann.py | repo: ummavi/pfrl-1 | head hexsha: e856a7cca30fcc3871024cdf7522d066006a5f0c | licenses: ["MIT"]
stars: 824 (2020-07-29T00:30:14.000Z to 2022-03-29T13:12:34.000Z) | issues: 124 (2020-07-30T01:53:47.000Z to 2022-03-15T07:05:38.000Z) | forks: 122 (2020-07-29T04:33:35.000Z to 2022-03-13T13:29:41.000Z)
content:
import unittest
import numpy as np
import torch
import pfrl
def count_actions_selected_by_boltzmann(T, q_values):
def greedy_action_func():
raise RuntimeError("Must not be called")
explorer = pfrl.explorers.Boltzmann(T=T)
action_value = pfrl.action_value.DiscreteActionValue(q_values)
action_count = [0] * 3
for t in range(10000):
a = explorer.select_action(t, greedy_action_func, action_value)
action_count[a] += 1
return action_count
class TestBoltzmann(unittest.TestCase):
def test_boltzmann(self):
# T=1
q_values = torch.from_numpy(np.asarray([[-1, 1, 0]], dtype=np.float32))
action_count = count_actions_selected_by_boltzmann(1, q_values)
print("T=1", action_count)
# Actions with larger values must be selected more often
self.assertGreater(action_count[1], action_count[2])
self.assertGreater(action_count[2], action_count[0])
# T=0.5
action_count_t05 = count_actions_selected_by_boltzmann(0.5, q_values)
print("T=0.5", action_count_t05)
# Actions with larger values must be selected more often
self.assertGreater(action_count_t05[1], action_count_t05[2])
self.assertGreater(action_count_t05[2], action_count_t05[0])
# T=0.5 must be more greedy than T=1
self.assertGreater(action_count_t05[1], action_count[1])
avg_line_length: 31.177778 | max_line_length: 79 | alphanum_fraction: 0.693514
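For context on what the test in the row above asserts: Boltzmann exploration samples actions with probability proportional to exp(Q(a) / T), so every action keeps nonzero probability, higher-valued actions are chosen more often, and a lower temperature T concentrates more mass on the greedy action. A minimal sketch of the distribution those assertions expect for the test's Q-values follows; it recomputes the softmax directly and is not part of the pfrl API.

import numpy as np

def boltzmann_probs(q_values, T):
    """Softmax over Q-values at temperature T: p(a) proportional to exp(Q(a) / T)."""
    z = np.asarray(q_values, dtype=np.float64) / T
    z -= z.max()  # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum()

# Q-values from the test: action 1 > action 2 > action 0 at any temperature,
# and T=0.5 puts more probability on the greedy action than T=1 does.
print(boltzmann_probs([-1.0, 1.0, 0.0], T=1.0))  # roughly [0.09, 0.67, 0.24]
print(boltzmann_probs([-1.0, 1.0, 0.0], T=0.5))  # roughly [0.02, 0.87, 0.12]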
hexsha: bf5c650c655a13b2ab25577fe5e0fc310e0dcb90 | size: 15,963 | ext: py | lang: Python
repo path: projects/views.py | repo: rasmunk/projects | head hexsha: 6f067a6a9d5c8a785281d61a9d9c0b4ace3e7fef | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2020-04-11T13:04:12.000Z)
content:
import os
import datetime
from flask import render_template, request, flash, redirect, url_for, jsonify
from flask_login import login_user, logout_user, login_required, current_user
from flask_mail import Message
from werkzeug.datastructures import CombinedMultiDict
from werkzeug.utils import secure_filename
from bcrypt import hashpw, gensalt
from projects import mail, projects_blueprint, app, form_manager
from projects.conf import config
from projects.models import Project, User
from projects.forms import (
AuthRequestForm,
PasswordResetRequestForm,
LoginForm,
PasswordResetForm,
FileRequired,
)
from projects.helpers import (
unique_name_encoding,
unique_name_decode,
generate_confirmation_token,
confirm_token,
)
from projects_base.base.forms import TagsSearchForm
# Routes
@projects_blueprint.route("/")
@projects_blueprint.route("/index", methods=["GET"])
def projects():
form = TagsSearchForm()
entities = Project.get_all()
tags = Project.get_top_with("tags", num=10)
return render_template(
"projects/projects.html",
title=config.get("PROJECTS", "title"),
grid_header="{} {}".format(config.get("PROJECTS", "title"), "Projects"),
tags=list(tags.keys()),
objects=entities,
form=form,
)
@projects_blueprint.route("/my_projects", methods=["GET"])
@login_required
def my_projects():
form = TagsSearchForm()
entities = [
project for project in Project.get_all() if project._id in current_user.projects
]
return render_template("projects/projects.html", objects=entities, form=form)
@projects_blueprint.route("/show/<object_id>", methods=["GET"])
def show(object_id):
form_class = form_manager.get_form_class("config_form")
form = form_class()
entity = Project.get(object_id)
if entity is None:
flash("That project doesn't exist", "danger")
return redirect(url_for(".projects"))
owner = False
if current_user.is_authenticated and object_id in current_user.projects:
owner = True
for attr in entity.__dict__:
if attr != "_id" and attr != "_type":
form[attr].data = entity.__dict__[attr]
# Workaround for image upload required, set the label
# to the currently used image and disable it as a required field
if attr == "image":
form[attr].label.text = "Stored image is: " + unique_name_decode(
entity.__dict__[attr]
)
form[attr].flags = None
else:
# Translate area keys to values
if hasattr(entity, "area"):
entity.area = [
area[1] for area in form.area.choices if area[0] in entity.area
]
return render_template(
"projects/project.html", object=entity, owner=owner, form=form
)
@projects_blueprint.route("/create_project", methods=["GET", "POST"])
@login_required
def create():
form_class = form_manager.get_form_class("config_form")
form = form_class(CombinedMultiDict((request.files, request.form)))
if form.validate_on_submit():
f = form.image.data
# Make sure the saved image filename is unique
filename = secure_filename(unique_name_encoding(f.filename))
f.save(os.path.join(config.get("PROJECTS", "upload_folder"), filename))
# Remove special fields
if form.__contains__("csrf_token"):
form._fields.pop("csrf_token")
form._fields.pop("image")
# Save new instance
new_instance = {
key: field.data
for key, field in form.__dict__.items()
if hasattr(field, "data")
}
new_instance["image"] = filename
entity = Project(**new_instance)
entity_id = entity.save()
# Update user with new instance
current_user.projects.append(entity_id)
current_user.save()
url = url_for("projects.show", object_id=entity_id, _external=True)
flash(
"Your submission has been received,"
" your metadata can be found at: " + url,
"success",
)
return redirect(url)
return render_template("projects/create_project.html", form=form)
@projects_blueprint.route("/update/<object_id>", methods=["POST"])
@login_required
def update(object_id):
entity = Project.get(object_id)
if entity is None:
flash("That entity doesn't exist", "danger")
return redirect(url_for("projects.projects"))
if object_id not in current_user.projects:
flash("You're trying to update an entity that's not yours", "danger")
return redirect(url_for("projects.projects"))
form_class = form_manager.get_form_class("config_form")
form = form_class(CombinedMultiDict((request.files, request.form)))
# Strip image upload validation on upload (Optional)
form.image.validators = [
validator
for validator in form.image.validators
if type(validator) is not FileRequired
]
if form.validate_on_submit():
# Only save the image if a new one was submitted, else keep the old name
f = form.image.data
if f and hasattr(f, "filename") and f.filename != "":
filename = secure_filename(unique_name_encoding(f.filename))
f.save(os.path.join(config.get("PROJECTS", "upload_folder"), filename))
# Remove old
os.remove(
os.path.join(config.get("PROJECTS", "upload_folder"), entity.image)
)
else:
filename = entity.image
# Update every attribute except _id, _type, and image
disabled_updates = ["_id", "_type", "image"]
for attr in entity.__dict__:
if attr not in disabled_updates:
entity.__dict__[attr] = form[attr].data
entity.__dict__["image"] = filename
entity_id = entity.save()
url = url_for("projects.show", object_id=entity_id, _external=True)
flash("Update Success, your data can be found at: " + url, "success")
return redirect(url)
form.image.flags = None
return render_template("projects/project.html", object=entity, form=form)
@projects_blueprint.route("/delete/<object_id>", methods=["POST"])
@login_required
def delete(object_id):
entity = Project.get(object_id)
if entity is None:
flash("That entity doesn't exist", "danger")
return redirect(url_for("projects.projects"))
if object_id in current_user.projects:
Project.remove(object_id)
current_user.projects.remove(object_id)
os.remove(os.path.join(config.get("PROJECTS", "upload_folder"), entity.image))
current_user.save()
flash("Entity: " + entity.name + " has been deleted", "success")
else:
flash("You're trying to delete an entity you don't own", "danger")
return redirect(url_for("projects.projects"))
# Sends approval emails to every app.config['ADMINS_EMAIL']
@projects_blueprint.route("/request_auth", methods=["POST"])
def request_auth():
form = AuthRequestForm(request.form)
if form.validate_on_submit():
# Send confirmation token
user = User.get_with_first("email", form.email.data)
if user is None:
data = form.data
# Remove csrf_token
del data["csrf_token"]
subject = "{} requests {} access".format(
form.email.data, config.get("PROJECTS", "title")
)
token = generate_confirmation_token(data=form.data)
confirm_url = url_for("projects.approve_auth", token=token, _external=True)
html = render_template(
"projects/email/activate_user.html",
email=form.data,
confirm_url=confirm_url,
)
msg = Message(
subject=subject,
html=html,
recipients=[app.config["ADMINS_EMAIL"]],
sender=app.config["MAIL_USERNAME"],
)
try:
mail.send(msg)
except TimeoutError:
return jsonify(
data={
"danger": "Timed out before request could be sent"
" to an admin for approval"
}
)
return jsonify(
data={
"success": "Request successfully submitted"
", awaiting admin approval"
}
)
else:
response = jsonify(
data={"danger": "That email has already been granted access"}
)
response.status_code = 400
return response
response = jsonify(
data={
"danger": ", ".join(
[
"{} - {}".format(attr, r_msg)
for attr, errors in form.errors.items()
for r_msg in errors
]
)
}
)
response.status_code = 400
return response
@projects_blueprint.route("/request_password_reset", methods=["POST"])
def request_password_reset():
form = PasswordResetRequestForm(request.form)
if form.validate_on_submit():
user = User.get_with_first("email", form.email.data)
if user is None:
response = jsonify(data={"danger": "That user does not exist"})
response.status_code = 400
return response
else:
email = user.email
token = generate_confirmation_token(data=email)
reset_url = url_for("projects.reset_password", token=token, _external=True)
html = render_template(
"projects/email/reset_password.html",
email=email,
reset_password_url=reset_url,
)
msg = Message(
subject="{} Reset Password".format(config.get("PROJECTS", "title")),
html=html,
recipients=[email],
sender=app.config["MAIL_USERNAME"],
)
mail.send(msg)
return jsonify(
data={
"success": "A password reset link has been sent to {}".format(email)
}
)
response = jsonify(
data={
"danger": ", ".join(
[
"{} - {}".format(attr, r_msg)
for attr, errors in form.errors.items()
for r_msg in errors
]
)
}
)
response.status_code = 400
return response
# Accepts approval from admins
@projects_blueprint.route("/approve_auth/<token>")
def approve_auth(token):
data = confirm_token(token)
if data is False:
flash("Confirmation failed, either it is invalid or expired.", "danger")
return redirect(url_for("projects.projects"))
if "email" not in data:
flash("Confirmation failed, required email is not present", "danger")
return redirect(url_for("projects.projects"))
user = User.get_with_first("email", data["email"])
if user is not None:
flash("That email has already been registered")
return redirect(url_for("projects.projects"))
else:
# Setup
user = User(
email=data["email"],
password=hashpw(os.urandom(24), gensalt()),
projects=[],
is_active=False,
is_authenticated=False,
is_anonymous=False,
confirmed_on=datetime.datetime.now(),
)
user.save()
token = generate_confirmation_token(data=data["email"])
reset_url = url_for("projects.reset_password", token=token, _external=True)
html = render_template(
"projects/email/reset_password.html",
email=data["email"],
reset_password_url=reset_url,
)
msg = Message(
subject="{} Projects Account approval".format(
config.get("PROJECTS", "title")
),
html=html,
recipients=[data["email"]],
sender=app.config["MAIL_USERNAME"],
)
mail.send(msg)
flash(
"The account {} has been approved and created".format(data["email"]),
"success",
)
return redirect(url_for("projects.projects"))
@projects_blueprint.route("/reset_password/<token>", methods=["POST", "GET"])
def reset_password(token):
email = confirm_token(token)
if email is False:
flash(
"Attempted password reset failed,"
" the request is either invalid or expired",
"danger",
)
return redirect(url_for("projects.login"))
form = PasswordResetForm(request.form)
if form.validate_on_submit():
user = User.get_with_first("email", email)
user.is_active = True
user.is_authenticated = True
user.is_anonymous = False
user.email = email
user.password = hashpw(bytes(form.password.data, "utf-8"), gensalt())
user.save()
flash("Your password has now been updated", "success")
return redirect(url_for("projects.projects"))
return render_template("projects/reset_password_form.html", form=form, email=email)
# @app.route('/login', methods=['GET', 'POST'])
@projects_blueprint.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
valid_user = User.valid_user(form.email.data, form.password.data)
if valid_user is not None:
flash("Logged in successfully.", "success")
login_user(valid_user)
return redirect(url_for("projects.projects"))
else:
flash("Invalid Credentials", "danger")
return render_template("projects/login.html", form=form)
@projects_blueprint.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("projects.projects"))
@projects_blueprint.route("/tag/<tag>", methods=["GET"])
def tag_search(tag):
form = TagsSearchForm(data={"tag": tag}, meta={"csrf": False})
entities = {}
tags = Project.get_top_with("tags")
if form.validate():
entities = Project.get_with_search("tags", form.tag.data)
return render_template(
"projects/projects.html", tags=list(tags.keys()), objects=entities, form=form
)
# TODO -> refactor with fair search forms in common views instead.
@projects_blueprint.route("/search", methods=["GET"])
def tag_external_search():
form = TagsSearchForm(request.args, meta={"csrf": False})
entities = {}
# The return form should contain a csrf_token
return_form = TagsSearchForm()
return_form.tag = form.tag
if form.validate():
entities = Project.get_with_search("tags", form.tag.data)
return render_template(
"projects/projects.html", objects=entities, form=return_form
)
# pass on errors
return_form._errors = form.errors
return_form._fields["tag"] = form._fields["tag"]
return render_template("projects/projects.html", objects=entities, form=return_form)
# TODO -> refactor with fair search forms in common views instead.
@projects_blueprint.route("/search", methods=["POST"])
def tag_native_search():
form = TagsSearchForm(request.form)
if form.validate_on_submit():
result = {}
tag = request.form["tag"]
entities = Project.get_with_search("tags", tag)
if len(entities) > 0:
result = [entity.serialize() for entity in entities]
return jsonify(data=result)
response = jsonify(
data={
"danger": ", ".join(
[msg for attr, errors in form.errors.items() for msg in errors]
)
}
)
response.status_code = 400
return response
avg_line_length: 35.238411 | max_line_length: 88 | alphanum_fraction: 0.606653
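The request_auth, approve_auth, and reset_password views above round-trip data through an emailed URL via generate_confirmation_token and confirm_token, which live in projects.helpers and are not shown in this row. A common way to implement such a pair is with itsdangerous' URLSafeTimedSerializer; the sketch below illustrates that pattern under that assumption only (the secret key, salt, and expiration values are hypothetical, and this is not the project's actual helper code).

from itsdangerous import BadSignature, SignatureExpired, URLSafeTimedSerializer

SECRET_KEY = "change-me"       # hypothetical; normally taken from app.config
SALT = "projects-confirm"      # hypothetical salt shared by both functions

def generate_confirmation_token(data):
    # Sign and serialize the payload so it can be embedded safely in a URL.
    return URLSafeTimedSerializer(SECRET_KEY).dumps(data, salt=SALT)

def confirm_token(token, expiration=3600):
    # Return the original payload, or False when the token is invalid or
    # expired, matching how the views above compare the result against False.
    try:
        return URLSafeTimedSerializer(SECRET_KEY).loads(token, salt=SALT, max_age=expiration)
    except (SignatureExpired, BadSignature):
        return False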
hexsha: fb40d10ea564ef33dd673ec540e4c52545cff43a | size: 4,635 | ext: py | lang: Python
repo path: python/common/operation_tickets.py | repo: Azure/iot-sdk-longhaul | head hexsha: 5c77a9f120610433ae99c39130f0d454d31c4a7f | licenses: ["MIT"]
stars: null | issues: 7 (2020-09-14T16:30:22.000Z to 2021-08-10T14:33:13.000Z) | forks: 1 (2020-12-27T11:12:19.000Z)
content:
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import weakref
import threading
import uuid
import logging
logger = logging.getLogger("thief.{}".format(__name__))
class OperationTicketBase(object):
"""
Base class for running operations
"""
def __del__(self):
self.remove_from_owning_list(True)
def remove_from_owning_list(self, in_dunder_del=False):
"""
Remove an operation from the OperationTicketList which owns it.
"""
owner = self.owner_weakref and self.owner_weakref()
if owner:
if in_dunder_del:
logger.warning("Abandoning an incomplete operation: id={}".format(self.id))
owner.remove(self.id)
self.owner_weakref = None
class EventBasedOperationTicket(OperationTicketBase):
"""
Running operation which sets an event when it is complete.
"""
def __init__(self, owner, event_module):
self.owner_weakref = weakref.ref(owner)
self.id = str(uuid.uuid4())
self.event = event_module.Event()
self.result_message = None
def complete(self):
"""
Mark a running operation as complete. This sets the operation's event and removes
the operation from the OperationTicketList which owns it.
"""
if self.event:
self.event.set()
self.remove_from_owning_list()
else:
logger.warning("Attempt to complete already completed operation: id={}".format(self.id))
class CallbackBasedOperation(OperationTicketBase):
"""
Running operation which calls a callback when it is complete.
"""
def __init__(self, owner, callback, user_data):
self.owner_weakref = weakref.ref(owner)
self.id = str(uuid.uuid4())
self.callback = callback
self.user_data = user_data
self.result_message = None
def complete(self):
"""
Mark a running operation as complete. This calls the operation's callback and removes
the operation from the OperationTicketList which owns it.
"""
if self.callback:
self.callback(self.id, self.user_data)
self.callback = None
self.remove_from_owning_list()
else:
logger.warning("Attempt to complete already completed operation: id={}".format(self.id))
class OperationTicketList(object):
"""
Object which keeps tracks of operations which are running (in progress, not yet complete.)
The "running operation" objects in this list have two properties which make them useful:
1. They have an automatically-generated ID value (a guid) which can be passed as a operationId.
2. They have a complete method, which can either set a `threading.Event` object or call a callback.
Using these objects, we can have a guid (a `operationId`) which is like a "completion token".
When the server returns the operationId, this list can use that guid to run some "on complete"
code, which might be a callback, or might be a `threading.Event` object.
"""
def __init__(self):
self.lock = threading.Lock()
self.list = {}
def make_event_based_operation_ticket(self, event_module=threading):
"""
Make and return a running operation object which sets an event when it is complete.
"""
operation = EventBasedOperationTicket(self, event_module=event_module)
with self.lock:
self.list[operation.id] = operation
return operation
def make_callback_operation_ticket(self, callback, user_data):
"""
Make and return a running operation object which calls a callback when it is complete.
"""
operation = CallbackBasedOperation(self, callback, user_data)
with self.lock:
self.list[operation.id] = operation
return operation
def remove(self, id):
"""
Remove an operation object from this list. Returns the operation which was removed, or
`None` if that operation is not in the list.
"""
with self.lock:
if id in self.list:
operation = self.list[id]
del self.list[id]
return operation
else:
return None
def get(self, id):
"""
Get an operation object from this list. Returns `None` if an operation with this
id is not in the list.
"""
with self.lock:
return self.list.get(id, None)
avg_line_length: 34.333333 | max_line_length: 103 | alphanum_fraction: 0.64315
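The OperationTicketList docstring above describes the intended flow: the ticket's auto-generated id travels to the service as an operationId, and when the service echoes that id back, the list is used to run the matching "on complete" code. A minimal usage sketch of the event-based variant, assuming the classes from the file above are in scope; the on_response handler and send_request stand-in are hypothetical names, not part of the module.

import threading

ticket_list = OperationTicketList()

def on_response(operation_id, message):
    # Called when the service echoes back an operationId it has processed.
    op = ticket_list.get(operation_id)
    if op:
        op.result_message = message
        op.complete()  # sets the ticket's event and removes it from the list

op = ticket_list.make_event_based_operation_ticket(event_module=threading)
send_request = lambda op_id: on_response(op_id, "ack")  # stand-in for the real service call
send_request(op.id)
op.event.wait(timeout=10)   # block until the matching response arrives
print(op.result_message)    # -> "ack"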
hexsha: 45eb9be8dea425a0082a68407bb683b4665b6f55 | size: 1,488 | ext: py | lang: Python
repo path: workers/hadoop_aggregator_driver.py | repo: eggsandbeer/scheduler | head hexsha: 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import psutil
from synergy.conf import settings
from workers.abstract_cli_worker import AbstractCliWorker
class HadoopAggregatorDriver(AbstractCliWorker):
"""Python process that starts Hadoop map/reduce job, supervises its execution and updates unit_of_work"""
def __init__(self, process_name):
super(HadoopAggregatorDriver, self).__init__(process_name)
def _start_process(self, start_timeperiod, end_timeperiod, arguments):
try:
self.logger.info('start: %s {' % self.process_name)
p = psutil.Popen([settings.settings['hadoop_command'],
'jar', settings.settings['hadoop_jar'],
'-D', 'process.name=' + self.process_name,
'-D', 'timeperiod.working=' + str(start_timeperiod),
'-D', 'timeperiod.next=' + str(end_timeperiod)],
close_fds=True,
cwd=settings.settings['process_cwd'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
self.cli_process = p
self.logger.info('Started %s with pid = %r' % (self.process_name, p.pid))
except Exception:
self.logger.error('Exception on starting: %s' % self.process_name, exc_info=True)
finally:
self.logger.info('}')
avg_line_length: 42.514286 | max_line_length: 109 | alphanum_fraction: 0.574597
hexsha: 643226486e219d6590de90363c69badb61b12792 | size: 494 | ext: py | lang: Python
repo path: catalog/bindings/gmd/boolean_list.py | repo: NIVANorge/s-enda-playground | head hexsha: 56ae0a8978f0ba8a5546330786c882c31e17757a | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
from dataclasses import dataclass, field
from typing import List, Union
from bindings.gmd.nil_reason_enumeration_value import NilReasonEnumerationValue
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class BooleanList:
class Meta:
namespace = "http://www.opengis.net/gml"
value: List[Union[str, NilReasonEnumerationValue]] = field(
default_factory=list,
metadata={
"pattern": r"other:\w{2,}",
"tokens": True,
},
)
avg_line_length: 24.7 | max_line_length: 79 | alphanum_fraction: 0.665992
hexsha: 9fa24f2164d8b4b2c60695a2c11c8f481b8c01d9 | size: 2,822 | ext: py | lang: Python
repo path: classNotes/data_structures5/lists_as_queues.py | repo: minefarmer/Think_like_a_programmer | head hexsha: d6b1363f96600445ea47f91c637c5d0bede2e8f6 | licenses: ["Unlicense"]
stars: null | issues: null | forks: null
content:
''' Queues
A queue is a linear list of elements in which deletions can take place only at one end, called the front.
Insertions in the queue can only take place at the other end, called the rear.
Queues are also called FIFO (first in, first out), since the first element in a queue will be the first element to go out of it. In other words, the order in which elements enter a queue is the order in which they leave it.
Definition taken from: Schaum's Outlines, Data Structures by Seymour Lipschutz
How to implement?
To implement a queue, use "collections.deque", which was designed for fast appends and pops from both ends.
from collections import deque
Use popleft() to remove an element from the front.
Use append() to insert a new element at the end.
'''
from collections import deque
queue = deque([10, 20, 30])
while True:
print("1. Insert Element in the queue")
print("2. Remove element from queue")
print("3. Display elements in queue")
print("4. Exit")
choice = int(input("Enter your choice: "))
if choice == 1:
if len(queue) == 5:
print("Sorry, queue is already full")
else:
element = int(input("Please enter element: "))
queue.append(element)
print(element, " inserted successfully")
elif choice == 2:
if len(queue) == 0:
print("Sorry, queue is already empty")
else:
element = queue.popleft()
print(element, " was removed successfully")
elif choice == 3:
for i in queue:
print(i, end = ' ')
print()
elif choice == 4:
print("Thank You. See you later")
break
else:
print("Invalid option")
more = input("Do you want to continue?(y/n) : ")
if more == 'Y' or more == 'y':
continue
else:
print("Thank You. See you later")
break
'''
1. Insert Element in the queue
2. Remove element from queue
3. Display elements in queue
4. Exit
Enter your choice: 3
10 20 30
Do you want to continue?(y/n) : y
1. Insert Element in the queue
2. Remove element from queue
3. Display elements in queue
4. Exit
Enter your choice: 1
Please enter element: 40
40 inserted successfully
Do you want to continue?(y/n) : y
1. Insert Element in the queue
2. Remove element from queue
3. Display elements in queue
4. Exit
Enter your choice: 3
10 20 30 40
Do you want to continue?(y/n) : y
1. Insert Element in the queue
2. Remove element from queue
3. Display elements in queue
4. Exit
Enter your choice: 2
10 was removed successfully
Do you want to continue?(y/n) : y
1. Insert Element in the queue
2. Remove element from queue
3. Display elements in queue
4. Exit
Enter your choice: 3
20 30 40
Do you want to continue?(y/n) : n
Thank You. See you later
'''
avg_line_length: 27.398058 | max_line_length: 223 | alphanum_fraction: 0.658044
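As the docstring in the row above notes, collections.deque is used because appends and pops at either end are O(1), whereas removing from the front of a plain list (list.pop(0)) has to shift every remaining element and is O(n). A minimal side-by-side of the two front-removal operations:

from collections import deque

queue = deque([10, 20, 30])
queue.append(40)           # enqueue at the rear: O(1)
front = queue.popleft()    # dequeue from the front: O(1)

plain = [10, 20, 30]
plain.append(40)           # O(1) amortized
front2 = plain.pop(0)      # O(n): shifts the remaining elements left

print(front, front2)       # 10 10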
hexsha: 79918c81eecdc5253238e43bac313a6c1fa401b2 | size: 60,889 | ext: py | lang: Python
repo path: src/pretix/control/views/organizer.py | repo: fakegit/pretix | head hexsha: b6e9e64ff967f7b4f91fe88694f4157d8a0787b4 | licenses: ["Apache-2.0"]
stars: null | issues: 56 (2020-05-07T07:54:17.000Z to 2021-04-19T12:14:14.000Z) | forks: null
content:
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Bolutife Lawrence, Jakob Schnell, Sohalt
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import json
from datetime import timedelta
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.files import File
from django.db import transaction
from django.db.models import (
Count, Max, Min, OuterRef, Prefetch, ProtectedError, Subquery, Sum,
)
from django.db.models.functions import Coalesce, Greatest
from django.forms import DecimalField
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.views import View
from django.views.generic import (
CreateView, DeleteView, DetailView, FormView, ListView, TemplateView,
UpdateView,
)
from pretix.api.models import WebHook
from pretix.base.auth import get_auth_backends
from pretix.base.models import (
CachedFile, Device, Gate, GiftCard, LogEntry, OrderPayment, Organizer,
Team, TeamInvite, User,
)
from pretix.base.models.event import Event, EventMetaProperty, EventMetaValue
from pretix.base.models.giftcards import (
GiftCardTransaction, gen_giftcard_secret,
)
from pretix.base.models.organizer import TeamAPIToken
from pretix.base.payment import PaymentException
from pretix.base.services.export import multiexport
from pretix.base.services.mail import SendMailException, mail
from pretix.base.settings import SETTINGS_AFFECTING_CSS
from pretix.base.signals import register_multievent_data_exporters
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.filter import (
EventFilterForm, GiftCardFilterForm, OrganizerFilterForm,
)
from pretix.control.forms.orders import ExporterForm
from pretix.control.forms.organizer import (
DeviceForm, EventMetaPropertyForm, GateForm, GiftCardCreateForm,
GiftCardUpdateForm, OrganizerDeleteForm, OrganizerForm,
OrganizerSettingsForm, OrganizerUpdateForm, TeamForm, WebHookForm,
)
from pretix.control.logdisplay import OVERVIEW_BANLIST
from pretix.control.permissions import (
AdministratorPermissionRequiredMixin, OrganizerPermissionRequiredMixin,
)
from pretix.control.signals import nav_organizer
from pretix.control.views import PaginationMixin
from pretix.helpers.dicts import merge_dicts
from pretix.helpers.urls import build_absolute_uri
from pretix.presale.style import regenerate_organizer_css
class OrganizerList(PaginationMixin, ListView):
model = Organizer
context_object_name = 'organizers'
template_name = 'pretixcontrol/organizers/index.html'
def get_queryset(self):
qs = Organizer.objects.all()
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
if self.request.user.has_active_staff_session(self.request.session.session_key):
return qs
else:
return qs.filter(pk__in=self.request.user.teams.values_list('organizer', flat=True))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
return ctx
@cached_property
def filter_form(self):
return OrganizerFilterForm(data=self.request.GET, request=self.request)
class InviteForm(forms.Form):
user = forms.EmailField(required=False, label=_('User'))
class TokenForm(forms.Form):
name = forms.CharField(required=False, label=_('Token name'))
class OrganizerDetailViewMixin:
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['nav_organizer'] = []
ctx['organizer'] = self.request.organizer
for recv, retv in nav_organizer.send(sender=self.request.organizer, request=self.request,
organizer=self.request.organizer):
ctx['nav_organizer'] += retv
ctx['nav_organizer'].sort(key=lambda n: n['label'])
return ctx
def get_object(self, queryset=None) -> Organizer:
return self.request.organizer
class OrganizerDetail(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = Event
template_name = 'pretixcontrol/organizers/detail.html'
permission = None
context_object_name = 'events'
paginate_by = 50
@property
def organizer(self):
return self.request.organizer
def get_queryset(self):
qs = self.request.user.get_events_with_any_permission(self.request).select_related('organizer').prefetch_related(
'organizer', '_settings_objects', 'organizer___settings_objects',
'organizer__meta_properties',
Prefetch(
'meta_values',
EventMetaValue.objects.select_related('property'),
to_attr='meta_values_cached'
)
).filter(organizer=self.request.organizer).order_by('-date_from')
qs = qs.annotate(
min_from=Min('subevents__date_from'),
max_from=Max('subevents__date_from'),
max_to=Max('subevents__date_to'),
max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from'))
).annotate(
order_from=Coalesce('min_from', 'date_from'),
order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
)
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
@cached_property
def filter_form(self):
return EventFilterForm(data=self.request.GET, request=self.request, organizer=self.organizer)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
ctx['meta_fields'] = [
self.filter_form['meta_{}'.format(p.name)] for p in self.organizer.meta_properties.all()
]
return ctx
class OrganizerTeamView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DetailView):
model = Organizer
template_name = 'pretixcontrol/organizers/teams.html'
permission = 'can_change_permissions'
context_object_name = 'organizer'
class OrganizerSettingsFormView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, FormView):
model = Organizer
permission = 'can_change_organizer_settings'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['obj'] = self.request.organizer
return kwargs
@transaction.atomic
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
form.save()
if form.has_changed():
self.request.organizer.log_action(
'pretix.organizer.settings', user=self.request.user, data={
k: (form.cleaned_data.get(k).name
if isinstance(form.cleaned_data.get(k), File)
else form.cleaned_data.get(k))
for k in form.changed_data
}
)
messages.success(self.request, _('Your changes have been saved.'))
return redirect(self.get_success_url())
else:
messages.error(self.request, _('We could not save your changes. See below for details.'))
return self.get(request)
class OrganizerDisplaySettings(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, View):
permission = None
def get(self, request, *wargs, **kwargs):
return redirect(reverse('control:organizer.edit', kwargs={
'organizer': self.request.organizer.slug,
}) + '#tab-0-3-open')
class OrganizerDelete(AdministratorPermissionRequiredMixin, FormView):
model = Organizer
template_name = 'pretixcontrol/organizers/delete.html'
context_object_name = 'organizer'
form_class = OrganizerDeleteForm
def post(self, request, *args, **kwargs):
if not self.request.organizer.allow_delete():
messages.error(self.request, _('This organizer can not be deleted.'))
return self.get(self.request, *self.args, **self.kwargs)
return super().post(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def form_valid(self, form):
try:
with transaction.atomic():
self.request.user.log_action(
'pretix.organizer.deleted', user=self.request.user,
data={
'organizer_id': self.request.organizer.pk,
'name': str(self.request.organizer.name),
'logentries': list(self.request.organizer.all_logentries().values_list('pk', flat=True))
}
)
self.request.organizer.delete_sub_objects()
self.request.organizer.delete()
messages.success(self.request, _('The organizer has been deleted.'))
return redirect(self.get_success_url())
except ProtectedError:
messages.error(self.request, _('The organizer could not be deleted as some constraints (e.g. data created by '
'plug-ins) do not allow it.'))
return self.get(self.request, *self.args, **self.kwargs)
def get_success_url(self) -> str:
return reverse('control:index')
class OrganizerUpdate(OrganizerPermissionRequiredMixin, UpdateView):
model = Organizer
form_class = OrganizerUpdateForm
template_name = 'pretixcontrol/organizers/edit.html'
permission = 'can_change_organizer_settings'
context_object_name = 'organizer'
@cached_property
def object(self) -> Organizer:
return self.request.organizer
def get_object(self, queryset=None) -> Organizer:
return self.object
@cached_property
def sform(self):
return OrganizerSettingsForm(
obj=self.object,
prefix='settings',
data=self.request.POST if self.request.method == 'POST' else None,
files=self.request.FILES if self.request.method == 'POST' else None
)
def get_context_data(self, *args, **kwargs) -> dict:
context = super().get_context_data(*args, **kwargs)
context['sform'] = self.sform
return context
@transaction.atomic
def form_valid(self, form):
self.sform.save()
change_css = False
if self.sform.has_changed():
self.request.organizer.log_action(
'pretix.organizer.settings',
user=self.request.user,
data={
k: (self.sform.cleaned_data.get(k).name
if isinstance(self.sform.cleaned_data.get(k), File)
else self.sform.cleaned_data.get(k))
for k in self.sform.changed_data
}
)
if any(p in self.sform.changed_data for p in SETTINGS_AFFECTING_CSS):
change_css = True
if form.has_changed():
self.request.organizer.log_action(
'pretix.organizer.changed',
user=self.request.user,
data={k: form.cleaned_data.get(k) for k in form.changed_data}
)
if change_css:
regenerate_organizer_css.apply_async(args=(self.request.organizer.pk,))
messages.success(self.request, _('Your changes have been saved. Please note that it can '
'take a short period of time until your changes become '
'active.'))
else:
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.request.user.has_active_staff_session(self.request.session.session_key):
kwargs['domain'] = True
kwargs['change_slug'] = True
return kwargs
def get_success_url(self) -> str:
return reverse('control:organizer.edit', kwargs={
'organizer': self.request.organizer.slug,
})
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid() and self.sform.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
class OrganizerCreate(CreateView):
model = Organizer
form_class = OrganizerForm
template_name = 'pretixcontrol/organizers/create.html'
context_object_name = 'organizer'
def dispatch(self, request, *args, **kwargs):
if not request.user.has_active_staff_session(self.request.session.session_key):
raise PermissionDenied() # TODO
return super().dispatch(request, *args, **kwargs)
@transaction.atomic
def form_valid(self, form):
messages.success(self.request, _('The new organizer has been created.'))
ret = super().form_valid(form)
t = Team.objects.create(
organizer=form.instance, name=_('Administrators'),
all_events=True, can_create_events=True, can_change_teams=True, can_manage_gift_cards=True,
can_change_organizer_settings=True, can_change_event_settings=True, can_change_items=True,
can_view_orders=True, can_change_orders=True, can_view_vouchers=True, can_change_vouchers=True
)
t.members.add(self.request.user)
return ret
def get_success_url(self) -> str:
return reverse('control:organizer', kwargs={
'organizer': self.object.slug,
})
class TeamListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = Team
template_name = 'pretixcontrol/organizers/teams.html'
permission = 'can_change_teams'
context_object_name = 'teams'
def get_queryset(self):
return self.request.organizer.teams.annotate(
memcount=Count('members', distinct=True),
eventcount=Count('limit_events', distinct=True),
invcount=Count('invites', distinct=True)
).all().order_by('name')
class TeamCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
model = Team
template_name = 'pretixcontrol/organizers/team_edit.html'
permission = 'can_change_teams'
form_class = TeamForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(Team, organizer=self.request.organizer, pk=self.kwargs.get('team'))
def get_success_url(self):
return reverse('control:organizer.team', kwargs={
'organizer': self.request.organizer.slug,
'team': self.object.pk
})
def form_valid(self, form):
messages.success(self.request, _('The team has been created. You can now add members to the team.'))
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
form.instance.members.add(self.request.user)
form.instance.log_action('pretix.team.created', user=self.request.user, data={
k: getattr(self.object, k) if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
})
return ret
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class TeamUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
model = Team
template_name = 'pretixcontrol/organizers/team_edit.html'
permission = 'can_change_teams'
context_object_name = 'team'
form_class = TeamForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(Team, organizer=self.request.organizer, pk=self.kwargs.get('team'))
def get_success_url(self):
return reverse('control:organizer.team', kwargs={
'organizer': self.request.organizer.slug,
'team': self.object.pk
})
def form_valid(self, form):
if form.has_changed():
self.object.log_action('pretix.team.changed', user=self.request.user, data={
k: getattr(self.object, k) if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
})
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class TeamDeleteView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DeleteView):
model = Team
template_name = 'pretixcontrol/organizers/team_delete.html'
permission = 'can_change_teams'
context_object_name = 'team'
def get_object(self, queryset=None):
return get_object_or_404(Team, organizer=self.request.organizer, pk=self.kwargs.get('team'))
def get_success_url(self):
return reverse('control:organizer.teams', kwargs={
'organizer': self.request.organizer.slug,
})
def get_context_data(self, *args, **kwargs) -> dict:
context = super().get_context_data(*args, **kwargs)
context['possible'] = self.is_allowed()
return context
def is_allowed(self) -> bool:
return self.request.organizer.teams.exclude(pk=self.kwargs.get('team')).filter(
can_change_teams=True, members__isnull=False
).exists()
@transaction.atomic
def delete(self, request, *args, **kwargs):
success_url = self.get_success_url()
self.object = self.get_object()
if self.is_allowed():
self.object.log_action('pretix.team.deleted', user=self.request.user)
self.object.delete()
messages.success(request, _('The selected team has been deleted.'))
return redirect(success_url)
else:
messages.error(request, _('The selected team cannot be deleted.'))
return redirect(success_url)
class TeamMemberView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DetailView):
template_name = 'pretixcontrol/organizers/team_members.html'
context_object_name = 'team'
permission = 'can_change_teams'
model = Team
def get_object(self, queryset=None):
return get_object_or_404(Team, organizer=self.request.organizer, pk=self.kwargs.get('team'))
@cached_property
def add_form(self):
return InviteForm(data=(self.request.POST
if self.request.method == "POST" and "user" in self.request.POST else None))
@cached_property
def add_token_form(self):
return TokenForm(data=(self.request.POST
if self.request.method == "POST" and "name" in self.request.POST else None))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['add_form'] = self.add_form
ctx['add_token_form'] = self.add_token_form
return ctx
def _send_invite(self, instance):
try:
mail(
instance.email,
_('pretix account invitation'),
'pretixcontrol/email/invitation.txt',
{
'user': self,
'organizer': self.request.organizer.name,
'team': instance.team.name,
'url': build_absolute_uri('control:auth.invite', kwargs={
'token': instance.token
})
},
event=None,
locale=self.request.LANGUAGE_CODE
)
except SendMailException:
pass # Already logged
@transaction.atomic
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if 'remove-member' in request.POST:
try:
user = User.objects.get(pk=request.POST.get('remove-member'))
except (User.DoesNotExist, ValueError):
pass
else:
other_admin_teams = self.request.organizer.teams.exclude(pk=self.object.pk).filter(
can_change_teams=True, members__isnull=False
).exists()
if not other_admin_teams and self.object.can_change_teams and self.object.members.count() == 1:
messages.error(self.request, _('You cannot remove the last member from this team as no one would '
'be left with the permission to change teams.'))
return redirect(self.get_success_url())
else:
self.object.members.remove(user)
self.object.log_action(
'pretix.team.member.removed', user=self.request.user, data={
'email': user.email,
'user': user.pk
}
)
messages.success(self.request, _('The member has been removed from the team.'))
return redirect(self.get_success_url())
elif 'remove-invite' in request.POST:
try:
invite = self.object.invites.get(pk=request.POST.get('remove-invite'))
except (TeamInvite.DoesNotExist, ValueError):
messages.error(self.request, _('Invalid invite selected.'))
return redirect(self.get_success_url())
else:
invite.delete()
self.object.log_action(
'pretix.team.invite.deleted', user=self.request.user, data={
'email': invite.email
}
)
messages.success(self.request, _('The invite has been revoked.'))
return redirect(self.get_success_url())
elif 'resend-invite' in request.POST:
try:
invite = self.object.invites.get(pk=request.POST.get('resend-invite'))
except (TeamInvite.DoesNotExist, ValueError):
messages.error(self.request, _('Invalid invite selected.'))
return redirect(self.get_success_url())
else:
self._send_invite(invite)
self.object.log_action(
'pretix.team.invite.resent', user=self.request.user, data={
'email': invite.email
}
)
messages.success(self.request, _('The invite has been resent.'))
return redirect(self.get_success_url())
elif 'remove-token' in request.POST:
try:
token = self.object.tokens.get(pk=request.POST.get('remove-token'))
except (TeamAPIToken.DoesNotExist, ValueError):
messages.error(self.request, _('Invalid token selected.'))
return redirect(self.get_success_url())
else:
token.active = False
token.save()
self.object.log_action(
'pretix.team.token.deleted', user=self.request.user, data={
'name': token.name
}
)
messages.success(self.request, _('The token has been revoked.'))
return redirect(self.get_success_url())
elif "user" in self.request.POST and self.add_form.is_valid() and self.add_form.has_changed():
try:
user = User.objects.get(email__iexact=self.add_form.cleaned_data['user'])
except User.DoesNotExist:
if self.object.invites.filter(email__iexact=self.add_form.cleaned_data['user']).exists():
messages.error(self.request, _('This user already has been invited for this team.'))
return self.get(request, *args, **kwargs)
if 'native' not in get_auth_backends():
messages.error(self.request, _('Users need to have a pretix account before they can be invited.'))
return self.get(request, *args, **kwargs)
invite = self.object.invites.create(email=self.add_form.cleaned_data['user'])
self._send_invite(invite)
self.object.log_action(
'pretix.team.invite.created', user=self.request.user, data={
'email': self.add_form.cleaned_data['user']
}
)
messages.success(self.request, _('The new member has been invited to the team.'))
return redirect(self.get_success_url())
else:
if self.object.members.filter(pk=user.pk).exists():
messages.error(self.request, _('This user already has permissions for this team.'))
return self.get(request, *args, **kwargs)
self.object.members.add(user)
self.object.log_action(
'pretix.team.member.added', user=self.request.user,
data={
'email': user.email,
'user': user.pk,
}
)
messages.success(self.request, _('The new member has been added to the team.'))
return redirect(self.get_success_url())
elif "name" in self.request.POST and self.add_token_form.is_valid() and self.add_token_form.has_changed():
token = self.object.tokens.create(name=self.add_token_form.cleaned_data['name'])
self.object.log_action(
'pretix.team.token.created', user=self.request.user, data={
'name': self.add_token_form.cleaned_data['name'],
'id': token.pk
}
)
messages.success(self.request, _('A new API token has been created with the following secret: {}\n'
'Please copy this secret to a safe place. You will not be able to '
'view it again here.').format(token.token))
return redirect(self.get_success_url())
else:
messages.error(self.request, _('Your changes could not be saved.'))
return self.get(request, *args, **kwargs)
def get_success_url(self) -> str:
return reverse('control:organizer.team', kwargs={
'organizer': self.request.organizer.slug,
'team': self.object.pk
})
class DeviceListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = Device
template_name = 'pretixcontrol/organizers/devices.html'
permission = 'can_change_organizer_settings'
context_object_name = 'devices'
def get_queryset(self):
return self.request.organizer.devices.prefetch_related(
'limit_events'
).order_by('revoked', '-device_id')
class DeviceCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
model = Device
template_name = 'pretixcontrol/organizers/device_edit.html'
permission = 'can_change_organizer_settings'
form_class = DeviceForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_success_url(self):
return reverse('control:organizer.device.connect', kwargs={
'organizer': self.request.organizer.slug,
'device': self.object.pk
})
def form_valid(self, form):
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
form.instance.log_action('pretix.device.created', user=self.request.user, data={
k: getattr(self.object, k) if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
})
return ret
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class DeviceLogView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
template_name = 'pretixcontrol/organizers/device_logs.html'
permission = 'can_change_organizer_settings'
model = LogEntry
context_object_name = 'logs'
paginate_by = 20
@cached_property
def device(self):
return get_object_or_404(Device, organizer=self.request.organizer, pk=self.kwargs.get('device'))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['device'] = self.device
return ctx
def get_queryset(self):
qs = LogEntry.objects.filter(
device_id=self.device
).select_related(
'user', 'content_type', 'api_token', 'oauth_application',
).prefetch_related(
'device', 'event'
).order_by('-datetime')
return qs
class DeviceUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
model = Device
template_name = 'pretixcontrol/organizers/device_edit.html'
permission = 'can_change_organizer_settings'
context_object_name = 'device'
form_class = DeviceForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(Device, organizer=self.request.organizer, pk=self.kwargs.get('device'))
def get_success_url(self):
return reverse('control:organizer.devices', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
if form.has_changed():
self.object.log_action('pretix.device.changed', user=self.request.user, data={
k: getattr(self.object, k) if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
})
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class DeviceConnectView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DetailView):
model = Device
template_name = 'pretixcontrol/organizers/device_connect.html'
permission = 'can_change_organizer_settings'
context_object_name = 'device'
def get_object(self, queryset=None):
return get_object_or_404(Device, organizer=self.request.organizer, pk=self.kwargs.get('device'))
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if 'ajax' in request.GET:
return JsonResponse({
'initialized': bool(self.object.initialized)
})
if self.object.initialized:
messages.success(request, _('This device has been set up successfully.'))
return redirect(reverse('control:organizer.devices', kwargs={
'organizer': self.request.organizer.slug,
}))
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['qrdata'] = json.dumps({
'handshake_version': 1,
'url': settings.SITE_URL,
'token': self.object.initialization_token,
})
return ctx
class DeviceRevokeView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DetailView):
model = Device
template_name = 'pretixcontrol/organizers/device_revoke.html'
permission = 'can_change_organizer_settings'
context_object_name = 'device'
def get_object(self, queryset=None):
return get_object_or_404(Device, organizer=self.request.organizer, pk=self.kwargs.get('device'))
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.api_token:
messages.success(request, _('This device currently does not have access.'))
return redirect(reverse('control:organizer.devices', kwargs={
'organizer': self.request.organizer.slug,
}))
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.revoked = True
self.object.save()
self.object.log_action('pretix.device.revoked', user=self.request.user)
messages.success(request, _('Access for this device has been revoked.'))
return redirect(reverse('control:organizer.devices', kwargs={
'organizer': self.request.organizer.slug,
}))
class WebHookListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = WebHook
template_name = 'pretixcontrol/organizers/webhooks.html'
permission = 'can_change_organizer_settings'
context_object_name = 'webhooks'
def get_queryset(self):
return self.request.organizer.webhooks.prefetch_related('limit_events')
class WebHookCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
model = WebHook
template_name = 'pretixcontrol/organizers/webhook_edit.html'
permission = 'can_change_organizer_settings'
form_class = WebHookForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_success_url(self):
return reverse('control:organizer.webhooks', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
self.request.organizer.log_action('pretix.webhook.created', user=self.request.user, data=merge_dicts({
k: form.cleaned_data[k] if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
}, {'id': form.instance.pk}))
new_listeners = set(form.cleaned_data['events'])
for l in new_listeners:
self.object.listeners.create(action_type=l)
return ret
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class WebHookUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
model = WebHook
template_name = 'pretixcontrol/organizers/webhook_edit.html'
permission = 'can_change_organizer_settings'
context_object_name = 'webhook'
form_class = WebHookForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(WebHook, organizer=self.request.organizer, pk=self.kwargs.get('webhook'))
def get_success_url(self):
return reverse('control:organizer.webhooks', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
if form.has_changed():
self.request.organizer.log_action('pretix.webhook.changed', user=self.request.user, data=merge_dicts({
k: form.cleaned_data[k] if k != 'limit_events' else [e.id for e in getattr(self.object, k).all()]
for k in form.changed_data
}, {'id': form.instance.pk}))
current_listeners = set(self.object.listeners.values_list('action_type', flat=True))
new_listeners = set(form.cleaned_data['events'])
for l in current_listeners - new_listeners:
self.object.listeners.filter(action_type=l).delete()
for l in new_listeners - current_listeners:
self.object.listeners.create(action_type=l)
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class WebHookLogsView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = WebHook
template_name = 'pretixcontrol/organizers/webhook_logs.html'
permission = 'can_change_organizer_settings'
context_object_name = 'calls'
paginate_by = 50
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['webhook'] = self.webhook
return ctx
@cached_property
def webhook(self):
return get_object_or_404(
WebHook, organizer=self.request.organizer, pk=self.kwargs.get('webhook')
)
def get_queryset(self):
return self.webhook.calls.order_by('-datetime')
class GiftCardListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = GiftCard
template_name = 'pretixcontrol/organizers/giftcards.html'
permission = 'can_manage_gift_cards'
context_object_name = 'giftcards'
paginate_by = 50
def get_queryset(self):
s = GiftCardTransaction.objects.filter(
card=OuterRef('pk')
).order_by().values('card').annotate(s=Sum('value')).values('s')
qs = self.request.organizer.issued_gift_cards.annotate(
cached_value=Coalesce(Subquery(s), Decimal('0.00'))
).order_by('-issuance')
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
def post(self, request, *args, **kwargs):
if "add" in request.POST:
o = self.request.user.get_organizers_with_permission(
'can_manage_gift_cards', self.request
).exclude(pk=self.request.organizer.pk).filter(
slug=request.POST.get("add")
).first()
if o:
self.request.organizer.gift_card_issuer_acceptance.get_or_create(
issuer=o
)
self.request.organizer.log_action(
'pretix.giftcards.acceptance.added',
data={'issuer': o.slug},
user=request.user
)
messages.success(self.request, _('The selected gift card issuer has been added.'))
if "del" in request.POST:
o = Organizer.objects.filter(
slug=request.POST.get("del")
).first()
if o:
self.request.organizer.gift_card_issuer_acceptance.filter(
issuer=o
).delete()
self.request.organizer.log_action(
'pretix.giftcards.acceptance.removed',
data={'issuer': o.slug},
user=request.user
)
messages.success(self.request, _('The selected gift card issuer has been removed.'))
return redirect(reverse('control:organizer.giftcards', kwargs={'organizer': self.request.organizer.slug}))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
ctx['other_organizers'] = self.request.user.get_organizers_with_permission(
'can_manage_gift_cards', self.request
).exclude(pk=self.request.organizer.pk)
return ctx
@cached_property
def filter_form(self):
return GiftCardFilterForm(data=self.request.GET, request=self.request)
class GiftCardDetailView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DetailView):
template_name = 'pretixcontrol/organizers/giftcard.html'
permission = 'can_manage_gift_cards'
context_object_name = 'card'
def get_object(self, queryset=None) -> Organizer:
return get_object_or_404(
self.request.organizer.issued_gift_cards,
pk=self.kwargs.get('giftcard')
)
@transaction.atomic()
def post(self, request, *args, **kwargs):
self.object = GiftCard.objects.select_for_update().get(pk=self.get_object().pk)
if 'revert' in request.POST:
t = get_object_or_404(self.object.transactions.all(), pk=request.POST.get('revert'), order__isnull=False)
if self.object.value - t.value < Decimal('0.00'):
messages.error(request, _('Gift cards are not allowed to have negative values.'))
elif t.value > 0:
r = t.order.payments.create(
order=t.order,
state=OrderPayment.PAYMENT_STATE_CREATED,
amount=t.value,
provider='giftcard',
info=json.dumps({
'gift_card': self.object.pk,
'retry': True,
})
)
try:
r.payment_provider.execute_payment(request, r)
except PaymentException as e:
with transaction.atomic():
r.state = OrderPayment.PAYMENT_STATE_FAILED
r.save()
t.order.log_action('pretix.event.order.payment.failed', {
'local_id': r.local_id,
'provider': r.provider,
'error': str(e)
})
messages.error(request, _('The transaction could not be reversed.'))
else:
messages.success(request, _('The transaction has been reversed.'))
elif 'value' in request.POST:
try:
value = DecimalField(localize=True).to_python(request.POST.get('value'))
except ValidationError:
messages.error(request, _('Your input was invalid, please try again.'))
else:
if self.object.value + value < Decimal('0.00'):
messages.error(request, _('Gift cards are not allowed to have negative values.'))
else:
self.object.transactions.create(
value=value,
text=request.POST.get('text') or None,
)
self.object.log_action(
'pretix.giftcards.transaction.manual',
data={
'value': value,
'text': request.POST.get('text')
},
user=self.request.user,
)
messages.success(request, _('The manual transaction has been saved.'))
return redirect(reverse(
'control:organizer.giftcard',
kwargs={
'organizer': request.organizer.slug,
'giftcard': self.object.pk
}
))
return self.get(request, *args, **kwargs)
class GiftCardCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
template_name = 'pretixcontrol/organizers/giftcard_create.html'
permission = 'can_manage_gift_cards'
form_class = GiftCardCreateForm
success_url = 'invalid'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
any_event = self.request.organizer.events.first()
kwargs['initial'] = {
'currency': any_event.currency if any_event else settings.DEFAULT_CURRENCY,
'secret': gen_giftcard_secret(self.request.organizer.settings.giftcard_length)
}
kwargs['organizer'] = self.request.organizer
return kwargs
@transaction.atomic()
def post(self, request, *args, **kwargs):
return super().post(request, *args, **kwargs)
def form_valid(self, form):
messages.success(self.request, _('The gift card has been created and can now be used.'))
form.instance.issuer = self.request.organizer
super().form_valid(form)
form.instance.transactions.create(
value=form.cleaned_data['value']
)
form.instance.log_action('pretix.giftcards.created', user=self.request.user, data={})
if form.cleaned_data['value']:
form.instance.log_action('pretix.giftcards.transaction.manual', user=self.request.user, data={
'value': form.cleaned_data['value']
})
return redirect(reverse(
'control:organizer.giftcard',
kwargs={
'organizer': self.request.organizer.slug,
'giftcard': self.object.pk
}
))
class GiftCardUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
template_name = 'pretixcontrol/organizers/giftcard_edit.html'
permission = 'can_manage_gift_cards'
form_class = GiftCardUpdateForm
success_url = 'invalid'
context_object_name = 'card'
model = GiftCard
def get_object(self, queryset=None) -> Organizer:
return get_object_or_404(
self.request.organizer.issued_gift_cards,
pk=self.kwargs.get('giftcard')
)
@transaction.atomic()
def form_valid(self, form):
messages.success(self.request, _('The gift card has been changed.'))
super().form_valid(form)
form.instance.log_action('pretix.giftcards.modified', user=self.request.user, data=dict(form.cleaned_data))
return redirect(reverse(
'control:organizer.giftcard',
kwargs={
'organizer': self.request.organizer.slug,
'giftcard': self.object.pk
}
))
class ExportMixin:
@cached_property
def exporters(self):
exporters = []
events = self.request.user.get_events_with_permission('can_view_orders', request=self.request).filter(
organizer=self.request.organizer
)
responses = register_multievent_data_exporters.send(self.request.organizer)
id = self.request.GET.get("identifier") or self.request.POST.get("exporter")
for ex in sorted([response(events) for r, response in responses if response], key=lambda ex: str(ex.verbose_name)):
if id and ex.identifier != id:
continue
# Use form parse cycle to generate useful defaults
test_form = ExporterForm(data=self.request.GET, prefix=ex.identifier)
test_form.fields = ex.export_form_fields
test_form.is_valid()
initial = {
k: v for k, v in test_form.cleaned_data.items() if ex.identifier + "-" + k in self.request.GET
}
ex.form = ExporterForm(
data=(self.request.POST if self.request.method == 'POST' else None),
prefix=ex.identifier,
initial=initial
)
ex.form.fields = ex.export_form_fields
ex.form.fields.update([
('events',
forms.ModelMultipleChoiceField(
queryset=events,
initial=events,
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
label=_('Events'),
required=True
)),
])
exporters.append(ex)
return exporters
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['exporters'] = self.exporters
return ctx
class ExportDoView(OrganizerPermissionRequiredMixin, ExportMixin, AsyncAction, TemplateView):
known_errortypes = ['ExportError']
task = multiexport
template_name = 'pretixcontrol/organizers/export.html'
def get_success_message(self, value):
return None
def get_success_url(self, value):
return reverse('cachedfile.download', kwargs={'id': str(value)})
def get_error_url(self):
return reverse('control:organizer.export', kwargs={
'organizer': self.request.organizer.slug
})
@cached_property
def exporter(self):
for ex in self.exporters:
if ex.identifier == self.request.POST.get("exporter"):
return ex
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
return TemplateView.get(self, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if not self.exporter:
messages.error(self.request, _('The selected exporter was not found.'))
            return redirect(reverse('control:organizer.export', kwargs={
                'organizer': self.request.organizer.slug
            }))
if not self.exporter.form.is_valid():
messages.error(self.request, _('There was a problem processing your input. See below for error details.'))
return self.get(request, *args, **kwargs)
cf = CachedFile(web_download=True, session_key=request.session.session_key)
cf.date = now()
cf.expires = now() + timedelta(hours=24)
cf.save()
return self.do(
organizer=self.request.organizer.id,
user=self.request.user.id,
fileid=str(cf.id),
provider=self.exporter.identifier,
device=None,
token=None,
form_data=self.exporter.form.cleaned_data
)
class ExportView(OrganizerPermissionRequiredMixin, ExportMixin, TemplateView):
template_name = 'pretixcontrol/organizers/export.html'
class GateListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = Gate
template_name = 'pretixcontrol/organizers/gates.html'
permission = 'can_change_organizer_settings'
context_object_name = 'gates'
def get_queryset(self):
return self.request.organizer.gates.all()
class GateCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
model = Gate
template_name = 'pretixcontrol/organizers/gate_edit.html'
permission = 'can_change_organizer_settings'
form_class = GateForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(Gate, organizer=self.request.organizer, pk=self.kwargs.get('gate'))
def get_success_url(self):
return reverse('control:organizer.gates', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
messages.success(self.request, _('The gate has been created.'))
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
form.instance.log_action('pretix.gate.created', user=self.request.user, data={
k: getattr(self.object, k) for k in form.changed_data
})
return ret
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class GateUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
model = Gate
template_name = 'pretixcontrol/organizers/gate_edit.html'
permission = 'can_change_organizer_settings'
context_object_name = 'gate'
form_class = GateForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def get_object(self, queryset=None):
return get_object_or_404(Gate, organizer=self.request.organizer, pk=self.kwargs.get('gate'))
def get_success_url(self):
return reverse('control:organizer.gates', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
if form.has_changed():
self.object.log_action('pretix.gate.changed', user=self.request.user, data={
k: getattr(self.object, k)
for k in form.changed_data
})
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class GateDeleteView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DeleteView):
model = Gate
template_name = 'pretixcontrol/organizers/gate_delete.html'
permission = 'can_change_organizer_settings'
context_object_name = 'gate'
def get_object(self, queryset=None):
return get_object_or_404(Gate, organizer=self.request.organizer, pk=self.kwargs.get('gate'))
def get_success_url(self):
return reverse('control:organizer.gates', kwargs={
'organizer': self.request.organizer.slug,
})
@transaction.atomic
def delete(self, request, *args, **kwargs):
success_url = self.get_success_url()
self.object = self.get_object()
self.object.log_action('pretix.gate.deleted', user=self.request.user)
self.object.delete()
messages.success(request, _('The selected gate has been deleted.'))
return redirect(success_url)
class EventMetaPropertyListView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, ListView):
model = EventMetaProperty
template_name = 'pretixcontrol/organizers/properties.html'
permission = 'can_change_organizer_settings'
context_object_name = 'properties'
def get_queryset(self):
return self.request.organizer.meta_properties.all()
class EventMetaPropertyCreateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, CreateView):
model = EventMetaProperty
template_name = 'pretixcontrol/organizers/property_edit.html'
permission = 'can_change_organizer_settings'
form_class = EventMetaPropertyForm
def get_object(self, queryset=None):
return get_object_or_404(EventMetaProperty, organizer=self.request.organizer, pk=self.kwargs.get('property'))
def get_success_url(self):
return reverse('control:organizer.properties', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
messages.success(self.request, _('The property has been created.'))
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
form.instance.log_action('pretix.property.created', user=self.request.user, data={
k: getattr(self.object, k) for k in form.changed_data
})
return ret
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class EventMetaPropertyUpdateView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, UpdateView):
model = EventMetaProperty
template_name = 'pretixcontrol/organizers/property_edit.html'
permission = 'can_change_organizer_settings'
context_object_name = 'property'
form_class = EventMetaPropertyForm
def get_object(self, queryset=None):
return get_object_or_404(EventMetaProperty, organizer=self.request.organizer, pk=self.kwargs.get('property'))
def get_success_url(self):
return reverse('control:organizer.properties', kwargs={
'organizer': self.request.organizer.slug,
})
def form_valid(self, form):
if form.has_changed():
self.object.log_action('pretix.property.changed', user=self.request.user, data={
k: getattr(self.object, k)
for k in form.changed_data
})
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('Your changes could not be saved.'))
return super().form_invalid(form)
class EventMetaPropertyDeleteView(OrganizerDetailViewMixin, OrganizerPermissionRequiredMixin, DeleteView):
model = EventMetaProperty
template_name = 'pretixcontrol/organizers/property_delete.html'
permission = 'can_change_organizer_settings'
context_object_name = 'property'
def get_object(self, queryset=None):
return get_object_or_404(EventMetaProperty, organizer=self.request.organizer, pk=self.kwargs.get('property'))
def get_success_url(self):
return reverse('control:organizer.properties', kwargs={
'organizer': self.request.organizer.slug,
})
@transaction.atomic
def delete(self, request, *args, **kwargs):
success_url = self.get_success_url()
self.object = self.get_object()
self.object.log_action('pretix.property.deleted', user=self.request.user)
self.object.delete()
messages.success(request, _('The selected property has been deleted.'))
return redirect(success_url)
class LogView(OrganizerPermissionRequiredMixin, PaginationMixin, ListView):
template_name = 'pretixcontrol/organizers/logs.html'
permission = 'can_change_organizer_settings'
model = LogEntry
context_object_name = 'logs'
def get_queryset(self):
qs = self.request.organizer.all_logentries().select_related(
'user', 'content_type', 'api_token', 'oauth_application', 'device'
).order_by('-datetime')
qs = qs.exclude(action_type__in=OVERVIEW_BANLIST)
if self.request.GET.get('user'):
qs = qs.filter(user_id=self.request.GET.get('user'))
return qs
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
return ctx
| 40.782987
| 123
| 0.638654
|
4ad91e8b89b6e9d4612b3c0d75b38fd5b53a7bfb
| 1,948
|
py
|
Python
|
examples/org-website/query_sample.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 98
|
2015-12-08T14:26:27.000Z
|
2022-03-23T17:44:11.000Z
|
examples/org-website/query_sample.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 396
|
2016-02-23T11:07:55.000Z
|
2022-03-31T14:26:34.000Z
|
examples/org-website/query_sample.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 62
|
2015-12-09T11:20:53.000Z
|
2022-01-28T01:30:54.000Z
|
import hazelcast
from hazelcast.serialization.api import Portable
from hazelcast.predicate import sql, and_, between, equal
class User(Portable):
FACTORY_ID = 1
CLASS_ID = 1
def __init__(self, username=None, age=None, active=None):
self.username = username
self.age = age
self.active = active
def write_portable(self, writer):
writer.write_string("username", self.username)
writer.write_int("age", self.age)
writer.write_boolean("active", self.active)
def read_portable(self, reader):
self.username = reader.read_string("username")
self.age = reader.read_int("age")
self.active = reader.read_boolean("active")
def get_factory_id(self):
return self.FACTORY_ID
def get_class_id(self):
return self.CLASS_ID
def __repr__(self):
return "User(username=%s, age=%s, active=%s]" % (self.username, self.age, self.active)
def generate_users(users):
users.put("Rod", User("Rod", 19, True))
users.put("Jane", User("Jane", 20, True))
users.put("Freddy", User("Freddy", 23, True))
# Start the Hazelcast Client and connect to an already running Hazelcast Cluster on 127.0.0.1
client = hazelcast.HazelcastClient(portable_factories={User.FACTORY_ID: {User.CLASS_ID: User}})
# Get a Distributed Map called "users"
users_map = client.get_map("users").blocking()
# Add some users to the Distributed Map
generate_users(users_map)
# Create a Predicate from a String (a SQL like Where clause)
sql_query = sql("active AND age BETWEEN 18 AND 21)")
# Creating the same Predicate as above but with a builder
criteria_query = and_(equal("active", True), between("age", 18, 21))
# Get result collections using the two different Predicates
result1 = users_map.values(sql_query)
result2 = users_map.values(criteria_query)
# Print out the results
print(result1)
print(result2)
# Shutdown this Hazelcast Client
client.shutdown()
| 32.466667
| 95
| 0.708419
|
dfa4183aad80b4a3123d5bf97e246bef13558315
| 835
|
py
|
Python
|
wpa_project/program_app/models/beginner_class.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 1
|
2022-01-03T02:46:34.000Z
|
2022-01-03T02:46:34.000Z
|
wpa_project/program_app/models/beginner_class.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 31
|
2021-12-29T17:43:06.000Z
|
2022-03-25T01:03:17.000Z
|
wpa_project/program_app/models/beginner_class.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | null | null | null |
import logging
from django.db import models
logger = logging.getLogger(__name__)
def choices(choice_list):
choice = []
for c in choice_list:
choice.append((c, c))
return choice
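# For illustration, choices(['beginner', 'returnee']) evaluates to
# [('beginner', 'beginner'), ('returnee', 'returnee')], i.e. the (value, label)
# pairs Django expects for a field's `choices` argument.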
class BeginnerClass(models.Model):
class_types = ['beginner', 'returnee', 'combined']
class_states = ['scheduled', 'open', 'full', 'closed', 'canceled', 'recorded']
class_date = models.DateTimeField()
class_type = models.CharField(max_length=20, null=True, choices=choices(class_types))
beginner_limit = models.IntegerField()
returnee_limit = models.IntegerField()
instructor_limit = models.IntegerField(default=10)
state = models.CharField(max_length=20, null=True, choices=choices(class_states))
cost = models.IntegerField(default=5)
def get_states(self):
return self.class_states
| 29.821429
| 89
| 0.71018
|
bb752d79a7c30217fbbe7cc65b5bf743ff2fa781
| 2,620
|
py
|
Python
|
simplemooc/courses/migrations/0005_auto_20161208_0612.py
|
willsilvano/django-simplemooc
|
c6b7491ec600fbb0be563c4f3f0a0480f8f52270
|
[
"MIT"
] | null | null | null |
simplemooc/courses/migrations/0005_auto_20161208_0612.py
|
willsilvano/django-simplemooc
|
c6b7491ec600fbb0be563c4f3f0a0480f8f52270
|
[
"MIT"
] | null | null | null |
simplemooc/courses/migrations/0005_auto_20161208_0612.py
|
willsilvano/django-simplemooc
|
c6b7491ec600fbb0be563c4f3f0a0480f8f52270
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-08 08:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('courses', '0004_auto_20161130_0332'),
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Título')),
('content', models.TextField(verbose_name='Conteúdo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Alterado em')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='announcements', to='courses.Course', verbose_name='Curso')),
],
options={
'verbose_name': 'Anúncio',
'verbose_name_plural': 'Anúncios',
'ordering': ['-created_at'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='Conteúdo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Alterado em')),
('announcement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='courses.Announcement', verbose_name='Anúncio')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='Usuário')),
],
options={
'verbose_name': 'Comentário',
'verbose_name_plural': 'Comentários',
'ordering': ['-created_at'],
},
),
migrations.AlterField(
model_name='enrollment',
name='status',
field=models.IntegerField(blank=True, choices=[(0, 'Pendente'), (1, 'Aprovado'), (2, 'Cancelado')], default=1, verbose_name='Situação'),
),
]
| 46.785714
| 173
| 0.607252
|
556ac9158d6f667d12a731b341f1efca16f50aa7
| 2,700
|
py
|
Python
|
mycode/Ch. 8/tictactoe.py
|
evogel2/python-for-absolute-beginners-course
|
56e189bf049710ee4d4ca674c1f16291b7ea11b9
|
[
"MIT"
] | null | null | null |
mycode/Ch. 8/tictactoe.py
|
evogel2/python-for-absolute-beginners-course
|
56e189bf049710ee4d4ca674c1f16291b7ea11b9
|
[
"MIT"
] | null | null | null |
mycode/Ch. 8/tictactoe.py
|
evogel2/python-for-absolute-beginners-course
|
56e189bf049710ee4d4ca674c1f16291b7ea11b9
|
[
"MIT"
] | null | null | null |
# create the board
def create_board():
# board = [
# [r1_c1, r1_c2, r1_c3],
# [r2_c1, r2_c2, r2_c3],
# [r3_c1, r3_c2, r3_c3]
# ]
# Board is a list of rows
# Rows are a list of cells
board = [
[None, None, None],
[None, None, None],
[None, None, None]
]
return board
# until someone wins (check for winner)
# choose location, mark it
# toggle active player
def play_round(board, players, symbols, index):
while not find_winner(board):
announce_turn(players, index)
show_board(board)
if not choose_location(board, symbols[index]):
print("That isn't a valid play, try again.")
continue
index = (index + 1) % len(players)
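    # index has already been advanced past the player who made the winning move;
    # with two players, stepping forward once more wraps back around to the winner.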
winner = players[(index + 1) % len(players)]
return winner
def announce_turn(players, index):
player = players[index]
print(f"It's {player}'s turn! Here's the board:")
def show_board(board):
for row in board:
print("| ", end='')
for cell in row:
symbol = cell if cell is not None else "_"
print(symbol, end=' | ')
print()
def choose_location(board, symbol):
row = int(input("Choose which row:\n")) - 1
column = int(input("Choose which column:\n")) - 1
if row < 0 or row >= len(board):
return False
if column < 0 or column >= len(board[0]):
return False
cell = board[row][column]
if cell is not None:
return False
board[row][column] = symbol
return True
def find_winner(board):
sequences = get_winning_sequences(board)
for cells in sequences:
symbol1 = cells[0]
if symbol1 and all(symbol1 == cell for cell in cells):
return True
return False
def get_winning_sequences(board):
sequences = []
# Win by rows
rows = board
sequences.extend(rows)
# Win by columns
columns = []
for col_idx in range(0, 3):
col = [
board[0][col_idx],
board[1][col_idx],
board[2][col_idx]
]
sequences.append(col)
# Win by diagonals
diagonals = [
[board[0][0], board[1][1], board[2][2]],
[board[0][2], board[1][1], board[2][0]]
]
sequences.extend(diagonals)
return sequences
def main():
players_list = ['Elliott', 'Computer']
player_symbols = ['X', 'O']
active_player_index = 0
board = create_board()
winner = play_round(board, players_list, player_symbols, active_player_index)
# game over!
print(f"Game over! {winner} has won with the board: ")
show_board(board)
if __name__ == '__main__':
main()
| 22.131148
| 81
| 0.571852
|
aeba2ae2e147b1e82856eafea8a1dd27a8829822
| 1,783
|
py
|
Python
|
functionaltests/common/datagen.py
|
infobloxopen/designate
|
531a28b8453cfe5641284a16e0342db8d709ab36
|
[
"Apache-2.0"
] | null | null | null |
functionaltests/common/datagen.py
|
infobloxopen/designate
|
531a28b8453cfe5641284a16e0342db8d709ab36
|
[
"Apache-2.0"
] | null | null | null |
functionaltests/common/datagen.py
|
infobloxopen/designate
|
531a28b8453cfe5641284a16e0342db8d709ab36
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from functionaltests.api.v2.models.zone_model import ZoneModel
def random_ip():
return ".".join(str(random.randrange(0, 256)) for _ in range(4))
def random_string(prefix='rand', n=8, suffix=''):
"""Return a string containing random digits
:param prefix: the exact text to start the string. Defaults to "rand"
:param n: the number of random digits to generate
:param suffix: the exact text to end the string
"""
digits = "".join(str(random.randrange(0, 10)) for _ in range(n))
return prefix + digits + suffix
def random_zone_data(name=None, email=None, ttl=None, description=None):
"""Generate random zone data, with optional overrides
:return: A ZoneModel
"""
if name is None:
name = random_string(prefix='testdomain', suffix='.com.')
if email is None:
email = ("admin@" + name).strip('.')
if description is None:
description = random_string(prefix='Description ')
if ttl is None:
        ttl = random.randint(1200, 8400)
    return ZoneModel.from_dict({
        'zone': {
            'name': name,
            'email': email,
            'ttl': ttl,
'description': description}})
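# A minimal usage sketch (illustrative only; the concrete values shown are just
# examples of the output shape):
#
#   ip = random_ip()                    # e.g. "203.0.113.7"
#   name = random_string(prefix='testdomain', suffix='.com.')
#   zone = random_zone_data(ttl=3600)   # override only the TTL, randomize the rest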
| 31.839286
| 73
| 0.681997
|
a5dec97e3858280bca93d67fc27cf5a92b25a2ff
| 142
|
py
|
Python
|
tests/unit/clone.py
|
microsoft/torchy
|
2c36cc50246dfeb22b9d65a402050398b78c14e5
|
[
"MIT"
] | 4
|
2021-12-30T19:54:57.000Z
|
2022-03-15T23:59:55.000Z
|
tests/unit/clone.py
|
microsoft/torchy
|
2c36cc50246dfeb22b9d65a402050398b78c14e5
|
[
"MIT"
] | null | null | null |
tests/unit/clone.py
|
microsoft/torchy
|
2c36cc50246dfeb22b9d65a402050398b78c14e5
|
[
"MIT"
] | null | null | null |
from testdriver import *
x = torch.tensor(((3.,2.), (7.,9.)))
y = x.clone().detach()
w = x.add(y)
y.add_(x)
z = x.add(x)
print(w)
print(z)
| 11.833333
| 36
| 0.556338
|
eb0d81d493cfce6b6b587619c99b638bea41187a
| 442
|
py
|
Python
|
homeassistant/components/generic/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/generic/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/generic/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Constants for the generic (IP Camera) integration."""
DOMAIN = "generic"
DEFAULT_NAME = "Generic Camera"
CONF_CONTENT_TYPE = "content_type"
CONF_LIMIT_REFETCH_TO_URL_CHANGE = "limit_refetch_to_url_change"
CONF_STILL_IMAGE_URL = "still_image_url"
CONF_STREAM_SOURCE = "stream_source"
CONF_FRAMERATE = "framerate"
GET_IMAGE_TIMEOUT = 10
DEFAULT_USERNAME = None
DEFAULT_PASSWORD = None
DEFAULT_IMAGE_URL = None
DEFAULT_STREAM_SOURCE = None
| 27.625
| 64
| 0.816742
|
ef17ac611c45024ad5c7c0d22a4e4eccd24ad177
| 4,423
|
py
|
Python
|
dm_control/viewer/gui/fullscreen_quad.py
|
h8907283/dm_control
|
fe4449606742a7b8bec81930790b98244cddc538
|
[
"Apache-2.0"
] | 2,863
|
2018-01-03T01:38:52.000Z
|
2022-03-30T09:49:50.000Z
|
dm_control/viewer/gui/fullscreen_quad.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | 266
|
2018-01-03T16:00:04.000Z
|
2022-03-26T15:45:48.000Z
|
dm_control/viewer/gui/fullscreen_quad.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | 580
|
2018-01-03T03:17:27.000Z
|
2022-03-31T19:29:32.000Z
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""OpenGL utility for rendering numpy arrays as images on a quad surface."""
import ctypes
import numpy as np
from OpenGL import GL
from OpenGL.GL import shaders
# This array contains packed position and texture coordinates of a fullscreen
# quad.
# It contains the definitions of 4 vertices that will be rendered as a triangle
# strip. Each vertex is described by a tuple:
# (VertexPosition.X, VertexPosition.Y, TextureCoord.U, TextureCoord.V)
_FULLSCREEN_QUAD_VERTEX_POSITONS_AND_TEXTURE_COORDS = np.array([
-1, -1, 0, 1,
-1, 1, 0, 0,
1, -1, 1, 1,
1, 1, 1, 0], dtype=np.float32)
_FLOATS_PER_XY = 2
_FLOATS_PER_VERTEX = 4
_SIZE_OF_FLOAT = ctypes.sizeof(ctypes.c_float)
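# For reference, the strip above packs 4 vertices of _FLOATS_PER_VERTEX floats
# each (16 floats, 64 bytes in total). A vertex therefore spans a stride of
# _FLOATS_PER_VERTEX * _SIZE_OF_FLOAT = 16 bytes, with the two position floats
# at byte offset 0 and the two texture coordinates at byte offset
# _FLOATS_PER_XY * _SIZE_OF_FLOAT = 8; these are the stride and offset values
# that _init_shaders() later passes to glVertexAttribPointer.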
_VERTEX_SHADER = """
#version 120
attribute vec2 position;
attribute vec2 uv;
void main() {
gl_Position = vec4(position, 0, 1);
gl_TexCoord[0].st = uv;
}
"""
_FRAGMENT_SHADER = """
#version 120
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, gl_TexCoord[0].st);
}
"""
_VAR_POSITION = 'position'
_VAR_UV = 'uv'
_VAR_TEXTURE_SAMPLER = 'tex'
class FullscreenQuadRenderer:
"""Renders pixmaps on a fullscreen quad using OpenGL."""
def __init__(self):
"""Initializes the fullscreen quad renderer."""
GL.glClearColor(0, 0, 0, 0)
self._init_geometry()
self._init_texture()
self._init_shaders()
def _init_geometry(self):
"""Initializes the fullscreen quad geometry."""
vertex_buffer = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vertex_buffer)
GL.glBufferData(
GL.GL_ARRAY_BUFFER,
_FULLSCREEN_QUAD_VERTEX_POSITONS_AND_TEXTURE_COORDS.nbytes,
_FULLSCREEN_QUAD_VERTEX_POSITONS_AND_TEXTURE_COORDS, GL.GL_STATIC_DRAW)
def _init_texture(self):
"""Initializes the texture storage."""
self._texture = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture)
GL.glTexParameteri(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexParameteri(
GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
def _init_shaders(self):
"""Initializes the shaders used to render the textures fullscreen quad."""
vs = shaders.compileShader(_VERTEX_SHADER, GL.GL_VERTEX_SHADER)
fs = shaders.compileShader(_FRAGMENT_SHADER, GL.GL_FRAGMENT_SHADER)
self._shader = shaders.compileProgram(vs, fs)
stride = _FLOATS_PER_VERTEX * _SIZE_OF_FLOAT
var_position = GL.glGetAttribLocation(self._shader, _VAR_POSITION)
GL.glVertexAttribPointer(
var_position, 2, GL.GL_FLOAT, GL.GL_FALSE, stride, None)
GL.glEnableVertexAttribArray(var_position)
var_uv = GL.glGetAttribLocation(self._shader, _VAR_UV)
uv_offset = ctypes.c_void_p(_FLOATS_PER_XY * _SIZE_OF_FLOAT)
GL.glVertexAttribPointer(
var_uv, 2, GL.GL_FLOAT, GL.GL_FALSE, stride, uv_offset)
GL.glEnableVertexAttribArray(var_uv)
self._var_texture_sampler = GL.glGetUniformLocation(
self._shader, _VAR_TEXTURE_SAMPLER)
def render(self, pixmap, viewport_shape):
"""Renders the pixmap on a fullscreen quad.
Args:
      pixmap: A 3D numpy array of bytes (np.uint8), with dimensions
        (height, width, 3).
viewport_shape: A tuple of two elements, (width, height).
"""
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glViewport(0, 0, *viewport_shape)
GL.glUseProgram(self._shader)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB, pixmap.shape[1],
pixmap.shape[0], 0, GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
pixmap)
GL.glUniform1i(self._var_texture_sampler, 0)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
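# A minimal usage sketch (assumes an OpenGL context is already current, for
# example the GLFW window created elsewhere in dm_control.viewer.gui; the
# 640x480 size is an arbitrary example):
#
#   renderer = FullscreenQuadRenderer()
#   pixmap = np.zeros((480, 640, 3), dtype=np.uint8)   # (height, width, RGB)
#   renderer.render(pixmap, viewport_shape=(640, 480))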
| 35.103175
| 79
| 0.715804
|
f263722f1e6f2ea2b9719da0f40b24bd0b60eddc
| 7,203
|
py
|
Python
|
finrl/pairlist/IPairList.py
|
JuohmaruSanshiro/FinRL-Library
|
d62fd577a0be37e1daffdc5ba10862e42b38e04b
|
[
"MIT"
] | 2
|
2021-03-16T02:33:57.000Z
|
2021-04-01T02:09:49.000Z
|
finrl/pairlist/IPairList.py
|
JuohmaruSanshiro/FinRL-Library
|
d62fd577a0be37e1daffdc5ba10862e42b38e04b
|
[
"MIT"
] | null | null | null |
finrl/pairlist/IPairList.py
|
JuohmaruSanshiro/FinRL-Library
|
d62fd577a0be37e1daffdc5ba10862e42b38e04b
|
[
"MIT"
] | 2
|
2021-03-16T07:04:33.000Z
|
2021-12-19T09:12:10.000Z
|
"""
PairList Handler base class
"""
import logging
from abc import ABC, abstractmethod, abstractproperty
from copy import deepcopy
from typing import Any, Dict, List
from cachetools import TTLCache, cached
from finrl.exceptions import OperationalException
from finrl.exchange import market_is_active
logger = logging.getLogger(__name__)
class IPairList(ABC):
def __init__(self, exchange, pairlistmanager,
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
"""
:param exchange: Exchange instance
:param pairlistmanager: Instantiated Pairlist manager
:param config: Global bot configuration
:param pairlistconfig: Configuration for this Pairlist Handler - can be empty.
:param pairlist_pos: Position of the Pairlist Handler in the chain
"""
self._enabled = True
self._exchange = exchange
self._pairlistmanager = pairlistmanager
self._config = config
self._pairlistconfig = pairlistconfig
self._pairlist_pos = pairlist_pos
self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)
self._last_refresh = 0
self._log_cache: TTLCache = TTLCache(maxsize=1024, ttl=self.refresh_period)
@property
def name(self) -> str:
"""
Gets name of the class
-> no need to overwrite in subclasses
"""
return self.__class__.__name__
def log_on_refresh(self, logmethod, message: str) -> None:
"""
Logs message - not more often than "refresh_period" to avoid log spamming
Logs the log-message as debug as well to simplify debugging.
:param logmethod: Function that'll be called. Most likely `logger.info`.
:param message: String containing the message to be sent to the function.
:return: None.
"""
@cached(cache=self._log_cache)
def _log_on_refresh(message: str):
logmethod(message)
# Log as debug first
logger.debug(message)
# Call hidden function.
_log_on_refresh(message)
@abstractproperty
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty Dict is passed
as tickers argument to filter_pairlist
"""
@abstractmethod
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
-> Please overwrite in subclasses
"""
def _validate_pair(self, ticker) -> bool:
"""
Check one pair against Pairlist Handler's specific conditions.
Either implement it in the Pairlist Handler or override the generic
filter_pairlist() method.
:param ticker: ticker dict as returned from ccxt.load_markets()
:return: True if the pair can stay, false if it should be removed
"""
raise NotImplementedError()
def gen_pairlist(self, cached_pairlist: List[str], tickers: Dict) -> List[str]:
"""
Generate the pairlist.
This method is called once by the pairlistmanager in the refresh_pairlist()
method to supply the starting pairlist for the chain of the Pairlist Handlers.
Pairlist Filters (those Pairlist Handlers that cannot be used at the first
position in the chain) shall not override this base implementation --
it will raise the exception if a Pairlist Handler is used at the first
position in the chain.
:param cached_pairlist: Previously generated pairlist (cached)
:param tickers: Tickers (from exchange.get_tickers()).
:return: List of pairs
"""
raise OperationalException("This Pairlist Handler should not be used "
"at the first position in the list of Pairlist Handlers.")
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
"""
Filters and sorts pairlist and returns the whitelist again.
Called on each bot iteration - please use internal caching if necessary
This generic implementation calls self._validate_pair() for each pair
in the pairlist.
Some Pairlist Handlers override this generic implementation and employ
own filtration.
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
:return: new whitelist
"""
if self._enabled:
# Copy list since we're modifying this list
for p in deepcopy(pairlist):
# Filter out assets
if not self._validate_pair(tickers[p]):
pairlist.remove(p)
return pairlist
def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:
"""
Proxy method to verify_blacklist for easy access for child classes.
:param pairlist: Pairlist to validate
:param logmethod: Function that'll be called, `logger.info` or `logger.warning`.
:return: pairlist - blacklisted pairs
"""
return self._pairlistmanager.verify_blacklist(pairlist, logmethod)
def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:
"""
Check available markets and remove pair from whitelist if necessary
:param whitelist: the sorted list of pairs the user might want to trade
:return: the list of pairs the user wants to trade without those unavailable or
black_listed
"""
markets = self._exchange.markets
if not markets:
raise OperationalException(
'Markets not loaded. Make sure that exchange is initialized correctly.')
sanitized_whitelist: List[str] = []
for pair in pairlist:
# pair is not in the generated dynamic market or has the wrong stake currency
if pair not in markets:
logger.warning(f"Pair {pair} is not compatible with exchange "
f"{self._exchange.name}. Removing it from whitelist..")
continue
if not self._exchange.market_is_tradable(markets[pair]):
logger.warning(f"Pair {pair} is not tradable with Freqtrade."
"Removing it from whitelist..")
continue
if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:
logger.warning(f"Pair {pair} is not compatible with your stake currency "
f"{self._config['stake_currency']}. Removing it from whitelist..")
continue
# Check if market is active
market = markets[pair]
if not market_is_active(market):
logger.info(f"Ignoring {pair} from whitelist. Market is not active.")
continue
if pair not in sanitized_whitelist:
sanitized_whitelist.append(pair)
# We need to remove pairs that are unknown
return sanitized_whitelist
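# A minimal illustrative subclass (a sketch, not part of this module): a
# Pairlist Filter typically overrides only needstickers, short_desc() and
# _validate_pair(), letting the generic filter_pairlist() above drive it. The
# class name, the "min_last_price" option and the use of the ccxt "last" ticker
# field are assumptions made for this example only.
class MinPriceFilterSketch(IPairList):
    def __init__(self, exchange, pairlistmanager, config, pairlistconfig, pairlist_pos):
        super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
        self._min_last_price = pairlistconfig.get('min_last_price', 0)
    @property
    def needstickers(self) -> bool:
        # Tickers are required because _validate_pair() inspects the last price.
        return True
    def short_desc(self) -> str:
        return f"{self.name} - Filtering pairs with a last price below {self._min_last_price}."
    def _validate_pair(self, ticker) -> bool:
        # Keep the pair only when a last price is known and meets the threshold.
        return ticker.get('last') is not None and ticker['last'] >= self._min_last_price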
| 38.935135
| 97
| 0.6389
|
b8c60d95cbe8c09f4fb9d65881b5af9fbc5ed92b
| 1,580
|
py
|
Python
|
tools/mkdocs_macros_bids/main.py
|
JuliusWelzel/bids-specification
|
bebf4937fbacc26e7d820cf98501b1cc73d826db
|
[
"CC-BY-4.0"
] | null | null | null |
tools/mkdocs_macros_bids/main.py
|
JuliusWelzel/bids-specification
|
bebf4937fbacc26e7d820cf98501b1cc73d826db
|
[
"CC-BY-4.0"
] | null | null | null |
tools/mkdocs_macros_bids/main.py
|
JuliusWelzel/bids-specification
|
bebf4937fbacc26e7d820cf98501b1cc73d826db
|
[
"CC-BY-4.0"
] | 1
|
2022-01-16T13:48:51.000Z
|
2022-01-16T13:48:51.000Z
|
"""This package is used to build elements in the bids-specification schema into
MarkDown format for the specification text.
Functions decorated in "define_env()" are callable throughout the
specification and are run/rendered with the mkdocs plugin "macros".
"""
import os
import sys
code_path = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(code_path)
import macros
def define_env(env):
"""Define variables, macros and filters for the mkdocs-macros plugin.
Parameters
----------
env : :obj:`macros.plugin.MacrosPlugin`
An object in which to inject macros, variables, and filters.
Notes
-----
"variables" are the dictionary that contains the environment variables
"macro" is a decorator function, to declare a macro.
Macro aliases must start with "MACROS___", for compatibility with the PDF
build code.
"""
env.macro(macros.make_filename_template, "MACROS___make_filename_template")
env.macro(macros.make_entity_table, "MACROS___make_entity_table")
env.macro(
macros.make_entity_definitions,
"MACROS___make_entity_definitions",
)
env.macro(macros.make_glossary, "MACROS___make_glossary")
env.macro(macros.make_suffix_table, "MACROS___make_suffix_table")
env.macro(macros.make_metadata_table, "MACROS___make_metadata_table")
env.macro(macros.make_subobject_table, "MACROS___make_subobject_table")
env.macro(macros.make_columns_table, "MACROS___make_columns_table")
env.macro(macros.make_filetree_example, "MACROS___make_filetree_example")
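# How these registrations are consumed (a sketch; the page content shown is
# hypothetical): once define_env() has run, a specification page rendered by
# the mkdocs-macros plugin can invoke a registered macro with a Jinja-style
# expression such as
#
#   {{ MACROS___make_glossary() }}
#
# Any arguments a macro accepts are determined by its implementation in
# macros.py.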
| 35.909091
| 79
| 0.755696
|
099d53e039365f205eb5e3c30b23f710c1217df7
| 5,153
|
py
|
Python
|
lib/spack/spack/test/cmd/gpg.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
lib/spack/spack/test/cmd/gpg.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17
|
2019-03-21T15:54:00.000Z
|
2022-03-29T19:34:28.000Z
|
lib/spack/spack/test/cmd/gpg.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-04-07T18:27:09.000Z
|
2022-03-31T22:52:38.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import llnl.util.filesystem as fs
import spack.util.executable
import spack.util.gpg
from spack.paths import mock_gpg_data_path, mock_gpg_keys_path
from spack.main import SpackCommand
from spack.util.executable import ProcessError
#: spack command used by tests below
gpg = SpackCommand('gpg')
# test gpg command detection
@pytest.mark.parametrize('cmd_name,version', [
('gpg', 'undetectable'), # undetectable version
('gpg', 'gpg (GnuPG) 1.3.4'), # insufficient version
('gpg', 'gpg (GnuPG) 2.2.19'), # sufficient version
('gpg2', 'gpg (GnuPG) 2.2.19'), # gpg2 command
])
def test_find_gpg(cmd_name, version, tmpdir, mock_gnupghome, monkeypatch):
TEMPLATE = ('#!/bin/sh\n'
'echo "{version}"\n')
with tmpdir.as_cwd():
for fname in (cmd_name, 'gpgconf'):
with open(fname, 'w') as f:
f.write(TEMPLATE.format(version=version))
fs.set_executable(fname)
monkeypatch.setitem(os.environ, "PATH", str(tmpdir))
if version == 'undetectable' or version.endswith('1.3.4'):
with pytest.raises(spack.util.gpg.SpackGPGError):
spack.util.gpg.ensure_gpg(reevaluate=True)
else:
spack.util.gpg.ensure_gpg(reevaluate=True)
gpg_exe = spack.util.gpg.get_global_gpg_instance().gpg_exe
assert isinstance(gpg_exe, spack.util.executable.Executable)
gpgconf_exe = spack.util.gpg.get_global_gpg_instance().gpgconf_exe
assert isinstance(gpgconf_exe, spack.util.executable.Executable)
def test_no_gpg_in_path(tmpdir, mock_gnupghome, monkeypatch):
monkeypatch.setitem(os.environ, "PATH", str(tmpdir))
with pytest.raises(spack.util.gpg.SpackGPGError):
spack.util.gpg.ensure_gpg(reevaluate=True)
@pytest.mark.maybeslow
@pytest.mark.skipif(not spack.util.gpg.has_gpg(),
reason='These tests require gnupg2')
def test_gpg(tmpdir, mock_gnupghome):
# Verify a file with an empty keyring.
with pytest.raises(ProcessError):
gpg('verify', os.path.join(mock_gpg_data_path, 'content.txt'))
# Import the default key.
gpg('init', '--from', mock_gpg_keys_path)
# List the keys.
# TODO: Test the output here.
gpg('list', '--trusted')
gpg('list', '--signing')
# Verify the file now that the key has been trusted.
gpg('verify', os.path.join(mock_gpg_data_path, 'content.txt'))
# Untrust the default key.
gpg('untrust', 'Spack testing')
# Now that the key is untrusted, verification should fail.
with pytest.raises(ProcessError):
gpg('verify', os.path.join(mock_gpg_data_path, 'content.txt'))
# Create a file to test signing.
test_path = tmpdir.join('to-sign.txt')
with open(str(test_path), 'w+') as fout:
fout.write('Test content for signing.\n')
# Signing without a private key should fail.
with pytest.raises(RuntimeError) as exc_info:
gpg('sign', str(test_path))
assert exc_info.value.args[0] == 'no signing keys are available'
# Create a key for use in the tests.
keypath = tmpdir.join('testing-1.key')
gpg('create',
'--comment', 'Spack testing key',
'--export', str(keypath),
'Spack testing 1',
'spack@googlegroups.com')
keyfp = spack.util.gpg.signing_keys()[0]
# List the keys.
# TODO: Test the output here.
gpg('list')
gpg('list', '--trusted')
gpg('list', '--signing')
# Signing with the default (only) key.
gpg('sign', str(test_path))
    # Verify the file we just signed.
gpg('verify', str(test_path))
# Export the key for future use.
export_path = tmpdir.join('export.testing.key')
gpg('export', str(export_path))
# Create a second key for use in the tests.
gpg('create',
'--comment', 'Spack testing key',
'Spack testing 2',
'spack@googlegroups.com')
# List the keys.
# TODO: Test the output here.
gpg('list', '--trusted')
gpg('list', '--signing')
test_path = tmpdir.join('to-sign-2.txt')
with open(str(test_path), 'w+') as fout:
fout.write('Test content for signing.\n')
# Signing with multiple signing keys is ambiguous.
with pytest.raises(RuntimeError) as exc_info:
gpg('sign', str(test_path))
assert exc_info.value.args[0] == \
'multiple signing keys are available; please choose one'
# Signing with a specified key.
gpg('sign', '--key', keyfp, str(test_path))
# Untrusting signing keys needs a flag.
with pytest.raises(ProcessError):
gpg('untrust', 'Spack testing 1')
# Untrust the key we created.
gpg('untrust', '--signing', keyfp)
# Verification should now fail.
with pytest.raises(ProcessError):
gpg('verify', str(test_path))
# Trust the exported key.
gpg('trust', str(export_path))
# Verification should now succeed again.
gpg('verify', str(test_path))
| 32.20625
| 74
| 0.656705
|
6b61dfa191f5696af30d6b1503b6bb7ecfaa18b3
| 60,033
|
py
|
Python
|
file.py
|
bictorv/chaosnet-bridge
|
15448ae4b7b6cd2ba5ca09f5f5c755e559246620
|
[
"ECL-2.0",
"Apache-2.0"
] | 11
|
2017-08-29T11:13:08.000Z
|
2021-05-08T01:58:06.000Z
|
file.py
|
bictorv/chaosnet-bridge
|
15448ae4b7b6cd2ba5ca09f5f5c755e559246620
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2017-11-30T18:07:03.000Z
|
2021-04-03T18:11:34.000Z
|
file.py
|
bictorv/chaosnet-bridge
|
15448ae4b7b6cd2ba5ca09f5f5c755e559246620
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2018-11-08T15:39:47.000Z
|
2020-06-23T06:59:33.000Z
|
# Copyright © 2020 Björn Victor (bjorn@victor.se)
# Chaosnet client for FILE protocol
# Demonstrates the Packet API for the NCP of cbridge, the bridge program for various Chaosnet implementations.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://github.com/PDP-10/its/blob/master/doc/sysdoc/chaos.file,
# and LMI's SYS:FILE;SERVER.LISP#202 and SYS;NETWORK.CHAOS;QFILE.LISP#389
# and its/src/syseng/file.591, its/src/sysen2/cftp.475
# TODO
# Document the protocol - SYSDOC;CHAOS FILE is not up-to-date.
# Complain if ostype unknown, implement command to set it
# Split better in classes/modules/files
# Improve exception handling
# abstract "packets" to make TCP port easy
# make completing (with Tab) and abbreviating parser - https://python-prompt-toolkit.readthedocs.io ?
# add pathname interpretation based on DNS HINFO
# - and handle Symbolics syntax, not only ITS and (MIT) LISPM
# Writing files still doesn't work well with ITS, e.g. writing chsgtv/chasta.8
import socket, io
import sys, subprocess, threading, time
import re, string
import functools
import codecs
# MacOS readline isn't always the GNU version, so no completion, but at least command history and basic line editing
import readline
from datetime import datetime
from enum import IntEnum, auto
from random import randint
from pprint import pprint, pformat
# pip3 install dnspython
import dns.resolver
from concurrent.futures import ThreadPoolExecutor
# The directory of this needs to match the "socketdir" ncp setting in cbridge.
packet_address = '/tmp/chaos_packet'
# -d
debug = False
# Chaos packet opcodes
class Opcode(IntEnum):
RFC = 1
OPN = auto()
CLS = auto()
FWD = auto()
ANS = auto()
SNS = auto()
STS = auto()
RUT = auto()
LOS = auto()
LSN = auto()
MNT = auto()
EOF = auto() # with NCP, extended with optional "wait" data part which is never sent on the wire
UNC = auto()
BRD = auto()
ACK = 0o177 # new opcode to get an acknowledgement from NCP when an EOF+wait has been acked
DAT = 0o200
SMARK = 0o201 # synchronous mark
AMARK = 0o202 # asynchronous mark
DWD = 0o300
# Lispm character set
class LMchar:
RUBOUT = bytes([0o207])
BACKSPACE = bytes([0o210])
TAB = bytes([0o211])
LF = bytes([0o212])
PAGE = bytes([0o214])
RETURN = bytes([0o215])
def toascii(strng):
return LMcodec().decode(strng,tostring=False)[0]
# if isinstance(strng, str):
# return strng.translate(str.maketrans('\211\215\214\212','\t\n\f\r'))
# else:
# return strng.translate(bytes.maketrans(b'\211\215\214\212',b'\t\n\f\r'))
# See CHAOS FILE https://github.com/PDP-10/its/blob/master/doc/sysdoc/chaos.file
# and SYSENG;FILE > (label CHR2LM etc)
# and! https://www.rfc-editor.org/rfc/rfc1037.html Tables 1 and 2.
# and https://docs.python.org/3/library/codecs.html
# https://stackoverflow.com/questions/38777818/how-do-i-properly-create-custom-text-codecs
# https://github.com/pyserial/pyserial/blob/master/serial/tools/hexlify_codec.py
# Consider UTF8 translation of funny chars, like in Supdup?
class LMcodec(codecs.Codec):
def __init__(self, errors='strict'):
# See Tables 1 and 2 in https://www.rfc-editor.org/rfc/rfc1037.html
if False and debug:
print("LMcodec({!r})".format(errors), file=sys.stderr)
# LISPM to Unix
self.decoding_map = codecs.make_identity_dict(range(256))
for i in range(0o10, 0o15+1):
self.decoding_map[i] = i+0o200
self.decoding_map[0o177] = 0o377
for i in range(0o210,0o214+1):
self.decoding_map[i] = i-0o200
self.decoding_map[0o212] = 0o15
self.decoding_map[0o215] = 0o12
self.decoding_map[0o377] = 0o177
# self.decoding_map.update(zip([ ord(c) for c in '\211\215\214\212' ],
# [ ord(c) for c in '\t\n\f\r']))
#self.encoding_map = codecs.make_encoding_map(self.decoding_map)
# Unix to LISPM
self.encoding_map = codecs.make_identity_dict(range(256))
for i in range(0o10, 0o14+1):
self.encoding_map[i] = i+0o200
self.encoding_map[0o12] = 0o215
self.encoding_map[0o15] = 0o212
self.encoding_map[0o177] = 0o377
for i in range(0o210, 0o215+1):
self.encoding_map[i] = i-0o200
self.encoding_map[0o377] = 0o177
# self.encoding_map.update(zip([ ord(c) for c in '\t\n\f\r'],
# [ ord(c) for c in '\211\215\214\212' ]))
def decode(self, data, errors='strict', tostring=True):
if tostring:
# This always renders a string
return codecs.charmap_decode(data, errors, self.decoding_map)
if isinstance(data,str):
tr = str.maketrans(self.decoding_map)
r = data.translate(tr)
else:
tr = bytes.maketrans(bytes(self.decoding_map.keys()), bytes(self.decoding_map.values()))
data = bytes(data)
r = data.translate(tr)
if False and debug:
print("LMcodec.decode {!r} (len {}) errors {!r}: {!r}".format(type(data), len(data), errors, data), file=sys.stderr)
print("LMcodec.decode result {!r}".format(r))
return (r,len(r))
# return (LMdecode(data), len(data))
def encode(self, data, errors='strict', tostring=True):
if tostring:
# This always renders a string
if False and debug:
r = codecs.charmap_encode(data, errors, self.encoding_map)
print("LMcode.encode tostring {!r} -> {!r}".format(data, r))
return r
return codecs.charmap_encode(data, errors, self.encoding_map)
if isinstance(data,str):
tr = str.maketrans(self.encoding_map)
r = data.translate(tr)
else:
tr = bytes.maketrans(bytes(self.encoding_map.keys()), bytes(self.encoding_map.values()))
data = bytes(data)
r = data.translate(tr)
if False and debug:
print("LMcodec.encode {!r} (len {}) errors {!r}: {!r} -> {!r}".format(type(data), len(data), errors, data, r), file=sys.stderr)
return (r,len(r))
# return (LMencode(data), len(data))
class LMinc_decoder(LMcodec, codecs.IncrementalDecoder):
def decode(self, data, final=False):
return super().decode(data)[0]
class LMinc_encoder(LMcodec, codecs.IncrementalEncoder):
def encode(self, data, final=False):
return super().encode(data)[0]
class LMstream_writer(LMcodec, codecs.StreamWriter):
pass
class LMstream_reader(LMcodec, codecs.StreamReader):
pass
def LMregentry(encoding_name):
if False and debug:
print("LMregentry({})".format(encoding_name))
if (encoding_name == 'lispm'):
return codecs.CodecInfo(name='lispm', encode=LMcodec().encode, decode=LMcodec().decode,
incrementalencoder=LMinc_encoder, incrementaldecoder=LMinc_decoder,
streamwriter=LMstream_writer, streamreader=LMstream_reader)
return None
def LMdecode(data):
# LISPM to Unix
return LMcodec().decode(data,tostring=False)[0]
# if isinstance(data, str):
# tr = str.maketrans('\211\215\214\212','\t\n\f\r')
# else:
# data = bytes(data)
# tr = bytes.maketrans(b'\211\215\214\212',b'\t\n\f\r')
# if False and debug:
# print("LMdecode {!r} (len {}) tr {!r}".format(type(data), len(data), tr), file=sys.stderr)
# o = data.translate(tr)
# return o
def LMencode(data):
# Unix to LISPM
return LMcodec().encode(data,tostring=False)[0]
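# Round-trip sketch of the translation (example strings chosen for illustration):
#   LMencode("foo\nbar")  == "foo" + chr(0o215) + "bar"   # Return on the Lispm side
#   LMdecode(LMencode("foo\nbar")) == "foo\nbar"
# Once codecs.register(LMregentry) has run (see __main__ below), the same mapping
# is available as the 'lispm' codec, e.g. str(b"\215", 'lispm') == "\n".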
# Basic error class
class FileError(Exception):
typestring = "FILE Error"
def __init__(self,code,msg):
self.code = code
self.message = msg
super().__init__(msg)
def __str__(self):
return "{} {!s}: {!s}".format(self.typestring, str(self.code,"ascii"), str(self.message,"ascii"))
# The three types of errors
class CommandError(FileError):
typestring = "Command error"
class RestartableError(FileError):
typestring = "Restartable error"
class FatalError(FileError):
typestring = "Fatal error"
# Some specific errors we want to handle
class FNFError(FatalError):
pass
class DNFError(FatalError):
pass
class NLIError(FatalError):
pass
class NCPConn:
sock = None
active = False
contact = None
def __init__(self):
self.get_socket()
def __str__(self):
return "<{} {} {}>".format(type(self).__name__, self.contact, "active" if self.active else "passive")
def __del__(self):
if debug:
print("{!s} being deleted".format(self))
def close(self, msg="Thank you"):
if debug:
print("Closing {} with msg {}".format(self,msg), file=sys.stderr)
self.send_packet(Opcode.CLS, msg)
try:
self.sock.close()
except socket.error as msg:
print('Socket error closing:',msg)
self.sock = None
# Construct a 4-byte packet header for chaos_packet connections
def packet_header(self, opc, plen):
return bytes([opc, 0, plen & 0xff, int(plen/256)])
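    # Header layout: opcode, a zero byte, then the 16-bit length in little-endian
    # order.  Worked example (values chosen for illustration):
    #   packet_header(Opcode.RFC, 300) == bytes([1, 0, 44, 1])   # 300 == 1*256 + 44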
def get_socket(self):
        address = packet_address
# Create a Unix socket
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
try:
self.sock.connect(address)
return self.sock
except socket.error as msg:
print('Socket errror:',msg, file=sys.stderr)
sys.exit(1)
def send_packet(self, opcode, data):
# print("send pkt {} {} {!r}".format(Opcode(opcode).name, type(data), data))
if isinstance(data, str):
msg = bytes(data,"ascii")
else:
msg = data
if debug:
print("> {} {} {}".format(self,Opcode(opcode).name, len(msg)), file=sys.stderr)
self.sock.sendall(self.packet_header(Opcode(opcode), len(msg)) + msg)
def get_packet(self):
# Read header to see how long the pkt is
hdr = self.sock.recv(4)
# First is opcode
opc = hdr[0]
# then zero
assert(hdr[1] == 0)
# then length
length = hdr[2] + hdr[3]*256
assert(length <= 488)
if debug:
print("< {} {} {}".format(self,Opcode(opc).name, length), file=sys.stderr)
data = self.sock.recv(length)
# print("< {} {!s}".format(len(data), str(data.translate(bytes.maketrans(b'\211\215\214\212',b'\t\n\f\r')),"utf8")))
return (opc,data)
def rfc(self, contact,host,args=[]):
h = bytes(("{} {}"+" {}"*len(args)).format(host,contact.upper(),*args),"ascii")
if debug:
print("RFC: {}".format(h), file=sys.stderr)
self.send_packet(Opcode.RFC, h)
opc, data = self.get_packet()
if opc == Opcode.CLS:
raise FileError(b'CLS',data)
elif opc != Opcode.OPN:
print("Unexpected RFC response for {} from {}: {} {} (wanted OPN)".format(contact,host, Opcode(opc).name, data), file=sys.stderr)
return False
if debug:
print("OPN {!r}".format(data), file=sys.stderr)
if self.ostype == None and host != str(data,'ascii'):
if debug:
print("Checking DNS info for {}".format(str(data,'ascii')), file=sys.stderr)
self.dnsinfo = dns_info_for(str(data,'ascii'))
self.host = self.dnsinfo['name'] if self.dnsinfo and 'name' in self.dnsinfo else str(data,'ascii')
self.ostype = self.dnsinfo['os'] if self.dnsinfo and 'os' in self.dnsinfo else None
self.active = True
self.contact = contact
return True
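    # Example of the RFC contact string built above (host name is hypothetical):
    #   rfc("FILE", "UP", [1])  sends an RFC packet whose data is b"UP FILE 1"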
def listen(self, contact, expected_host=None):
if debug:
print("Listen for {} (expected {})".format(contact,expected_host))
self.send_packet(Opcode.LSN,contact)
self.active = False
self.contact = contact
opc, data = self.get_packet()
rh = str(data,"ascii")
if opc != Opcode.RFC:
# Raise exception
print("Unexpected response {} ({}) in input handler for {} (wanted RFC)".format(Opcode(opc).name, data, ofh), file=sys.stderr)
return None
elif expected_host != None and expected_host != rh:
print("Unexpected host sent RFC: {} (expected {})".format(rh, expected_host))
self.send_packet(Opcode.CLS,"You are the wrong host to RFC this contact")
# Raise exception
return None
else:
if debug:
print("RFC {!r}".format(data), file=sys.stderr)
self.send_packet(Opcode.OPN,"")
return rh
def read_until_smark(self):
if debug:
print("attempting to read until SMARK from {}".format(self))
opc, d = self.get_packet()
# @@@@ cf SmrKin, but there it might read duplicates/ooo pkts?
while opc != Opcode.SMARK:
if opc not in (Opcode.SMARK, Opcode.AMARK, Opcode.EOF, Opcode.DAT, Opcode.DWD):
raise FileError(b'UNC', bytes("read_until_smark: Unexpected opcode {}".format(Opcode(opc).name),"ascii"))
if debug:
print("read_until_smark: read {} data len {} ({:15})".format(Opcode(opc).name, len(d), d), file=sys.stderr)
opc, d = self.get_packet()
class File(NCPConn):
ncp = None
curr_tid = 1
dnsinfo = None
def __init__(self, host, version=1):
self.dnsinfo = dns_info_for(host)
self.host = self.dnsinfo['name'] if self.dnsinfo and 'name' in self.dnsinfo else host
self.ostype = self.dnsinfo['os'] if self.dnsinfo and 'os' in self.dnsinfo else None
self.homedir = ""
self.dataconn = None
self.get_socket()
self.rfc("FILE", host, [version])
self.xecutor = ThreadPoolExecutor()
def next_tid(self):
self.curr_tid = self.curr_tid+1
return bytes(format("T{:04}".format(self.curr_tid)),"ascii")
def make_fh(self,direction):
if direction == 'input':
return b"I"+bytes("{:04}".format(randint(1,9999)), "ascii")
else:
return b"O"+bytes("{:04}".format(randint(1,9999)), "ascii")
def data_conn_maker(self, tid):
# Returns response to DATA-CONNECTION
# Caller is expected to get the result from self.data before proceeding to read/write (using read/write_handler)
self.ifh = self.make_fh('input')
self.ofh = self.make_fh('output')
# Submit a listener, returning a conn as result
self.data = self.xecutor.submit(self.dataconn_listener, self.ofh, self.ifh)
# Tell the other end to connect back
self.send_command(tid, b"", b"DATA-CONNECTION", options=[self.ifh,self.ofh])
resp = self.get_response()
# Return the response (@@@@ maybe parse it first?)
return resp
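    # Sketch of the handshake performed above (handle values are hypothetical):
    #   1. pick an input handle "I1234" and an output handle "O5678"
    #   2. start dataconn_listener(), which LSNs on contact "O5678"
    #   3. send "Tnnnn  DATA-CONNECTION I1234 O5678" on the control connection
    #   4. the server RFCs back to "O5678"; self.data then resolves to that NCPConn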
def dataconn_listener(self, ofh, ifh):
# Just make a conn for the ofh, and return when there is an RFC
conn = NCPConn()
self.dataconn = conn
rh = conn.listen(str(ofh,'ascii'))
return conn
def undata_connection(self, tid, ifh):
self.send_command(tid, ifh, b"UNDATA-CONNECTION")
resp = self.get_response()
if debug:
print("undata-conn response {}".format(resp), file=sys.stderr)
return resp
def read_handler(self, outstream, conn):
# if outstream is None, returns the input read as bytes
# the caller is expected to CLOSE the FH, read the reply, and then read_until_smark from the dataconn
idata = []
return_last = False
while True:
opc, d = conn.get_packet()
if opc == Opcode.EOF:
if outstream == None:
return b''.join(idata)
else:
if outstream is not None and outstream != sys.stdout:
print("!", file=sys.stderr)
if outstream == sys.stdout and not return_last:
outstream.write('\n')
return None
elif opc == Opcode.DAT:
if outstream == None:
idata.append(d)
else:
if outstream == sys.stdout:
return_last = d[-1:] == LMchar.RETURN
if isinstance(outstream, io.TextIOBase):
d = str(d,'lispm')
if outstream is not None and outstream != sys.stdout:
print('.', end='', flush=True, file=sys.stderr)
outstream.write(d)
elif opc == Opcode.DWD:
if outstream == None:
idata.append(d)
else:
if outstream != sys.stdout:
print('.', end='', flush=True, file=sys.stderr)
outstream.write(d)
elif opc == Opcode.AMARK:
# @@@@ parse it and make more specific exception
raise FileError(b'AMARK', d)
elif opc == Opcode.CLS:
raise FileError(b'CLS', d)
else:
raise FileError(b'UNC', bytes("Unexpected response {} ({}) in data handler for {} (wanted DAT or EOF)".format(Opcode(opc).name, d, outstream),"ascii"))
def write_handler(self, instream, ncp, binary=False):
# returns the number of bytes written
# caller is supposed to CLOSE the FH (but we already wrote EOF and SMARK)
nbytes = 0
if debug:
print("WH for {} and {} starting".format(instream,ncp), file=sys.stderr)
while True:
d = instream.read(488)
if len(d) == 0:
if debug:
print("WH for {} and {} done, sending EOF and closing, returning {}".format(instream,ncp,nbytes), file=sys.stderr)
# Need to wait for EOF to be acked - extend NCP protocol by data in EOF pkt, which is normally not there
ncp.send_packet(Opcode.EOF,"wait")
print("!", file=sys.stderr, end='', flush=True)
# but we notice the waiting only by delaying the next pkt, so, invent an ACK pkt as response to EOF+wait
opc, d = ncp.get_packet()
if opc != Opcode.ACK:
raise FileError(b'BUG', bytes("unexpected opcode in response to EOF+wait: {} ({})".format(Opcode(opc).name, d), "ascii"))
print("\n", end='', file=sys.stderr, flush=True)
ncp.send_packet(Opcode.SMARK,"")
time.sleep(2)
return nbytes
if not binary:
d = codecs.encode(d,'lispm')
nbytes += len(d)
if binary:
ncp.send_packet(Opcode.DWD, d)
else:
ncp.send_packet(Opcode.DAT, d)
print(".", file=sys.stderr, end='', flush=True)
# send_command(tid, fh, cmd, options = on same line as cmd, args = on consecutive lines)
def send_command(self, tid, fh, cmd, options=[], args=[]):
# ar = list(functools.reduce(lambda a,b: a+b, map(lambda x: [x,bytes([LMchar.RETURN])], args)))
# m = bytes(("{} {} {}"+" {}{}"*len(args)).format(tid,fh,cmd,*ar),"utf8")
m = tid+b" "+fh+b" "+cmd
if debug:
print("send_command: tid {} fh {} cmd {} opts {} args {}".format(tid,fh,cmd, options, args), file=sys.stderr)
if len(options) > 0:
m = m+b" "+b" ".join(options)
if len(args) > 0:
if cmd == b'CREATE-LINK':
# What a crock - "Compensate for incompetently-defined protocol"
m = m+LMchar.RETURN+LMchar.RETURN.join(args)
else:
m = m+LMchar.RETURN+LMchar.RETURN.join(args)+LMchar.RETURN
if debug:
print("send_command: {!r}".format(m), file=sys.stderr)
self.send_packet(Opcode.DAT, m)
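    # Example of the command line this builds (TID and user are hypothetical):
    #   send_command(b"T0001", b"", b"LOGIN", options=[b"LISPM"])
    # sends a DAT packet whose data is b"T0001  LOGIN LISPM"
    # (two spaces because the file-handle field is empty).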
# get_response => (tid, fh, cmd, array-of-results)
def get_response(self):
opc,data = self.get_packet()
if opc == Opcode.DAT:
dlines = data.split(LMchar.RETURN)
# return list(map(lambda x: LMdecode(x), dlines))
return list(map(lambda x: LMcodec().decode(x,tostring=False)[0], dlines))
elif opc == Opcode.EOF:
return []
elif opc == Opcode.AMARK:
# @@@@ better more specific condition, parse data
raise FileError(b'AMARK', data)
elif opc == Opcode.CLS:
# raise FileError(b'CLS',data)
if debug:
print("CLS {!s}".format(data))
return []
else:
# raise exception
raise FileError(b"UNC",bytes("Unexpected opcode {} ({}) (wanted DAT or EOF)".format(Opcode(opc).name, data), "ascii"))
# parse_response(first line of reply) => rest of line after "tid fh cmd", split at spaces
def parse_response(self, rsp, expected_tid=None):
if debug:
print("Parsing {}".format(rsp), file=sys.stderr)
if rsp.count(b' ') > 2:
tid,fh,cmd,res = rsp.split(b' ',maxsplit=3)
else:
tid,fh,cmd = rsp.split(b' ',maxsplit=2)
res = b""
if expected_tid is not None and expected_tid != tid:
if True or debug:
print("Response for wrong TID: expected {}, got {}".format(expected_tid,tid))
return None
if cmd == b'ERROR':
erc,flag,msg = res.split(b' ',maxsplit=2)
# @@@@ make exceptions
if flag == b'F':
# fatal error
if erc == b'FNF':
raise FNFError(erc,msg)
if erc == b'DNF':
raise DNFError(erc,msg)
elif erc == b'NLI':
raise NLIError(erc,msg)
else:
raise FatalError(erc,msg)
elif flag == b'R':
raise RestartableError(erc,msg)
elif flag == b'C':
raise CommandError(erc,msg)
else:
if debug:
print("{}: {}".format(cmd,res), file=sys.stderr)
return res.split(b' ')
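    # Example (an ITS-style LOGIN reply, values hypothetical):
    #   parse_response(b"T0001  LOGIN BV USERS1", expected_tid=b"T0001")
    #   == [b'BV', b'USERS1']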
def execute_operation(self, operation, is_write=False, options=[], args=[],
dataconn=True, outstream=None, instream=None, binary=False):
tid = self.next_tid()
if dataconn and self.dataconn == None:
if debug:
print("creating dataconn for {}".format(operation), file=sys.stderr)
resp = self.data_conn_maker(tid)
r = self.parse_response(resp[0])
if debug:
print("data-conn response {!r} ({!r})".format(r, resp), file=sys.stderr)
if r == None:
raise FileError(b'BUG',b'Bad response from data_conn_maker')
if dataconn:
ifh = self.ifh
ofh = self.ofh
else:
ifh = b""
ofh = b""
args = list(map(lambda x: bytes(x.upper(), 'ascii'), args))
options = list(map(lambda x: bytes(x.upper(), 'ascii'), options))
self.send_command(tid, ofh if is_write else ifh, bytes(operation.upper(), 'ascii'), options=options, args=args)
msg = self.get_response()
resp = self.parse_response(msg[0], tid)
while resp is None:
if debug:
print("Bad response or mismatching TID (expected {}) for {!r}".format(tid, msg))
msg = self.get_response()
resp = self.parse_response(msg[0], tid)
if dataconn:
# Get the conn (waiting here for RFC)
c = self.data.result()
if is_write:
hand = self.xecutor.submit(self.write_handler, instream, c, binary)
fh = ofh
else:
hand = self.xecutor.submit(self.read_handler, outstream, c)
fh = ifh
# Wait for the work to be done
r = hand.result()
# Close the FH, get the response
self.send_command(tid, fh, b"CLOSE")
cr = self.get_response()
crr = self.parse_response(cr[0], tid)
if debug:
print("response to CLOSE: {!r} ({!r})".format(crr,cr), file=sys.stderr)
# Post-process response
iolen = 0
if len(crr) > 2:
iolen = int(crr[2])
if is_write:
if iolen == 0:
iolen = r
return resp, msg[1:], iolen
else:
# read until SMARK
c.read_until_smark()
return resp,msg[1:],r
else:
return resp,msg[1:]
#### Here are the commands.
def login(self, uname):
resp, msg = self.execute_operation("LOGIN", options=[uname], dataconn=False)
if debug:
print('Login',resp,msg, file=sys.stderr)
self.uname = uname
# Lambda: b'bv ' [b'LX: BV;', b'']
# uname RETURN homedir
# ITS: b'BV USERS1' [b'Victor, Bjorn', b'@']
# uname hsname RETURN persname affiliation
homedir = ""
if self.ostype == 'ITS':
homedir = str(resp[1],"ascii")+";"
elif self.ostype == 'LISPM':
homedir = str(msg[0],"ascii") if len(msg) > 0 else ""
self.homedir = homedir
return str(resp[0],"ascii")
def delete_file(self, fname):
resp,msg = self.execute_operation("delete", args=[fname], dataconn=False)
if debug:
print('Delete:',resp,msg)
def expunge_file(self, fname):
resp,msg = self.execute_operation("expunge", args=[fname], dataconn=False)
if debug:
print('Expunge:',resp,msg)
def rename_file(self, fromfile, tofile):
resp,msg = self.execute_operation("rename", args=[fromfile,tofile], dataconn=False)
if debug:
print('Rename:',resp,msg)
def complete_file(self, fname, options=[]):
dflt = self.homedir + "*"
if self.ostype == 'ITS' and options == []:
# ITS requires some option, so use these?
options = ["READ","NEW-OK"]
resp, new = self.execute_operation("complete", options=options, args=[dflt, fname], dataconn=False)
if debug:
print("Complete {} with {} => {} {}".format(fname, dflt, resp, new))
return str(new[0].lstrip(),"ascii"), str(resp[0],"ascii")
def probe_file(self, fname):
try:
resp,msg = self.execute_operation("OPEN", options=["PROBE"], args=[fname], dataconn=False)
except FNFError:
print("File not found: {}".format(fname), file=sys.stderr)
return None
except DNFError:
print("Directory not found: {}".format(fname), file=sys.stderr)
return None
truename = str(msg[0],"ascii")
cdate,ctime,length,binp,x = resp[:5]
if debug:
print('response',cdate,ctime,length,binp, file=sys.stderr)
length = int(length)
crdt = datetime.strptime(str(cdate+b' '+ctime,"ascii"), '%m/%d/%y %H:%M:%S')
binp = False if binp == b'NIL' else True
if debug:
print(resp,msg, file=sys.stderr)
print("= {} created {} len {}{}".format(truename,crdt,length," (binary)" if binp else " (not binary)"),
file=sys.stderr)
return dict(truename=truename, creationdate=crdt, length=length, binary=binp)
def read_file(self, fname, output, raw=False, binary=False):
try:
opts = ["READ"]
if raw:
opts.append("RAW")
if binary:
opts += ["BINARY","BYTE-SIZE 16"]
if debug:
print("read_file options {}".format(opts))
resp, msg, content = self.execute_operation("OPEN", outstream=output,
options=opts,
args=[fname])
except DNFError as e:
print(e, file=sys.stderr)
return None
except FNFError as e:
print(e, file=sys.stderr)
return None
truename = str(msg[0],"ascii")
cdate,ctime,length,binp,x = resp[:5]
if debug:
print('response',cdate,ctime,length,binp, file=sys.stderr)
# But length often doesn't match for text files, since CR LF => #\Return
length = int(length)
crdt = datetime.strptime(str(cdate+b' '+ctime,"ascii"), '%m/%d/%y %H:%M:%S')
binp = False if binp == b'NIL' else True
if debug:
print("= Here comes {} created {} len {}{}".format(truename,crdt,length," (binary)" if binp else " (not binary)"),
file=sys.stderr)
if output == 'return' or output is None:
if raw or binp:
return content
else:
return str(content,'lispm')
else:
return dict(truename=truename, created=crdt, length=length, binary=binp)
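    # Typical uses (pathnames are hypothetical):
    #   ncp.read_file("BV; FOO BAR", sys.stdout)   # stream a text file to stdout
    #   text = ncp.read_file("BV; FOO BAR", None)  # collect and return the contents
    # When writing to a real stream, the returned dict carries the server's
    # truename, creation date, length and binary flag.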
def write_file(self, fname, instream, raw=False, binary=False):
if debug:
print("Writing {} from stream {}".format(fname,instream))
if instream is None:
raise FileError(b'BUG',b'You called write_file without an input stream')
opts = ["WRITE"]
if raw:
opts.append("RAW")
if binary:
opts += ["BINARY","BYTE-SIZE 16"]
resp, msg, content = self.execute_operation("OPEN", is_write=True, instream=instream, binary=binary,
options=opts, args=[fname])
truename = str(msg[0],"ascii")
cdate,ctime,length,binp,x = resp[:5]
if debug:
print('open write response',cdate,ctime,length,binp, file=sys.stderr)
# But length often doesn't match for text files, since CR LF => #\Return
length = int(length)
crdt = datetime.strptime(str(cdate+b' '+ctime,"ascii"), '%m/%d/%y %H:%M:%S')
binp = False if binp == b'NIL' else True
if debug:
print("= Here was {} created {} len {}{}".format(truename,crdt,length," (binary)" if binp else " (not binary)"),
file=sys.stderr)
return dict(truename=truename, created=crdt, length=max(length,content), binary=binp)
# See directory option DIRECTORIES-ONLY instead
def all_directories(self, fname=None):
if self.dnsinfo and self.dnsinfo['os'] == 'ITS':
# ITS: space dirname-using-six-positions-with-space-filler RETURN
dirlist = list(filter(lambda x: len(x) > 0, map(lambda x: x.strip(), self.read_file("dsk:m.f.d. (file)", 'return').split('\n'))))
dirlist.sort()
return dirlist
elif self.dnsinfo and self.dnsinfo['os'] == 'LISPM':
# LISPM
# ('', [b'(((#\x10FS::LM-PATHNAME "LX: BACKUP-LOGS; .#"\x11) (#\x10FS::LM-PATHNAME "LX: RELEASE-5; .#"\x11) (#\x10FS::LM-PATHNAME "LX: VICTOR; .#"\x11)))'])
resp,dlist = self.execute_operation("extended-command", options=["all-directories"], args=[fname,"((:noerror))"], dataconn=False)
if len(dlist) != 1 or not dlist[0].startswith(b'(((') or not dlist[0].endswith(b')))'):
print('Bad result from LISPM',dlist, file=sys.stderr)
return None
dlist = dlist[0]
dirlist = []
# This skips device/host name, and file part of pathname - only directory names remain
rgx = br'#\x10FS::LM-PATHNAME "[^:]+: *([^;]+);[^"]*"\x11'
x = re.search(rgx, dlist)
while x:
dirlist = dirlist + [str(x.group(1),"ascii")]
dlist = dlist[x.end():]
x = re.search(rgx, dlist)
dirlist.sort()
return dirlist
else:
print('unsupported OS',self.dnsinfo['os'] if self.dnsinfo else None, file=sys.stderr)
return None
def change_props(self, fname, propvaldict):
pv = [fname]
for k in propvaldict:
pv = pv+["{} {}".format(k,propvaldict[k])]
resp, msg = self.execute_operation("change-properties", args=pv, dataconn=False)
if debug:
print('change_props result',resp,msg, file=sys.stderr)
def create_directory(self, dname):
# For ITS, need to be logged in with a proper/personal homedir (named as userid)
if self.ostype == 'ITS':
if self.uname.lower()+";" != self.homedir.lower():
print("You need to have a homedir naed as your user name", file=sys.stderr)
if not dname.endswith(';'):
print("Directory name should end with ;", file=sys.stderr)
resp, msg = self.execute_operation("create-directory", args=[dname], dataconn=False)
if debug:
print('create_directory response',resp,msg, file=sys.stderr)
def home_directory(self, uname):
resp, msg = self.execute_operation("homedir", options=[uname], dataconn=False)
if debug:
print('homedir response',resp,msg, file=sys.stderr)
return str(resp[0],'ascii')
def file_system_info(self):
resp, msg = self.execute_operation("file-system-info", dataconn=False)
if True or debug:
print('file_system_info response',resp,msg, file=sys.stderr)
return list(map(lambda x: str(x,'ascii'), msg))
def create_link(self, lname, fname):
resp, msg = self.execute_operation("create-link", args=[lname,fname], dataconn=False)
if True or debug:
print('create_link response',resp,msg, file=sys.stderr)
def parse_properties(self, lines):
props = dict()
if debug:
print('parsing',lines, file=sys.stderr)
for l in lines:
if l == b'':
break
try:
prop,val = l.split(b' ', maxsplit=1)
except ValueError:
# Binary property, true when mentioned
prop = l
val = b"T"
prop = str(prop,"ascii")
# Hack values
if val.isdigit():
val = int(val)
elif prop.endswith('-DATE'):
# except ITS seems to represent "never" as 01/31/27 00:00:00
if val == b'01/31/27 00:00:00':
val = '--'
else:
val = datetime.strptime(str(val,"ascii"),'%m/%d/%y %H:%M:%S')
elif prop == 'SETTABLE-PROPERTIES':
val = str(val,"ascii").split(' ')
elif prop == 'PHYSICAL-VOLUME-FREE-BLOCKS':
# e.g. '0:9357,1:11724,2:36499'
volumes = dict()
diskl = str(val,"ascii").split(',')
for d in diskl:
unit,free = d.split(':')
volumes[int(unit)] = int(free)
val = volumes
elif val == b'T':
val = True
elif val == b'NIL':
val = False
else:
val = str(val,"ascii")
if False and debug:
print('prop {} = {}'.format(prop,val), file=sys.stderr)
props[prop] = val
if debug:
print('found',props, file=sys.stderr)
return props
def list_files(self, path, deleted=False, directories=False, fast=False):
opts = ["DELETED"] if deleted else []
# Unfortunately Lambdas only give top-level directories (and ITS only has such, of course)
opts += ["DIRECTORIES-ONLY"] if directories else []
opts += ["FAST"] if fast else []
try:
resp, msg, res = self.execute_operation("DIRECTORY", args=[path], options=opts)
except DNFError as e:
print(e, file=sys.stderr)
return [],[]
if len(res) > 0 and res.startswith(LMchar.RETURN):
res = res[1:]
if debug:
print("{!s}".format(str(LMchar.toascii(res),"ascii")), file=sys.stderr)
# Break at double RETURN
if res.startswith(LMchar.RETURN): #special case
resparts = [b""]+res[1:].split(LMchar.RETURN * 2)
else:
resparts = res.split(LMchar.RETURN * 2)
if len(resparts) == 0:
return None
# Parse headers into a dictionary
if debug:
print("Headers {!r}".format(resparts[0]), file=sys.stderr)
hdrs = self.parse_properties(resparts[0].split(LMchar.RETURN))
# Parse files
files = dict()
if len(resparts) > 1:
for p in resparts[1:]:
if debug:
print("Part {!r}".format(p), file=sys.stderr)
if p == b'':
continue
# Parse lines
reslines = p.split(LMchar.RETURN)
# First is filename
fname = str(reslines[0],"ascii")
# Get its properties
fps = self.parse_properties(reslines[1:])
files[fname] = fps
return hdrs,files
#### some DNS support
# Get all info
def dns_info_for(nameoraddr):
if isinstance(nameoraddr,int):
name = dns_name_of_addr(nameoraddr)
elif isinstance(nameoraddr,str) and nameoraddr.isdigit():
name = dns_name_of_addr(int(nameoraddr,8))
else:
name = nameoraddr
addrs = dns_addr_of_name(name)
hinfo = get_dns_host_info(name)
return dict(name=name, addrs=addrs, os=None if hinfo == None else hinfo['os'], cpu=None if hinfo == None else hinfo['cpu'])
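# The returned dict always has the keys 'name', 'addrs', 'os' and 'cpu'; 'os' and
# 'cpu' come from the Chaos-class HINFO record when available (e.g. 'ITS' or
# 'LISPM'), otherwise None.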
def get_dns_host_info(name):
# If it's an address given, look up the name first
if isinstance(name,int):
name = dns_name_of_addr(name) or name
elif isinstance(name,str) and name.isdigit():
name = dns_name_of_addr(int(name,8)) or name
try:
h = dns.query.udp(dns.message.make_query(name, dns.rdatatype.HINFO, rdclass=dns.rdataclass.CH), '130.238.19.25')
for t in h.answer:
if t.rdtype == dns.rdatatype.HINFO:
for d in t:
return dict(os= str(d.os.decode()), cpu= str(d.cpu.decode()))
except AttributeError as e:
# dnspython not updated with support for Chaos records?
pass
# print("Error", e, file=sys.stderr)
except dns.exception.DNSException as e:
print("Error", e, file=sys.stderr)
def dns_addr_of_name(name):
# If it's an address given, look up the name first, to collect all its addresses
if isinstance(name,int):
name = dns_name_of_addr(name) or name
elif isinstance(name,str) and name.isdigit():
name = dns_name_of_addr(int(name,8)) or name
addrs = []
try:
h = dns.query.udp(dns.message.make_query(name, dns.rdatatype.A, rdclass=dns.rdataclass.CH), '130.238.19.25')
for t in h.answer:
if t.rdtype == dns.rdatatype.A:
for d in t:
addrs.append(d.address)
except AttributeError as e:
# dnspython not updated with support for Chaos records?
pass
# print("Error", e, file=sys.stderr)
except dns.exception.DNSException as e:
print("Error", e, file=sys.stderr)
return addrs
def dns_name_of_addr(addr):
if isinstance(addr, str) and not addr.isdigit():
# already a name, so get the canonical name by looking up its first address
addrs = dns_addr_of_name(addr)
if len(addrs) > 0:
addr = addrs[0]
try:
if (isinstance(addr,int)):
name = "{:o}.CH-ADDR.NET.".format(addr)
else:
name = "{}.CH-ADDR.NET.".format(addr)
h = dns.query.udp(dns.message.make_query(name, dns.rdatatype.PTR, rdclass=dns.rdataclass.CH), '130.238.19.25')
for t in h.answer:
if t.rdtype == dns.rdatatype.PTR:
for d in t:
return d.target.to_text(omit_final_dot=True)
# return d.target_to_text()
except AttributeError as e:
# dnspython not updated with support for Chaos records?
pass
# print("Error", e, file=sys.stderr)
except dns.exception.DNSException as e:
print("Error", e, file=sys.stderr)
## Handling directory listings
def print_directory_list(hd,fs):
# Format this nicely instead
pprint(hd,width=100)
if debug:
pprint(fs,width=100)
if fs:
# Get max pathname length
mxlen = len(max(fs, key=len))
fmt = string.Template("{:<2} {:$mxlen} {:>7} {:<4} {:<4} {:<19} {}").substitute(mxlen=mxlen)
# Handle links (in ITS)
lks = list(filter(lambda x: 'LINK-TO' in fs[x], fs))
if len(lks) > 0:
lklen = len(max(lks, key=len))
lfmt = string.Template("{:<2} {:$mxlen} => {:$lklen} {:<4} {:<19} {}").substitute(mxlen=mxlen, lklen=lklen)
else:
lfmt = None
print(fmt.format("","Name","Length","Bs","Flg","Creation","Author"))
def ftype(f,fs):
if 'DELETED' in fs[f] and fs[f]['DELETED']:
return "d"
elif 'LINK-TO' in fs[f] and len(fs[f]['LINK-TO']) > 0:
return "L"
else:
return ''
def flags(f,fs):
return ("!" if 'NOT-BACKED-UP' in fs[f] and fs[f]['NOT-BACKED-UP'] else "")+\
("@" if 'DONT-DELETE' in fs[f] and fs[f]['DONT-DELETE'] else "")+\
("$" if 'DONT-REAP' in fs[f] and fs[f]['DONT-REAP'] else "")+\
("#" if 'DONT-SUPERSEDE' in fs[f] and fs[f]['DONT-SUPERSEDE'] else "")
def fieldp(f,fs, fld):
if fld in fs[f]:
return fs[f][fld]
else:
return ''
for f in fs:
if 'DIRECTORY' in fs[f]:
print(fmt.format(ftype(f,fs),
f, fieldp(f,fs,'LENGTH-IN-BYTES') if not fs[f]['DIRECTORY'] else "(dir)",
"({})".format(fieldp(f,fs,'BYTE-SIZE')) if not fs[f]['DIRECTORY'] else "",
flags(f,fs),
str(fieldp(f,fs,'CREATION-DATE')), fieldp(f,fs,'AUTHOR')))
elif 'LINK-TO' in fs[f]:
print(lfmt.format(ftype(f,fs),
f, fs[f]['LINK-TO'],
flags(f,fs),
# Is creation-date really wanted/valid for links?
str(fieldp(f,fs,'CREATION-DATE')), fieldp(f,fs,'AUTHOR')))
else:
print(fmt.format(ftype(f,fs),
f, fieldp(f,fs,'LENGTH-IN-BYTES'), "({})".format(fieldp(f,fs,'BYTE-SIZE')),
flags(f,fs),
str(fieldp(f,fs,'CREATION-DATE')), fieldp(f,fs,'AUTHOR')))
# Make a command interpreter out of this.
if __name__ == '__main__':
codecs.register(LMregentry)
import argparse
parser = argparse.ArgumentParser(description='Chaosnet FILE protocol client')
parser.add_argument("-d",'--debug',dest='debug',action='store_true',
help='Turn on debug printouts')
parser.add_argument('--user', '-u', nargs=1, dest='user', help='User to login as')
parser.add_argument("host", help='The host to connect to')
args = parser.parse_args()
cmdhelp = {"debug": "Toggle debug",
"bye": "Close connection and exit program",
"cwd": '"cwd dname" changes working directory (local effect only), "cwd" shows the working directory.',
"login":'"login uname" logs in as user uname',
"probe": '"probe fname" checks if file fname exists',
"complete": '"complete str" tries to complete the str as a filename',
"delete": '"delete fname" deletes the file fname',
"undelete": '"undelete fname" undeletes the file fname (if supported)',
"expunge": '"expunge fname" expunges deleted files (if supported)',
"nodelete": '"nodelete fname" sets the do-not-delete property (if supported)',
"nosupersede": '"nosupersede fname" sets the dont-supersede property (if supported)',
"supersede": '"supersede fname" un-sets the dont-supersede property (if supported)',
"homedir": '"homedir uname" gets the home directory of the user uname',
"fsi": "gets file system information (if available)",
"crdir": '"crdir dname" creates the directory dname (if possible)',
"crlink": '"crlink linkname destination" creates a link to the destination filename',
"read": '"read fname" reads the file fname and prints it on stdout',
"readinto": '"readinto local remote" reads the remote file into the local file',
"breadinto": '"breadinto local remote" reads the remote file into the local file in binary mode, byte-size 16',
"readraw": '"readraw fname" reads the fname without translating characters',
"write": '"write local remote" writes the local file to the remote file',
"bwrite": '"bwrite local remote" writes the local file to the remote file in binary mode, byte-size 16',
"alldirs": 'lists all (top-level) directories. To list subdirectories, use the "directory" command',
"directory": '"directory pname" lists files matching the pathname. Can be abbreviated "dir".',
"ddirectory": '"ddirectory pname" lists file including deleted ones (if supported). Abbrev "ddir".',
"fdirectory": '"fdirectory pname" lists files using the FAST option, without properties. Abbrev "fdir".'
}
if args.debug:
debug = True
uid = ""
cwd = ""
ncp = None
def directory_name(name):
if ncp.homedir.endswith(';'):
return name+';'
elif ncp.homedir.startswith(">") and ncp.homedir.endswith(">"):
return name+">"
else:
return name+";"
def wdparse(f):
if f.count(';') > 0:
# Assume ITS or MIT/LMI LISPM
return f
elif ncp.homedir.startswith(">") and ncp.homedir.endswith(">") and f.count('>') > 0:
# Symbolics or old CADR (e.g. System 78)
return f
else:
return cwd + f
def dologin(uname):
uid = ncp.login(uname)
if ncp.homedir.count(':') > 0:
# skip any device
cwd = ncp.homedir[ncp.homedir.index(':')+1:].strip()
else:
cwd = ncp.homedir
print("Logged in as {} (homedir {!r})".format(uid,ncp.homedir))
if 'name' in ncp.dnsinfo:
args.host = ncp.dnsinfo['name'].split(".")[0]
return uid, cwd
try:
ncp = File(args.host)
if ncp == None:
exit(1)
if args.user and len(args.user) > 0:
uid,cwd = dologin(args.user[0])
while True:
# Wish: a completing/abbreviating command reader
cline = input("FILE {}@{}{}> ".format(uid,args.host, " [debug]" if debug else "")).lstrip()
parts = cline.split(' ', maxsplit=1)
if len(parts) == 0:
continue
op = parts[0]
arg = parts[1:]
if debug:
print("op {!r} args {!r}".format(op,arg))
try:
if op == '':
continue
elif op == '?':
print('Commands:')
mxlen = len(max(cmdhelp, key=len))
fmt = string.Template("{:$mxlen} {}").substitute(mxlen=mxlen)
for cmd in cmdhelp:
print(fmt.format(cmd, cmdhelp[cmd]))
elif op == "bye" or op == "quit":
print("Bye bye.", file=sys.stderr)
try:
ncp.send_packet(Opcode.EOF,"")
except BrokenPipeError as e:
print("[Connection already down: {}]".format(e))
break
elif op == "debug":
debug = not debug
elif op == "dns":
print('DNS info: name {}, addr {!s}, OS {}, CPU {}'.format(
ncp.dnsinfo['name'], ", ".join(["{:o}"]*len(ncp.dnsinfo['addrs'])).format(*ncp.dnsinfo['addrs']),
ncp.dnsinfo['os'], ncp.dnsinfo['cpu']))
elif op == "login":
uid,cwd = dologin(arg[0])
elif op == "cd" or op == "cwd":
if len(arg) > 0:
if arg[0].endswith(ncp.homedir[-1:]):
cwd = arg[0].strip()
else:
print("Directory should end with {}".format(ncp.homedir[-1:]), file=sys.stderr)
print("CWD = {}".format(cwd))
elif op == "probe":
pb = ncp.probe_file(wdparse(arg[0]))
if pb is not None:
print("{} created {} length {}{}".format(pb['truename'],pb['creationdate'],pb['length']," (binary)" if pb['binary'] else " (not binary)"),
file=sys.stderr)
elif op == "delete":
ncp.delete_file(wdparse(arg[0]))
elif op == "undelete":
ncp.change_props(wdparse(arg[0]),dict(deleted='NIL'))
elif op == "rename":
ffil,tofil = arg[0].split(' ', maxsplit=1)
ncp.rename_file(wdparse(ffil),wdparse(tofil))
elif op == "complete":
fname, stat = ncp.complete_file(wdparse(arg[0]))
print("{!s} ({!s})".format(fname, stat))
if ncp.ostype != 'ITS' and (stat == 'NEW' or stat == 'NIL'):
                        # Look for ambiguity (but ITS doesn't handle partial wildcards)
if stat == 'NIL':
fname += "*"
hd,fs = ncp.list_files(fname if stat != 'NIL' else fname+"*", fast=True)
if fs:
print(list(fs))
elif op == "expunge":
ncp.expunge_file(wdparse(arg[0]))
                # @@@@ make a generic property-change command, e.g. a "toggle"?
elif op == "nodelete":
ncp.change_props(wdparse(arg[0]),{'dont-delete':'T'})
elif op == "supersede":
ncp.change_props(wdparse(arg[0]),{'dont-supersede':'NIL'})
elif op == "nosupersede":
ncp.change_props(wdparse(arg[0]),{'dont-supersede':'T'})
elif op == "crdir":
ncp.create_directory(arg[0])
elif op == "homedir":
print(ncp.home_directory(arg[0]))
elif op == "fsi":
print(ncp.file_system_info())
elif op == "crlink":
if ncp.ostype != 'ITS':
print("Links are probably only supported in ITS", file=sys.stderr)
linkname, destname = arg[0].split(' ', maxsplit=1)
ncp.create_link(linkname, destname)
elif op == "read":
# s = io.StringIO()
s = sys.stdout
ncp.read_file(wdparse(arg[0]), s)
# print(s.getvalue(), end='')
elif op == "readinto":
outf, inf = arg[0].split(' ', maxsplit=1)
try:
os = open(outf, "w")
                        r = ncp.read_file(wdparse(inf), os)
except FileNotFoundError as e:
print(e, file=sys.stderr)
continue
finally:
os.close()
print("Read {}, length {} ({}), created {}".format(r['truename'], r['length'],
"binary" if r['binary'] else "character",
r['created']))
elif op == "breadinto":
outf, inf = arg[0].split(' ', maxsplit=1)
try:
os = open(outf, "wb")
                        r = ncp.read_file(wdparse(inf), os, binary=True)
except FileNotFoundError as e:
print(e, file=sys.stderr)
continue
finally:
os.close()
print("Read {}, length {} ({}), created {}".format(r['truename'], r['length'],
"binary" if r['binary'] else "character",
r['created']))
elif op == "readraw":
ncp.read_file(wdparse(arg[0]), None, raw=True)
elif op == "get":
hd, flist = ncp.list_files(wdparse(arg[0].strip()))
if flist:
if len(flist) > 1:
print("Multiple matches: please be more specific: {}".format(list(flist.keys())),
file=sys.stderr)
else:
fn = list(flist.keys())[0]
binp = True if 'CHARACTERS' not in flist[fn] else False
outf = fn[fn.index(cwd[-1:])+1:].strip().lower()
print("Read into {} <= ".format(outf), end='', file=sys.stderr, flush=True)
try:
os = open(outf, "wb" if binp else "w")
r = ncp.read_file(fn, os, binary=binp)
except FileNotFoundError as e:
print(e, file=sys.stderr)
finally:
os.close()
print("Read {}, {}length {}, created {}".format(r['truename'], "binary, " if r['binary'] else "", r['length'], r['created']))
else:
print("File not found: {}".format(arg[0].strip()), file=sys.stderr)
elif op == "mget":
hd, flist = ncp.list_files(wdparse(arg[0].strip()))
if flist:
for fn in flist:
if 'CHARACTERS' in flist[fn]:
outf = fn[fn.index(cwd[-1:])+1:].strip().lower()
print("Read into {} <= ".format(outf), end='', file=sys.stderr, flush=True)
try:
os = open(outf,"w")
r = ncp.read_file(fn, os)
except FileNotFoundError as e:
print(e, file=sys.stderr)
continue
finally:
os.close()
print("Read {}, length {}, created {}".format(r['truename'], r['length'], r['created']))
elif op == "write":
inf,outf = arg[0].split(' ', maxsplit=1)
try:
ins = open(inf,"r")
r = ncp.write_file(wdparse(outf), ins)
except FileNotFoundError as e:
print(e, file=sys.stderr)
continue
finally:
ins.close()
print("Wrote {}, length {} ({}), created {}".format(r['truename'], r['length'],
"binary" if r['binary'] else "character",
r['created']))
elif op == "bwrite":
inf,outf = arg[0].split(' ', maxsplit=1)
try:
ins = open(inf,"rb")
r = ncp.write_file(wdparse(outf), ins, binary=True)
except FileNotFoundError as e:
print(e, file=sys.stderr)
continue
finally:
ins.close()
print("Wrote {}, length {} ({}), created {}".format(r['truename'], r['length'],
"binary" if r['binary'] else "character",
r['created']))
elif op == "alldirs":
# print(ncp.all_directories())
hd,fs = ncp.list_files(wdparse(arg[0]) if len(arg) > 0 else directory_name("*"), directories=True)
for f in fs:
fs[f]['DIRECTORY'] = True
print_directory_list(hd,fs)
elif op == "directory" or op == "ddirectory" or op == "fdirectory" or op == "dir" or op == "ddir" or op == "fdir":
dflt = cwd + "*"
if len(arg) > 0:
a = wdparse(arg[0])
if a.endswith(";"):
a += "*"
else:
a = dflt
hd,fs = ncp.list_files(a, deleted=True if op.startswith("ddir") else False,
fast=True if op.startswith("fdir") else False)
print_directory_list(hd,fs)
else:
print("NYI operation {} not yet implemented".format(op), file=sys.stderr)
except NLIError as e:
print(e)
except RestartableError as e:
print(e)
except CommandError as e:
print(e)
except FatalError as e:
print(e)
except IndexError as e:
print(e)
print("Maybe you forgot an argument to the command?")
except ValueError as e:
print(e)
print("Maybe you forgot an argument to the command?")
except FileError as e:
print(e)
except EOFError:
print("EOF", file=sys.stderr)
try:
ncp.send_packet(Opcode.EOF,"")
except BrokenPipeError as e:
print("[Connection already down: {}]".format(e))
if ncp:
if ncp.dataconn:
try:
ncp.dataconn.close()
except BrokenPipeError:
pass
try:
ncp.close()
except BrokenPipeError:
pass
exit(0)
| 44.567929
| 168
| 0.522729
|
e3af51ad3fd306d6af32ce3568fd7f280774a821
| 6,514
|
py
|
Python
|
petra_viewer/utils/fio_reader.py
|
yamedvedya/data_viewer
|
c6238b71edcf0178ebe8ab8f9bf6e56e41cd4916
|
[
"MIT"
] | null | null | null |
petra_viewer/utils/fio_reader.py
|
yamedvedya/data_viewer
|
c6238b71edcf0178ebe8ab8f9bf6e56e41cd4916
|
[
"MIT"
] | null | null | null |
petra_viewer/utils/fio_reader.py
|
yamedvedya/data_viewer
|
c6238b71edcf0178ebe8ab8f9bf6e56e41cd4916
|
[
"MIT"
] | null | null | null |
# Taken from HasyUtils
import numpy as np
class fioColumn:
'''
the class represents a column of a FIO file. The first column is the
x-axis which is used by all columns, name_in, e.g. test_00001_C1
'''
def __init__(self, name_in):
self.name = name_in
lst = self.name.split('_')
if len(lst) > 1:
self.deviceName = lst[-1]
if self.deviceName.find("0") == 0:
self.deviceName = "ScanName"
else:
self.deviceName = "n.n."
self.x = []
self.y = []
return
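# Example of the column-name convention handled above (names are hypothetical):
#   fioColumn("test_00001_C1").deviceName == "C1"
#   fioColumn("test_00001").deviceName == "ScanName"   # only the scan number is left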
class fioReader:
'''
represents an entire file with several columns
input: name of the .fio file (or .dat or .iint)
flagMCA: if True, the x-axis are channel numbers
returns: object containing:
self.comments
self.user_comments
self.parameters
self.columns
self.fileName
The string 'None' appearing in a column is interpreted as '0.'
'''
def __init__(self, fileName, flagMCA=False):
'''
flagMCA: don't be too smart and try to guess it.
'''
self.comments = []
self.user_comments = []
self.parameters = {}
self.columns = []
self.fileName = fileName
self.flagMCA = flagMCA
self.isImage = None
#
# /home/p12user/dirName/gen_00001.fio -> gen_00001
#
self.scanName = self.fileName.split("/")[-1].split(".")[0]
if fileName.endswith('.fio'):
self._readFio()
else:
raise ValueError("fioReader: format not identified, %s" % fileName)
return
def _readFio(self):
'''
!
! user comments
!
%c
comments
%p
parameterName = parameterValue
%d
Col 1 AU_ALO_14_0001 FLOAT
Col 2 AU_ALO_14_0001 FLOAT
Col 3 AU_ALO_14_0001_RING FLOAT
data data data etc.
'''
try:
inp = open(self.fileName, 'r')
        except IOError:
            raise ValueError("fioReader.fioReader._readFio: failed to open %s" % self.fileName)
lines = inp.readlines()
inp.close()
flagComment = 0
flagParameter = 0
flagData = 0
lineCount = 0
for line in lines:
line = line.strip()
if len(line) == 0:
continue
if line.find("!") == 0:
self.user_comments.append(line)
flagComment, flagParameter, flagData = False, False, False
elif line.find("%c") == 0:
flagComment, flagParameter, flagData = True, False, False
continue
elif line.find("%p") == 0:
flagComment, flagParameter, flagData = False, True, False
continue
elif line.find("%d") == 0:
flagComment, flagParameter, flagData = False, False, True
continue
#
if flagComment:
self.comments.append(line)
#
# parName = parValue
#
if flagParameter:
lst = line.split("=")
self.parameters[lst[0].strip()] = lst[1].strip()
if not flagData:
continue
#
# height and width indicate that we are reading an image
#
if self.isImage is None:
if 'width' in self.parameters and 'height' in self.parameters:
self.isImage = True
else:
self.isImage = False
lst = line.split()
if lst[0] == "Col":
#
# the 'Col 1 ...' description does not create a
# new FIO_dataset because it contains the x-axis for all
#
if not self.flagMCA and not self.isImage:
#
# the first column contains the independent variable (motor position)
#
if lst[1] == "1":
self.motorName = lst[2]
else:
self.columns.append(fioColumn(lst[2]))
#
# MCA and image files have one colum only
#
else:
if self.isImage:
self.motorName = lst[2]
if self.flagMCA:
self.motorName = "Channels"
self.columns.append(fioColumn(lst[2]))
else:
if not self.flagMCA and not self.isImage:
for i in range(1, len(self.columns) + 1):
self.columns[i - 1].x.append(float(lst[0]))
#
# some column may be 'None' - try to continue anyway
#
if lst[i].lower() == 'none':
self.columns[i - 1].y.append(float(0.))
else:
self.columns[i - 1].y.append(float(lst[i]))
elif self.flagMCA:
for i in range(0, len(self.columns)):
self.columns[i].x.append(float(lineCount))
#
# some column may be 'None' - try to continue anyway
#
if lst[i].lower() == 'none':
self.columns[i].y.append(float(0.))
else:
self.columns[i].y.append(float(lst[i]))
#
# image, one column only
#
elif self.isImage:
self.columns[0].x.append(float(lst[0]))
lineCount += 1
if self.isImage:
if len(self.columns) != 1:
raise ValueError(" fioReader.reasdFio: isImage and len( self.columns) != 1")
if len(self.columns[0].y) is not 0:
raise ValueError(" fioReader.readFio: isImage and len( self.columns[0].y) is not 0")
if int(self.parameters['width']) * int(self.parameters['height']) != len(self.columns[0].x):
raise ValueError(" fioReader.reasdFio: isImage and width*height != len(x)")
xlocal = np.asarray(self.columns[0].x, dtype=np.float64)
self.columns[0].x = xlocal[:]
return True
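# Minimal usage sketch (the file name is hypothetical):
#   f = fioReader("/data/scan_00001.fio")
#   f.parameters          # dict built from the %p block
#   f.columns[0].name     # first dependent column
#   f.columns[0].x, f.columns[0].y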
| 35.210811
| 104
| 0.4716
|
0e1b05341dd4fc134ad1f7c9580d033ae39780c6
| 1,294
|
py
|
Python
|
TME 13. GAIL/Behavioral-Cloning/behavioral_cloning.py
|
hanouticelina/reinforcement-learning
|
c7c6765486ea9546bbd8ce75e6032a408a1410cf
|
[
"MIT"
] | null | null | null |
TME 13. GAIL/Behavioral-Cloning/behavioral_cloning.py
|
hanouticelina/reinforcement-learning
|
c7c6765486ea9546bbd8ce75e6032a408a1410cf
|
[
"MIT"
] | null | null | null |
TME 13. GAIL/Behavioral-Cloning/behavioral_cloning.py
|
hanouticelina/reinforcement-learning
|
c7c6765486ea9546bbd8ce75e6032a408a1410cf
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.distributions import Categorical
from torch.optim import Adam
import numpy as np
from utils import *
import math
from expert_dataset import *
class BehavioralCloning(nn.Module):
"""
Behavioral Cloning class.
"""
def __init__(self, input_size, output_size):
"""
Args
- input_size: dimension of the states space.
- output_size: number of possible actions.
"""
super(BehavioralCloning, self).__init__()
self.act_n = output_size
self.obs_n = input_size
self.pol = nn.Sequential(*[nn.Linear(self.obs_n, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, self.act_n),
nn.Softmax(dim=-1)])
def forward(self, obs):
return self.pol(obs)
def act(self, ob):
with torch.no_grad():
probs = self.forward(ob)
m = Categorical(probs)
action = m.sample().item()
return action, probs[action]
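# Minimal usage sketch (dimensions are illustrative, e.g. a CartPole-like task):
#   policy = BehavioralCloning(input_size=4, output_size=2)
#   action, prob = policy.act(torch.zeros(4))   # sampled action and its probability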
| 28.130435
| 62
| 0.525502
|
6a06811e8aa301c53c84c8da8e5ad4293552e49b
| 4,693
|
py
|
Python
|
.ipynb_checkpoints/og_app-checkpoint.py
|
ACeldo1/TwitterNLP
|
77b46d8cbcdfddaa541dfdf9e975aac761c9a729
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/og_app-checkpoint.py
|
ACeldo1/TwitterNLP
|
77b46d8cbcdfddaa541dfdf9e975aac761c9a729
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/og_app-checkpoint.py
|
ACeldo1/TwitterNLP
|
77b46d8cbcdfddaa541dfdf9e975aac761c9a729
|
[
"MIT"
] | null | null | null |
import flask
import os
import pickle
import pandas as pd
import skimage
import string
import nltk
from nltk.corpus import stopwords, wordnet
from nltk.stem import PorterStemmer, WordNetLemmatizer, SnowballStemmer
stopwords = stopwords.words('english')
app = flask.Flask(__name__, template_folder='templates')
#path_to_vectorizer = 'models/vectorizer.pkl'
path_to_vectorizer = "models/vectorizer_ngram3.pkl"
#path_to_text_classifier = 'models/text-classifier.pkl'
path_to_text_classifier = "models/comments_model_ngram3.pkl"
# path_to_image_classifier = 'models/image-classifier.pkl'
with open(path_to_vectorizer, 'rb') as f:
vectorizer = pickle.load(f)
with open(path_to_text_classifier, 'rb') as f:
model = pickle.load(f)
#with open(path_to_image_classifier, 'rb') as f:
# image_classifier = pickle.load(f)
##### pipeline from jupyter notebook ######
translator = str.maketrans('', '', string.punctuation)
def remove_stopwords(a):
return " ".join([word for word in nltk.word_tokenize(a) if word not in stopwords])
def remove_sp_char(a):
return a.translate(translator)
def text_pipeline2(a):
a = remove_sp_char(a.lower())
a = remove_stopwords(a)
return a
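# Example of the cleaning pipeline (input chosen for illustration):
#   text_pipeline2("The Senate votes today!")  ->  "senate votes today"
# i.e. lower-case, strip punctuation, then drop NLTK English stopwords.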
###################################################
@app.route('/', methods=['GET', 'POST'])
def main():
if flask.request.method == 'GET':
# Just render the initial form, to get input
return(flask.render_template('main.html'))
if flask.request.method == 'POST':
# Get the input from the user.
user_input_text = flask.request.form['user_input_text']
parsed_text = text_pipeline2(user_input_text)
# Turn the text into numbers using our vectorizer
X = vectorizer.transform([parsed_text])
# Make a prediction
predictions = model.predict(X)
# Get the first and only value of the prediction.
prediction = predictions[0]
# Get the predicted probabs
predicted_probas = model.predict_proba(X)
# Get the value of the first, and only, predicted proba.
predicted_proba = predicted_probas[0]
        # The first element in the predicted probas is % democrat
        precent_democrat = predicted_proba[0]
        # The second element in the predicted probas is % republican
precent_republican = predicted_proba[1]
return flask.render_template('main.html',
input_text=user_input_text,
parsed_text=parsed_text,
result=prediction,
precent_democrat=precent_democrat,
precent_republican=precent_republican)
@app.route('/input_values/', methods=['GET', 'POST'])
def input_values():
if flask.request.method == 'GET':
# Just render the initial form, to get input
return(flask.render_template('input_values.html'))
if flask.request.method == 'POST':
# Get the input from the user.
var_one = flask.request.form['input_variable_one']
var_two = flask.request.form['another-input-variable']
var_three = flask.request.form['third-input-variable']
list_of_inputs = [var_one, var_two, var_three]
return(flask.render_template('input_values.html',
returned_var_one=var_one,
returned_var_two=var_two,
returned_var_three=var_three,
returned_list=list_of_inputs))
return(flask.render_template('input_values.html'))
@app.route('/images/')
def images():
return flask.render_template('images.html')
@app.route('/bootstrap/')
def bootstrap():
return flask.render_template('bootstrap.html')
@app.route('/classify_image/', methods=['GET', 'POST'])
def classify_image():
if flask.request.method == 'GET':
# Just render the initial form, to get input
return(flask.render_template('classify_image.html'))
if flask.request.method == 'POST':
# Get file object from user input.
file = flask.request.files['file']
if file:
# Read the image using skimage
img = skimage.io.imread(file)
# Resize the image to match the input the model will accept
img = skimage.transform.resize(img, (28, 28))
# Flatten the pixels from 28x28 to 784x0
img = img.flatten()
# Get prediction of image from classifier
predictions = image_classifier.predict([img])
# Get the value of the prediction
prediction = predictions[0]
return flask.render_template('classify_image.html', prediction=str(prediction))
return(flask.render_template('classify_image.html'))
if __name__ == '__main__':
app.run(debug=True)
| 28.969136
| 91
| 0.664394
|
b7386bbf28d592cc303c8a3da3826fa258b9334d
| 2,610
|
py
|
Python
|
godhand/__init__.py
|
zombie-guru/godhand-api
|
05213589e07b59c59f516ba91aeae9fefba8bf18
|
[
"MIT"
] | 1
|
2017-01-03T04:43:23.000Z
|
2017-01-03T04:43:23.000Z
|
godhand/__init__.py
|
zombie-guru/godhand-api
|
05213589e07b59c59f516ba91aeae9fefba8bf18
|
[
"MIT"
] | null | null | null |
godhand/__init__.py
|
zombie-guru/godhand-api
|
05213589e07b59c59f516ba91aeae9fefba8bf18
|
[
"MIT"
] | null | null | null |
import logging
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
import couchdb.client
import couchdb.http
from .config import GodhandConfiguration
from .models import init_views
from .models import Subscription
from .utils import owner_group
from .utils import subscription_group
from .utils import wait_for_couchdb
def setup_godhand_config(config):
settings = config.get_settings()
cfg = GodhandConfiguration.from_env(
couchdb_url=settings.get('couchdb_url', None),
disable_auth=settings.get('disable_auth', None),
google_client_id=settings.get('google_client_id'),
google_client_secret=settings.get('google_client_secret'),
google_client_appname=settings.get('google_client_appname'),
auth_secret=settings.get('auth_secret'),
root_email=settings.get('root_email'),
token_secret=settings.get('token_secret'),
)
config.registry['godhand:cfg'] = cfg
def main(global_config, **settings):
logging.getLogger('PIL.PngImagePlugin').setLevel('INFO')
logging.getLogger('PIL.Image').setLevel('INFO')
config = Configurator(settings=settings)
config.include('cornice')
setup_godhand_config(config)
setup_db(config)
config.include('godhand.auth')
setup_acl(config)
config.scan('.views')
return config.make_wsgi_app()
def setup_db(config):
couchdb_url = config.registry['godhand:cfg'].couchdb_url
wait_for_couchdb(couchdb_url)
client = couchdb.client.Server(couchdb_url)
try:
db = client.create('godhand')
except couchdb.http.PreconditionFailed:
db = client['godhand']
try:
authdb = client.create('auth')
except couchdb.http.PreconditionFailed:
authdb = client['auth']
config.registry['godhand:db'] = db
config.registry['godhand:authdb'] = authdb
init_views(db)
def groupfinder(userid, request):
subscriptions = Subscription.query(
request.registry['godhand:db'], subscriber_id=userid)
return [
owner_group(userid)
] + [
subscription_group(x.publisher_id) for x in subscriptions
]
def setup_acl(config):
secret = config.registry['godhand:cfg'].auth_secret
config.set_authorization_policy(ACLAuthorizationPolicy())
config.set_authentication_policy(AuthTktAuthenticationPolicy(
secret, callback=groupfinder, hashalg='sha512'))
config.set_session_factory(SignedCookieSessionFactory(secret))
| 32.625
| 68
| 0.737165
|
220b08edf9664aa5a9b6070e32bce80ee7bd3d24
| 1,601
|
py
|
Python
|
tests/test_cli_buckets_list.py
|
hall-lab/cloud-polices
|
85075d909e6450c5ae21efb566ab9075f8fe8b16
|
[
"MIT"
] | null | null | null |
tests/test_cli_buckets_list.py
|
hall-lab/cloud-polices
|
85075d909e6450c5ae21efb566ab9075f8fe8b16
|
[
"MIT"
] | null | null | null |
tests/test_cli_buckets_list.py
|
hall-lab/cloud-polices
|
85075d909e6450c5ae21efb566ab9075f8fe8b16
|
[
"MIT"
] | null | null | null |
import re, subprocess, unittest
from mock import MagicMock, Mock, patch
from click.testing import CliRunner
from .context import hlcloud
from hlcloud import cli_buckets_list
class CliBucketsListTest(unittest.TestCase):
@patch("google.cloud.storage.Client")
def test_buckets_list(self, client_patch):
expected_help = re.compile('Usage: hlcloud buckets list')
out = subprocess.check_output(['hlcloud', 'buckets', 'list', '--help'], stderr=subprocess.STDOUT)
self.assertRegex(str(out), expected_help)
client = Mock()
client_patch.return_value = client
bucket = Mock()
bucket.name = "BUCKET"
bucket.storage_class = "REGIONAL"
bucket.location = "US-CENTRAL1"
bucket.labels = {
"user": "rfranklin",
"project": "mgi-project",
"pipeline": "mgi_pipeline",
}
bucket.update = MagicMock(return_value=1)
client.list_buckets = MagicMock(return_value=[bucket])
runner = CliRunner()
        result = runner.invoke(cli_buckets_list.buckets_list_cmd)
self.assertEqual(result.exit_code, 0)
expected_output = """NAME LOCATION CLASS USER PROJECT PIPELINE
------ ----------- -------- --------- ----------- ------------
BUCKET US-CENTRAL1 REGIONAL rfranklin mgi-project mgi_pipeline"""
self.assertEqual(result.output, expected_output)
client.list_buckets.assert_called()
        bucket.update.assert_called()
# -- CliBucketsTest
if __name__ == '__main__':
unittest.main(verbosity=2)
#-- __main__
| 32.673469
| 105
| 0.63273
|
c793579366c0bb42aa25ccc1d12349d0e49a61cb
| 13,166
|
py
|
Python
|
test/parser/test_parser_visitor.py
|
georgia-tech-db/Eva
|
ab457457a0bf39940384f7036e9d2f5742283432
|
[
"Apache-2.0"
] | 6
|
2019-09-19T14:24:14.000Z
|
2019-11-27T16:47:48.000Z
|
test/parser/test_parser_visitor.py
|
georgia-tech-db/Eva
|
ab457457a0bf39940384f7036e9d2f5742283432
|
[
"Apache-2.0"
] | 10
|
2019-09-09T14:31:27.000Z
|
2019-12-12T20:22:41.000Z
|
test/parser/test_parser_visitor.py
|
georgia-tech-db/Eva
|
ab457457a0bf39940384f7036e9d2f5742283432
|
[
"Apache-2.0"
] | 17
|
2019-09-18T14:24:46.000Z
|
2019-12-12T22:50:19.000Z
|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
import numpy as np
from unittest import mock
from unittest.mock import MagicMock, call
from eva.models.storage.batch import Batch
from eva.parser.parser_visitor import ParserVisitor
from eva.parser.evaql.evaql_parser import evaql_parser
from eva.expression.abstract_expression import ExpressionType
from eva.parser.table_ref import TableRef
from eva.parser.types import FileFormatType
from antlr4 import TerminalNode
class ParserVisitorTests(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@mock.patch.object(ParserVisitor, 'visit')
def test_should_query_specification_visitor(self, mock_visit):
mock_visit.side_effect = ["columns",
{"from": ["tables"], "where": "predicates"}]
visitor = ParserVisitor()
ctx = MagicMock()
child_1 = MagicMock()
child_1.getRuleIndex.return_value = evaql_parser.RULE_selectElements
child_2 = MagicMock()
child_2.getRuleIndex.return_value = evaql_parser.RULE_fromClause
ctx.children = [None, child_1, child_2]
expected = visitor.visitQuerySpecification(ctx)
mock_visit.assert_has_calls([call(child_1), call(child_2)])
self.assertEqual(expected.from_table, "tables")
self.assertEqual(expected.where_clause, "predicates")
self.assertEqual(expected.target_list, "columns")
@mock.patch.object(ParserVisitor, 'visit')
def test_from_clause_visitor(self, mock_visit):
mock_visit.side_effect = ["tables", "predicates"]
ctx = MagicMock()
tableSources = MagicMock()
whereExpr = MagicMock()
ctx.whereExpr = whereExpr
ctx.tableSources.return_value = tableSources
visitor = ParserVisitor()
expected = visitor.visitFromClause(ctx)
mock_visit.assert_has_calls([call(tableSources), call(whereExpr)])
self.assertEqual(expected.get('where'), 'predicates')
self.assertEqual(expected.get('from'), 'tables')
def test_logical_operator(self):
ctx = MagicMock()
visitor = ParserVisitor()
self.assertEqual(
visitor.visitLogicalOperator(ctx),
ExpressionType.INVALID)
ctx.getText.return_value = 'OR'
self.assertEqual(
visitor.visitLogicalOperator(ctx),
ExpressionType.LOGICAL_OR)
ctx.getText.return_value = 'AND'
self.assertEqual(
visitor.visitLogicalOperator(ctx),
ExpressionType.LOGICAL_AND)
def test_comparison_operator(self):
ctx = MagicMock()
visitor = ParserVisitor()
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.INVALID
)
ctx.getText.return_value = '='
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.COMPARE_EQUAL
)
ctx.getText.return_value = '<'
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.COMPARE_LESSER
)
ctx.getText.return_value = '>'
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.COMPARE_GREATER
)
ctx.getText.return_value = '@>'
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.COMPARE_CONTAINS
)
ctx.getText.return_value = '<@'
self.assertEqual(
visitor.visitComparisonOperator(ctx),
ExpressionType.COMPARE_IS_CONTAINED
)
# To be fixed
# def test_visit_full_column_name_none(self):
# ''' Testing for getting a Warning when column name is None
# Function: visitFullColumnName
# '''
# ctx = MagicMock()
# visitor = ParserVisitor()
# ParserVisitor.visit = MagicMock()
# ParserVisitor.visit.return_value = None
# with self.assertWarns(SyntaxWarning, msg='Column Name Missing'):
# visitor.visitFullColumnName(ctx)
# def test_visit_table_name_none(self):
# ''' Testing for getting a Warning when table name is None
# Function: visitTableName
# '''
# ctx = MagicMock()
# visitor = ParserVisitor()
# ParserVisitor.visit = MagicMock()
# ParserVisitor.visit.return_value = None
# with self.assertWarns(SyntaxWarning, msg='Invalid from table'):
# visitor.visitTableName(ctx)
def test_logical_expression(self):
''' Testing for break in code if len(children) < 3
Function : visitLogicalExpression
'''
ctx = MagicMock()
visitor = ParserVisitor()
# Test for no children
ctx.children = []
expected = visitor.visitLogicalExpression(ctx)
self.assertEqual(expected, None)
# Test for one children
child_1 = MagicMock()
ctx.children = [child_1]
expected = visitor.visitLogicalExpression(ctx)
self.assertEqual(expected, None)
# Test for two children
child_1 = MagicMock()
child_2 = MagicMock()
ctx.children = [child_1, child_2]
expected = visitor.visitLogicalExpression(ctx)
self.assertEqual(expected, None)
@mock.patch.object(ParserVisitor, 'visitChildren')
def test_visit_string_literal_none(self, mock_visit):
''' Testing when string literal is None
Function: visitStringLiteral
'''
visitor = ParserVisitor()
ctx = MagicMock()
ctx.STRING_LITERAL.return_value = None
# ParserVisitor.visitChildren = MagicMock()
# mock_visit = ParserVisitor.visitChildren
visitor.visitStringLiteral(ctx)
mock_visit.assert_has_calls([call(ctx)])
def test_visit_constant(self):
''' Testing for value of returned constant
when real literal is not None
Function: visitConstant
'''
ctx = MagicMock()
visitor = ParserVisitor()
ctx.REAL_LITERAL.return_value = '5'
expected = visitor.visitConstant(ctx)
self.assertEqual(
expected.evaluate(),
Batch(pd.DataFrame([float(ctx.getText())])))
def test_visit_int_array_literal(self):
''' Testing int array literal
Function: visitArrayLiteral
'''
ctx = MagicMock()
visitor = ParserVisitor()
ctx.getText.return_value = '[1,2,3,4]'
expected = visitor.visitArrayLiteral(ctx)
self.assertEqual(
expected.evaluate(),
Batch(pd.DataFrame({0: [np.array([1, 2, 3, 4])]}))
)
def test_visit_str_array_literal(self):
''' Testing str array literal
Function: visitArrayLiteral
'''
ctx = MagicMock()
visitor = ParserVisitor()
ctx.getText.return_value = "['person', 'car']"
expected = visitor.visitArrayLiteral(ctx)
self.assertEqual(
expected.evaluate(),
Batch(pd.DataFrame({0: [np.array(['person', 'car'])]}))
)
def test_visit_query_specification_base_exception(self):
''' Testing Base Exception error handling
Function: visitQuerySpecification
'''
visitor = ParserVisitor()
ctx = MagicMock()
child_1 = MagicMock()
child_2 = MagicMock()
ctx.children = [None, child_1, child_2]
child_1.getRuleIndex.side_effect = BaseException
# expected = visitor.visitQuerySpecification(ctx)
self.assertRaises(BaseException, visitor.visitQuerySpecification, ctx)
##################################################################
# UDFs
##################################################################
@mock.patch.object(ParserVisitor, 'visit')
@mock.patch('eva.parser.parser_visitor._functions.FunctionExpression')
def test_visit_udf_function_call(self, func_mock, visit_mock):
ctx = MagicMock()
udf_name = 'name'
udf_output = 'label'
func_args = [MagicMock(), MagicMock()]
values = {ctx.simpleId.return_value: udf_name,
ctx.functionArgs.return_value: func_args,
ctx.dottedId.return_value: udf_output}
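        # Map each mocked sub-context to a canned value so the patched visit()
        # can be routed through side_effect below instead of real parsing.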
def side_effect(arg):
return values[arg]
visit_mock.side_effect = side_effect
visitor = ParserVisitor()
actual = visitor.visitUdfFunction(ctx)
visit_mock.assert_has_calls(
[call(ctx.simpleId()), call(ctx.dottedId()),
call(ctx.functionArgs())])
func_mock.assert_called_with(None, name='name', output=udf_output)
for arg in func_args:
func_mock.return_value.append_child.assert_any_call(arg)
self.assertEqual(actual, func_mock.return_value)
@mock.patch.object(ParserVisitor, 'visit')
def test_visit_function_args(self, visit_mock):
ctx = MagicMock()
obj = MagicMock(spec=TerminalNode())
ctx.children = ['arg1', obj, 'arg2']
visit_mock.side_effect = [1, 2]
visitor = ParserVisitor()
actual = visitor.visitFunctionArgs(ctx)
visit_mock.assert_has_calls([call('arg1'), call('arg2')])
self.assertEqual(actual, [1, 2])
@mock.patch.object(ParserVisitor, 'visit')
@mock.patch('eva.parser.parser_visitor._functions.CreateUDFStatement')
def test_visit_create_udf(self, create_udf_mock, visit_mock):
ctx = MagicMock()
ctx.children = [MagicMock() for i in range(5)]
ctx.children[0].getRuleIndex.return_value = evaql_parser.RULE_udfName
ctx.children[1].getRuleIndex.return_value = evaql_parser. \
RULE_ifNotExists
ctx.children[2].getRuleIndex.return_value = evaql_parser. \
RULE_createDefinitions
ctx.children[3].getRuleIndex.return_value = evaql_parser.RULE_udfType
ctx.children[4].getRuleIndex.return_value = evaql_parser.RULE_udfImpl
ctx.createDefinitions.return_value.__len__.return_value = 2
udf_name = 'name'
udf_type = 'classification'
udf_impl = MagicMock()
udf_impl.value = 'udf_impl'
values = {
ctx.udfName.return_value: udf_name,
ctx.udfType.return_value: udf_type,
ctx.udfImpl.return_value: udf_impl,
ctx.createDefinitions.return_value: 'col'}
def side_effect(arg):
return values[arg]
visit_mock.side_effect = side_effect
visitor = ParserVisitor()
actual = visitor.visitCreateUdf(ctx)
visit_mock.assert_has_calls(
[call(ctx.udfName()),
call(ctx.createDefinitions(0)),
call(ctx.createDefinitions(1)),
call(ctx.udfType()),
call(ctx.udfImpl())])
create_udf_mock.assert_called_once()
create_udf_mock.assert_called_with(
udf_name, True, 'col', 'col', 'udf_impl', udf_type)
self.assertEqual(actual, create_udf_mock.return_value)
##################################################################
# LOAD DATA Statement
##################################################################
@mock.patch.object(ParserVisitor, 'visit')
@mock.patch('eva.parser.parser_visitor._load_statement.LoadDataStatement')
def test_visit_load_statement(self, mock_load, mock_visit):
ctx = MagicMock()
table = 'myVideo'
path = MagicMock()
path.value = 'video.mp4'
column_list = None
file_format = FileFormatType.VIDEO
file_options = {}
file_options['file_format'] = file_format
params = {ctx.fileName.return_value: path,
ctx.tableName.return_value: table,
ctx.fileOptions.return_value: file_options,
ctx.uidList.return_value: column_list}
def side_effect(arg):
return params[arg]
mock_visit.side_effect = side_effect
visitor = ParserVisitor()
visitor.visitLoadStatement(ctx)
mock_visit.assert_has_calls([call(ctx.fileName()),
call(ctx.tableName()),
call(ctx.fileOptions()),
call(ctx.uidList())])
mock_load.assert_called_once()
mock_load.assert_called_with(TableRef('myVideo'), 'video.mp4',
column_list,
file_options)
| 35.297587
| 78
| 0.61955
|
710f22fe8f8fc56a3cf9f8befc330e883355c259
| 5,381
|
py
|
Python
|
test/functional/rpc_preciousblock.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/rpc_preciousblock.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/rpc_preciousblock.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
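    # Walk back from node_src's tip until reaching a block node_dest already
    # has, collecting the missing block hashes along the way.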
while True:
try:
assert(len(node_dest.getblock(blockhash, False)) > 0)
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(VivuscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
gen_address = lambda i: self.nodes[i].get_deterministic_priv_key().address # A non-wallet address to mine to
self.nodes[0].generatetoaddress(1, gen_address(0))
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.nodes[1].generatetoaddress(2, gen_address(1))[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.nodes[0].generatetoaddress(3, gen_address(0))[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.nodes[1].generatetoaddress(3, gen_address(1))[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generatetoaddress(1, gen_address(0))
assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generatetoaddress(4, gen_address(2))
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
| 46.387931
| 127
| 0.676454
|
9291e026aa7a85f7a0f73e86cb6857e77b918766
| 6,144
|
py
|
Python
|
tests/test_web_delta.py
|
N-Buchanan/web_delta
|
1497d9624ae4afa59d7c44176c479845c4993130
|
[
"MIT"
] | null | null | null |
tests/test_web_delta.py
|
N-Buchanan/web_delta
|
1497d9624ae4afa59d7c44176c479845c4993130
|
[
"MIT"
] | null | null | null |
tests/test_web_delta.py
|
N-Buchanan/web_delta
|
1497d9624ae4afa59d7c44176c479845c4993130
|
[
"MIT"
] | null | null | null |
from web_delta.web_delta import WebDelta, RateLimit
import unittest
import os
import pickle
import threading
from flask import Flask
import logging
from queue import Queue
# set up flask app for testing
app = Flask(__name__)
@app.route('/static')
def index():
return 'static'
@app.route('/changes')
def changes():
changes.counter += 1
return 'changes {}'.format(changes.counter)
@app.route('/fail')
def fail():
if fail.counter < 1:
fail.counter += 1
return 'None'
return 'fail'
changes.counter = 0
fail.counter = 0
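# The counters above are plain function attributes: /changes returns fresh
# content on every request, and /fail returns a usable body only from its
# second request onward (used to exercise the retry path).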
# suppress flask logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
def fake_parse(html):
"""
Stand-in for a parse function
"""
return html
def fail_parse(html):
if 'None' in html:
return None
return html
class TestWebDelta(unittest.TestCase):
"""
Tests for the WebDelta library
"""
@classmethod
def setUpClass(cls):
"""
open test flask app in another thread
"""
t = threading.Thread(target=app.run)
t.daemon = True # stop flask when tests are done
t.start()
def setUp(self):
"""
Create cache file that we will use in our tests
"""
cache = {}
cache[('http://localhost:5000/static', fake_parse.__name__)] = 'static'
cache[('http://localhost:5000/changes', fake_parse.__name__)] = ''
# write cache file
with open('test.cache', 'wb') as handle:
pickle.dump(cache, handle, protocol=pickle.HIGHEST_PROTOCOL)
def tearDown(self):
"""
Undo any changes we've made to the cache file
"""
try:
os.remove('test.cache')
except FileNotFoundError:
# we wanted it gone anyway
pass
def test_one_site(self):
"""
Test a single registered site
"""
delta = WebDelta()
delta.register('http://localhost:5000/static', fake_parse)
result = delta.get_all()
self.assertEqual(result[0][1], 'static')
self.assertEqual(len(result), 1)
def test_url_matches(self):
"""
Test that the urls returned match the ones registered
"""
delta = WebDelta()
sites = ['http://localhost:5000/static', 'http://localhost:5000/changes']
for site in sites:
delta.register(site, fake_parse)
for result in delta.get_all():
self.assertIn(result[0], sites)
def test_only_changes(self):
"""
Test that calling get_new only returns results that have changed
"""
delta = WebDelta(cache_file='test.cache')
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
result = delta.get_new()
self.assertEqual(result[0][0], 'http://localhost:5000/changes')
self.assertEqual(len(result), 1)
def test_clear(self):
"""
Test that clearing the tasks actually works
"""
delta = WebDelta(cache_file='test.cache')
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
result = delta.get_new()
self.assertEqual(len(result), 1)
delta.clear_tasks()
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
result = delta.get_new()
self.assertEqual(len(result), 2)
def test_new_and_all(self):
"""
Test that get_all and get_new return the same thing if there isn't a cache file
"""
delta = WebDelta()
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
new_results = delta.get_new()
all_results = delta.get_all()
self.assertEqual(len(new_results), len(all_results))
def test_rate_limit(self):
"""
Test that the RateLimit object is correctly used
"""
delta = WebDelta(rate_limit=RateLimit(1,1,1,1))
self.assertEqual(delta.rate_limit, 1 + 60 + 60 * 60 + 60 * 60 * 24)
def test_default_rate_limit(self):
"""
Test that the default rate limit is set properly
"""
delta = WebDelta()
self.assertEqual(delta.rate_limit, 60)
def test_continuous_new(self):
"""
Test that get_continuous_new adds new results to the queue and doesn't
include results already in the cache
"""
delta = WebDelta(rate_limit=RateLimit(1,0,0,0), cache_file='test.cache')
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
queue = Queue()
delta.get_continuous_new(queue)
old = ''
new = ''
for i in range(5):
new = queue.get()
self.assertNotEqual(new, old)
old = new
delta.stop()
def test_continuous_all(self):
"""
        Test that get_continuous_all adds new results to the queue and includes
results already in the cache
"""
delta = WebDelta(rate_limit=RateLimit(1,0,0,0), cache_file='test.cache')
delta.register('http://localhost:5000/static', fake_parse)
delta.register('http://localhost:5000/changes', fake_parse)
queue = Queue()
delta.get_continuous_all(queue)
results = []
for i in range(5):
results.append(queue.get()[1]) # only care about the results, not the sites
delta.stop()
self.assertIn('static', results)
def test_failed_response(self):
"""
Test that the request is retried upon failure
"""
delta = WebDelta()
delta.register('http://localhost:5000/fail', fail_parse)
result = delta.get_all()
self.assertEqual(result[0][1], 'fail')
| 26.829694
| 91
| 0.594727
|
2d845ea736f9579abe8ea07ff4252fcf6462afa7
| 4,729
|
py
|
Python
|
twint/url.py
|
mabalegend/twint
|
0349cb4eafa2829ff61ab712a9b36a4fea9fdb01
|
[
"MIT"
] | null | null | null |
twint/url.py
|
mabalegend/twint
|
0349cb4eafa2829ff61ab712a9b36a4fea9fdb01
|
[
"MIT"
] | null | null | null |
twint/url.py
|
mabalegend/twint
|
0349cb4eafa2829ff61ab712a9b36a4fea9fdb01
|
[
"MIT"
] | null | null | null |
import datetime, time
from sys import platform
import logging as logme
mobile = "https://mobile.twitter.com"
base = "https://twitter.com/i"
def _sanitizeQuery(base,params):
_serialQuery = ""
for p in params:
_serialQuery += p[0]+"="+p[1]+"&"
_serialQuery = base + "?" + _serialQuery[:-1].replace(":", "%3A").replace(" ", "%20")
return _serialQuery
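# Illustrative example (not part of the original module):
#   _sanitizeQuery("https://mobile.twitter.com/search", [("q", "from:jack"), ("lang", "en")])
# would serialize to
#   "https://mobile.twitter.com/search?q=from%3Ajack&lang=en"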
def _formatDate(date):
if "win" in platform:
return f'\"{date.split()[0]}\"'
try:
return int(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timestamp())
except ValueError:
return int(datetime.datetime.strptime(date, "%Y-%m-%d").timestamp())
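# Note: the platform check above matches any sys.platform containing "win"
# (including macOS's "darwin") and returns the quoted date string; elsewhere a
# Unix timestamp is returned, derived from the naive (local-time) datetime.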
async def Favorites(username, init):
logme.debug(__name__+':Favorites')
url = f"{mobile}/{username}/favorites?lang=en"
if init != '-1':
url += f"&max_id={init}"
return url
async def Followers(username, init):
logme.debug(__name__+':Followers')
url = f"{mobile}/{username}/followers?lang=en"
if init != '-1':
url += f"&cursor={init}"
return url
async def Following(username, init):
logme.debug(__name__+':Following')
url = f"{mobile}/{username}/following?lang=en"
if init != '-1':
url += f"&cursor={init}"
return url
async def MobileProfile(username, init):
logme.debug(__name__+':MobileProfile')
url = f"{mobile}/{username}?lang=en"
if init != '-1':
url += f"&max_id={init}"
return url
async def Profile(username, init):
logme.debug(__name__+':Profile')
url = f"{base}/profiles/show/{username}/timeline/tweets?include_"
url += "available_features=1&lang=en&include_entities=1"
url += "&include_new_items_bar=true"
if init != '-1':
url += f"&max_position={init}"
return url
async def Search(config, init):
logme.debug(__name__ + ':Search')
url = "https://mobile.twitter.com/search"
q = ""
params = [
('vertical', 'default'),
('src', 'unkn'),
('include_available_features', '1'),
('include_entities', '1'),
#('max_position', str(init)),
('reset_error_state', 'false'),
('prefetchTimestamp', str(int(time.time() * 1000)))
]
if init != '-1':
url += init
if not config.Popular_tweets:
params.append(('f', 'tweets'))
if config.Lang:
params.append(("l", config.Lang))
params.append(("lang", "en"))
if config.Query:
q += f" from:{config.Query}"
if config.Username:
q += f" from:{config.Username}"
if config.Geo:
config.Geo = config.Geo.replace(" ", "")
q += f" geocode:{config.Geo}"
if config.Search:
q += f" {config.Search}"
if config.Year:
q += f" until:{config.Year}-1-1"
if config.Since:
q += f" since:{_formatDate(config.Since)}"
if config.Until:
q += f" until:{_formatDate(config.Until)}"
if config.Email:
q += ' "mail" OR "email" OR'
q += ' "gmail" OR "e-mail"'
if config.Phone:
q += ' "phone" OR "call me" OR "text me"'
if config.Verified:
q += " filter:verified"
if config.To:
q += f" to:{config.To}"
if config.All:
q += f" to:{config.All} OR from:{config.All} OR @{config.All}"
if config.Near:
q += f' near:"{config.Near}"'
if config.Images:
q += " filter:images"
if config.Videos:
q += " filter:videos"
if config.Media:
q += " filter:media"
if config.Replies:
q += " filter:replies"
if config.Native_retweets:
q += " filter:nativeretweets"
if config.Min_likes:
q += f" min_faves:{config.Min_likes}"
if config.Min_retweets:
q += f" min_retweets:{config.Min_retweets}"
if config.Min_replies:
q += f" min_replies:{config.Min_replies}"
if config.Links == "include":
q += " filter:links"
elif config.Links == "exclude":
q += " exclude:links"
if config.Source:
q += f" source:\"{config.Source}\""
if config.Members_list:
q += f" list:{config.Members_list}"
if config.Filter_retweets:
q += f" exclude:nativeretweets exclude:retweets"
if config.Custom_query:
q = config.Custom_query
params.append(("q", q))
_serialQuery = _sanitizeQuery(url, params)
return url, params, _serialQuery
async def Thread(urlz, init):
# logme.debug(__name__+':Profile')
# url = f"{base}/profiles/show/{username}/timeline/tweets?include_"
# url += "available_features=1&lang=en&include_entities=1"
# url += "&include_new_items_bar=true"
# if init != '-1':
# url += f"&max_position={init}"
url = 'https://mobile.twitter.com' + urlz
return url
| 29.191358
| 89
| 0.581941
|
2a5e6a4d0bb87f26c257b43bfded653daf1bfe11
| 6,540
|
py
|
Python
|
sdk/core/azure-core/tests/test_streaming.py
|
v-rajdeep/azure-sdk-for-python
|
8568a833322f777087df2097e8dc8cc22cd67750
|
[
"MIT"
] | 1
|
2021-09-16T02:33:52.000Z
|
2021-09-16T02:33:52.000Z
|
sdk/core/azure-core/tests/test_streaming.py
|
v-rajdeep/azure-sdk-for-python
|
8568a833322f777087df2097e8dc8cc22cd67750
|
[
"MIT"
] | 1
|
2019-08-05T19:14:28.000Z
|
2019-08-05T19:30:05.000Z
|
sdk/core/azure-core/tests/test_streaming.py
|
v-rajdeep/azure-sdk-for-python
|
8568a833322f777087df2097e8dc8cc22cd67750
|
[
"MIT"
] | 1
|
2016-04-19T22:15:47.000Z
|
2016-04-19T22:15:47.000Z
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# --------------------------------------------------------------------------
from azure.core import PipelineClient
from azure.core.exceptions import DecodeError
def test_decompress_plain_no_header():
# expect plain text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test.txt".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=True)
content = b"".join(list(data))
decoded = content.decode('utf-8')
assert decoded == "test"
def test_compress_plain_no_header():
# expect plain text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test.txt".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=False)
content = b"".join(list(data))
decoded = content.decode('utf-8')
assert decoded == "test"
def test_decompress_compressed_no_header():
# expect compressed text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test.tar.gz".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=True)
content = b"".join(list(data))
try:
decoded = content.decode('utf-8')
assert False
except UnicodeDecodeError:
pass
def test_compress_compressed_no_header():
# expect compressed text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test.tar.gz".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=False)
content = b"".join(list(data))
try:
decoded = content.decode('utf-8')
assert False
except UnicodeDecodeError:
pass
def test_decompress_plain_header():
# expect error
import requests
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test_with_header.txt".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=True)
try:
content = b"".join(list(data))
assert False
except (requests.exceptions.ContentDecodingError, DecodeError):
pass
def test_compress_plain_header():
# expect plain text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test_with_header.txt".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=False)
content = b"".join(list(data))
decoded = content.decode('utf-8')
assert decoded == "test"
def test_decompress_compressed_header():
# expect plain text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test_with_header.tar.gz".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=True)
content = b"".join(list(data))
decoded = content.decode('utf-8')
assert decoded == "test"
def test_compress_compressed_header():
# expect compressed text
account_name = "coretests"
account_url = "https://{}.blob.core.windows.net".format(account_name)
url = "https://{}.blob.core.windows.net/tests/test_with_header.tar.gz".format(account_name)
client = PipelineClient(account_url)
request = client.get(url)
pipeline_response = client._pipeline.run(request, stream=True)
response = pipeline_response.http_response
data = response.stream_download(client._pipeline, decompress=False)
content = b"".join(list(data))
try:
decoded = content.decode('utf-8')
assert False
except UnicodeDecodeError:
pass
| 43.026316
| 95
| 0.708563
|
84148974992b84fccabec90a5d23fd1ff3f97766
| 10,103
|
py
|
Python
|
Back-End/Python/Basics/Part -4- OOP/04 - Descriptors/08_application_2.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25
|
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/Basics/Part -4- OOP/04 - Descriptors/08_application_2.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1
|
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/Basics/Part -4- OOP/04 - Descriptors/08_application_2.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15
|
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
import collections
class Int:
def __init__(self, min_value=None, max_value=None):
self.min_value = min_value
self.max_value = max_value
def __set_name__(self, owner_class, name):
self.name = name
def __set__(self, instance, value):
if not isinstance(value, int):
raise ValueError(f'{self.name} must be an int.')
if self.min_value is not None and value < self.min_value:
raise ValueError(f'{self.name} must be at least {self.min_value}')
if self.max_value is not None and value > self.max_value:
raise ValueError(f'{self.name} cannot exceed {self.max_value}')
instance.__dict__[self.name] = value
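        # Storing under the same attribute name in the instance dict is safe:
        # a data descriptor (one defining __set__) still takes precedence over
        # the instance attribute on lookup.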
def __get__(self, instance, owner_class):
if instance is None:
return self
else:
return instance.__dict__.get(self.name, None)
class Point2D:
x = Int(min_value=0, max_value=800)
y = Int(min_value=0, max_value=400)
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f'Point2D(x={self.x}, y={self.y})'
def __str__(self):
return f'({self.x}, {self.y})'
p = Point2D(0, 10)
str(p)
# '(0, 10)'
repr(p)
# 'Point2D(x=0, y=10)'
p.x, p.y
# (0, 10)
try:
p = Point2D(0, 500)
except ValueError as ex:
print(ex)
# y cannot exceed 400
isinstance([1, 2, 3], collections.abc.Sequence)
# True
isinstance([1, 2, 3], collections.abc.MutableSequence)
# True
isinstance((1, 2, 3), collections.abc.Sequence)
# True
isinstance((1, 2, 3), collections.abc.MutableSequence)
# False
class Point2DSequence:
def __init__(self, min_length=None, max_length=None):
self.min_length = min_length
self.max_length = max_length
def __set_name__(self, cls, name):
self.name = name
def __set__(self, instance, value):
if not isinstance(value, collections.abc.Sequence):
raise ValueError(f'{self.name} must be a sequence type.')
if self.min_length is not None and len(value) < self.min_length:
raise ValueError(f'{self.name} must contain at least '
f'{self.min_length} elements'
)
if self.max_length is not None and len(value) > self.max_length:
raise ValueError(f'{self.name} cannot contain more than '
f'{self.max_length} elements'
)
for index, item in enumerate(value):
if not isinstance(item, Point2D):
raise ValueError(f'Item at index {index} is not a Point2D instance.')
# value passes checks - want to store it as a mutable sequence so we can
# append to it later
instance.__dict__[self.name] = list(value)
def __get__(self, instance, cls):
if instance is None:
return self
else:
if self.name not in instance.__dict__:
# current point list has not been defined,
# so let's create an empty list
instance.__dict__[self.name] = []
return instance.__dict__.get(self.name)
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
try:
p = Polygon()
except ValueError as ex:
print(ex)
# vertices must contain at least 3 elements
try:
p = Polygon(Point2D(-100,0), Point2D(0, 1), Point2D(1, 0))
except ValueError as ex:
print(ex)
# x must be at least 0
p = Polygon(Point2D(0,0), Point2D(0, 1), Point2D(1, 0))
p.vertices
# [Point2D(x=0, y=0), Point2D(x=0, y=1), Point2D(x=1, y=0)]
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(0,1))
p.vertices
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=0, y=1)]
p.append(Point2D(10, 10))
p.vertices
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=0, y=1), Point2D(x=10, y=10)]
class Polygon:
vertices = Point2DSequence(min_length=3, max_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(0,1))
try:
p.append(Point2D(10, 10))
except ValueError as ex:
print(ex)
# Vertices length is at max (3)
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
class Triangle(Polygon):
vertices = Point2DSequence(min_length=3, max_length=3)
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(0,1))
p.append(Point2D(10, 10))
p.vertices
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=0, y=1), Point2D(x=10, y=10)]
t = Triangle(Point2D(0,0), Point2D(1,0), Point2D(0,1))
try:
t.append(Point2D(10, 10))
except ValueError as ex:
print(ex)
# Vertices length is at max (3)
class Square(Polygon):
vertices = Point2DSequence(min_length=4, max_length=4)
s = Square(Point2D(0,0), Point2D(1,0), Point2D(0,1), Point2D(1, 1))
s.vertices
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=0, y=1), Point2D(x=1, y=1)]
try:
s.append(Point2D(10, 10))
except ValueError as ex:
print(ex)
# Vertices length is at max (4)
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
def __len__(self):
return len(self.vertices)
def __getitem__(self, idx):
return self.vertices[idx]
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(1,1))
len(p)
# 3
list(p)
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=1, y=1)]
p[0], p[1], p[2]
# (Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=1, y=1))
p[0:2]
# [Point2D(x=0, y=0), Point2D(x=1, y=0)]
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
def __len__(self):
return len(self.vertices)
def __getitem__(self, idx):
return self.vertices[idx]
def __iadd__(self, pt):
self.append(pt)
return self
def __contains__(self, pt):
return pt in self.vertices
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(1,1))
list(p)
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=1, y=1)]
p += Point2D(10, 10)
list(p)
# [Point2D(x=0, y=0), Point2D(x=1, y=0), Point2D(x=1, y=1), Point2D(x=10, y=10)]
print(Point2D(0, 0) in p)
# False
class Point2D:
x = Int(min_value=0, max_value=800)
y = Int(min_value=0, max_value=400)
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f'Point2D(x={self.x}, y={self.y})'
def __str__(self):
return f'({self.x}, {self.y})'
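    # Defining __eq__ alone would make Point2D unhashable in Python 3, so
    # __hash__ is defined over the same (x, y) pair; __eq__ also lets
    # membership tests such as `Point2D(0, 0) in p` match by value.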
def __eq__(self, other):
return isinstance(other, Point2D) and self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
class Polygon:
vertices = Point2DSequence(min_length=3)
def __init__(self, *vertices):
self.vertices = vertices
def append(self, pt):
if not isinstance(pt, Point2D):
raise ValueError('Can only append Point2D instances.')
max_length = type(self).vertices.max_length
if max_length is not None and len(self.vertices) >= max_length:
# cannot add more points!
raise ValueError(f'Vertices length is at max ({max_length})')
self.vertices.append(pt)
def __len__(self):
return len(self.vertices)
def __getitem__(self, idx):
return self.vertices[idx]
def __iadd__(self, pt):
self.append(pt)
return self
def __contains__(self, pt):
return pt in self.vertices
p = Polygon(Point2D(0,0), Point2D(1,0), Point2D(1,1))
Point2D(0,0) in p
# True
| 26.941333
| 85
| 0.600911
|
e6c629789888f06ad19137922ab8e50625063808
| 30,787
|
py
|
Python
|
tests/httpwrappers/tests.py
|
Fak3/django
|
1ae8014a0bbae0cc1d951c1ee0f7888b6141f582
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 19
|
2015-07-07T02:08:59.000Z
|
2021-11-08T11:05:40.000Z
|
tests/httpwrappers/tests.py
|
Fak3/django
|
1ae8014a0bbae0cc1d951c1ee0f7888b6141f582
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 31
|
2018-08-26T14:01:16.000Z
|
2018-10-19T07:35:57.000Z
|
tests/httpwrappers/tests.py
|
Fak3/django
|
1ae8014a0bbae0cc1d951c1ee0f7888b6141f582
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 145
|
2019-03-14T18:54:45.000Z
|
2022-03-04T20:25:31.000Z
|
import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import SimpleTestCase
from django.utils.functional import lazystr
class QueryDictTests(SimpleTestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(''))
def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__('foo')
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist('foo'), [])
self.assertNotIn('foo', q)
self.assertEqual(list(q), [])
self.assertEqual(list(q.items()), [])
self.assertEqual(list(q.lists()), [])
self.assertEqual(list(q.keys()), [])
self.assertEqual(list(q.values()), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict('foo=bar')
self.assertEqual(q['foo'], 'bar')
with self.assertRaises(KeyError):
q.__getitem__('bar')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
self.assertIn('foo', q)
self.assertNotIn('bar', q)
self.assertEqual(list(q), ['foo'])
self.assertEqual(list(q.items()), [('foo', 'bar')])
self.assertEqual(list(q.lists()), [('foo', ['bar'])])
self.assertEqual(list(q.keys()), ['foo'])
self.assertEqual(list(q.values()), ['bar'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_urlencode_int(self):
# Normally QueryDict doesn't contain non-string values but lazily
# written tests may make that mistake.
q = QueryDict(mutable=True)
q['a'] = 1
self.assertEqual(q.urlencode(), 'a=1')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
del q['name']
self.assertNotIn('name', q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
self.assertIn('foo', q)
self.assertCountEqual(q, ['foo', 'name'])
self.assertCountEqual(q.items(), [('foo', 'another'), ('name', 'john')])
self.assertCountEqual(q.lists(), [('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertCountEqual(q.keys(), ['foo', 'name'])
self.assertCountEqual(q.values(), ['another', 'john'])
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict('vote=yes&vote=no')
self.assertEqual(q['vote'], 'no')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
self.assertIn('vote', q)
self.assertNotIn('foo', q)
self.assertEqual(list(q), ['vote'])
self.assertEqual(list(q.items()), [('vote', 'no')])
self.assertEqual(list(q.lists()), [('vote', ['yes', 'no'])])
self.assertEqual(list(q.keys()), ['vote'])
self.assertEqual(list(q.values()), ['no'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
with self.assertRaises(AttributeError):
q.__delitem__('vote')
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict('a=b&c=d')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict('a=b&c=d&a=1')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict("a=1&a=2", mutable=True)
y = QueryDict("a=3&a=4")
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict('cur=%A4', encoding='iso-8859-15')
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(q.items()), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
q = q.copy()
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(q.items()), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
def test_querydict_fromkeys(self):
self.assertEqual(QueryDict.fromkeys(['key1', 'key2', 'key3']), QueryDict('key1&key2&key3'))
def test_fromkeys_with_nonempty_value(self):
self.assertEqual(
QueryDict.fromkeys(['key1', 'key2', 'key3'], value='val'),
QueryDict('key1=val&key2=val&key3=val')
)
def test_fromkeys_is_immutable_by_default(self):
# Match behavior of __init__() which is also immutable by default.
q = QueryDict.fromkeys(['key1', 'key2', 'key3'])
with self.assertRaisesMessage(AttributeError, 'This QueryDict instance is immutable'):
q['key4'] = 'nope'
def test_fromkeys_mutable_override(self):
q = QueryDict.fromkeys(['key1', 'key2', 'key3'], mutable=True)
q['key4'] = 'yep'
self.assertEqual(q, QueryDict('key1&key2&key3&key4=yep'))
def test_duplicates_in_fromkeys_iterable(self):
self.assertEqual(QueryDict.fromkeys('xyzzy'), QueryDict('x&y&z&z&y'))
def test_fromkeys_with_nondefault_encoding(self):
key_utf16 = b'\xff\xfe\x8e\x02\xdd\x01\x9e\x02'
value_utf16 = b'\xff\xfe\xdd\x01n\x00l\x00P\x02\x8c\x02'
q = QueryDict.fromkeys([key_utf16], value=value_utf16, encoding='utf-16')
expected = QueryDict('', mutable=True)
expected['ʎǝʞ'] = 'ǝnlɐʌ'
self.assertEqual(q, expected)
def test_fromkeys_empty_iterable(self):
self.assertEqual(QueryDict.fromkeys([]), QueryDict(''))
def test_fromkeys_noniterable(self):
with self.assertRaises(TypeError):
QueryDict.fromkeys(0)
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
# ASCII strings or bytes values are converted to strings.
r['key'] = 'test'
self.assertEqual(r['key'], 'test')
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], 'test')
self.assertIn(b'test', r.serialize_headers())
# Non-ASCII values are serialized to Latin-1.
r['key'] = 'café'
self.assertIn('café'.encode('latin-1'), r.serialize_headers())
# Other unicode values are MIME-encoded (there's no way to pass them as bytes).
r['key'] = '†'
self.assertEqual(r['key'], '=?utf-8?b?4oCg?=')
self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
# The response also converts string or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
headers = list(r.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ('foo', 'bar'))
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
headers = list(r.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ('foo', 'bar'))
self.assertIsInstance(headers[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.__setitem__('føø', 'bar')
with self.assertRaises(UnicodeError):
r.__setitem__('føø'.encode(), 'bar')
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
f = f.decode('utf-8')
h['Content-Disposition'] = 'attachment; filename="%s"' % f
# This one is triggering https://bugs.python.org/issue20747, that is Python
# will itself insert a newline in the header
h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.__setitem__('test\rstr', 'test')
with self.assertRaises(BadHeaderError):
r.__setitem__('test\nstr', 'test')
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertIsNone(r.get('test'))
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
def test_iter_content(self):
r = HttpResponse(['abc', 'def', 'ghi'])
self.assertEqual(r.content, b'abcdefghi')
# test iter content via property
r = HttpResponse()
r.content = ['idan', 'alex', 'jacob']
self.assertEqual(r.content, b'idanalexjacob')
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b'123')
# test odd inputs
r = HttpResponse()
r.content = ['1', '2', 3, '\u079e']
# '\xde\x9e' == unichr(1950).encode()
self.assertEqual(r.content, b'123\xde\x9e')
# .content can safely be accessed multiple times.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b'helloworld')
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(b''.join(r), b'helloworld')
# Accessing .content still works.
self.assertEqual(r.content, b'helloworld')
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(r.content, b'helloworld')
# Additional content can be written to the response.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, b'helloworld')
r.write('!')
self.assertEqual(r.content, b'helloworld!')
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse('abc')
i = iter(r)
self.assertEqual(list(i), [b'abc'])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr('helloworld'))
self.assertEqual(r.content, b'helloworld')
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(['abc'])
r.write('def')
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b'abcdef')
# with Content-Encoding header
r = HttpResponse()
r['Content-Encoding'] = 'winning'
r.write(b'abc')
r.write(b'def')
self.assertEqual(r.content, b'abcdef')
def test_stream_interface(self):
r = HttpResponse('asdf')
self.assertEqual(r.getvalue(), b'asdf')
r = HttpResponse()
self.assertIs(r.writable(), True)
r.writelines(['foo\n', 'bar\n', 'baz\n'])
self.assertEqual(r.content, b'foo\nbar\nbaz\n')
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
'mailto:test@example.com',
'file:///etc/passwd',
]
for url in bad_urls:
with self.assertRaises(DisallowedRedirect):
HttpResponseRedirect(url)
with self.assertRaises(DisallowedRedirect):
HttpResponsePermanentRedirect(url)
class HttpResponseSubclassesTests(SimpleTestCase):
def test_redirect(self):
response = HttpResponseRedirect('/redirected/')
self.assertEqual(response.status_code, 302)
# Standard HttpResponse init args can be used
response = HttpResponseRedirect(
'/redirected/',
content='The resource has temporarily moved',
content_type='text/html',
)
self.assertContains(response, 'The resource has temporarily moved', status_code=302)
self.assertEqual(response.url, response['Location'])
def test_redirect_lazy(self):
"""Make sure HttpResponseRedirect works with lazy strings."""
r = HttpResponseRedirect(lazystr('/redirected/'))
self.assertEqual(r.url, '/redirected/')
def test_redirect_repr(self):
response = HttpResponseRedirect('/redirected/')
expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">'
self.assertEqual(repr(response), expected)
def test_invalid_redirect_repr(self):
"""
If HttpResponseRedirect raises DisallowedRedirect, its __repr__()
should work (in the debug view, for example).
"""
response = HttpResponseRedirect.__new__(HttpResponseRedirect)
with self.assertRaisesMessage(DisallowedRedirect, "Unsafe redirect to URL with protocol 'ssh'"):
HttpResponseRedirect.__init__(response, 'ssh://foo')
expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="ssh://foo">'
self.assertEqual(repr(response), expected)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn('content-type', response)
def test_not_modified_repr(self):
response = HttpResponseNotModified()
self.assertEqual(repr(response), '<HttpResponseNotModified status_code=304>')
def test_not_allowed(self):
response = HttpResponseNotAllowed(['GET'])
self.assertEqual(response.status_code, 405)
# Standard HttpResponse init args can be used
response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html')
self.assertContains(response, 'Only the GET method is allowed', status_code=405)
def test_not_allowed_repr(self):
response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain')
expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
self.assertEqual(repr(response), expected)
def test_not_allowed_repr_no_content_type(self):
response = HttpResponseNotAllowed(('GET', 'POST'))
del response['Content-Type']
self.assertEqual(repr(response), '<HttpResponseNotAllowed [GET, POST] status_code=405>')
class JsonResponseTests(SimpleTestCase):
def test_json_response_non_ascii(self):
data = {'key': 'łóżko'}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(
TypeError,
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False'
):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse('foobar', safe=False)
self.assertEqual(json.loads(response.content.decode()), 'foobar')
def test_json_response_list(self):
response = JsonResponse(['foo', 'bar'], safe=False)
self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])
def test_json_response_uuid(self):
u = uuid.uuid4()
response = JsonResponse(u, safe=False)
self.assertEqual(json.loads(response.content.decode()), str(u))
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({'foo': 'bar'})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})
def test_json_response_passing_arguments_to_json_dumps(self):
response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2})
self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}')
class StreamingHttpResponseTests(SimpleTestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(['hello', 'world']))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b'hello', b'world'])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(['abc', 'def'])
self.assertEqual(list(r), [b'abc', b'def'])
self.assertEqual(list(r), [])
# iterating over strings still yields bytestring chunks.
r.streaming_content = iter(['hello', 'café'])
chunks = list(r)
        # '\xc3\xa9' == chr(233).encode()
self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, 'content'))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = 'xyz'
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, 'streaming_content'))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(['abc', 'def'])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b'ABC', b'DEF'])
# coercing a streaming response to bytes doesn't return a complete HTTP
# message like a regular response does. it only gives us the headers.
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(bytes(r), b'Content-Type: text/html; charset=utf-8')
# and this won't consume its content.
self.assertEqual(list(r), [b'hello', b'world'])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(['hello', 'world']))
with self.assertRaises(Exception):
r.write('!')
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(SimpleTestCase):
def setUp(self):
# Disable the request_finished signal during this test
# to avoid interfering with the database connection.
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
        # HttpResponse consumes file-like content immediately, so the file is
        # closed as soon as it is assigned.
file1 = open(filename)
r = HttpResponse(file1)
self.assertTrue(file1.closed)
r.close()
        # when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
        # when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
"""Semicolons and commas are encoded."""
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(';')) # IE compat
self.assertNotIn(",", c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""Semicolons and commas are decoded."""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c['test'].value, c3['test'])
def test_nonstandard_keys(self):
"""
A single non-standard cookie name doesn't affect all cookies (#13007).
"""
self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes'))
def test_repeated_nonstandard_keys(self):
"""
A repeated non-standard name doesn't affect all cookies (#15852).
"""
self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes'))
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
# Cookies with ':' character in their name.
self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
# Cookies with '[' and ']'.
self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en'))
# Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
# More characters the spec forbids.
self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
# Unicode characters. The spec only allows ASCII.
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': 'André Bessette'})
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''})
def test_samesite(self):
c = SimpleCookie('name=value; samesite=lax; httponly')
self.assertEqual(c['name']['samesite'], 'lax')
self.assertIn('SameSite=lax', c.output())
def test_httponly_after_load(self):
c = SimpleCookie()
c.load("name=val")
c['name']['httponly'] = True
self.assertTrue(c['name']['httponly'])
def test_load_dict(self):
c = SimpleCookie()
c.load({'name': 'val'})
self.assertEqual(c['name'].value, 'val')
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = 'Set-Cookie: %s' % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
| 39.319285
| 118
| 0.61016
|
7fea4873af14d679f304605cf82ed0ac7d9b8531
| 5,751
|
py
|
Python
|
ptrace_inst/libwrap.py
|
liona24/ptrace-inst
|
86e410b97dd08dc5a809f4c6e06e520697934679
|
[
"MIT"
] | null | null | null |
ptrace_inst/libwrap.py
|
liona24/ptrace-inst
|
86e410b97dd08dc5a809f4c6e06e520697934679
|
[
"MIT"
] | null | null | null |
ptrace_inst/libwrap.py
|
liona24/ptrace-inst
|
86e410b97dd08dc5a809f4c6e06e520697934679
|
[
"MIT"
] | null | null | null |
import os
import enum
import ctypes
from typing import Callable, Any, Type
SHARED_LIB_PATH = os.path.join(os.path.dirname(__file__), "..", "build", "libptrace_inst.so")
LIB = ctypes.cdll.LoadLibrary(SHARED_LIB_PATH)
class _ProcessHandle(ctypes.Structure):
_fields_ = [
("pid", ctypes.c_int),
("rip", ctypes.c_uint64),
("_process", ctypes.c_void_p),
]
class UserRegsStruct(ctypes.Structure):
_fields_ = [
("r15", ctypes.c_ulonglong),
("r14", ctypes.c_ulonglong),
("r13", ctypes.c_ulonglong),
("r12", ctypes.c_ulonglong),
("rbp", ctypes.c_ulonglong),
("rbx", ctypes.c_ulonglong),
("r11", ctypes.c_ulonglong),
("r10", ctypes.c_ulonglong),
("r9", ctypes.c_ulonglong),
("r8", ctypes.c_ulonglong),
("rax", ctypes.c_ulonglong),
("rcx", ctypes.c_ulonglong),
("rdx", ctypes.c_ulonglong),
("rsi", ctypes.c_ulonglong),
("rdi", ctypes.c_ulonglong),
("orig_rax", ctypes.c_ulonglong),
("rip", ctypes.c_ulonglong),
("cs", ctypes.c_ulonglong),
("eflags", ctypes.c_ulonglong),
("rsp", ctypes.c_ulonglong),
("ss", ctypes.c_ulonglong),
("fs_base", ctypes.c_ulonglong),
("gs_base", ctypes.c_ulonglong),
("ds", ctypes.c_ulonglong),
("es", ctypes.c_ulonglong),
("fs", ctypes.c_ulonglong),
("gs", ctypes.c_ulonglong),
]
UserRegsStructRef = ctypes.POINTER(UserRegsStruct)
class BranchInstruction(enum.IntFlag):
BI_JUMP = 1 << 0
BI_CALL = 1 << 1
BI_RET = 1 << 2
BI_IRET = 1 << 3
BI_JUMP_REL = 1 << 4
BI_ALL = BI_JUMP | BI_CALL | BI_RET | BI_IRET | BI_JUMP_REL
_start_process = LIB.pi_start_process
_start_process.argtypes = [
ctypes.c_char_p,
ctypes.POINTER(ctypes.c_char_p),
ctypes.POINTER(ctypes.c_char_p),
]
_start_process.restype = ctypes.POINTER(_ProcessHandle)
_run_until = LIB.pi_run_until
_run_until.argtypes = [
ctypes.POINTER(_ProcessHandle),
ctypes.c_uint64
]
_run_until.restype = ctypes.c_int
_run_continue = LIB.pi_run_continue
_run_continue.argtypes = [
ctypes.POINTER(_ProcessHandle),
]
_run_continue.restype = ctypes.c_int
_find_next_basic_block = LIB.pi_find_next_basic_block
_find_next_basic_block.argtypes = [
ctypes.POINTER(_ProcessHandle),
ctypes.POINTER(ctypes.c_uint64),
ctypes.c_uint32,
]
_find_next_basic_block.restype = ctypes.c_int
HOOK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.POINTER(_ProcessHandle), ctypes.c_uint64, UserRegsStructRef, ctypes.c_void_p)
_hook_add = LIB.pi_hook_add
_hook_add.argtypes = [
ctypes.POINTER(_ProcessHandle),
ctypes.c_uint64,
HOOK,
ctypes.c_void_p
]
_hook_add.restype = ctypes.c_int
_hook_remove = LIB.pi_hook_remove
_hook_remove.argtypes = [
ctypes.POINTER(_ProcessHandle),
ctypes.c_uint64
]
_hook_remove.restype = ctypes.c_int
_read_memory = LIB.pi_read_memory
_read_memory.argtypes = [
ctypes.POINTER(_ProcessHandle),
ctypes.c_uint64,
ctypes.c_void_p,
ctypes.c_size_t
]
_read_memory.restype = ctypes.c_int
_close_process = LIB.pi_close_process
_close_process.argtypes = [
ctypes.POINTER(_ProcessHandle),
]
_close_process.restype = ctypes.c_int
class Process():
def __init__(self):
self._handle = None
self._hooks = {}
@property
def rip(self):
return self._handle.contents.rip
@property
def pid(self):
return self._handle.contents.pid
def _check(self, code: int):
if code != 0:
raise RuntimeError(f"Return value {code} != 0!")
    def _wrap_hook(self, hook: Callable[["Process", int, UserRegsStructRef, Any], int], user_data_type: Type):
        @HOOK
        def wrapper(_handle, addr, regs, data):
            # `data` arrives as a raw void pointer to the object passed to hook_add();
            # cast it to a typed pointer so the hook can dereference the user data
            # (ctypes.cast() requires a pointer type as its target).
            typed_data = ctypes.cast(data, ctypes.POINTER(user_data_type))
            return hook(self, addr, regs, typed_data)
        return wrapper
def start_process(self, pathname: str, argv: list[str], envp: list[str]):
c_argv = (ctypes.c_char_p * (len(argv) + 1))()
c_argv[:-1] = list(map(str.encode, argv))
c_argv[-1] = None
c_envp = (ctypes.c_char_p * (len(envp) + 1))()
c_envp[:-1] = list(map(str.encode, envp))
c_envp[-1] = None
        rv = _start_process(pathname.encode(), c_argv, c_envp)
        if not rv:  # a failed start yields a NULL pointer, which is falsy but never None
            raise RuntimeError("Could not start process: " + pathname)
        self._handle = rv
def run_until(self, addr: int):
self._check(_run_until(self._handle, addr))
def run_continue(self):
self._check(_run_continue(self._handle))
def find_next_basic_block(self, instruction_mask: BranchInstruction) -> int:
bb = ctypes.c_uint64()
self._check(_find_next_basic_block(self._handle, ctypes.byref(bb), int(instruction_mask)))
return bb.value
def hook_add(self, addr: int, hook: Callable[["Process", int, UserRegsStructRef, Any], int], user_data: Any):
self._hooks[addr] = self._wrap_hook(hook, type(user_data))
self._check(_hook_add(self._handle, addr, self._hooks[addr], ctypes.byref(user_data)))
def hook_remove(self, addr: int):
self._check(_hook_remove(self._handle, addr))
del self._hooks[addr]
def read_memory(self, addr: int, size: int) -> bytes:
if size % ctypes.sizeof(ctypes.c_size_t) != 0:
raise ValueError(f"Sorry, size needs to be aligned to {ctypes.sizeof(ctypes.c_size_t)} bytes")
buf = ctypes.create_string_buffer(size)
self._check(_read_memory(self._handle, addr, buf, size))
return buf.raw
def close_process(self):
self._check(_close_process(self._handle))
self._handle = None
self._hooks.clear()
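# ---------------------------------------------------------------------------
# Example usage (illustrative sketch): assumes the shared library has been
# built at SHARED_LIB_PATH and that "/bin/ls" exists on the host; the loop
# count and the BI_ALL instruction mask are arbitrary choices.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    proc = Process()
    proc.start_process("/bin/ls", ["/bin/ls"], [])
    try:
        # step through a few basic blocks and print where each one starts
        for _ in range(5):
            bb_start = proc.find_next_basic_block(BranchInstruction.BI_ALL)
            print(f"next basic block at {bb_start:#x}")
    except RuntimeError as exc:
        # _check() raises once the library reports a non-zero code,
        # e.g. after the traced child has exited
        print("tracing stopped:", exc)
    finally:
        try:
            proc.close_process()
        except RuntimeError:
            pass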
| 28.755
| 122
| 0.655712
|
9e4a6fc00ed60b8160dcb138671aeafce87860c5
| 7,604
|
py
|
Python
|
lyacompiler/lya_builtins.py
|
MC911-MV-1s2016/lya-compiler-python
|
b876857161ca988a419a3c27a2f1cbb5214c50e6
|
[
"BSD-3-Clause"
] | null | null | null |
lyacompiler/lya_builtins.py
|
MC911-MV-1s2016/lya-compiler-python
|
b876857161ca988a419a3c27a2f1cbb5214c50e6
|
[
"BSD-3-Clause"
] | null | null | null |
lyacompiler/lya_builtins.py
|
MC911-MV-1s2016/lya-compiler-python
|
b876857161ca988a419a3c27a2f1cbb5214c50e6
|
[
"BSD-3-Clause"
] | null | null | null |
# ------------------------------------------------------------
# MC911 - Compiler construction laboratory.
# IC - UNICAMP
#
# RA094139 - Marcelo Mingatos de Toledo
# RA093175 - Victor Fernando Pompeo Barbosa
#
# lya_builtins.py
# Lya builtins.
#
# ------------------------------------------------------------
from .lya_errors import *
from .lya_lvminstruction import *
__all__ = [
'LTF',
'LyaTypeFactory',
'LyaType',
'LyaIntType',
'LyaBoolType',
'LyaCharType',
'LyaStringType',
'LyaArrayType',
'LyaVoidType',
'LyaRefType'
]
# Types
class LyaType(object):
"""Base class that represents a Lya builtin type.
"""
_name = None
_unary_ops = None
_binary_ops = None
_rel_ops = None
_unary_opcodes = None
_binary_opcodes = None
_rel_opcodes = None
def __init__(self):
pass
def __str__(self):
return self.name
def __eq__(self, other):
if not isinstance(other, LyaType):
return False
return self.name == other.name
    def __ne__(self, other):
        # keep consistent with __eq__, which compares type names
        return not self.__eq__(other)
def get_unary_instruction(self, op):
instruction = self._unary_opcodes.get(op, None)
if instruction is None:
raise LyaGenericError(-1, None, "Unary operation error")
return instruction()
def get_binary_instruction(self, op):
instruction = self._binary_opcodes.get(op, None)
if instruction is None:
raise LyaGenericError(-1, None, "Binary operation error")
return instruction()
def get_relational_instruction(self, op):
instruction = self._rel_opcodes.get(op, None)
if instruction is None:
raise LyaGenericError(-1, None, "Relational operation error")
return instruction()
@property
def name(self) -> str:
return self._name
@property
def memory_size(self) -> int:
return None
@property
def binary_ops(self):
return self._binary_ops
@property
def relational_ops(self):
return self._rel_ops
@property
def unary_ops(self):
return self._unary_ops
class LyaBaseType(LyaType):
"""Class that represents a lya base type.
Meant to be used as singleton via 'get_instance' method.
"""
_instance = None
@classmethod
def get_instance(cls) -> 'LyaBaseType':
if cls._instance is None:
cls._instance = cls()
return cls._instance
@property
def memory_size(self):
return 1
class LyaVoidType(LyaBaseType):
"""Class that represents a void type in lya.
"""
_name = "void"
_instance = None
@property
def memory_size(self):
return 0
class LyaIntType(LyaBaseType):
"""Class that represents an integer type in lya.
"""
_name = "int"
_unary_ops = {'+', '-'}
_binary_ops = {'+', '-', '*', '/', '%'}
_rel_ops = {'==', '!=', '>', '>=', '<', '<='}
_unary_opcodes = {
'-': NEG,
'+': NOP
}
_binary_opcodes = {
'+': ADD,
'-': SUB,
'*': MUL,
'/': DIV,
'%': MOD
}
_rel_opcodes = {
'==': EQU,
'!=': NEQ,
'<': LES,
'<=': LEQ,
'>': GRT,
'>=': GRE
}
_instance = None
class LyaBoolType(LyaBaseType):
"""Class that represents a boolean type in lya.
"""
_name = "bool"
_unary_ops = {'!'}
_binary_ops = {}
_rel_ops = {'&&', '||', '==', '!=', '<', '<=', '>', '>='}
_unary_opcodes = {
'!': NOT
}
_binary_opcodes = {}
_rel_opcodes = {
'&&': AND,
'||': LOR,
'==': EQU,
'!=': NEQ,
'<': LES,
'<=': LEQ,
'>': GRT,
'>=': GRE
}
_instance = None
class LyaCharType(LyaBaseType):
"""Class that represents a character type in lya.
"""
_name = "char"
_unary_ops = {}
_binary_ops = {}
_rel_ops = {'==', '!=', '>', '>=', '<', '<='}
_unary_opcodes = {}
_binary_opcodes = {}
_rel_opcodes = {}
_instance = None
class LyaRefType(LyaType):
"""Lya Type that references another LyaType.
"""
_name = "ref"
_unary_ops = {"->"}
_binary_ops = {}
_rel_ops = {"==", "!="}
_unary_opcodes = {"->": "ldr"}
_binary_opcodes = {}
_rel_opcodes = {"==": "equ",
"!=": "neq"}
def __init__(self, referenced_type: LyaType):
super().__init__()
self.referenced_type = referenced_type # type : LyaType
# TODO: Can assign <- other
@property
def name(self):
return "{0} {1}".format(self._name, self.referenced_type.name)
@property
def memory_size(self):
return self.referenced_type.memory_size
class LyaArrayType(LyaRefType):
"""Lya Type that represents an array.
"""
_name = "array"
_unary_ops = {}
_binary_ops = {}
_rel_ops = {}
_unary_opcodes = {}
_binary_opcodes = {}
_rel_opcodes = {}
# index_ranges = [(lower, upper)]
    def __init__(self, reference_type: LyaType, index_ranges: list):
        # the outermost range belongs to this array; any remaining ranges form
        # a nested array type that becomes the referenced element type
        index_range = index_ranges[0]
        element_type = reference_type
        if len(index_ranges) > 1:
            element_type = LyaArrayType(reference_type, index_ranges[1:])
        super().__init__(element_type)
        self.index_range = index_range
        self.length = index_range[1] - index_range[0] + 1
        self._memory_size = self.length * self.referenced_type.memory_size
@property
def memory_size(self):
return self._memory_size
def get_referenced_type(self, depth) -> LyaType:
if depth == 1:
return self.referenced_type
if not isinstance(self.referenced_type, LyaArrayType):
return None
else:
return self.referenced_type.get_referenced_type(depth-1)
class LyaStringType(LyaType):
"""Lya Type that represents a string.
"""
_name = "chars"
_unary_ops = {}
_binary_ops = {'+'}
_rel_ops = {'==', '!='}
_unary_opcodes = {}
_binary_opcodes = {}
_rel_opcodes = {}
def __init__(self, length: int):
super().__init__()
self.length = length
@property
def memory_size(self):
return self.length
class LyaTypeFactory(object):
"""Lya Types Factory.
"""
@staticmethod
def void_type() -> LyaVoidType:
return LyaVoidType.get_instance()
@staticmethod
def int_type() -> LyaIntType:
return LyaIntType.get_instance()
@staticmethod
def bool_type() -> LyaBoolType:
return LyaBoolType.get_instance()
@staticmethod
def char_type() -> LyaCharType:
return LyaCharType.get_instance()
@staticmethod
def ref_type(referenced_type: LyaType) -> LyaRefType:
return LyaRefType(referenced_type)
@staticmethod
def array_type(referenced_type: LyaType, index_ranges) -> LyaArrayType:
return LyaArrayType(referenced_type, index_ranges)
@staticmethod
def string_type(length) -> LyaStringType:
return LyaStringType(length)
@staticmethod
def base_type_from_string(name: str) -> LyaBaseType:
if name == LTF.int_type().name:
return LTF.int_type()
if name == LTF.bool_type().name:
return LTF.bool_type()
if name == LTF.char_type().name:
return LTF.char_type()
return None
LTF = LyaTypeFactory
# 6 - Visit Exps
# 10 - Visit Locs
# 11 - Visit Assigns
# 12 - Visit Slices
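# Example usage (illustrative sketch; the array shape below is arbitrary and
# the opcode objects ultimately come from lya_lvminstruction):
def _example_usage():
    int_t = LTF.int_type()
    assert int_t is LTF.int_type()                    # base types are singletons
    assert '+' in int_t.binary_ops                    # int supports addition
    arr_t = LTF.array_type(int_t, [(1, 10), (0, 4)])  # int array[1:10][0:4]
    assert arr_t.memory_size == 10 * 5                # 10 rows of 5 one-word ints
    # int_t.get_binary_instruction('+') would return the corresponding
    # LVM ADD instruction instance
    return arr_t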
| 22.430678
| 75
| 0.569569
|
780de3b291b1ddcd03cb83970fc6cfcf297887e4
| 2,636
|
py
|
Python
|
Discussion1.py
|
ai-se/TCP2020
|
5480547ae3be025f7b0c7c15768be5105757fa98
|
[
"MIT"
] | 1
|
2020-08-11T15:16:08.000Z
|
2020-08-11T15:16:08.000Z
|
Discussion1.py
|
ai-se/TCP2020
|
5480547ae3be025f7b0c7c15768be5105757fa98
|
[
"MIT"
] | null | null | null |
Discussion1.py
|
ai-se/TCP2020
|
5480547ae3be025f7b0c7c15768be5105757fa98
|
[
"MIT"
] | null | null | null |
import pandas as pd
# import numpy as np
# import sys
# import statistics
def read_data(path):
data = pd.read_csv(path)
data = data.to_numpy()
return data
def main():
projects = ['TCT_deeplearning4j@deeplearning4j.csv', 'TCT_diaspora@diaspora.csv',
'TCT_eclipse@jetty-project.csv',
'TCT_facebook@presto.csv', 'TCT_galaxyproject@galaxy.csv', 'TCT_Graylog2@graylog2-server.csv',
'TCT_jruby@jruby.csv', 'TCT_languagetool-org@languagetool.csv',
'TCT_locomotivecms@engine.csv', 'TCT_loomio@loomio.csv', 'TCT_materialsproject@pymatgen.csv',
'TCT_mdanalysis@mdanalysis.csv', 'TCT_middleman@middleman.csv', 'TCT_nutzam@nutz.csv',
'TCT_ocpsoft@rewrite.csv',
'TCT_openforcefield@openforcefield.csv', 'TCT_openSUSE@open-build-service.csv', 'TCT_parsl@parsl.csv',
'TCT_puppetlabs@puppet.csv', 'TCT_radical-sybertools@radical.csv', 'TCT_rails@rails.csv',
'TCT_reactionMechanismGenerator@RMG-Py.csv', 'TCT_rspec@rspec-core.csv', 'TCT_spotify@luigi.csv',
'TCT_square@okhttp.csv', 'TCT_structr@structr.csv', 'TCT_thinkaurelius@titan.csv',
'TCT_unidata@metpy.csv',
'TCT_Unidata@thredds.csv', 'TCT_yt-project@yt.csv']
for index in range(len(projects)):
file_path = 'data/' + projects[index]
data = read_data(file_path)
print("-------------------------------")
print(projects[index])
max_fail = 0
min_fail = float('inf')
fail_number = []
for row in data:
temp_row = row[1:len(row)]
cur_failure = 0
for tc in temp_row:
if tc == "F":
cur_failure += 1
if cur_failure > max_fail:
max_fail = cur_failure
if cur_failure < min_fail:
min_fail = cur_failure
fail_number.append(cur_failure)
range_10 = int(len(fail_number) * 0.1)
range_30 = int(len(fail_number) * 0.3)
range_50 = int(len(fail_number) * 0.5)
range_70 = int(len(fail_number) * 0.7)
range_90 = int(len(fail_number) * 0.9)
print("10%: " + str(sorted(fail_number)[range_10]))
print("30%: " + str(sorted(fail_number)[range_30]))
print("50%: " + str(sorted(fail_number)[range_50]))
print("70%: " + str(sorted(fail_number)[range_70]))
print("90%: " + str(sorted(fail_number)[range_90]))
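# Alternative percentile computation (illustrative sketch, not called by main):
# main() approximates the 10/30/50/70/90 percent points by sorting and index
# truncation; numpy.percentile expresses the same idea directly, although its
# default linear interpolation can give slightly different values.
def fail_percentiles(fail_number):
    import numpy as np  # imported locally because main() itself does not need numpy
    return {p: float(np.percentile(fail_number, p)) for p in (10, 30, 50, 70, 90)}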
if __name__ == "__main__":
main()
| 37.657143
| 119
| 0.572838
|
ce1ffd4ff091528043e56856f054a84072e8dc2d
| 872
|
py
|
Python
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt4.Qwt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 3
|
2018-11-27T06:30:23.000Z
|
2021-05-30T15:56:32.000Z
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt4.Qwt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 1
|
2018-11-15T02:00:31.000Z
|
2021-12-06T02:20:32.000Z
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt4.Qwt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 1
|
2018-04-04T12:38:47.000Z
|
2018-04-04T12:38:47.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import eval_statement
hiddenimports = ["PyQt4.QtCore", "PyQt4.QtGui", "PyQt4.QtSvg"]
if eval_statement("from PyQt4 import Qwt5; print(hasattr(Qwt5, 'toNumpy'))"):
hiddenimports.append("numpy")
if eval_statement("from PyQt4 import Qwt5; print(hasattr(Qwt5, 'toNumeric'))"):
hiddenimports.append("Numeric")
if eval_statement("from PyQt4 import Qwt5; print(hasattr(Qwt5, 'toNumarray'))"):
hiddenimports.append("numarray")
| 41.52381
| 80
| 0.620413
|
d02adf38cf6c6216afbd122cc88e9799362a9f7d
| 36,516
|
py
|
Python
|
cea/demand/hourly_procedure_heating_cooling_system_load.py
|
architecture-building-systems/cea-toolbox
|
bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6
|
[
"MIT"
] | 121
|
2017-08-15T20:10:22.000Z
|
2022-03-24T01:25:42.000Z
|
cea/demand/hourly_procedure_heating_cooling_system_load.py
|
architecture-building-systems/cea-toolbox
|
bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6
|
[
"MIT"
] | 2,121
|
2017-07-27T12:02:01.000Z
|
2022-03-31T16:39:28.000Z
|
cea/demand/hourly_procedure_heating_cooling_system_load.py
|
architecture-building-systems/cea-toolbox
|
bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6
|
[
"MIT"
] | 42
|
2017-09-19T09:59:56.000Z
|
2022-02-19T20:19:56.000Z
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from cea.demand import airconditioning_model, rc_model_SIA, control_heating_cooling_systems, \
space_emission_systems, latent_loads, constants
__author__ = "Gabriel Happle"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Gabriel Happle"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "thomas@arch.ethz.ch"
__status__ = "Production"
# this is used in 'detailed_thermal_balance_to_tsd'
B_F = constants.B_F
def calc_heating_cooling_loads(bpr, tsd, t, config):
"""
:param bpr:
:param tsd:
:param t:
:return:
"""
# first check for season
if control_heating_cooling_systems.is_heating_season(t, bpr)\
and not control_heating_cooling_systems.is_cooling_season(t, bpr):
# +++++++++++++++++++++++++++++++++++++++++++
# HEATING
# +++++++++++++++++++++++++++++++++++++++++++
# check system
if not control_heating_cooling_systems.has_heating_system(bpr.hvac["class_hs"]) \
or not control_heating_cooling_systems.heating_system_is_active(tsd, t):
# no system = no loads
rc_model_temperatures = calc_rc_no_loads(bpr, tsd, t, config)
elif control_heating_cooling_systems.has_radiator_heating_system(bpr)\
or control_heating_cooling_systems.has_floor_heating_system(bpr):
# radiator or floor heating
rc_model_temperatures = calc_heat_loads_radiator(bpr, t, tsd, config)
tsd['Ehs_lat_aux'][t] = 0 # TODO
# elif has_local_ac_heating_system:
# TODO: here could be a heating system using the mini-split unit ("T5")
elif control_heating_cooling_systems.has_central_ac_heating_system(bpr):
rc_model_temperatures = calc_heat_loads_central_ac(bpr, t, tsd, config)
else:
# message and no heating system
warnings.warn('Unknown heating system. Calculation without system.')
# no system = no loads
rc_model_temperatures = calc_rc_no_loads(bpr, tsd, t, config)
# update tsd
update_tsd_no_cooling(tsd, t)
# for dashboard
detailed_thermal_balance_to_tsd(tsd, bpr, t, rc_model_temperatures)
elif control_heating_cooling_systems.is_cooling_season(t, bpr) \
and not control_heating_cooling_systems.is_heating_season(t, bpr):
# +++++++++++++++++++++++++++++++++++++++++++
# COOLING
# +++++++++++++++++++++++++++++++++++++++++++
# check system
if (not control_heating_cooling_systems.has_cooling_system(bpr.hvac["class_cs"])
or not control_heating_cooling_systems.cooling_system_is_active(bpr, tsd, t)):
# no system = no loads
rc_model_temperatures = calc_rc_no_loads(bpr, tsd, t, config)
elif control_heating_cooling_systems.has_local_ac_cooling_system(bpr):
rc_model_temperatures = calc_cool_loads_mini_split_ac(bpr, t, tsd, config)
elif control_heating_cooling_systems.has_central_ac_cooling_system(bpr):
rc_model_temperatures = calc_cool_loads_central_ac(bpr, t, tsd, config)
elif control_heating_cooling_systems.has_3for2_cooling_system(bpr):
rc_model_temperatures = calc_cool_loads_3for2(bpr, t, tsd, config)
elif control_heating_cooling_systems.has_ceiling_cooling_system(bpr) or \
control_heating_cooling_systems.has_floor_cooling_system(bpr):
rc_model_temperatures = calc_cool_loads_radiator(bpr, t, tsd, config)
else:
# message and no cooling system
warnings.warn('Unknown cooling system. Calculation without system.')
# no system = no loads
rc_model_temperatures = calc_rc_no_loads(bpr, tsd, t, config)
# update tsd
update_tsd_no_heating(tsd, t)
# for dashboard
detailed_thermal_balance_to_tsd(tsd, bpr, t, rc_model_temperatures)
else:
warnings.warn('Timestep %s not in heating season nor cooling season' % t)
calc_rc_no_loads(bpr, tsd, t, config)
return
def calc_heat_loads_radiator(bpr, t, tsd, config):
"""
Procedure for hourly heating system load calculation for a building with a radiative heating system.
Gabriel Happle, February 2018
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return:
"""
# (1) The RC-model gives the sensible energy demand for the hour
# calc rc model sensible demand
qh_sen_rc_demand, rc_model_temperatures = calc_rc_heating_demand(bpr=bpr, tsd=tsd, t=t, config=config)
# (2) A radiative system does not act on humidity
# no action on humidity
tsd['g_hu_ld'][t] = 0.0 # no humidification or dehumidification
tsd['g_dhu_ld'][t] = 0.0
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t) # moisture balance for zone
# (3) Results are passed to tsd
# write sensible loads to tsd
tsd['Qhs_sen_rc'][t] = qh_sen_rc_demand # demand is load
tsd['Qhs_sen_shu'][t] = qh_sen_rc_demand
tsd['Qhs_sen_ahu'][t] = 0.0
tsd['sys_status_ahu'][t] = 'no system'
tsd['Qhs_sen_aru'][t] = 0.0
tsd['sys_status_aru'][t] = 'no system'
tsd['Qhs_sen_sys'][t] = qh_sen_rc_demand # sum system loads
# write temperatures to rc-model
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
tsd['Qhs_lat_sys'][t] = 0.0
# mass flows to tsd
tsd['ma_sup_hs_ahu'][t] = 0.0
tsd['ta_sup_hs_ahu'][t] = np.nan
tsd['ta_re_hs_ahu'][t] = np.nan
tsd['ma_sup_hs_aru'][t] = 0.0
tsd['ta_sup_hs_aru'][t] = np.nan
tsd['ta_re_hs_aru'][t] = np.nan
# (4) Calculate emission losses and pass to tsd
# emission losses
q_em_ls_heating = space_emission_systems.calc_q_em_ls_heating(bpr, tsd, t)
tsd['Qhs_em_ls'][t] = q_em_ls_heating
# (5) System status to tsd
if qh_sen_rc_demand > 0.0:
tsd['sys_status_sen'][t] = 'On'
else:
tsd['sys_status_sen'][t] = 'Off'
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def calc_cool_loads_radiator(bpr, t, tsd, config):
"""
Procedure for hourly cooling system load calculation for a building with a radiative cooling system.
Gabriel Happle, February 2018
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return:
"""
# (1) The RC-model gives the sensible energy demand for the hour
# calc rc model sensible demand
qc_sen_rc_demand, rc_model_temperatures = calc_rc_cooling_demand(bpr, tsd, t, config)
# (2) A radiative system does not act on humidity
# no action on humidity
tsd['g_hu_ld'][t] = 0.0 # no humidification or dehumidification
tsd['g_dhu_ld'][t] = 0.0
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t) # moisture balance for zone
# (3) Results are passed to tsd
# write sensible loads to tsd
tsd['Qcs_sen_rc'][t] = qc_sen_rc_demand # demand is load
tsd['Qcs_sen_scu'][t] = qc_sen_rc_demand
tsd['Qcs_sen_ahu'][t] = 0.0
tsd['sys_status_ahu'][t] = 'no system'
tsd['Qcs_sen_aru'][t] = 0.0
tsd['sys_status_aru'][t] = 'no system'
tsd['Qcs_sen_sys'][t] = qc_sen_rc_demand # sum system loads
# write temperatures to rc-model
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
tsd['Qcs_lat_ahu'][t] = 0.0
tsd['Qcs_lat_aru'][t] = 0.0
tsd['Qcs_lat_sys'][t] = 0.0
# mass flows to tsd
tsd['ma_sup_cs_ahu'][t] = 0.0
tsd['ta_sup_cs_ahu'][t] = np.nan
tsd['ta_re_cs_ahu'][t] = np.nan
tsd['ma_sup_cs_aru'][t] = 0.0
tsd['ta_sup_cs_aru'][t] = np.nan
tsd['ta_re_cs_aru'][t] = np.nan
# (4) Calculate emission losses and pass to tsd
# emission losses
q_em_ls_cooling = space_emission_systems.calc_q_em_ls_cooling(bpr, tsd, t)
tsd['Qcs_em_ls'][t] = q_em_ls_cooling
# (5) System status to tsd
if qc_sen_rc_demand < 0.0:
tsd['sys_status_sen'][t] = 'On'
else:
tsd['sys_status_sen'][t] = 'Off'
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def calc_heat_loads_central_ac(bpr, t, tsd, config):
"""
Procedure for hourly heating system load calculation for a building with a central AC heating system.
Gabriel Happle, February 2018
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return:
"""
# (0) Extract values from tsd
# get values from tsd
m_ve_mech = tsd['m_ve_mech'][t]
t_ve_mech_after_hex = tsd['theta_ve_mech'][t]
x_ve_mech = tsd['x_ve_mech'][t]
t_int_prev = tsd['T_int'][t - 1]
ta_hs_set = tsd['ta_hs_set'][t]
# (1) The RC-model gives the sensible energy demand for the hour
# calc rc model sensible demand
qh_sen_rc_demand, rc_model_temperatures = calc_rc_heating_demand(bpr, tsd, t, config)
# (2) The load of the central AC unit is determined by the air mass flows and fixed supply temperature
# calc central ac unit load
system_loads_ahu = airconditioning_model.central_air_handling_unit_heating(m_ve_mech, t_ve_mech_after_hex,
x_ve_mech, ta_hs_set, t_int_prev, bpr)
qh_sen_central_ac_load = system_loads_ahu['qh_sen_ahu']
# (3) Check demand vs. central AC heating load
# check for over heating
if qh_sen_central_ac_load > qh_sen_rc_demand >= 0.0:
# case: over heating
qh_sen_aru = 0.0 # no additional heating via air recirculation unit
# update rc model temperatures
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_heating(qh_sen_central_ac_load, bpr, tsd, t,
config)
# ARU values to tsd
ma_sup_hs_aru = 0.0
ta_sup_hs_aru = np.nan
ta_re_hs_aru = np.nan
tsd['sys_status_aru'][t] = 'Off'
tsd['sys_status_ahu'][t] = 'On:over heating'
elif 0.0 <= qh_sen_central_ac_load < qh_sen_rc_demand:
# case: additional heating by air recirculation unit
qh_sen_aru = qh_sen_rc_demand - qh_sen_central_ac_load
# calc recirculation air mass flows
system_loads_aru = airconditioning_model.local_air_recirculation_unit_heating(qh_sen_aru, t_int_prev, bpr)
# update of rc model not necessary
ma_sup_hs_aru = system_loads_aru['ma_sup_hs_aru']
ta_sup_hs_aru = system_loads_aru['ta_sup_hs_aru']
ta_re_hs_aru = system_loads_aru['ta_re_hs_aru']
tsd['sys_status_aru'][t] = 'On'
# check status of ahu
if qh_sen_central_ac_load > 0.0:
tsd['sys_status_ahu'][t] = 'On'
elif qh_sen_central_ac_load == 0.0:
tsd['sys_status_ahu'][t] = 'Off'
# this state happens during sensible demand but zero mechanical ventilation air flow
# (= sufficient infiltration)
elif 0.0 == qh_sen_central_ac_load == qh_sen_rc_demand:
# everything off
qh_sen_aru = 0.0
ma_sup_hs_aru = 0.0
ta_sup_hs_aru = np.nan
ta_re_hs_aru = np.nan
tsd['sys_status_aru'][t] = 'Off'
tsd['sys_status_ahu'][t] = 'Off'
else:
raise Exception("Something went wrong in the central AC heating load calculation.")
# act on humidity
tsd['T_int'][t] = rc_model_temperatures['T_int'] # humidification load needs zone temperature
g_hu_ld = latent_loads.calc_humidification_moisture_load(bpr, tsd, t) # calc local humidification load
tsd['Ehs_lat_aux'][t] = airconditioning_model.electric_humidification_unit(g_hu_ld, m_ve_mech) # calc electricity of humidification unit
tsd['g_hu_ld'][t] = g_hu_ld # humidification
tsd['g_dhu_ld'][t] = 0.0 # no dehumidification
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t) # calculate moisture in zone
# write sensible loads to tsd
tsd['Qhs_sen_rc'][t] = qh_sen_rc_demand
tsd['Qhs_sen_shu'][t] = 0.0
tsd['sys_status_sen'][t] = 'no system'
tsd['Qhs_sen_ahu'][t] = qh_sen_central_ac_load
tsd['Qhs_sen_aru'][t] = qh_sen_aru
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
tsd['Qhs_sen_sys'][t] = qh_sen_central_ac_load + qh_sen_aru # sum system loads
tsd['Qhs_lat_sys'][t] = 0.0
# mass flows to tsd
tsd['ma_sup_hs_ahu'][t] = system_loads_ahu['ma_sup_hs_ahu']
tsd['ta_sup_hs_ahu'][t] = system_loads_ahu['ta_sup_hs_ahu']
tsd['ta_re_hs_ahu'][t] = system_loads_ahu['ta_re_hs_ahu']
tsd['ma_sup_hs_aru'][t] = ma_sup_hs_aru
tsd['ta_sup_hs_aru'][t] = ta_sup_hs_aru
tsd['ta_re_hs_aru'][t] = ta_re_hs_aru
# emission losses
q_em_ls_heating = space_emission_systems.calc_q_em_ls_heating(bpr, tsd, t)
tsd['Qhs_em_ls'][t] = q_em_ls_heating
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def calc_cool_loads_mini_split_ac(bpr, t, tsd, config):
"""
Calculation procedure for cooling system loads of an ARU subsystem of a mini-split AC system
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return:
"""
# (0) Extract values from tsd
# get values from tsd
t_int_prev = tsd['T_int'][t - 1]
x_int_prev = tsd['x_int'][t - 1]
# (1) The RC-model gives the sensible energy demand for the hour
# calculate rc model demand
qc_sen_rc_demand, rc_model_temperatures = calc_rc_cooling_demand(bpr, tsd, t, config)
# (2) The demand is system load of air recirculation unit (ARU)
qc_sen_aru = qc_sen_rc_demand
# "uncontrolled" dehumidification by air recirculation unit
g_dhu_demand_aru = 0.0 # no demand that controls the unit
aru_system_loads = airconditioning_model.local_air_recirculation_unit_cooling(qc_sen_aru, g_dhu_demand_aru,
t_int_prev,
x_int_prev, bpr, t_control=True,
x_control=False)
g_dhu_aru = aru_system_loads['g_dhu_aru']
qc_lat_aru = aru_system_loads['qc_lat_aru']
# action on moisture
tsd['g_hu_ld'][t] = 0.0 # no humidification
tsd['g_dhu_ld'][t] = g_dhu_aru
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t)
# () Values to tsd
tsd['Qcs_sen_rc'][t] = qc_sen_rc_demand
tsd['Qcs_sen_ahu'][t] = 0.0
tsd['Qcs_sen_aru'][t] = qc_sen_aru
tsd['Qcs_sen_scu'][t] = 0.0 # not present in this system
tsd['Qcs_lat_ahu'][t] = 0.0
tsd['Qcs_lat_aru'][t] = qc_lat_aru
tsd['Qcs_sen_sys'][t] = qc_sen_aru # sum system loads
tsd['Qcs_lat_sys'][t] = qc_lat_aru
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
# air flow
tsd['m_ve_rec'][t] = aru_system_loads['ma_sup_cs_aru']
# mass flows to tsd
tsd['ma_sup_cs_ahu'][t] = 0.0
tsd['ta_sup_cs_ahu'][t] = np.nan
tsd['ta_re_cs_ahu'][t] = np.nan
tsd['ma_sup_cs_aru'][t] = aru_system_loads['ma_sup_cs_aru']
tsd['ta_sup_cs_aru'][t] = aru_system_loads['ta_sup_cs_aru']
tsd['ta_re_cs_aru'][t] = aru_system_loads['ta_re_cs_aru']
# () emission losses
q_em_ls_cooling = space_emission_systems.calc_q_em_ls_cooling(bpr, tsd, t)
tsd['Qcs_em_ls'][t] = q_em_ls_cooling
# system status
tsd['sys_status_aru'][t] = 'On:T'
tsd['sys_status_ahu'][t] = 'no system'
tsd['sys_status_sen'][t] = 'no system'
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def calc_cool_loads_central_ac(bpr, t, tsd, config):
"""
Calculation procedure for cooling system loads of AHU and ARU subsystems of a central AC system
Gabriel Happle, Feb. 2018
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return:
"""
# get values from tsd
m_ve_mech = tsd['m_ve_mech'][t]
t_ve_mech_after_hex = tsd['theta_ve_mech'][t]
x_ve_mech = tsd['x_ve_mech'][t]
t_int_prev = tsd['T_int'][t - 1]
x_int_prev = tsd['x_int'][t - 1]
# ***
# RC MODEL
# ***
# calculate rc model demand
qc_sen_rc_demand, rc_model_temperatures = calc_rc_cooling_demand(bpr=bpr, tsd=tsd, t=t, config=config)
# ***
# AHU
# ***
# calculate ahu loads
loads_ahu = airconditioning_model.central_air_handling_unit_cooling(m_ve_mech, t_ve_mech_after_hex, x_ve_mech, bpr)
qc_sen_ahu = loads_ahu['qc_sen_ahu']
qc_lat_ahu = loads_ahu['qc_lat_ahu']
tsd['x_ve_mech'][t] = loads_ahu['x_sup_c_ahu'] # update tsd['x_ve_mech'] is needed for dehumidification
# load calculation
# ***
# ARU
# ***
# calculate recirculation unit dehumidification demand
    # NOTE: this may introduce a small error, because the moisture set point is
    # calculated for the uncorrected zone air temperature (i.e. without over-cooling)
tsd['T_int'][t] = rc_model_temperatures['T_int'] # dehumidification load needs zone temperature
g_dhu_demand_aru = latent_loads.calc_dehumidification_moisture_load(bpr, tsd, t)
# calculate remaining sensible demand to be attained by aru
qc_sen_demand_aru = np.min([0.0, qc_sen_rc_demand - qc_sen_ahu])
# calculate ARU system loads with T and x control activated
aru_system_loads = airconditioning_model.local_air_recirculation_unit_cooling(qc_sen_demand_aru, g_dhu_demand_aru,
t_int_prev, x_int_prev, bpr,
t_control=True, x_control=True)
g_dhu_aru = aru_system_loads['g_dhu_aru']
qc_lat_aru = aru_system_loads['qc_lat_aru']
qc_sen_aru = aru_system_loads['qc_sen_aru']
# ***
# ADJUST RC MODEL TEMPERATURE
# ***
    # TODO: sanity check: if qc_sen_total is smaller than the rc demand, something went wrong in the calculation
qc_sen_total = qc_sen_ahu + qc_sen_aru
# update rc model temperatures
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_cooling(qc_sen_total, bpr, tsd, t, config)
# ***
# ZONE MOISTURE
# ***
# action on moisture
tsd['g_hu_ld'][t] = 0.0 # no humidification
tsd['g_dhu_ld'][t] = g_dhu_aru
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t)
# write to tsd
tsd['Qcs_sen_rc'][t] = qc_sen_rc_demand
tsd['Qcs_sen_ahu'][t] = qc_sen_ahu
tsd['Qcs_sen_aru'][t] = qc_sen_aru
tsd['Qcs_sen_scu'][t] = 0.0 # not present in this system
tsd['Qcs_lat_ahu'][t] = qc_lat_ahu
tsd['Qcs_lat_aru'][t] = qc_lat_aru
tsd['Qcs_sen_sys'][t] = qc_sen_ahu + qc_sen_aru # sum system loads
tsd['Qcs_lat_sys'][t] = qc_lat_ahu + qc_lat_aru
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
# air flow
tsd['m_ve_rec'][t] = aru_system_loads['ma_sup_cs_aru']
# mass flows to tsd
tsd['ma_sup_cs_ahu'][t] = loads_ahu['ma_sup_cs_ahu']
tsd['ta_sup_cs_ahu'][t] = loads_ahu['ta_sup_cs_ahu']
tsd['ta_re_cs_ahu'][t] = loads_ahu['ta_re_cs_ahu']
tsd['ma_sup_cs_aru'][t] = aru_system_loads['ma_sup_cs_aru']
tsd['ta_sup_cs_aru'][t] = aru_system_loads['ta_sup_cs_aru']
tsd['ta_re_cs_aru'][t] = aru_system_loads['ta_re_cs_aru']
# ***
# emission losses
# ***
# emission losses on total sensible load
# TODO: check
q_em_ls_cooling = space_emission_systems.calc_q_em_ls_cooling(bpr, tsd, t)
tsd['Qcs_em_ls'][t] = q_em_ls_cooling
# system status
tsd['sys_status_ahu'][t] = 'On'
tsd['sys_status_aru'][t] = 'On:T/R'
tsd['sys_status_sen'][t] = 'no system'
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def calc_cool_loads_3for2(bpr, t, tsd, config):
"""
Calculation procedure for cooling system loads of AHU, ARU and SCU subsystems of 3for2 system
Gabriel Happle, Feb. 2018
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param tsd: time series data dict
:return: dict of rc_model_temperatures
"""
# get values from tsd
m_ve_mech = tsd['m_ve_mech'][t]
t_ve_mech_after_hex = tsd['theta_ve_mech'][t]
x_ve_mech = tsd['x_ve_mech'][t]
t_int_prev = tsd['T_int'][t - 1]
x_int_prev = tsd['x_int'][t - 1]
# ***
# RC MODEL
# ***
# calculate rc model demand
qc_sen_rc_demand, rc_model_temperatures = calc_rc_cooling_demand(bpr=bpr, tsd=tsd, t=t, config=config)
# ***
# AHU
# ***
# calculate ahu loads
ahu_loads = airconditioning_model.central_air_handling_unit_cooling(m_ve_mech, t_ve_mech_after_hex, x_ve_mech, bpr)
qc_sen_ahu = ahu_loads['qc_sen_ahu']
qc_lat_ahu = ahu_loads['qc_lat_ahu']
tsd['x_ve_mech'][t] = ahu_loads['x_sup_c_ahu']
# ***
# ARU
# ***
# calculate recirculation unit dehumidification demand
tsd['T_int'][t] = rc_model_temperatures['T_int'] # dehumidification load needs zone temperature
    # NOTE: this may introduce a small error, because the moisture set point is
    # calculated for the uncorrected zone air temperature (i.e. without over-cooling)
g_dhu_demand_aru = latent_loads.calc_dehumidification_moisture_load(bpr, tsd, t)
# no sensible demand that controls the ARU
qc_sen_demand_aru = 0.0
# calculate ARU system loads with T and x control activated
aru_system_loads = airconditioning_model.local_air_recirculation_unit_cooling(qc_sen_demand_aru, g_dhu_demand_aru,
t_int_prev, x_int_prev, bpr,
t_control=False, x_control=True)
g_dhu_aru = aru_system_loads['g_dhu_aru']
qc_lat_aru = aru_system_loads['qc_lat_aru']
qc_sen_aru = aru_system_loads['qc_sen_aru']
# ***
# SCU
# ***
# calculate remaining sensible cooling demand to be met by radiative cooling
qc_sen_demand_scu = np.min([0.0, qc_sen_rc_demand - qc_sen_ahu - qc_sen_aru])
# demand is load
qc_sen_scu = qc_sen_demand_scu
# ***
# ADJUST RC MODEL TEMPERATURE
# ***
    # TODO: sanity check: if qc_sen_total is smaller than the rc demand, something went wrong in the calculation
qc_sen_total = qc_sen_ahu + qc_sen_aru + qc_sen_scu
# update rc model temperatures
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_cooling(qc_sen_total, bpr, tsd, t, config)
# ***
# ZONE MOISTURE
# ***
# action on moisture
tsd['g_hu_ld'][t] = 0.0 # no humidification
tsd['g_dhu_ld'][t] = g_dhu_aru
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t)
# ***
# emission losses
# ***
# emission losses on total sensible load
# TODO: check
# write to tsd
tsd['Qcs_sen_rc'][t] = qc_sen_rc_demand
tsd['Qcs_sen_ahu'][t] = qc_sen_ahu
tsd['Qcs_sen_aru'][t] = qc_sen_aru
tsd['Qcs_sen_scu'][t] = qc_sen_scu
tsd['Qcs_lat_ahu'][t] = qc_lat_ahu
tsd['Qcs_lat_aru'][t] = qc_lat_aru
rc_temperatures_to_tsd(rc_model_temperatures, tsd, t)
tsd['Qcs_sen_sys'][t] = qc_sen_ahu + qc_sen_aru + qc_sen_scu # sum system loads
tsd['Qcs_lat_sys'][t] = qc_lat_ahu + qc_lat_aru
# air flow
tsd['m_ve_rec'][t] = aru_system_loads['ma_sup_cs_aru']
# mass flows to tsd
tsd['ma_sup_cs_ahu'][t] = ahu_loads['ma_sup_cs_ahu']
tsd['ta_sup_cs_ahu'][t] = ahu_loads['ta_sup_cs_ahu']
tsd['ta_re_cs_ahu'][t] = ahu_loads['ta_re_cs_ahu']
tsd['ma_sup_cs_aru'][t] = aru_system_loads['ma_sup_cs_aru']
tsd['ta_sup_cs_aru'][t] = aru_system_loads['ta_sup_cs_aru']
tsd['ta_re_cs_aru'][t] = aru_system_loads['ta_re_cs_aru']
q_em_ls_cooling = space_emission_systems.calc_q_em_ls_cooling(bpr, tsd, t)
tsd['Qcs_em_ls'][t] = q_em_ls_cooling
# system status
tsd['sys_status_ahu'][t] = 'On'
tsd['sys_status_aru'][t] = 'On:R'
tsd['sys_status_sen'][t] = 'On'
# the return is only for the input into the detailed thermal reverse calculations for the dashboard graphs
return rc_model_temperatures
def rc_temperatures_to_tsd(rc_model_temperatures, tsd, t):
tsd['T_int'][t] = rc_model_temperatures['T_int']
tsd['theta_m'][t] = rc_model_temperatures['theta_m']
tsd['theta_c'][t] = rc_model_temperatures['theta_c']
tsd['theta_o'][t] = rc_model_temperatures['theta_o']
def update_tsd_no_heating(tsd, t):
"""
    updates NaN values in tsd for the case of no heating demand
Author: Gabriel Happle
Date: 01/2017
:param tsd: time series data dict
:param t: time step / hour of year [0..8760]
:return: updates tsd values
"""
# no sensible loads
tsd['Qhs_sen_rc'][t] = 0.0
tsd['Qhs_sen_shu'][t] = 0.0
tsd['Qhs_sen_aru'][t] = 0.0
tsd['Qhs_sen_ahu'][t] = 0.0
# no latent loads
tsd['Qhs_lat_aru'][t] = 0.0
tsd['Qhs_lat_ahu'][t] = 0.0
tsd['Qhs_sen_sys'][t] = 0.0
tsd['Qhs_lat_sys'][t] = 0.0
tsd['Qhs_em_ls'][t] = 0.0
tsd['Ehs_lat_aux'][t] = 0.0
# mass flows to tsd
tsd['ma_sup_hs_ahu'][t] = 0.0
tsd['ta_sup_hs_ahu'][t] = np.nan
tsd['ta_re_hs_ahu'][t] = np.nan
tsd['ma_sup_hs_aru'][t] = 0.0
tsd['ta_sup_hs_aru'][t] = np.nan
tsd['ta_re_hs_aru'][t] = np.nan
return
def update_tsd_no_cooling(tsd, t):
"""
    updates NaN values in tsd for the case of no cooling demand
Author: Gabriel Happle
Date: 01/2017
:param tsd: time series data dict
:param t: time step / hour of year [0..8760]
:return: updates tsd values
"""
# no sensible loads
tsd['Qcs_sen_rc'][t] = 0.0
tsd['Qcs_sen_scu'][t] = 0.0
tsd['Qcs_sen_aru'][t] = 0.0
tsd['Qcs_sen_ahu'][t] = 0.0
# no latent loads
tsd['Qcs_lat_aru'][t] = 0.0
tsd['Qcs_lat_ahu'][t] = 0.0
# no losses
tsd['Qcs_sen_sys'][t] = 0.0
tsd['Qcs_lat_sys'][t] = 0.0
tsd['Qcs_em_ls'][t] = 0.0
# mass flows to tsd
tsd['ma_sup_cs_ahu'][t] = 0.0
tsd['ta_sup_cs_ahu'][t] = np.nan
tsd['ta_re_cs_ahu'][t] = np.nan
tsd['ma_sup_cs_aru'][t] = 0.0
tsd['ta_sup_cs_aru'][t] = np.nan
tsd['ta_re_cs_aru'][t] = np.nan
return
def detailed_thermal_balance_to_tsd(tsd, bpr, t, rc_model_temperatures):
"""
Back calculate energy flows in RC model for dashboard of energy balance visualization
:param tsd: time series data dict
:param bpr: building properties row object
:param t: time step / hour of year [0..8760]
:param rc_model_temperatures: dict of rc model temperatures
:return: None
"""
# internal gains from lights
tsd['Q_gain_sen_light'][t] = rc_model_SIA.calc_phi_i_l(tsd['El'][t])
# internal gains from appliances, data centres and losses from refrigeration
tsd['Q_gain_sen_app'][t] = (rc_model_SIA.calc_phi_i_a(tsd['Ea'][t], tsd['Epro'][t]) - 0.9*tsd['Epro'][t])/0.9
tsd['Q_gain_sen_pro'][t] = tsd['Epro'][t]
tsd['Q_gain_sen_data'][t] = tsd['Qcdata_sys'][t]
    tsd['Q_loss_sen_ref'][t] = -tsd['Qcre_sys'][t]
# internal gains from people
tsd['Q_gain_sen_peop'][t] = rc_model_SIA.calc_phi_i_p(tsd['Qs'][t])
# losses / gains from ventilation
# tsd['']
# extract detailed rc model intermediate results
h_em = rc_model_temperatures['h_em']
h_op_m = rc_model_temperatures['h_op_m']
theta_m = rc_model_temperatures['theta_m']
theta_em = rc_model_temperatures['theta_em']
h_ec = rc_model_temperatures['h_ec']
theta_c = rc_model_temperatures['theta_c']
theta_ec = rc_model_temperatures['theta_ec']
h_ea = rc_model_temperatures['h_ea']
T_int = rc_model_temperatures['T_int']
theta_ea = rc_model_temperatures['theta_ea']
# backwards calculate individual heat transfer coefficient
h_wall_em = h_em * bpr.rc_model['Awall_ag'] * bpr.rc_model['U_wall'] / h_op_m
h_base_em = h_em * bpr.rc_model['Aop_bg'] * B_F * bpr.rc_model['U_base'] / h_op_m
h_roof_em = h_em * bpr.rc_model['Aroof'] * bpr.rc_model['U_roof'] / h_op_m
# calculate heat fluxes between mass and outside through opaque elements
tsd['Q_gain_sen_wall'][t] = h_wall_em * (theta_em - theta_m)
tsd['Q_gain_sen_base'][t] = h_base_em * (theta_em - theta_m)
tsd['Q_gain_sen_roof'][t] = h_roof_em * (theta_em - theta_m)
# calculate heat fluxes between central and outside through windows
tsd['Q_gain_sen_wind'][t] = h_ec * (theta_ec - theta_c)
# calculate heat between outside and inside air through ventilation
tsd['Q_gain_sen_vent'][t] = h_ea * (theta_ea - T_int)
return
def calc_rc_no_loads(bpr, tsd, t, config):
"""
    Crank-Nicolson procedure to calculate the heating / cooling demand of buildings
    following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    / Korrigenda C2 zum Merkblatt SIA 2044:2011
Special procedures for updating ventilation air AC-heated and AC-cooled buildings
Author: Gabriel Happle
Date: 01/2017
:param bpr: building properties row object
:param tsd: time series data dict
:param t: time step / hour of year [0..8760]
:return: dict of rc_model_temperatures
"""
# following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    # / Korrigenda C2 zum Merkblatt SIA 2044:2011
# STEP 1
# ******
# calculate temperatures
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_no_heating_cooling(bpr, tsd, t, config)
# calculate humidity
tsd['g_hu_ld'][t] = 0.0 # no humidification or dehumidification
tsd['g_dhu_ld'][t] = 0.0
latent_loads.calc_moisture_content_in_zone_local(bpr, tsd, t)
# write to tsd
tsd['T_int'][t] = rc_model_temperatures['T_int']
tsd['theta_m'][t] = rc_model_temperatures['theta_m']
tsd['theta_c'][t] = rc_model_temperatures['theta_c']
tsd['theta_o'][t] = rc_model_temperatures['theta_o']
update_tsd_no_cooling(tsd, t)
update_tsd_no_heating(tsd, t)
tsd['sys_status_ahu'][t] = 'system off'
tsd['sys_status_aru'][t] = 'system off'
tsd['sys_status_sen'][t] = 'system off'
return rc_model_temperatures
def calc_rc_heating_demand(bpr, tsd, t, config):
"""
    Crank-Nicolson procedure to calculate the heating / cooling demand of buildings
    following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    / Korrigenda C2 zum Merkblatt SIA 2044:2011
Special procedures for updating ventilation air AC-heated and AC-cooled buildings
Author: Gabriel Happle
Date: 01/2017
:param bpr: building properties row object
:param tsd: time series data dict
:param t: time step / hour of year [0..8760]
:return: phi_h_act, rc_model_temperatures
"""
# following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    # / Korrigenda C2 zum Merkblatt SIA 2044:2011
# STEP 1
# ******
# calculate temperatures with 0 heating power
rc_model_temperatures_0 = rc_model_SIA.calc_rc_model_temperatures_no_heating_cooling(bpr, tsd, t, config)
t_int_0 = rc_model_temperatures_0['T_int']
# CHECK FOR DEMAND
if not rc_model_SIA.has_sensible_heating_demand(t_int_0, tsd, t):
# return zero demand
rc_model_temperatures = rc_model_temperatures_0
phi_h_act = 0.0
elif rc_model_SIA.has_sensible_heating_demand(t_int_0, tsd, t):
# continue
# STEP 2
# ******
# calculate temperatures with 10 W/m2 heating power
phi_hc_10 = 10.0 * bpr.rc_model['Af']
rc_model_temperatures_10 = rc_model_SIA.calc_rc_model_temperatures_heating(phi_hc_10, bpr, tsd, t, config)
t_int_10 = rc_model_temperatures_10['T_int']
t_int_set = tsd['ta_hs_set'][t]
# interpolate heating power
        # (64) in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011 / Korrigenda C2 zum Merkblatt SIA 2044:2011
phi_hc_ul = phi_hc_10 * (t_int_set - t_int_0) / (t_int_10 - t_int_0)
# STEP 3
# ******
# check if available power is sufficient
phi_h_max = bpr.hvac['Qhsmax_Wm2'] * bpr.rc_model['Af']
if 0.0 < phi_hc_ul <= phi_h_max:
# case heating with phi_hc_ul
# calculate temperatures with this power
phi_h_act = phi_hc_ul
elif 0.0 < phi_hc_ul > phi_h_max:
# case heating with max power available
# calculate temperatures with this power
phi_h_act = phi_h_max
else:
raise Exception("Unexpected status in 'calc_rc_heating_demand'")
# STEP 4
# ******
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_heating(phi_h_act, bpr, tsd, t, config)
else:
raise Exception("Unexpected status in 'calc_rc_heating_demand'")
return phi_h_act, rc_model_temperatures
def calc_rc_cooling_demand(bpr, tsd, t, config):
"""
    Crank-Nicolson procedure to calculate the heating / cooling demand of buildings
    following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    / Korrigenda C2 zum Merkblatt SIA 2044:2011
Special procedures for updating ventilation air AC-heated and AC-cooled buildings
Author: Gabriel Happle
Date: 01/2017
:param bpr: building properties row object
:param tsd: time series data dict
:param t: time step / hour of year [0..8760]
:return: phi_c_act, rc_model_temperatures
"""
# following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011
    # / Korrigenda C2 zum Merkblatt SIA 2044:2011
# ++++++++++++++++
# CASE 2 - COOLING
# ++++++++++++++++
# case for cooling
# tsd['system_status'][t] = 'Radiative cooling'
# STEP 1
# ******
# calculate temperatures with 0 heating power
rc_model_temperatures_0 = rc_model_SIA.calc_rc_model_temperatures_no_heating_cooling(bpr, tsd, t, config)
t_int_0 = rc_model_temperatures_0['T_int']
# CHECK FOR DEMAND
if not rc_model_SIA.has_sensible_cooling_demand(t_int_0, tsd, t):
# return zero demand
rc_model_temperatures = rc_model_temperatures_0
phi_c_act = 0.0
elif rc_model_SIA.has_sensible_cooling_demand(t_int_0, tsd, t):
# continue
# STEP 2
# ******
# calculate temperatures with 10 W/m2 cooling power
phi_hc_10 = 10.0 * bpr.rc_model['Af']
rc_model_temperatures_10 = rc_model_SIA.calc_rc_model_temperatures_cooling(phi_hc_10, bpr, tsd, t, config)
t_int_10 = rc_model_temperatures_10['T_int']
t_int_set = tsd['ta_cs_set'][t]
        # interpolate cooling power
        # (64) in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011 / Korrigenda C2 zum Merkblatt SIA 2044:2011
phi_hc_ul = phi_hc_10 * (t_int_set - t_int_0) / (t_int_10 - t_int_0)
# STEP 3
# ******
# check if available power is sufficient
phi_c_max = -bpr.hvac['Qcsmax_Wm2'] * bpr.rc_model['Af']
if 0.0 > phi_hc_ul >= phi_c_max:
# case cooling with phi_hc_ul
# calculate temperatures with this power
phi_c_act = phi_hc_ul
elif 0.0 > phi_hc_ul < phi_c_max:
# case cooling with max power available
# calculate temperatures with this power
phi_c_act = phi_c_max
else:
raise Exception("Unexpected status in 'calc_rc_cooling_demand'")
# STEP 4
# ******
rc_model_temperatures = rc_model_SIA.calc_rc_model_temperatures_cooling(phi_c_act, bpr, tsd, t, config)
else:
raise Exception("Unexpected status in 'calc_rc_cooling_demand'")
return phi_c_act, rc_model_temperatures
| 36.922144
| 141
| 0.658177
|
06d4c54edea895edecbaa413d3495c8273bff956
| 3,150
|
py
|
Python
|
ginjinn/commandline/commandline_app/commandline_app.py
|
AGOberprieler/GinJinn2
|
527feac125f476165e332277823c11016565f99d
|
[
"Apache-2.0"
] | 17
|
2021-07-24T20:55:58.000Z
|
2022-02-09T05:15:04.000Z
|
ginjinn/commandline/commandline_app/commandline_app.py
|
AGOberprieler/GinJinn2
|
527feac125f476165e332277823c11016565f99d
|
[
"Apache-2.0"
] | 1
|
2021-12-21T06:33:56.000Z
|
2022-02-05T13:57:53.000Z
|
ginjinn/commandline/commandline_app/commandline_app.py
|
AGOberprieler/GinJinn2
|
527feac125f476165e332277823c11016565f99d
|
[
"Apache-2.0"
] | null | null | null |
''' GinJinn commandline application code.
'''
from ginjinn.commandline.argument_parser import GinjinnArgumentParser
class GinjinnCommandlineApplication:
'''GinjinnCommandlineApplication
GinJinn commandline application.
'''
def __init__(self):
self.parser = GinjinnArgumentParser()
self.args = None
def run(self, args=None, namespace=None):
'''run
Start GinJinn commandline application.
Parameters
----------
args
List of strings to parse. If None, the strings are taken from sys.argv.
namespace
An object to take the attributes. The default is a new empty argparse Namespace object.
'''
self.args = self.parser.parse_args(args=args, namespace=namespace)
# print(self.args)
if self.args.subcommand == 'new':
self._run_new()
elif self.args.subcommand == 'split':
self._run_split()
elif self.args.subcommand == 'simulate':
self._run_simulate()
elif self.args.subcommand == 'train':
self._run_train()
elif self.args.subcommand == 'utils':
self._run_utils()
elif self.args.subcommand in ['evaluate', 'eval']:
self._run_evaluate()
elif self.args.subcommand == 'predict':
self._run_predict()
elif self.args.subcommand == 'info':
self._run_info()
elif self.args.subcommand in ['visualize', 'vis']:
self._run_visualize()
def _run_split(self):
'''_run_split
Run the GinJinn split command.
'''
from .split import ginjinn_split
ginjinn_split(self.args)
def _run_simulate(self):
'''_run_simulate
Run the GinJinn simulate command.
'''
from .simulate import ginjinn_simulate
ginjinn_simulate(self.args)
def _run_new(self):
'''_run_new
Run the GinJinn new command.
'''
from .new import ginjinn_new
ginjinn_new(self.args)
def _run_train(self):
'''_run_train
Run the GinJinn train command.
'''
from .train import ginjinn_train
ginjinn_train(self.args)
def _run_utils(self):
'''_run_utils
Run the GinJinn utils command.
'''
from .utils import ginjinn_utils
ginjinn_utils(self.args)
def _run_evaluate(self):
'''_run_evaluate
Run the GinJinn evaluate command.
'''
from .evaluate import ginjinn_evaluate
ginjinn_evaluate(self.args)
def _run_predict(self):
'''_run_predict
Run the GinJinn predict command.
'''
from .predict import ginjinn_predict
ginjinn_predict(self.args)
def _run_info(self):
'''_run_info
Run the GinJinn info command.
'''
from .info import ginjinn_info
ginjinn_info(self.args)
def _run_visualize(self):
'''_run_visualize
Run the GinJinn visualize command.
'''
from .visualize import ginjinn_visualize
ginjinn_visualize(self.args)
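# --- Illustrative entry point (not part of the original module) ------------
# run() parses sys.argv (or an explicit argument list) and dispatches to the
# matching subcommand via the if/elif chain above. The guard below is only an
# assumed convenience for running this module directly; the real
# console-script wiring may live elsewhere in the package.
if __name__ == '__main__':
    GinjinnCommandlineApplication().run()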
| 28.636364
| 99
| 0.6
|
1a2fd4e0b8c7ae7228d7768503d24b8a68d0a440
| 2,581
|
py
|
Python
|
lib/galaxy_test/selenium/test_tool_describing_tours.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,085
|
2015-02-18T16:14:38.000Z
|
2022-03-30T23:52:07.000Z
|
lib/galaxy_test/selenium/test_tool_describing_tours.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 11,253
|
2015-02-18T17:47:32.000Z
|
2022-03-31T21:47:03.000Z
|
lib/galaxy_test/selenium/test_tool_describing_tours.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1,000
|
2015-02-18T16:18:10.000Z
|
2022-03-29T08:22:56.000Z
|
import unittest
from .framework import selenium_test, SeleniumTestCase
class ToolDescribingToursTestCase(SeleniumTestCase):
def setUp(self):
super().setUp()
self.home()
@selenium_test
def test_generate_tour_no_data(self):
"""Ensure a tour without data is generated and pops up."""
self._ensure_tdt_available()
self.tool_open('environment_variables')
self.tool_form_generate_tour()
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "environment_variables Tour", title
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(1)
@selenium_test
def test_generate_tour_with_data(self):
"""Ensure a tour with data populates history."""
self._ensure_tdt_available()
self.tool_open('md5sum')
self.tool_form_generate_tour()
self.history_panel_wait_for_hid_ok(1)
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "md5sum Tour", title
self.screenshot("tool_describing_tour_0_start")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
text = popover_component.content.wait_for_visible().text
assert "Select dataset" in text, text
self.screenshot("tool_describing_tour_1_select")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
title = popover_component.title.wait_for_visible().text
assert title == "Execute tool"
self.screenshot("tool_describing_tour_2_execute")
popover_component.end.wait_for_and_click()
popover_component.wait_for_absent_or_hidden()
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(2)
self.screenshot("tool_describing_tour_3_after_execute")
def _ensure_tdt_available(self):
""" Skip a test if the webhook TDT doesn't appear. """
response = self.api_get('webhooks', raw=True)
self.assertEqual(response.status_code, 200)
data = response.json()
webhooks = [x['id'] for x in data]
if 'tour_generator' not in webhooks:
raise unittest.SkipTest('Skipping test, webhook "Tool-Describing-Tours" doesn\'t appear to be configured.')
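# --- Illustrative note (not part of the original test) ---------------------
# _ensure_tdt_available() is the guard pattern used by both tests above: query
# the webhooks API and raise unittest.SkipTest when the 'tour_generator'
# webhook is missing, so the test is reported as skipped rather than failing
# on servers where Tool-Describing-Tours is not configured.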
| 31.864198
| 119
| 0.689655
|
bf84b59b9801dc008cb263c600a8af734fd57bf0
| 3,671
|
py
|
Python
|
tests/bugs/core_0297_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_0297_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_0297_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_0297
# title: bug #585624 IB server stalled by simple script
# decription:
# ::: NB :::
# ### Name of original test has no relation with the actual task of this test: ###
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_23.script
#
# Issue in original script: bug #585624 "IB server stalled by simple script"
# Found in FB tracker as: http://tracker.firebirdsql.org/browse/CORE-297
# Fixed in 1.5.0
#
# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC.
#
# tracker_id:
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """
set term ^;
create procedure group_copy (
source integer,
destination integer)
as
begin
exit;
end^
create procedure insert_values (
cont integer,
d_group integer)
as
begin
exit;
end^
set term ;^
create table groups (
gr_id integer not null,
gr_name varchar(40) character set iso8859_1 not null
collate de_de
);
create table test (
id integer not null,
t_group integer not null
);
alter table groups add constraint pk_groups primary key (gr_id);
alter table test add constraint pk_test primary key (id, t_group);
alter table test add constraint fk_test foreign key (t_group) references groups (gr_id);
set term ^;
alter procedure group_copy (
source integer,
destination integer)
as
begin
insert into test( id, t_group )
select a.id, :destination
from test a
where a.t_group = :source
and not exists (
select * from test b
where b.id = a.id
and :destination = b.t_group
);
end
^
alter procedure insert_values (
cont integer,
d_group integer)
as
declare anz integer;
begin
anz = 0;
while ( anz < cont ) do
begin
if ( not exists (
select id
from test where id = :anz
and t_group = :d_group
)
) then
insert into test( id, t_group ) values( :anz, :d_group );
anz = anz +1;
end
end
^
set term ;^
commit;
insert into groups values ( 1 , 'Group1' );
insert into groups values ( 2 , 'Group2' );
commit;
execute procedure insert_values( 3000 , 1);
commit;
delete from test where t_group = 2;
execute procedure group_copy( 1 , 2 );
commit;
set list on;
select count(*) from test;
select * from groups;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
COUNT 6000
GR_ID 1
GR_NAME Group1
GR_ID 2
GR_NAME Group2
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
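# --- Illustrative note (not part of the original test) ---------------------
# The substitutions above normalise the isql output before comparison: '='
# characters are dropped and runs of spaces/tabs collapse to a single space,
# which is why expected_stdout_1 can be written without ISQL's column padding
# and '=' separators.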
| 25.671329
| 97
| 0.53228
|
640f90397134552521cb69369a13f44d8a3cf14f
| 5,180
|
py
|
Python
|
Qualificacao/3dConv.py
|
Hadamanthis/Mestrado
|
f95bed5a5cc9ce1a7b6e8ee5c0f0a97a5e25a4da
|
[
"Apache-2.0"
] | 1
|
2018-06-30T18:54:29.000Z
|
2018-06-30T18:54:29.000Z
|
Qualificacao/3dConv.py
|
Hadamanthis/Mestrado
|
f95bed5a5cc9ce1a7b6e8ee5c0f0a97a5e25a4da
|
[
"Apache-2.0"
] | null | null | null |
Qualificacao/3dConv.py
|
Hadamanthis/Mestrado
|
f95bed5a5cc9ce1a7b6e8ee5c0f0a97a5e25a4da
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.convolutional import Convolution3D, MaxPooling3D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.utils import np_utils
from keras import backend as K
from readDataset import readDataset
# Dataset directory
folder = '/mnt/DADOS/LISA_HG_Data/dynamic_gestures/data' # where the images live
n_classes = 32 # number of classes
n_epochs = 50 # number of epochs
resizeShape = (80, 30) # target frame size
interpolation = cv2.INTER_AREA # interpolation used when resizing
color = cv2.COLOR_BGR2GRAY # colour conversion
vid_shape = {'rows':50, 'cols':100, 'frames':6} # video shape
if __name__ == "__main__":
# Switch the image dimension ordering (Theano-style 'th')
K.set_image_dim_ordering('th')
#############################################################
### Read the images (250x115) and place them in a variable X. ###
### The number of frames per video is variable ###
### 16 users perform 32 gestures, 3 times each ###
#############################################################
# Training data
X_tr = [] # holds every input sample
labels = [] # class labels
# Read the dataset
X_tr, labels = readDataset(folder, vid_shape['frames'], color, resizeShape, interpolation)
if resizeShape != None:
vid_shape['rows'] = resizeShape[1]
vid_shape['cols'] = resizeShape[0]
###################################################
### (Hardwired) ###
### Transform each image ###
### into 5 channels (gray level, ###
### gradient_x, gradient_y, ###
### optflow_x, optflow_y) ###
###################################################
X = [] # holds all derived sub-images
# Extract the images from the dataset
for img in X_tr:
X.append(img) # gray levels
X.append( np.absolute(cv2.Sobel(img,cv2.CV_16U,1,0,ksize=5)) ) # gradient x
X.append( np.absolute(cv2.Sobel(img,cv2.CV_16U,0,1,ksize=5)) ) # gradient y
#treino_optflwx.append( )
#treino_optflwy.append( )
# Convert the inputs and labels to NumPy arrays
X = np.array(X)
Y = np.array(labels)
num_samples = len(X_tr)
print('X.shape:', X.shape)
print('Y.shape:', Y.shape)
# input_shape(n_images, n_channels, n_rows, n_cols, depth)
input_shape = (3, vid_shape['rows'], vid_shape['cols'], vid_shape['frames'])
train_set = np.zeros((num_samples, 3, vid_shape['rows'], vid_shape['cols'], vid_shape['frames']))
for h in list(range(num_samples)):
for r in list(range(3)):
train_set[h][r][:][:][:] = X[h,:,:,:]
print('train_set:', train_set.shape)
# Pre-processing
train_set = train_set.astype('float32')
train_set -= np.mean(train_set)
train_set /= np.max(train_set)
# Convert the class vector into binary (one-hot) class matrices
Y = np_utils.to_categorical(Y, n_classes)
print('Y.shape:', Y.shape)
# Split the data into train/test 80/20
X_treino, X_teste, Y_treino, Y_teste = train_test_split(train_set, Y,
test_size = 0.2,
random_state = 4)
print('X_treino:', X_treino.shape)
print('X_teste:', X_teste.shape)
print('Y_treino:', Y_treino.shape)
print('Y_teste:', Y_teste.shape)
######################################################################
### Build the model ###
### Add a 3D convolution layer with 2 filters (9x7) ###
### Add a subsampling (pooling) layer (3x3) ###
### Add a 3D convolution layer with 3 filters (7x7) ###
### Add a subsampling (pooling) layer (3x3) ###
### Add a 3D convolution layer with 1 filter (6x4) ###
### Add a fully connected layer with 128 feature vectors ###
### Add a final layer with 32 outputs ###
######################################################################
# Number of convolutional filters in each layer
n_filters = [16, 16]
# Convolution depth used in each layer (CONV x CONV)
n_conv = [3, 3, 3]
n_width = [51, 79, 30]
n_height = [21, 35, 12]
# number of images trained per batch
batch_size = 2
# Build the model
model = Sequential()
model.add(Convolution3D(n_filters[0],
(n_height[0], n_width[0], n_conv[0]),
input_shape=input_shape,
activation='relu'))
model.add(MaxPooling3D(pool_size=(3, 3, 3)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, kernel_initializer='normal'))
model.add(Activation('softmax'))
# Each action is classified in a "one-against-rest" manner
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# Train the model
hist = model.fit(X_treino, Y_treino, validation_data=(X_teste, Y_teste), epochs=n_epochs, shuffle=True, verbose=1)
# Evaluate the model
score = model.evaluate(X_teste, Y_teste)
print('\nTest score:', score[0])
print('Test accuracy', score[1])
# Test the model
#model.predict(X_teste)
model.summary()
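# --- Illustrative sketch (not part of the original script) -----------------
# The hardwired layer above leaves the optflow_x / optflow_y channels as
# commented-out placeholders. One assumed way to fill them is dense Farneback
# optical flow between consecutive grayscale frames of the same clip; the
# helper name `optical_flow_xy` is hypothetical and the Farneback parameters
# below are common defaults, not values tuned for this dataset.
def optical_flow_xy(prev_frame, next_frame):
    """Return (flow_x, flow_y) between two consecutive grayscale frames."""
    # args: prev, next, flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags
    flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    return flow[..., 0], flow[..., 1]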
| 31.779141
| 116
| 0.638031
|
365e2dcb3c38c7644ec87937fdd3a50dc1841bd1
| 132
|
py
|
Python
|
tests/test_zappa_client.py
|
xdssio/zappa_client
|
f2a511fb7d905dca78b6691b9f3042881e43e3a7
|
[
"MIT"
] | 1
|
2019-03-03T08:01:46.000Z
|
2019-03-03T08:01:46.000Z
|
tests/test_zappa_client.py
|
xdssio/zappa_client
|
f2a511fb7d905dca78b6691b9f3042881e43e3a7
|
[
"MIT"
] | null | null | null |
tests/test_zappa_client.py
|
xdssio/zappa_client
|
f2a511fb7d905dca78b6691b9f3042881e43e3a7
|
[
"MIT"
] | null | null | null |
from zappa_client import ZappaClient
def test_zappa_client():
# TODO
client = ZappaClient()
assert client is not None
| 16.5
| 36
| 0.719697
|
bdd9bd104405a69d33abe62bedbbaf8753070c63
| 188
|
py
|
Python
|
sandbox/array_check.py
|
pixelflux/pyflecs
|
58cf8c7cb92deeae77e5802bb39ea4cbdb6ecc34
|
[
"MIT"
] | null | null | null |
sandbox/array_check.py
|
pixelflux/pyflecs
|
58cf8c7cb92deeae77e5802bb39ea4cbdb6ecc34
|
[
"MIT"
] | null | null | null |
sandbox/array_check.py
|
pixelflux/pyflecs
|
58cf8c7cb92deeae77e5802bb39ea4cbdb6ecc34
|
[
"MIT"
] | null | null | null |
"""
A script to test how to dynamically create a numpy structured array.
"""
import numpy as np
import pyflecs as flecs
values = np.array([1, 2, 3], dtype='uint32')
flecs.test(values)
| 15.666667
| 68
| 0.707447
|
3c6391d520a58cce0c8253b41862e6870f806857
| 40,161
|
py
|
Python
|
spaceinvaders.py
|
fustilio/Space_Invaders
|
37d55837b8e5e0c542e53ac03167680326b0418a
|
[
"MIT"
] | null | null | null |
spaceinvaders.py
|
fustilio/Space_Invaders
|
37d55837b8e5e0c542e53ac03167680326b0418a
|
[
"MIT"
] | 3
|
2019-10-11T08:51:45.000Z
|
2019-10-12T04:25:21.000Z
|
spaceinvaders.py
|
fustilio/Space_Invaders
|
37d55837b8e5e0c542e53ac03167680326b0418a
|
[
"MIT"
] | 1
|
2019-10-11T15:27:21.000Z
|
2019-10-11T15:27:21.000Z
|
#!/usr/bin/env python
# Space Invaders
# Created by Lee Robinson
from pygame import *
import sys
from os.path import abspath, dirname
from random import choice
import numpy as np
from enum import Enum
from model.circuit_grid_model import CircuitGridModel
from controls.circuit_grid import CircuitGrid, CircuitGridNode
from copy import deepcopy
from utils.navigation import MOVE_UP, MOVE_DOWN, MOVE_LEFT, MOVE_RIGHT
from utils.parameters import WIDTH_UNIT, WINDOW_HEIGHT, WINDOW_WIDTH, \
LEFT, RIGHT, NOTHING, NO, YES, MEASURE_LEFT, MEASURE_RIGHT, WINDOW_SIZE
from qiskit import BasicAer, execute, ClassicalRegister
BASE_PATH = abspath(dirname(__file__))
FONT_PATH = BASE_PATH + '/fonts/'
IMAGE_PATH = BASE_PATH + '/images/'
SOUND_PATH = BASE_PATH + '/sounds/'
# Colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (78, 255, 87)
YELLOW = (241, 255, 0)
BLUE = (80, 255, 239)
PURPLE = (203, 0, 255)
RED = (237, 28, 36)
#SCREEN_HEIGHT = 640
SCREEN_HEIGHT = 300
SCREEN = display.set_mode((800, 640))
#screen = display.set_mode(WINDOW_SIZE)
FONT = FONT_PATH + 'space_invaders.ttf'
IMG_NAMES = ['ship', 'mystery',
'enemy1_1', 'enemy1_2',
'enemy2_1', 'enemy2_2',
'enemy3_1', 'enemy3_2',
'explosionblue', 'explosiongreen', 'explosionpurple',
'laser', 'enemylaser']
IMAGES = {name: image.load(IMAGE_PATH + '{}.png'.format(name)).convert_alpha()
for name in IMG_NAMES}
POSITIONS = [20, 120, 220, 320, 420, 520, 620, 720]
# OFFSETS = [-100, 0, -200, 100, -300, 200, -400, 300]
OFFSETS = [-400, -300, -200, -100, 0, 100, 200, 300]
DISTRIBUTIONS = [25.0, 25.0, 15.0, 15.0, 7.0, 7.0, 3.0, 3.0]
LABEL_TEXT = ["|000>", "|001>", "|010>", "|011>", "|100>", "|101>", "|110>", "|111>"]
NUMBER_OF_SHIPS = 8
BLOCKERS_POSITION = 450
ENEMY_DEFAULT_POSITION = 65 # Initial value for a new game
ENEMY_MOVE_DOWN = 35
ENEMY_HEALTH = 96
BULLET_MAX_DAMAGE = 96
class ShipState(Enum):
SUPERPOSITION = 0
MEASURED = 1
class Ship(sprite.Sprite):
def __init__(self, id):
sprite.Sprite.__init__(self)
self.id = id
self.probability = DISTRIBUTIONS[id] / 100.0
self.image = IMAGES['ship'].copy()
self.image.fill((255, 255, 255, self.probability * 500), None, BLEND_RGBA_MULT)
self.speed = 5
self.rect = self.image.get_rect(topleft=(POSITIONS[self.id], 540))
self.classical = False
def update(self, *args):
self.update_opacity(self.probability)
game.screen.blit(self.image, self.rect)
def fire(self, measuring, measured_ship):
if measuring:
if self is measured_ship:
bullet = Bullet(self.rect.x + 23,
self.rect.y + 5, -1,
15, 'laser', 'center', 1.0)
game.bullets.add(bullet)
game.allSprites.add(game.bullets)
game.sounds['shoot'].play()
else:
bullet = Bullet(self.rect.x + 23,
self.rect.y + 5, -1,
15, 'laser', 'center', self.probability)
game.bullets.add(bullet)
game.allSprites.add(game.bullets)
game.sounds['shoot'].play()
def update_opacity(self, prob):
self.image = IMAGES['ship'].copy()
opacity = 0
if self.classical:
opacity = 1
else:
if prob > 0.75:
opacity = 1
elif prob > 0.5:
opacity = 0.8
elif prob > 0.25:
opacity = 0.6
elif prob > 0.1:
opacity = 0.35
self.image.fill((255, 255, 255, opacity * 255), None, BLEND_RGBA_MULT)
class ShipGroup(sprite.Group):
def __init__(self, number_of_ships, position):
sprite.Group.__init__(self)
self.ships = [None] * number_of_ships
self.number_of_ships = number_of_ships
self.position = position
self.measured_ship = None
self.measuring = False
self.timer = 0.0
self.state = ShipState.SUPERPOSITION
def update(self, keys, *args):
passed = time.get_ticks() - self.timer
if self.measuring and passed > 600:
self.measuring = False
for ship in self:
if self.state == ShipState.SUPERPOSITION:
ship.classical = False
ship.rect.x = (OFFSETS[ship.id] + POSITIONS[self.position]) % 800
elif self.state == ShipState.MEASURED:
if ship == self.measured_ship:
ship.classical = True
ship.rect.x = (OFFSETS[ship.id] + POSITIONS[self.position]) % 800
else:
ship.rect.x = 999999999
ship.update_opacity(ship.probability)
ship.update()
def add_internal(self, *sprites):
super(ShipGroup, self).add_internal(*sprites)
for s in sprites:
self.ships[s.id] = s
def remove_internal(self, *sprites):
super(ShipGroup, self).remove_internal(*sprites)
for s in sprites:
self.kill(s)
def fire(self):
for ship in self:
ship.fire(self.state == ShipState.MEASURED, self.measured_ship)
def kill(self, ship):
self.ships[ship.id] = None
def explode_ships(self, explosionsGroup, measured_ship_id):
for ship in self.ships:
if ship is not None:
if ship.id == measured_ship_id:
ship.update_opacity(1.0)
ship.update()
self.measured_ship = ship
else:
ship.kill()
# ShipExplosion(ship, sprite.Group())
def update_probabilities(self, probabilities):
for ship in self:
p_amp = probabilities[ship.id]
ship.probability = p_amp.real*p_amp.real + p_amp.imag*p_amp.imag
ship.update_opacity(ship.probability)
def measure(self, measured_ship_id):
for ship in self.ships:
if ship is not None:
if ship.id == measured_ship_id:
self.measured_ship = ship
self.measuring = True
self.timer = time.get_ticks()
self.state = ShipState.MEASURED
self.update([])
def draw(self, screen):
for ship in self.ships:
if ship is not None and ship is Ship:
text = Text(FONT, 50, '000', WHITE, 50, 50)
text.draw(screen)
class Bullet(sprite.Sprite):
def __init__(self, xpos, ypos, direction, speed, filename, side, multiplier=1.0):
sprite.Sprite.__init__(self)
self.image = IMAGES[filename]
self.rect = self.image.get_rect(topleft=(xpos, ypos))
self.speed = speed
self.direction = direction
self.side = side
self.filename = filename
self.multiplier = multiplier
self.damage = BULLET_MAX_DAMAGE * multiplier + 1 # accounting for floating point issue
def update(self, keys, *args):
self.image = IMAGES[self.filename].copy()
alpha = 0
if self.multiplier > 0.01: # if alpha is above 0 basically
alpha = max(self.multiplier * 255, 128)
self.image.fill((255, 255, 255, alpha), None, BLEND_RGBA_MULT)
game.screen.blit(self.image, self.rect)
self.rect.y += self.speed * self.direction
if self.rect.y < 15 or self.rect.y > 650:
self.kill()
class Enemy(sprite.Sprite):
def __init__(self, row, column):
sprite.Sprite.__init__(self)
self.row = row
self.column = column
self.images = []
self.load_images()
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
self.health = ENEMY_HEALTH
def toggle_image(self):
self.index = (self.index + 1) % len(self.images)
self.image = self.images[self.index]
def update(self, *args):
self.image = self.images[self.index].copy()
alpha = max(255 * self.health / ENEMY_HEALTH, 50)
self.image.fill((255, 255, 255, alpha), None, BLEND_RGBA_MULT)
game.screen.blit(self.image, self.rect)
def load_images(self):
images = {0: ['1_2', '1_1'],
1: ['2_2', '2_1'],
2: ['2_2', '2_1'],
3: ['3_1', '3_2'],
4: ['3_1', '3_2'],
}
img1, img2 = (IMAGES['enemy{}'.format(img_num)] for img_num in
images[self.row])
self.images.append(transform.scale(img1, (40, 35)))
self.images.append(transform.scale(img2, (40, 35)))
class EnemiesGroup(sprite.Group):
def __init__(self, columns, rows):
sprite.Group.__init__(self)
self.enemies = [[None] * columns for _ in range(rows)]
self.columns = columns
self.rows = rows
self.leftAddMove = 0
self.rightAddMove = 0
self.moveTime = 600
self.direction = 1
self.rightMoves = 30
self.leftMoves = 30
self.moveNumber = 15
self.timer = time.get_ticks()
self.bottom = game.enemyPosition + ((rows - 1) * 45) + 35
self._aliveColumns = list(range(columns))
self._leftAliveColumn = 0
self._rightAliveColumn = columns - 1
def update(self, current_time):
if current_time - self.timer > self.moveTime:
if self.direction == 1:
max_move = self.rightMoves + self.rightAddMove
else:
max_move = self.leftMoves + self.leftAddMove
if self.moveNumber >= max_move:
self.leftMoves = 30 + self.rightAddMove
self.rightMoves = 30 + self.leftAddMove
self.direction *= -1
self.moveNumber = 0
self.bottom = 0
for enemy in self:
enemy.rect.y += ENEMY_MOVE_DOWN
enemy.toggle_image()
if self.bottom < enemy.rect.y + 35:
self.bottom = enemy.rect.y + 35
else:
velocity = 10 if self.direction == 1 else -10
for enemy in self:
enemy.rect.x += velocity
enemy.toggle_image()
self.moveNumber += 1
self.timer = current_time + self.moveTime
def add_internal(self, *sprites):
super(EnemiesGroup, self).add_internal(*sprites)
for s in sprites:
self.enemies[s.row][s.column] = s
def remove_internal(self, *sprites):
super(EnemiesGroup, self).remove_internal(*sprites)
for s in sprites:
self.kill(s)
self.update_speed()
def is_column_dead(self, column):
return not any(self.enemies[row][column]
for row in range(self.rows))
def random_bottom(self):
col = choice(self._aliveColumns)
col_enemies = (self.enemies[row - 1][col]
for row in range(self.rows, 0, -1))
return next((en for en in col_enemies if en is not None), None)
def update_speed(self):
if len(self) == 1:
self.moveTime = 200
elif len(self) <= 10:
self.moveTime = 400
def kill(self, enemy):
self.enemies[enemy.row][enemy.column] = None
is_column_dead = self.is_column_dead(enemy.column)
if is_column_dead:
self._aliveColumns.remove(enemy.column)
if enemy.column == self._rightAliveColumn:
while self._rightAliveColumn > 0 and is_column_dead:
self._rightAliveColumn -= 1
self.rightAddMove += 5
is_column_dead = self.is_column_dead(self._rightAliveColumn)
elif enemy.column == self._leftAliveColumn:
while self._leftAliveColumn < self.columns and is_column_dead:
self._leftAliveColumn += 1
self.leftAddMove += 5
is_column_dead = self.is_column_dead(self._leftAliveColumn)
class Blocker(sprite.Sprite):
def __init__(self, size, color, row, column):
sprite.Sprite.__init__(self)
self.height = size
self.width = size
self.color = color
self.image = Surface((self.width, self.height))
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.row = row
self.column = column
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
class Mystery(sprite.Sprite):
def __init__(self):
sprite.Sprite.__init__(self)
self.image = IMAGES['mystery']
self.image = transform.scale(self.image, (75, 35))
self.rect = self.image.get_rect(topleft=(-80, 45))
self.row = 5
self.moveTime = 25000
self.direction = 1
self.timer = time.get_ticks()
self.mysteryEntered = mixer.Sound(SOUND_PATH + 'mysteryentered.wav')
self.mysteryEntered.set_volume(0.3)
self.playSound = True
def update(self, keys, currentTime, *args):
resetTimer = False
passed = currentTime - self.timer
if passed > self.moveTime:
if (self.rect.x < 0 or self.rect.x > 800) and self.playSound:
self.mysteryEntered.play()
self.playSound = False
if self.rect.x < 840 and self.direction == 1:
self.mysteryEntered.fadeout(4000)
self.rect.x += 2
game.screen.blit(self.image, self.rect)
if self.rect.x > -100 and self.direction == -1:
self.mysteryEntered.fadeout(4000)
self.rect.x -= 2
game.screen.blit(self.image, self.rect)
if self.rect.x > 830:
self.playSound = True
self.direction = -1
resetTimer = True
if self.rect.x < -90:
self.playSound = True
self.direction = 1
resetTimer = True
if passed > self.moveTime and resetTimer:
self.timer = currentTime
class EnemyExplosion(sprite.Sprite):
def __init__(self, enemy, *groups):
super(EnemyExplosion, self).__init__(*groups)
self.image = transform.scale(self.get_image(enemy.row), (40, 35))
self.image2 = transform.scale(self.get_image(enemy.row), (50, 45))
self.rect = self.image.get_rect(topleft=(enemy.rect.x, enemy.rect.y))
self.timer = time.get_ticks()
@staticmethod
def get_image(row):
img_colors = ['purple', 'blue', 'blue', 'green', 'green']
return IMAGES['explosion{}'.format(img_colors[row])]
def update(self, current_time, *args):
passed = current_time - self.timer
if passed <= 100:
game.screen.blit(self.image, self.rect)
elif passed <= 200:
game.screen.blit(self.image2, (self.rect.x - 6, self.rect.y - 6))
elif 400 < passed:
self.kill()
class MysteryExplosion(sprite.Sprite):
def __init__(self, mystery, score, *groups):
super(MysteryExplosion, self).__init__(*groups)
self.text = Text(FONT, 20, str(score), WHITE,
mystery.rect.x + 20, mystery.rect.y + 6)
self.timer = time.get_ticks()
def update(self, current_time, *args):
passed = current_time - self.timer
if passed <= 200 or 400 < passed <= 600:
self.text.draw(game.screen)
elif 600 < passed:
self.kill()
class ShipExplosion(sprite.Sprite):
def __init__(self, ship, *groups):
super(ShipExplosion, self).__init__(*groups)
self.image = IMAGES['ship']
self.rect = self.image.get_rect(topleft=(ship.rect.x, ship.rect.y))
self.timer = time.get_ticks()
def update(self, current_time, *args):
passed = current_time - self.timer
if 300 < passed <= 600:
game.screen.blit(self.image, self.rect)
elif 900 < passed:
self.kill()
class Life(sprite.Sprite):
def __init__(self, xpos, ypos):
sprite.Sprite.__init__(self)
self.image = IMAGES['ship']
self.image = transform.scale(self.image, (23, 23))
self.rect = self.image.get_rect(topleft=(xpos, ypos))
def update(self, *args):
game.screen.blit(self.image, self.rect)
class Text(object):
def __init__(self, textFont, size, message, color, xpos, ypos):
self.font = font.Font(textFont, size)
self.surface = self.font.render(message, True, color)
self.rect = self.surface.get_rect(topleft=(xpos, ypos))
def draw(self, surface):
surface.blit(self.surface, self.rect)
class Labels(object):
def __init__(self):
self.labels = []
def initialize(self, position):
self.labels = []
for i in range(8):
self.labels.append(Text(FONT, 20, LABEL_TEXT[i], WHITE, POSITIONS[position] + OFFSETS[i], 600))
def update(self, screen, position):
for i in range(len(self.labels)):
self.labels[i].rect = self.labels[i].surface.get_rect(topleft=((POSITIONS[position] + OFFSETS[i]) % 800, 600))
for label in self.labels:
label.draw(screen)
"""
class PauseBar(sprite.Sprite):
def __init__(self, mystery, score, *groups):
self.readytext = Text(FONT, 20, "QUANTUM READY", WHITE,
400, 0)
self.text = Text(FONT, 20, "QUANTUM NOT READY", RED,
400, 0)
self.timer = time.get_ticks()
self.ready = False
def update(self, current_time, *args):
passed = current_time - self.timer
if passed <= 600:
self.text.draw(game.screen)
elif 600 > passed:
self.readytext.draw(game.screen)
self.ready = True
self.timer = currentTime
def reset():
self.ready = False
"""
class SpaceInvaders(object):
def __init__(self):
# It seems that on Linux a buffer size of 512 is not enough; use 4096 to prevent:
# ALSA lib pcm.c:7963:(snd_pcm_recover) underrun occurred
mixer.pre_init(44100, -16, 1, 4096)
init()
self.clock = time.Clock()
self.caption = display.set_caption('Space Invaders')
self.screen = SCREEN
self.background = image.load(IMAGE_PATH + 'qiskit.png').convert()
self.startGame = False
self.mainScreen = True
self.gameOver = False
# Counter for enemy starting position (increased each new round)
self.enemyPosition = ENEMY_DEFAULT_POSITION
self.titleText = Text(FONT, 50, 'Space Invaders', WHITE, 164, 155)
self.titleText2 = Text(FONT, 25, 'Press any key to continue', WHITE,
201, 225)
self.pauseText = Text(FONT, 42, 'Quantum Circuit Composer', WHITE, 50, 155)
self.pauseText2 = Text(FONT, 25, 'Press ENTER key to continue', WHITE,
164, 225)
self.gameOverText = Text(FONT, 50, 'Game Over', WHITE, 250, 270)
self.nextRoundText = Text(FONT, 50, 'Next Round', WHITE, 240, 270)
self.enemy1Text = Text(FONT, 25, ' = 10 pts', GREEN, 368, 270)
self.enemy2Text = Text(FONT, 25, ' = 20 pts', BLUE, 368, 320)
self.enemy3Text = Text(FONT, 25, ' = 30 pts', PURPLE, 368, 370)
self.enemy4Text = Text(FONT, 25, ' = ?????', RED, 368, 420)
self.scoreText = Text(FONT, 20, 'Score', WHITE, 5, 5)
self.livesText = Text(FONT, 20, 'Lives ', WHITE, 640, 5)
self.life1 = Life(715, 3)
self.life2 = Life(742, 3)
self.life3 = Life(769, 3)
self.livesGroup = sprite.Group(self.life1, self.life2, self.life3)
self.shipPosition = 4
self.circuit_grid_model = CircuitGridModel(3, 10)
self.circuit_grid = CircuitGrid(0, SCREEN_HEIGHT , self.circuit_grid_model)
self.paused = False
#self.pause_bar = 0
#self.pause_ready = False
#self.pause_increment_time = 50
self.labels = Labels()
#self.pause_bar = PauseBar()
def reset(self, score):
self.player = ShipGroup(NUMBER_OF_SHIPS, self.shipPosition)
self.make_ships()
self.labels.initialize(self.player.position)
self.playerGroup = sprite.Group(self.player)
self.explosionsGroup = sprite.Group()
self.bullets = sprite.Group()
self.mysteryShip = Mystery()
self.mysteryGroup = sprite.Group(self.mysteryShip)
self.enemyBullets = sprite.Group()
self.make_enemies()
self.allSprites = sprite.Group(self.player, self.enemies,
self.livesGroup, self.mysteryShip)
self.keys = key.get_pressed()
self.timer = time.get_ticks()
self.noteTimer = time.get_ticks()
self.shipTimer = time.get_ticks()
self.score = score
self.create_audio()
self.makeNewShip = False
self.shipAlive = True
def make_blockers(self, number):
blockerGroup = sprite.Group()
for row in range(4):
for column in range(9):
blocker = Blocker(10, GREEN, row, column)
blocker.rect.x = 50 + (200 * number) + (column * blocker.width)
blocker.rect.y = BLOCKERS_POSITION + (row * blocker.height)
blockerGroup.add(blocker)
return blockerGroup
def create_audio(self):
mixer.init(48000, -16, 1, 1024)
mixer.music.load('sounds/SG.mp3')
mixer.music.play()
mixer.music.set_volume(0.35)
self.sounds = {}
for sound_name in ['shoot', 'shoot2', 'invaderkilled', 'mysterykilled',
'shipexplosion']:
self.sounds[sound_name] = mixer.Sound(
SOUND_PATH + '{}.wav'.format(sound_name))
self.sounds[sound_name].set_volume(0.2)
self.musicNotes = [mixer.Sound(SOUND_PATH + '{}.wav'.format(i)) for i
in range(4)]
for sound in self.musicNotes:
sound.set_volume(0.5)
self.noteIndex = 0
def play_main_music(self, currentTime):
if currentTime - self.noteTimer > self.enemies.moveTime:
self.note = self.musicNotes[self.noteIndex]
if self.noteIndex < 3:
self.noteIndex += 1
else:
self.noteIndex = 0
self.note.play()
self.noteTimer += self.enemies.moveTime
@staticmethod
def should_exit(evt):
# type: (pygame.event.EventType) -> bool
return evt.type == QUIT or (evt.type == KEYUP and evt.key == K_ESCAPE)
def get_probability_amplitudes(self, circuit, qubit_num, shot_num):
backend_sv_sim = BasicAer.get_backend('statevector_simulator')
job_sim = execute(circuit, backend_sv_sim, shots=shot_num)
result_sim = job_sim.result()
quantum_state = result_sim.get_statevector(circuit, decimals=3)
return quantum_state
def get_measurement(self, circuit, qubit_num, shot_num):
backend_sv_sim = BasicAer.get_backend('qasm_simulator')
cr = ClassicalRegister(qubit_num)
measure_circuit = deepcopy(circuit) # make a copy of circuit
measure_circuit.add_register(cr) # add classical registers for measurement readout
measure_circuit.measure(measure_circuit.qregs[0], measure_circuit.cregs[0])
job_sim = execute(measure_circuit, backend_sv_sim, shots=shot_num)
result_sim = job_sim.result()
counts = result_sim.get_counts(circuit)
return int(list(counts.keys())[0], 2)
def check_input(self):
self.keys = key.get_pressed()
for e in event.get():
if self.should_exit(e):
sys.exit()
"""
if e.type == KEYDOWN:
if e.key == K_SPACE:
if len(self.bullets) == 0 and self.shipAlive:
if self.score < 1000:
self.player.fire()
# bullet = Bullet(self.player.rect.x + 23,
# self.player.rect.y + 5, -1,
# 15, 'laser', 'center')
# self.bullets.add(bullet)
# self.allSprites.add(self.bullets)
# self.sounds['shoot'].play()
# else:
# leftbullet = Bullet(self.player.rect.x + 8,
# self.player.rect.y + 5, -1,
# 15, 'laser', 'left')
# rightbullet = Bullet(self.player.rect.x + 38,
# self.player.rect.y + 5, -1,
# 15, 'laser', 'right')
# self.bullets.add(leftbullet)
# self.bullets.add(rightbullet)
# self.allSprites.add(self.bullets)
# self.sounds['shoot2'].play()
"""
if e.type == KEYDOWN:
if e.key == K_ESCAPE:
self.running = False
elif e.key == K_RETURN:
self.paused = not(self.paused)
elif not self.paused:
if e.key == K_SPACE:
if len(self.bullets) == 0 and self.shipAlive:
self.player.fire()
elif e.key == K_o:
self.player.state = ShipState.SUPERPOSITION
if self.player.position >= 0:
self.player.position = (self.player.position - 1) % 8
self.player.update(self.keys)
elif e.key == K_p:
self.player.state = ShipState.SUPERPOSITION
if self.player.position <= 7:
self.player.position = (self.player.position + 1) % 8
self.player.update(self.keys)
else:
if e.key == K_a:
self.circuit_grid.move_to_adjacent_node(MOVE_LEFT)
elif e.key == K_d:
self.circuit_grid.move_to_adjacent_node(MOVE_RIGHT)
elif e.key == K_w:
self.circuit_grid.move_to_adjacent_node(MOVE_UP)
elif e.key == K_s:
self.circuit_grid.move_to_adjacent_node(MOVE_DOWN)
elif e.key == K_x:
self.circuit_grid.handle_input_x()
elif e.key == K_y:
self.circuit_grid.handle_input_y()
elif e.key == K_z:
self.circuit_grid.handle_input_z()
elif e.key == K_h:
self.circuit_grid.handle_input_h()
elif e.key == K_BACKSPACE:
self.circuit_grid.handle_input_delete()
elif e.key == K_c:
# Add or remove a control
self.circuit_grid.handle_input_ctrl()
elif e.key == K_UP:
# Move a control qubit up
self.circuit_grid.handle_input_move_ctrl(MOVE_UP)
elif e.key == K_DOWN:
# Move a control qubit down
self.circuit_grid.handle_input_move_ctrl(MOVE_DOWN)
elif e.key == K_LEFT:
# Rotate a gate
self.circuit_grid.handle_input_rotate(-np.pi / 8)
elif e.key == K_RIGHT:
# Rotate a gate
self.circuit_grid.handle_input_rotate(np.pi / 8)
circuit = self.circuit_grid.circuit_grid_model.compute_circuit()
state = self.get_probability_amplitudes(circuit, 3, 100)
self.player.update_probabilities(state)
self.circuit_grid.draw(self.screen)
self.player.update(self.keys)
display.flip()
def make_enemies(self):
enemies = EnemiesGroup(10, 5)
for row in range(5):
for column in range(10):
enemy = Enemy(row, column)
enemy.rect.x = 157 + (column * 50)
enemy.rect.y = self.enemyPosition + (row * 45)
enemies.add(enemy)
self.enemies = enemies
def make_ships(self):
ships = ShipGroup(NUMBER_OF_SHIPS, self.shipPosition)
for i in range(NUMBER_OF_SHIPS):
ship = Ship(i)
ships.add(ship)
ships.update([])
self.player = ships
circuit = self.circuit_grid.circuit_grid_model.compute_circuit()
state = self.get_probability_amplitudes(circuit, 3, 100)
self.player.update_probabilities(state)
def make_enemies_shoot(self):
if (time.get_ticks() - self.timer) > 700 and self.enemies:
enemy = self.enemies.random_bottom()
self.enemyBullets.add(
Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5,
'enemylaser', 'center'))
self.allSprites.add(self.enemyBullets)
self.timer = time.get_ticks()
def calculate_score(self, row):
scores = {0: 30,
1: 20,
2: 20,
3: 10,
4: 10,
5: choice([50, 100, 150, 300])
}
score = scores[row]
self.score += score
return score
def create_main_menu(self):
self.enemy1 = IMAGES['enemy3_1']
self.enemy1 = transform.scale(self.enemy1, (40, 40))
self.enemy2 = IMAGES['enemy2_2']
self.enemy2 = transform.scale(self.enemy2, (40, 40))
self.enemy3 = IMAGES['enemy1_2']
self.enemy3 = transform.scale(self.enemy3, (40, 40))
self.enemy4 = IMAGES['mystery']
self.enemy4 = transform.scale(self.enemy4, (80, 40))
self.screen.blit(self.enemy1, (318, 270))
self.screen.blit(self.enemy2, (318, 320))
self.screen.blit(self.enemy3, (318, 370))
self.screen.blit(self.enemy4, (299, 420))
def check_collisions(self):
sprite.groupcollide(self.bullets, self.enemyBullets, True, True)
col = sprite.groupcollide(self.enemies, self.bullets, False, True)
for enemy in col.keys():
self.sounds['invaderkilled'].play()
enemy.health -= col[enemy][0].damage
if enemy.health <= 0:
enemy.kill()
self.calculate_score(enemy.row)
EnemyExplosion(enemy, self.explosionsGroup)
self.gameTimer = time.get_ticks()
for mystery in sprite.groupcollide(self.mysteryGroup, self.bullets,
True, True).keys():
mystery.mysteryEntered.stop()
self.sounds['mysterykilled'].play()
score = self.calculate_score(mystery.row)
MysteryExplosion(mystery, score, self.explosionsGroup)
newShip = Mystery()
self.allSprites.add(newShip)
self.mysteryGroup.add(newShip)
collision_handled = False
circuit = self.circuit_grid.circuit_grid_model.compute_circuit()
state = self.get_measurement(circuit, 3, 1)
hits = sprite.groupcollide(self.playerGroup, self.enemyBullets,
False, True)
self.player.measuring = False
if self.player.state == ShipState.SUPERPOSITION:
for ship in hits:
if ship.probability > 0.0:
self.player.measure(state)
if state == ship.id: # quantum case
if not collision_handled:
if self.life3.alive():
self.life3.kill()
elif self.life2.alive():
self.life2.kill()
elif self.life1.alive():
self.life1.kill()
else:
self.gameOver = True
self.startGame = False
self.sounds['shipexplosion'].play()
self.player.explode_ships(self.explosionsGroup, ship.id)
self.makeNewShip = True
self.shipPosition = self.player.position
self.shipTimer = time.get_ticks()
self.shipAlive = False
collision_handled = True
elif self.player.state == ShipState.MEASURED:
for ship in hits:
print("collision detected, state is MEASURED")
if not collision_handled:
if self.life3.alive():
self.life3.kill()
elif self.life2.alive():
self.life2.kill()
elif self.life1.alive():
self.life1.kill()
else:
self.gameOver = True
self.startGame = False
self.sounds['shipexplosion'].play()
self.player.explode_ships(self.explosionsGroup, -1)
self.makeNewShip = True
self.shipPosition = self.player.position
self.shipTimer = time.get_ticks()
self.shipAlive = False
collision_handled = True
# for ship in hits:
# if not self.player.measuring and ship.probability > 0.0:
# self.player.measure(state)
# self.player.measuring = True
if self.enemies.bottom >= 540:
sprite.groupcollide(self.enemies, self.playerGroup, True, True)
if not self.player.alive() or self.enemies.bottom >= 600:
self.gameOver = True
self.startGame = False
sprite.groupcollide(self.bullets, self.allBlockers, True, True)
sprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)
if self.enemies.bottom >= BLOCKERS_POSITION:
sprite.groupcollide(self.enemies, self.allBlockers, False, True)
def create_new_ship(self, createShip, currentTime):
if createShip and (currentTime - self.shipTimer > 900):
self.player = ShipGroup(NUMBER_OF_SHIPS, self.shipPosition)
self.make_ships()
self.labels.initialize(self.player.position)
self.allSprites.add(self.player)
self.playerGroup.add(self.player)
self.makeNewShip = False
self.shipAlive = True
elif createShip and (currentTime - self.shipTimer > 300):
if self.player.measured_ship:
self.player.measured_ship.kill()
def create_game_over(self, currentTime):
self.screen.blit(self.background, (0, 0))
passed = currentTime - self.timer
if passed < 750:
self.gameOverText.draw(self.screen)
elif 750 < passed < 1500:
self.screen.blit(self.background, (0, 0))
elif 1500 < passed < 2250:
self.gameOverText.draw(self.screen)
elif 2250 < passed < 2750:
self.screen.blit(self.background, (0, 0))
elif passed > 3000:
self.mainScreen = True
for e in event.get():
if self.should_exit(e):
sys.exit()
def main(self):
while True:
if self.mainScreen:
self.screen.blit(self.background, (0, 0))
self.titleText.draw(self.screen)
self.titleText2.draw(self.screen)
self.enemy1Text.draw(self.screen)
self.enemy2Text.draw(self.screen)
self.enemy3Text.draw(self.screen)
self.enemy4Text.draw(self.screen)
self.create_main_menu()
for e in event.get():
if self.should_exit(e):
sys.exit()
if e.type == KEYUP:
# Only create blockers on a new game, not a new round
self.allBlockers = sprite.Group(self.make_blockers(0),
self.make_blockers(1),
self.make_blockers(2),
self.make_blockers(3))
self.livesGroup.add(self.life1, self.life2, self.life3)
self.reset(0)
self.startGame = True
self.mainScreen = False
elif self.startGame:
if not self.enemies and not self.explosionsGroup and not self.paused:
currentTime = time.get_ticks()
if currentTime - self.gameTimer < 3000:
self.screen.blit(self.background, (0, 0))
self.scoreText2 = Text(FONT, 20, str(self.score),
GREEN, 85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.nextRoundText.draw(self.screen)
self.livesText.draw(self.screen)
#self.circuit_grid.draw(self.screen)
# self.player.draw(self.screen)
self.livesGroup.update()
self.check_input()
self.labels.update(self.screen, self.player.position)
if currentTime - self.gameTimer > 3000:
# Move enemies closer to bottom
self.enemyPosition += ENEMY_MOVE_DOWN
self.reset(self.score)
self.gameTimer += 3000
else:
self.screen.blit(self.background, (0, 0))
self.allBlockers.update(self.screen)
self.scoreText2 = Text(FONT, 20, str(self.score), GREEN,
85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.livesText.draw(self.screen)
#self.circuit_grid.draw(self.screen)
self.labels.update(self.screen, self.player.position)
# self.player.draw(self.screen)
self.check_input()
if not self.paused:
currentTime = time.get_ticks()
self.play_main_music(currentTime)
self.enemies.update(currentTime)
self.make_enemies_shoot()
self.allSprites.update(self.keys, currentTime)
self.explosionsGroup.update(currentTime)
self.check_collisions()
self.create_new_ship(self.makeNewShip, currentTime)
else:
self.pauseText.draw(self.screen)
self.pauseText2.draw(self.screen)
self.circuit_grid.draw(self.screen)
self.player.update(self.keys)
# self.ships.update(self.keys)
elif self.gameOver:
currentTime = time.get_ticks()
# Reset enemy starting position
self.enemyPosition = ENEMY_DEFAULT_POSITION
self.create_game_over(currentTime)
display.update()
self.clock.tick(60)
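# --- Illustrative sketch (not part of the original game) -------------------
# get_probability_amplitudes() above returns a statevector, and ShipGroup
# squares the amplitude moduli to turn it into the per-slot probabilities of
# the 8 ship positions. The standalone function below restates that conversion
# with a plain Hadamard circuit, assuming the same legacy Qiskit API
# (BasicAer / execute) imported at the top of this file; the name
# `demo_ship_probabilities` is hypothetical and the function is never called.
def demo_ship_probabilities():
    from qiskit import QuantumCircuit
    qc = QuantumCircuit(3)
    qc.h(0)  # equal superposition of |000> and |001> in Qiskit's ordering
    backend = BasicAer.get_backend('statevector_simulator')
    state = execute(qc, backend).result().get_statevector(qc)
    # probability of each of the 8 basis states = |amplitude|^2
    return [abs(a) ** 2 for a in state]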
if __name__ == '__main__':
game = SpaceInvaders()
game.main()
| 39.567488
| 122
| 0.542218
|
8a877d4a63e2a8d989747b24ad4b8d87079d291a
| 6,413
|
py
|
Python
|
self_finance/back_end/insights/image_registry.py
|
MaksimDan/self-finance
|
788fe067e6814eacde65ec2b9c7122826a61a89b
|
[
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"X11",
"MS-PL"
] | null | null | null |
self_finance/back_end/insights/image_registry.py
|
MaksimDan/self-finance
|
788fe067e6814eacde65ec2b9c7122826a61a89b
|
[
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"X11",
"MS-PL"
] | null | null | null |
self_finance/back_end/insights/image_registry.py
|
MaksimDan/self-finance
|
788fe067e6814eacde65ec2b9c7122826a61a89b
|
[
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"X11",
"MS-PL"
] | null | null | null |
import datetime
import logging
from io import StringIO
from threading import BoundedSemaphore
from threading import Lock
from threading import Thread
from self_finance.back_end.date_range import DateRange
from self_finance.back_end.insights._plot import _Plot
from self_finance.back_end.plot_cache import PlotCache
from self_finance.constants import BankSchema
from self_finance.constants import Visuals
logger = logging.getLogger(__name__)
class ImageRegistry:
"""
Interaction class that maps human-friendly plot names and parameters
to the corresponding _Plot functions.
"""
__global_lock = Lock()
_plot_interface = {
'Income vs Expenses Over Time': {
'func': _Plot.income_vs_expenses_over_time,
'supported_plots': {'line', 'bar', 'violin'},
'figsize': (11, 8)
},
'Income by Category': {
'func': _Plot.income_by_category,
'supported_plots': {'line', 'bar', 'violin'},
'figsize': (11, 5)
},
'Expenses by Category': {
'func': _Plot.expenses_by_category,
'supported_plots': {'line', 'bar', 'violin'},
'figsize': (11, 5)
},
'Frequency of Transactions by Category': {
'func': _Plot.transactional_frequency_over_time,
'supported_plots': {'line', 'bar'},
'figsize': (11, 5)
},
'Income by Month': {
'func': _Plot.income_by_month,
'supported_plots': {'bar'},
'figsize': (11, 5)
},
'Expenses by Month': {
'func': _Plot.expenses_by_month,
'supported_plots': {'bar'},
'figsize': (11, 5)
},
'Spending Heatmap': {
'func': _Plot.spending_heatmap,
}
}
@staticmethod
def _get_plot_interface_keys():
return ImageRegistry._plot_interface.keys()
@staticmethod
def get_supported_plots(plot_key):
try:
return ImageRegistry._plot_interface[plot_key]['supported_plots']
except KeyError:
return None
@staticmethod
def get_plot_func(plot_key):
try:
return ImageRegistry._plot_interface[plot_key]['func']
except KeyError:
return None
@staticmethod
def get_plot_figsize(plot_key):
try:
return ImageRegistry._plot_interface[plot_key]['figsize']
except KeyError:
return None
@staticmethod
def get_all_plot_ids():
plot_ids = []
for plt_basis in ImageRegistry._get_plot_interface_keys():
support_plot_types = ImageRegistry.get_supported_plots(plt_basis)
# logic in place to support plots that don't have types like bar, violin, etc
if support_plot_types:
for plt_type in support_plot_types:
plot_ids.append(ImageRegistry._make_plot_key_title(plt_basis, plt_type))
else:
plot_ids.append(plt_basis)
return sorted(plot_ids)
@staticmethod
def plot_all(plot_ids, df, start_date, end_date):
threads = []
df = df.sort_values(by=BankSchema.SCHEMA_BANK_DATE.name)
semaphor_pool = BoundedSemaphore(value=Visuals.PLOT_MAX_THREADS)
logging.info(f'Beginning plotting using {Visuals.PLOT_MAX_THREADS} threads.')
for plt_id in plot_ids:
    semaphor_pool.acquire()
    # release the slot only when the worker finishes, so that the semaphore
    # actually bounds the number of concurrently running plot threads
    def _run(plt_id=plt_id):
        try:
            ImageRegistry._plot(plt_id, start_date, end_date, df)
        finally:
            semaphor_pool.release()
    t = Thread(target=_run)
    threads.append(t)
    t.start()
# wait for all of them to finish
for x in threads:
x.join()
@staticmethod
def _plot(plt_id, start_date, end_date, df):
# the logic here is in place to also handle the case where we want to plot the heatmap
plot_branch = [s.strip() for s in plt_id.split('-')]
is_matplotlib = len(plot_branch) == 2
if is_matplotlib:
plot_basis, plot_type = plot_branch
plot_type = plot_type.lower()
ImageRegistry.__plot(plot_basis, start_date, end_date, plot_type, df)
else:
plot_basis = plot_branch[0].strip()
ImageRegistry.__plot(plot_basis, start_date, end_date)
@staticmethod
def __plot(plot_basis, start_date, end_date, plot_type=None, df=None):
# check whether this same plot has already been generated before
kwargs = {}
now = datetime.datetime.now().strftime(BankSchema.DATE_FORMAT2)
title = ImageRegistry._make_plot_key_title(plot_basis, plot_type)
plot_cache_result = PlotCache.hit(title, start_date, end_date, now)
if plot_cache_result is None:
logging.info(f'Plot cache miss for plot: {title}, replotting.')
if plot_type and plot_type not in ImageRegistry.get_supported_plots(plot_basis):
raise KeyError(f"Provided plot type {plot_type} is not supported.")
kwargs['plot_type'] = plot_type
kwargs['title'] = title
kwargs['figsize'] = ImageRegistry.get_plot_figsize(plot_basis)
with ImageRegistry.__global_lock:
if df is not None:
fig_or_html = ImageRegistry.get_plot_func(plot_basis)(df, **kwargs)
else:
# heatmap
fig_or_html = ImageRegistry.get_plot_func(plot_basis)(DateRange(start_date, end_date), **kwargs)
# is figure
if fig_or_html is not None and not isinstance(fig_or_html, str):
stream_reader = StringIO()
fig_or_html.savefig(stream_reader, format='svg', bbox_inches='tight')
stream_reader.seek(0)
html = stream_reader.getvalue()
PlotCache.add_cache_miss(title, start_date, end_date, now, html)
elif fig_or_html is None:
logging.warning(f'Ignoring plot for {plot_basis}.')
else:
# heat-map plot
PlotCache.add_cache_miss(title, start_date, end_date, now, fig_or_html)
else:
logging.info(f'Plot cache hit for plot: {title}, ignoring replotting.')
@staticmethod
def _make_plot_key_title(plot_basis, plot_type):
if plot_type is None:
return plot_basis
else:
return f"{plot_basis} - {plot_type.capitalize()}"
| 38.401198
| 116
| 0.614377
|
e478d460f9f1df16455260026b2002f822406c48
| 2,425
|
py
|
Python
|
common/src/stack/command/stack/commands/report/host/resolv/__init__.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/report/host/resolv/__init__.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/report/host/resolv/__init__.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
class Command(stack.commands.report.host.command):
"""
Generate the /etc/resolv.conf for a host
<arg optional='0' repeat='1' type='string' name='host'>
Host name of machine
</arg>
"""
def outputResolv(self, host):
zones = {}
dns = {}
for row in self.call('list.network'):
zones[row['network']] = row['zone']
dns[row['network']] = row['dns']
search = []
# The default search path should always have the
# host's default network first in the list; after
# that, go by whatever ordering list.network returns.
for intf in self.call('list.host.interface', [host, 'expanded=True']):
if intf['default'] is True and intf['zone']:
search.append(intf['zone'])
for zone in zones.values():
if zone and zone not in search:
search.append(zone)
if search:
self.addOutput(host, 'search %s' % ' '.join(search))
#
# If the default network is 'public' use the
# public DNS rather than the server on the boss.
#
# or
#
#
# if any network has 'dns' set to true, then the frontend
# is serving DNS for that network, so make sure the
# frontend is listed as the first DNS server, then list
# the public DNS server. The IP address of the DNS server
# should be the one on the network that serves out
# DNS. Not the primary network of the frontend.
#
for row in self.call('list.host.interface', [ host ]):
network = row['network']
if network in dns and dns[network]:
frontend = self.getHostAttr(host, 'Kickstart_PrivateAddress')
for intf in self.call('list.host.interface', ['localhost']):
if intf['network'] == network:
frontend = intf['ip']
self.addOutput(host, 'nameserver %s' % frontend)
break
remotedns = self.getHostAttr(host, 'Kickstart_PublicDNSServers')
if not remotedns:
remotedns = self.getHostAttr(host, 'Kickstart_PrivateDNSServers')
if remotedns:
servers = remotedns.split(',')
for server in servers:
self.addOutput(host, 'nameserver %s' % server.strip())
def run(self, params, args):
self.beginOutput()
hosts = self.getHostnames(args)
for host in hosts:
osname = self.db.getHostOS(host)
self.runImplementation(osname, [host])
self.endOutput(padChar='', trimOwner=True)
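# --- Illustrative output sketch (not part of the original command) ---------
# For a host whose networks have zones 'local' and 'example.org', whose
# private network has dns enabled, and whose Kickstart_PublicDNSServers
# attribute lists one public resolver, the generated resolv.conf would look
# roughly like the following (all values are hypothetical placeholders):
#
#     search local example.org
#     nameserver 10.1.1.1
#     nameserver 8.8.8.8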
| 27.247191
| 72
| 0.677938
|
f6ef3ebb291025a3dbd4d203d96f08188a6b88a6
| 3,971
|
py
|
Python
|
manualLabelCv.py
|
hz-ants/6d-annotator
|
d9502aa7469bc7edfea05a6303cf11b1f31f0417
|
[
"MIT"
] | 13
|
2019-09-10T01:44:56.000Z
|
2022-03-12T04:07:02.000Z
|
manualLabelCv.py
|
hz-ants/6d-annotator
|
d9502aa7469bc7edfea05a6303cf11b1f31f0417
|
[
"MIT"
] | 1
|
2022-03-07T14:19:31.000Z
|
2022-03-07T14:19:31.000Z
|
manualLabelCv.py
|
hz-ants/6d-annotator
|
d9502aa7469bc7edfea05a6303cf11b1f31f0417
|
[
"MIT"
] | 2
|
2019-10-18T06:28:59.000Z
|
2020-07-09T19:03:05.000Z
|
import argparse
import cv2
import os
from utils import *
from matrixUtils import *
from BoundingBox import BoundingBox
# Settings
refPt = []
files = []
file_counter = 0
angle_max = 720000000
angle_full_range = 6.4
t_range = 320
t_grnlrty = 160
t_z_grnlrty = 200
image = None
clone = None
# Variables dependent on the settings
t_start = int(t_range / 2)
angle_subdiv = angle_max/angle_full_range
def move_bounding_box(save=False):
global bb_calc, image, clone, a_x, a_y, a_z, t_x, t_y, t_z
R = eulerAnglesToRotationMatrix([a_x/(angle_subdiv),a_y/(angle_subdiv),a_z/(angle_subdiv)])
t = np.array([((t_x-(t_range/2))/t_grnlrty,(t_y-(t_range/2))/t_grnlrty,t_z/t_z_grnlrty)], dtype=float).T
image_tmp = clone.copy()
image = bb_calc.draw_on_img(image_tmp, R, t, save=save)
def click_and_crop(event, x, y, flags, param):
# grab references to the global variables
global refPt, image, clone, a_x, a_y, a_z, t_x, t_y, t_z
# if the left mouse button was clicked, record the clicked
# (x, y) point and hand it to the bounding box estimator to
# update the current pose estimate
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
result = bb_calc.add_clicked_point(np.array([x, y]))
if (result is not None):
angles = (result[0] * angle_subdiv).astype(int)
t = (result[1] * t_grnlrty + t_start).astype(int)
t_x = t[0][0]
t_y = t[1][0]
t_z = abs(int(result[1][2][0] * t_z_grnlrty))
cv2.setTrackbarPos('t_x', 'image', t_x)
cv2.setTrackbarPos('t_y', 'image', t_y)
cv2.setTrackbarPos('t_z', 'image', t_z)
a_x = angles[0]
if a_x < 0:
print('ding')
a_x = angle_max + a_x
a_y = angles[1]
if a_y < 0:
print('ding')
a_y = angle_max + a_y
a_z = angles[2]
if a_z < 0:
print('ding')
a_z = angle_max + a_z
cv2.setTrackbarPos('R_x', 'image', a_x)
cv2.setTrackbarPos('R_y', 'image', a_y)
cv2.setTrackbarPos('R_z', 'image', a_z)
move_bounding_box()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--imagesFolder", required=True, help="Path to the image")
ap.add_argument("-m", "--mesh", required=True, help="Path to the image")
args = vars(ap.parse_args())
bb_calc = BoundingBox(args["mesh"])
# load the image, clone it, and setup the mouse callback function
files = sorted(os.listdir(args["imagesFolder"]))
if os.path.exists('./labels'):
file_counter = len(sorted(os.listdir("./labels")))
if (len(files) <= file_counter):
print('All images seem to have labels. Exiting...')
print('If this is not the case empty the "labels" folder and start again.')
exit()
image = cv2.imread(os.path.join(args["imagesFolder"], files[file_counter]))
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
cv2.createTrackbar('t_x','image',t_start,t_range,move_bounding_box)
cv2.createTrackbar('t_y','image',t_start,t_range,move_bounding_box)
cv2.createTrackbar('t_z','image',0,t_range,move_bounding_box)
cv2.createTrackbar('R_x','image',0,angle_max,move_bounding_box)
cv2.createTrackbar('R_y','image',0,angle_max,move_bounding_box)
cv2.createTrackbar('R_z','image',0,angle_max,move_bounding_box)
# keep looping until the 'q' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
t_x = cv2.getTrackbarPos('t_x','image')
t_y = cv2.getTrackbarPos('t_y','image')
t_z = cv2.getTrackbarPos('t_z','image')
a_x = cv2.getTrackbarPos('R_x','image')
a_y = cv2.getTrackbarPos('R_y','image')
a_z = cv2.getTrackbarPos('R_z','image')
if key == ord("n"):
move_bounding_box(save=True)
file_counter = len(sorted(os.listdir("./labels")))
if (len(files) <= file_counter):
break
else:
image = cv2.imread(os.path.join(args["imagesFolder"], files[file_counter]))
clone = image.copy()
move_bounding_box()
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
break
# close all open windows
cv2.destroyAllWindows()
| 30.782946
| 105
| 0.699824
|
235c36889a0952a2232065573e4d5bf176a595b0
| 3,499
|
py
|
Python
|
Testing/cryptographic/crypto.py
|
taoluwork/bcai
|
6097ee88e11f99824a251f2c2a65b2656b75a5de
|
[
"Apache-2.0"
] | 1
|
2020-03-05T20:11:31.000Z
|
2020-03-05T20:11:31.000Z
|
Testing/cryptographic/crypto.py
|
taoluwork/bcai
|
6097ee88e11f99824a251f2c2a65b2656b75a5de
|
[
"Apache-2.0"
] | 7
|
2020-04-06T01:38:09.000Z
|
2022-01-22T11:47:18.000Z
|
Testing/cryptographic/crypto.py
|
taoluwork/bcai
|
6097ee88e11f99824a251f2c2a65b2656b75a5de
|
[
"Apache-2.0"
] | 3
|
2019-08-14T14:17:22.000Z
|
2019-09-19T19:04:40.000Z
|
#import cryptography
import getpass
import sys
import cryptography.hazmat.backends as backends
import cryptography.hazmat.primitives.asymmetric.rsa as rsa
import cryptography.hazmat.primitives.serialization as serial
import cryptography.hazmat.primitives.hashes as hashes
import cryptography.hazmat.primitives as primitives
import cryptography.hazmat.primitives.asymmetric.padding as padding
class crypto:
def generate(self, passW):
keyPair = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend= backends.default_backend()
)
privatePem = keyPair.private_bytes(
encoding=serial.Encoding.PEM,
format=serial.PrivateFormat.PKCS8,
encryption_algorithm=serial.BestAvailableEncryption(passW.encode())
)
publicPem = keyPair.public_key().public_bytes(
serial.Encoding.PEM,
serial.PublicFormat.SubjectPublicKeyInfo
)
privateFile = open("privKey.txt", "w")
publicFile = open("pubKey.txt", "w")
privateFile.write(privatePem.decode())
publicFile.write(publicPem.decode())
def encrypt(self, message="",mode=0):
#mode 0 = string
#mode 1 = file
publicFile = None
pubKey = None
outMess = None
publicFile = open("pubKey.txt", 'rb')
pubKey = serial.load_pem_public_key(
publicFile.read(),
backend=backends.default_backend()
)
if mode == 0:
return pubKey.encrypt(
message.encode(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
enc = pubKey.encrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(enc)
return(message)
def decrypt(self, message="",mode=0, passW=""):
#mode 0 = string
#mode 1 = file
privateFile = None
privKey = None
privateFile = open("privKey.txt", 'rb')
privKey = serial.load_pem_private_key(
privateFile.read(),
password=passW.encode(),
backend=backends.default_backend()
)
if mode == 0:
return privKey.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
if mode == 1:
dec = privKey.decrypt(
open(message, 'rb').read(),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
open(message, "wb").write(dec)
return message
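# Minimal round-trip sketch for the string mode (mode=0); it assumes generate()
# has already written privKey.txt/pubKey.txt to the working directory and the
# password literal is illustrative:
#
#   c = crypto()
#   c.generate("example-password")
#   ciphertext = c.encrypt("hello", mode=0)
#   plaintext = c.decrypt(ciphertext, mode=0, passW="example-password")
#   assert plaintext.decode() == "hello"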
password= ""
if len(sys.argv) < 2:
password = getpass.getpass("password->")
else:
password = sys.argv[1]
print(password)
cry = crypto()
cry.generate(password)
encrypted = cry.encrypt("image.zip",1)
decrypted = cry.decrypt(encrypted, 1 , password)
print(decrypted)
| 31.522523
| 75
| 0.547299
|
825812f83c270a0776563ea1eeb97d10111cb2b5
| 4,902
|
py
|
Python
|
benchmarks/benchmark.py
|
lorentzenchr/formulaic
|
678c91640475c7da4d45f9cde4dac2896e8264dd
|
[
"MIT"
] | 95
|
2020-07-06T17:39:03.000Z
|
2022-03-28T13:50:01.000Z
|
benchmarks/benchmark.py
|
lorentzenchr/formulaic
|
678c91640475c7da4d45f9cde4dac2896e8264dd
|
[
"MIT"
] | 60
|
2020-07-06T17:38:51.000Z
|
2022-03-26T21:40:41.000Z
|
benchmarks/benchmark.py
|
lorentzenchr/formulaic
|
678c91640475c7da4d45f9cde4dac2896e8264dd
|
[
"MIT"
] | 8
|
2020-07-18T23:54:46.000Z
|
2022-01-19T20:08:44.000Z
|
import functools
import os
import sys
import time
from collections import namedtuple
import formulaic
import numpy
import pandas
import patsy
from formulaic import Formula
from uncertainties import ufloat
ALL_TOOLINGS = ['patsy', 'formulaic', 'formulaic_sparse', 'R', 'R_sparse']
formulas = {
"a": ALL_TOOLINGS,
"A": ALL_TOOLINGS,
"a+A": ALL_TOOLINGS,
"a:A": ALL_TOOLINGS,
"A+B": ALL_TOOLINGS,
"a:A:B": ALL_TOOLINGS,
"A:B:C:D": ALL_TOOLINGS,
"a*b*A*B": ALL_TOOLINGS,
"a*b*c*A*B*C": ALL_TOOLINGS,
}
# Utility libraries
TimedResult = namedtuple("TimedResult", ['times', 'mean', 'stderr'])
def timed_func(func, min_repetitions=7, max_time=20, get_time=None):
@functools.wraps(func)
def wrapper(*args, **kwargs):
times = []
start = time.time()
while len(times) < min_repetitions and time.time() - start < max_time:
f_start = time.time()
result = func(*args, **kwargs)
f_time = time.time() - f_start
if get_time:
f_time = get_time(result, f_time)
del result
times.append(f_time)
return TimedResult(times, numpy.mean(times), numpy.std(times))
return wrapper
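# Minimal usage sketch for timed_func (the wrapped workload is illustrative):
#
#   @timed_func
#   def dummy_workload():
#       return sum(range(10000))
#
#   res = dummy_workload()   # TimedResult(times=[...], mean=..., stderr=...)
#   print("%.4f +/- %.4f s over %d runs" % (res.mean, res.stderr, len(res.times)))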
# Generate data for benchmarks
s = 1000000
df = pandas.DataFrame({
'A': ['a', 'b', 'c'] * s,
'B': ['d', 'e', 'f'] * s,
'C': ['g', 'h', 'i'] * s,
'D': ['j', 'k', 'l'] * s,
'a': numpy.random.randn(3 * s),
'b': numpy.random.randn(3 * s),
'c': numpy.random.randn(3 * s),
'd': numpy.random.randn(3 * s),
})
df.head()
@timed_func
def time_patsy(formula):
return patsy.dmatrix(formula, df)
@timed_func
def time_formulaic(formula):
return Formula(formula).get_model_matrix(df)
@timed_func
def time_formulaic_sparse(formula):
return Formula(formula).get_model_matrix(df, sparse=True)
toolings = {
'patsy': time_patsy,
'formulaic': time_formulaic,
'formulaic_sparse': time_formulaic_sparse,
}
try:
import rpy2
import rpy2.robjects as robjs
R_VERSION = rpy2.situation.r_version_from_subprocess()
R_MATRIX_VERSION = '.'.join(str(i) for i in robjs.r("packageVersion('Matrix')")[0])
robjs.r("""
library(Matrix)
library(glue)
s <- 1000000
df <- data.frame(
"A"=rep(c('a', 'b', 'c'), s),
"B"=rep(c('d', 'e', 'f'), s),
"C"=rep(c('g', 'h', 'i'), s),
"D"=rep(c('j', 'k', 'l'), s),
"a"=rnorm(3*s),
"b"=rnorm(3*s),
"c"=rnorm(3*s),
"d"=rnorm(3*s)
)
""")
time_R = timed_func(robjs.r("""
function (formula) {
start_time <- Sys.time()
model.matrix(as.formula(glue("~ ", formula)), df)
end_time <- Sys.time()
difftime(end_time, start_time, units="secs")
}
"""), get_time=lambda result, time: result[0])
time_R_sparse = timed_func(robjs.r("""
function (formula) {
start_time <- Sys.time()
sparse.model.matrix(as.formula(glue("~ ", formula)), df)
end_time <- Sys.time()
difftime(end_time, start_time, units="secs")
}
"""), get_time=lambda result, time: result[0])
toolings.update({
'R': time_R,
'R_sparse': time_R_sparse,
})
except Exception as e:
R_VERSION = None
print(f"Could not set up R benchmarking functions. Error was: {repr(e)}.")
if __name__ == "__main__":
# Print package versions
PYTHON_VERSION = sys.version.split('\n')[0].strip()
print(
"version information\n"
f" python: {PYTHON_VERSION}\n"
f" formulaic: {formulaic.__version__}\n"
f" patsy: {patsy.__version__}\n"
f" pandas: {pandas.__version__}"
)
if R_VERSION:
print(
f" R: {R_VERSION}\n"
f" model.matrix: (inbuilt into R)\n"
f" Matrix (sparse.model.matrix): {R_MATRIX_VERSION}\n"
)
# Perform benchmarks
results = {}
for formula, config in formulas.items():
print(formula)
results[formula] = {}
for tooling, time_func in toolings.items():
result = time_func(formula) if tooling in config else TimedResult(None, numpy.nan, numpy.nan)
results[formula][tooling] = result
if not numpy.isnan(result.mean):
print(f" {tooling}: {ufloat(result.mean, result.stderr):.2uP} (mean of {len(result.times)})")
# Dump results into a csv file
rows = []
for formula, tooling_results in results.items():
for tooling, times in tooling_results.items():
rows.append({'formula': formula, 'tooling': tooling, 'mean': times.mean, 'stderr': times.stderr})
data = pandas.DataFrame(rows)
data.to_csv(os.path.join(os.path.dirname(__file__), 'benchmarks.csv'))
| 27.385475
| 112
| 0.572827
|
71d72cb0ec420585c619955ed4fc81dde2264a8f
| 929
|
py
|
Python
|
sandbox/bloom_count.py
|
maarten1983/khmer
|
417aaa57f0659685c01887a6910de1c08d0a73e5
|
[
"BSD-3-Clause"
] | 1
|
2019-11-02T15:12:44.000Z
|
2019-11-02T15:12:44.000Z
|
sandbox/bloom_count.py
|
ibest/khmer
|
fbc307abd64363b329745709846d77444ce0c025
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/bloom_count.py
|
ibest/khmer
|
fbc307abd64363b329745709846d77444ce0c025
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
# using bloom filter to count unique kmers
import khmer
import sys
import screed
from screed.fasta import fasta_iter
filename = sys.argv[1]
K = int(sys.argv[2]) # size of kmer
HT_SIZE = int(sys.argv[3]) # size of hashtable
N_HT = int(sys.argv[4]) # number of hashtables
ht = khmer.new_hashbits(K, HT_SIZE, N_HT)
n_unique = 0
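# Illustrative walk-through of the loop below: for sequence "ACGTAC" and K = 3
# the window positions are range(0, 6 + 1 - 3) = 0..3, yielding the 3-mers
# "ACG", "CGT", "GTA" and "TAC"; n_unique is incremented only the first time a
# k-mer is seen, because ht.get(kmer) is still false at that point.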
for n, record in enumerate(fasta_iter(open(filename))):
sequence = record['sequence']
seq_len = len(sequence)
for n in range(0, seq_len + 1 - K):
kmer = sequence[n:n + K]
if (not ht.get(kmer)):
n_unique += 1
ht.count(kmer)
print n_unique
print ht.n_occupied()
print ht.n_unique_kmers()
| 26.542857
| 74
| 0.684607
|
5ecb73f618065fa2e1b97e07cbdc6ad4a84724e4
| 214
|
py
|
Python
|
src/sysinfo/main.py
|
derekwu90/sysinfo
|
5cb64fa64b564cda78813da80e76da7a52288b88
|
[
"MIT"
] | null | null | null |
src/sysinfo/main.py
|
derekwu90/sysinfo
|
5cb64fa64b564cda78813da80e76da7a52288b88
|
[
"MIT"
] | null | null | null |
src/sysinfo/main.py
|
derekwu90/sysinfo
|
5cb64fa64b564cda78813da80e76da7a52288b88
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import platform
def get_platform_info():
return platform.platform()
def main():
print(platform.platform())
return
if __name__ == '__main__':
main()
| 14.266667
| 30
| 0.640187
|
99e39de7c285ccfdefd4ae3aa0d997436c4f7cb0
| 1,218
|
py
|
Python
|
app/utils/s3_helpers.py
|
alex-pezzati/sonic-fog
|
ccbfeb0b9d4c1ed9688eb2e9b79090a6855d7310
|
[
"PostgreSQL",
"Unlicense"
] | 3
|
2021-03-17T01:49:42.000Z
|
2021-03-23T03:05:23.000Z
|
app/utils/s3_helpers.py
|
alex-pezzati/Sound_Cloud
|
ccbfeb0b9d4c1ed9688eb2e9b79090a6855d7310
|
[
"PostgreSQL",
"Unlicense"
] | 40
|
2021-03-18T21:47:21.000Z
|
2021-03-26T16:45:25.000Z
|
app/utils/s3_helpers.py
|
alex-pezzati/sonic-fog
|
ccbfeb0b9d4c1ed9688eb2e9b79090a6855d7310
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
import boto3
import botocore
import os
import uuid
BUCKET_NAME = os.environ.get("S3_BUCKET")
S3_LOCATION = f"https://{BUCKET_NAME}.s3.amazonaws.com/"
ALLOWED_EXTENSIONS = {"pdf", "png", "jpg", "jpeg", "gif"}
s3 = boto3.client(
"s3",
aws_access_key_id=os.environ.get("S3_KEY"),
aws_secret_access_key=os.environ.get("S3_SECRET")
)
# checks for certain file extensions before proceeding
def allowed_file(filename):
return "." in filename and \
filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
# converts filename to a unique filename to prevent collisions
def get_unique_filename(filename):
ext = filename.rsplit(".", 1)[1].lower()
unique_filename = uuid.uuid4().hex
return f"{unique_filename}.{ext}"
# actual upload of file to aws s3
def upload_file_to_s3(file, acl="public-read"):
try:
s3.upload_fileobj(
file,
BUCKET_NAME,
file.filename,
ExtraArgs={
"ACL": acl,
"ContentType": file.content_type
}
)
except Exception as e:
# in case our S3 upload fails
return {"errors": str(e)}
return {"url": f"{S3_LOCATION}{file.filename}"}
| 26.478261
| 67
| 0.633826
|
a1e55c5253519961a0da8478da13ea28526129e6
| 4,918
|
py
|
Python
|
GPL/traffic_profiles/trex/trex-stl-2n3n-ethip4-ip4src254-1c4n.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
GPL/traffic_profiles/trex/trex-stl-2n3n-ethip4-ip4src254-1c4n.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
GPL/traffic_profiles/trex/trex-stl-2n3n-ethip4-ip4src254-1c4n.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stream profile for T-rex traffic generator.
Stream profile:
- Two streams sent in directions 0 --> 1 and 1 --> 0 at the same time.
- Packet: ETH / IP /
- Direction 0 --> 1:
- Destination MAC address: 52:54:00:00:nf_id:01
- Source IP address range: 10.10.10.1 - 10.10.10.254
- Destination IP address range: 20.20.20.1
- Direction 1 --> 0:
- Destination MAC address: 52:54:00:00:nf_id:02
- Source IP address range: 20.20.20.1 - 20.20.20.254
- Destination IP address range: 10.10.10.1
"""
from trex.stl.api import *
from profile_trex_stateless_base_class import TrafficStreamsBaseClass
class TrafficStreams(TrafficStreamsBaseClass):
"""Stream profile."""
def __init__(self):
"""Initialization and setting of streams' parameters."""
super(TrafficStreamsBaseClass, self).__init__()
# Service density parameters.
self.nf_chains = 1
self.nf_nodes = 4
# MACs used in packet headers.
self.p1_dst_start_mac = u"52:54:00:00:00:01"
self.p2_dst_start_mac = u"52:54:00:00:00:02"
# IPs used in packet headers.
self.p1_src_start_ip = u"10.10.10.1"
self.p1_src_end_ip = u"10.10.10.254"
self.p1_dst_start_ip = u"20.20.20.1"
self.p2_src_start_ip = u"20.20.20.1"
self.p2_src_end_ip = u"20.20.20.254"
self.p2_dst_start_ip = u"10.10.10.1"
def define_packets(self):
"""Defines the packets to be sent from the traffic generator.
Packet definition: | ETH | IP |
:returns: Packets to be sent from the traffic generator.
:rtype: tuple
"""
# Direction 0 --> 1
base_pkt_a = (
Ether(
dst=self.p1_dst_start_mac
) /
IP(
src=self.p1_src_start_ip,
dst=self.p1_dst_start_ip,
proto=61
)
)
# Direction 1 --> 0
base_pkt_b = (
Ether(
dst=self.p2_dst_start_mac
) /
IP(
src=self.p2_src_start_ip,
dst=self.p2_dst_start_ip,
proto=61
)
)
# Direction 0 --> 1
vm1 = STLScVmRaw(
[
STLVmFlowVar(
name=u"mac_dst",
min_value=1,
max_value=self.nf_chains*self.nf_nodes,
size=1,
step=self.nf_nodes,
op=u"inc"
),
STLVmWrFlowVar(
fv_name=u"mac_dst",
pkt_offset=4
),
STLVmFlowVar(
name=u"src",
min_value=self.p1_src_start_ip,
max_value=self.p1_src_end_ip,
size=4,
op=u"inc"
),
STLVmWrFlowVar(
fv_name=u"src",
pkt_offset=u"IP.src"
),
STLVmFixIpv4(
offset=u"IP"
)
]
)
# Direction 1 --> 0
vm2 = STLScVmRaw(
[
STLVmFlowVar(
name=u"mac_dst",
min_value=self.nf_nodes,
max_value=self.nf_chains*self.nf_nodes,
size=1,
step=self.nf_nodes,
op=u"inc"
),
STLVmWrFlowVar(
fv_name=u"mac_dst",
pkt_offset=4
),
STLVmFlowVar(
name=u"src",
min_value=self.p2_src_start_ip,
max_value=self.p2_src_end_ip,
size=4,
op=u"inc"
),
STLVmWrFlowVar(
fv_name=u"src",
pkt_offset=u"IP.src"
),
STLVmFixIpv4(
offset=u"IP"
)
]
)
return base_pkt_a, base_pkt_b, vm1, vm2
def register():
"""Register this traffic profile to T-rex.
Do not change this function.
:return: Traffic streams.
:rtype: Object
"""
return TrafficStreams()
| 29.806061
| 74
| 0.502847
|
808f77d8d0754f29b2aab8b1efd53e0fbe3226ba
| 3,548
|
py
|
Python
|
cosmotools/gansystem.py
|
nperraud/3DcosmoGAN
|
0d566bca495f764c1287c5e216e20601fa975cda
|
[
"MIT"
] | 10
|
2019-08-17T10:07:23.000Z
|
2022-03-14T16:36:58.000Z
|
cosmotools/gansystem.py
|
nperraud/3DcosmoGAN
|
0d566bca495f764c1287c5e216e20601fa975cda
|
[
"MIT"
] | null | null | null |
cosmotools/gansystem.py
|
nperraud/3DcosmoGAN
|
0d566bca495f764c1287c5e216e20601fa975cda
|
[
"MIT"
] | 1
|
2019-08-17T21:17:44.000Z
|
2019-08-17T21:17:44.000Z
|
import numpy as np
import tensorflow as tf
from gantools.gansystem import UpscaleGANsystem
from .model import cosmo_metric_list
import itertools
class CosmoUpscaleGANsystem(UpscaleGANsystem):
def default_params(self):
# Global parameters
# -----------------
d_param = super().default_params()
d_param['Nstats_cubes'] = 10
return d_param
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.params['Nstats_cubes']:
self._cosmo_metric_list = cosmo_metric_list()
for met in self._cosmo_metric_list:
met.add_summary(collections="cubes")
self._cubes_summaries = tf.summary.merge(tf.get_collection("cubes"))
def train(self, dataset, **kwargs):
if self.params['Nstats_cubes']:
# Only implemented for the 3-dimensional case...
assert(self.net.params['generator']['data_size']>= 2)
assert(len(dataset._X)>=self.params['Nstats_cubes'])
self.summary_dataset_cubes = itertools.cycle(dataset.iter_cubes(self.params['Nstats_cubes'], downscale=self.net.params['upscaling']))
offset = next(self.summary_dataset_cubes).shape[1]//8
self.offset = offset
self.preprocess_summaries(dataset._X[:,offset:,offset:,offset:], rerun=False)
self._global_score = np.inf
super().train(dataset, **kwargs)
def preprocess_summaries(self, X_real, **kwargs):
if self.net.params['cosmology']['backward_map']:
X_real = self.params['net']['cosmology']['backward_map'](X_real)
for met in self._cosmo_metric_list:
met.preprocess(X_real, **kwargs)
def _train_log(self, feed_dict):
super()._train_log(feed_dict)
if self.params['Nstats_cubes']:
X_real = next(self.summary_dataset_cubes)
if self.net.params['upscaling']:
small = X_real
small = np.expand_dims(small, axis=self.net.params['generator']['data_size']+1)
else:
small = None
X_fake = self.upscale_image(N=self.params['Nstats_cubes'],
small=small,
resolution=X_real.shape[1],
sess=self._sess)
offset = self.offset
feed_dict = self.compute_summaries(X_fake[:,offset:,offset:,offset:], feed_dict)
# m = self._cosmo_metric_list[0]
# print(m.last_metric)
# print(m._metrics[0].last_metric)
# print(m._metrics[1].last_metric)
# print(m._metrics[2].last_metric)
new_val = self._cosmo_metric_list[0].last_metric
if new_val <= self._global_score:
self._global_score = new_val
self._save(self._counter)
print('New lower score at {}'.format(new_val))
summary = self._sess.run(self._cubes_summaries, feed_dict=feed_dict)
self._summary_writer.add_summary(summary, self._counter)
def compute_summaries(self, X_fake, feed_dict={}):
if self.net.params['cosmology']['backward_map']:
# if X_real is not None:
# X_real = self.params['cosmology']['backward_map'](X_real)
X_fake = self.net.params['cosmology']['backward_map'](X_fake)
for met in self._cosmo_metric_list:
feed_dict = met.compute_summary(X_fake, None, feed_dict)
return feed_dict
| 42.238095
| 145
| 0.600056
|
e62d414a7c48b94291153483ec8ca105fa11c271
| 19,015
|
py
|
Python
|
python/test_perforce.py
|
inflexiongames/perforce-buildkite-plugin
|
8778467616c24e755b95b89bf22ba0c3088cc793
|
[
"MIT"
] | null | null | null |
python/test_perforce.py
|
inflexiongames/perforce-buildkite-plugin
|
8778467616c24e755b95b89bf22ba0c3088cc793
|
[
"MIT"
] | null | null | null |
python/test_perforce.py
|
inflexiongames/perforce-buildkite-plugin
|
8778467616c24e755b95b89bf22ba0c3088cc793
|
[
"MIT"
] | null | null | null |
"""
Test perforce module for managing workspaces
"""
from contextlib import closing, contextmanager
from functools import partial
from threading import Thread
import os
import shutil
import socket
import subprocess
import tempfile
import time
import zipfile
import pytest
from perforce import P4Repo
def find_free_port():
"""Find an open port that we could run a perforce server on"""
# pylint: disable=no-member
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock.getsockname()[1]
@contextmanager
def run_p4d(p4port, from_zip=None):
"""Start a perforce server with the given hostname:port.
Optionally unzip server state from a file
"""
prefix = 'bk-p4d-test-'
parent = tempfile.gettempdir()
for item in os.listdir(parent):
if item.startswith(prefix):
try:
shutil.rmtree(os.path.join(parent, item))
except Exception: # pylint: disable=broad-except
print("Failed to remove", item)
tmpdir = tempfile.mkdtemp(prefix=prefix)
if from_zip:
zip_path = os.path.join(os.path.dirname(__file__), 'fixture', from_zip)
with zipfile.ZipFile(zip_path) as archive:
archive.extractall(tmpdir)
p4ssldir = os.path.join(tmpdir, 'ssl')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'fixture', 'insecure-ssl'), p4ssldir)
# Like a beautifully crafted work of art, p4d fails to start if permissions on the secrets are too open.
# https://www.perforce.com/manuals/v18.1/cmdref/Content/CmdRef/P4SSLDIR.html
os.chmod(p4ssldir, 0o700)
os.chmod(os.path.join(p4ssldir, 'privatekey.txt'), 0o600)
os.chmod(os.path.join(p4ssldir, 'certificate.txt'), 0o600)
os.environ['P4SSLDIR'] = p4ssldir
try:
p4d = subprocess.Popen(['p4d', '-r', tmpdir, '-p', p4port])
yield p4d
finally:
p4d.terminate()
@pytest.fixture(scope='package')
def server():
"""Start a p4 server in the background and return the address"""
port = find_free_port()
p4port = 'ssl:localhost:%s' % port
os.environ['P4PORT'] = p4port
with run_p4d(p4port, from_zip='server.zip'):
time.sleep(1)
yield p4port
def store_server(repo, to_zip):
"""Zip up a server to use as a unit test fixture"""
serverRoot = repo.info()['serverRoot']
zip_path = os.path.join(os.path.dirname(__file__), 'fixture', to_zip)
with zipfile.ZipFile(zip_path, 'w') as archive:
for root, _, files in os.walk(serverRoot):
for filename in files:
abs_path = os.path.join(root, filename)
archive.write(abs_path, os.path.relpath(abs_path, serverRoot))
def test_server_fixture(capsys, server):
"""Check that tests can start and connect to a local perforce server"""
with capsys.disabled():
print('port:', server, 'user: carl')
repo = P4Repo()
# To change the fixture server, uncomment the line below with 'store_server' and put a breakpoint on it
# Run unit tests in the debugger and hit the breakpoint
# Log in using details printed to stdout (port/user) via p4v or the command line
# Make changes to the p4 server
# Continue execution so that the 'store_server' line executes
# Replace server.zip with new_server.zip
# Update validation code below to document the new server contents
# store_server(repo, 'new_server.zip')
# Validate contents of server fixture @HEAD
depotfiles = [info['depotFile'] for info in repo.perforce.run_files('//...')]
depotfile_to_content = {depotfile: repo.perforce.run_print(depotfile)[1] for depotfile in depotfiles}
assert depotfile_to_content == {
"//depot/file.txt": "Hello World\n",
"//stream-depot/main/file.txt": "Hello Stream World\n",
"//stream-depot/main/file_2.txt": "file_2\n",
"//stream-depot/dev/file.txt": "Hello Stream World (dev)\n",
}
# Check submitted changes
submitted_changes = repo.perforce.run_changes('-s', 'submitted')
submitted_changeinfo = {change["change"]: repo.perforce.run_describe(change["change"])[0] for change in submitted_changes}
# Filter info to only contain relevant keys for submitted changes
submitted_changeinfo = {
change: {key: info.get(key)
for key in ['depotFile', 'desc', 'action']}
for change, info in submitted_changeinfo.items()
}
assert submitted_changeinfo == {
'1' :{
'action': ['add'],
'depotFile': ['//depot/file.txt'],
'desc': 'Initial Commit'
},
'2' :{
'action': ['add'],
'depotFile': ['//stream-depot/main/file.txt'],
'desc': 'Initial Commit to Stream\n'
},
'6' :{
'action': ['edit'],
'depotFile': ['//depot/file.txt'],
'desc': 'modify //depot/file.txt\n'
},
'7': {
'action': ['branch'],
'depotFile': ['//stream-depot/dev/file.txt'],
'desc': 'Copy files from //stream-depot/main to //stream-depot/dev\n'
},
'8': {
'action': ['edit'],
'depotFile': ['//stream-depot/dev/file.txt'],
'desc': 'Update contents of //stream-depot/dev/file.txt\n'
},
'9': {
'action': ['add'],
'depotFile': ['//stream-depot/main/file_2.txt'],
'desc': 'file_2.txt - exists in main but not dev\n'
}
}
# Check shelved changes
shelved_changes = repo.perforce.run_changes('-s', 'pending')
shelved_changeinfo = {change["change"]: repo.perforce.run_describe('-S', change["change"])[0] for change in shelved_changes}
# Filter info to only contain relevant keys for submitted changes
shelved_changeinfo = {
change: {key: info.get(key)
for key in ['depotFile', 'desc', 'action']}
for change, info in shelved_changeinfo.items()
}
assert shelved_changeinfo == {
'3' :{
'action': ['edit'],
'depotFile': ['//depot/file.txt'],
'desc': 'Modify file in shelved change\n',
# Change content from 'Hello World\n' to 'Goodbye World\n'
},
'4' :{
'action': ['delete'],
'depotFile': ['//depot/file.txt'],
'desc': 'Delete file in shelved change\n',
},
'5' :{
'action': ['add'],
'depotFile': ['//depot/newfile.txt'],
'desc': 'Add file in shelved change\n',
},
}
labels = repo.perforce.run_labels()
# Filter info to only contain relevant keys
labelinfo = {
label.get('label'): {key: label.get(key)
for key in ['Revision']
}
for label in labels
}
assert labelinfo == {
'my-label': {'Revision': '@2'}
}
def test_head(server, tmpdir):
"""Test resolve of HEAD changelist"""
# workspace with no changes in view defaults to global view
repo = P4Repo(root=tmpdir, view="//depot/empty_dir/... empty_dir/...")
assert repo.head() == "@9", "Unexpected global HEAD revision"
repo = P4Repo(root=tmpdir, stream='//stream-depot/dev')
assert repo.head() == "@8", "Unexpected HEAD revision for stream"
repo = P4Repo(root=tmpdir, stream='//stream-depot/idontexist')
with pytest.raises(Exception, match=r"Stream '//stream-depot/idontexist' doesn't exist."):
repo.head()
assert repo.head_at_revision("@my-label") == "2", "Unexpected HEAD revision for label"
def test_checkout(server, tmpdir):
"""Test normal flow of checking out files"""
repo = P4Repo(root=tmpdir)
assert os.listdir(tmpdir) == [], "Workspace should be empty"
repo.sync()
assert sorted(os.listdir(tmpdir)) == sorted([
"file.txt", "p4config"]), "Workspace sync not as expected"
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello World\n", "Unexpected content in workspace file"
repo.sync(revision='@0')
assert "file.txt" not in os.listdir(tmpdir), "Workspace file wasn't de-synced"
# Validate p4config
with open(os.path.join(tmpdir, "p4config")) as content:
assert "P4PORT=%s\n" % repo.perforce.port in content.readlines(), "Unexpected p4config content"
def test_checkout_partial_path(server, tmpdir):
"""Test checking out a subset of view with one path"""
repo = P4Repo(root=tmpdir, sync=['//depot/file.txt'])
repo.sync()
assert 'file.txt' in os.listdir(tmpdir)
def test_checkout_partial_dir(server, tmpdir):
"""Test checking out a subset of view with one directory"""
repo = P4Repo(root=tmpdir, sync=['//depot/...'])
repo.sync()
assert 'file.txt' in os.listdir(tmpdir)
def test_checkout_partial_multiple(server, tmpdir):
"""Test checking out a subset of view with multiple paths"""
repo = P4Repo(root=tmpdir, sync=['//depot/fake-dir/...', '//depot/file.txt'])
repo.sync()
assert 'file.txt' in os.listdir(tmpdir)
def test_checkout_stream(server, tmpdir):
"""Test checking out a stream depot"""
repo = P4Repo(root=tmpdir, stream='//stream-depot/main')
assert os.listdir(tmpdir) == [], "Workspace should be empty"
repo.sync()
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello Stream World\n", "Unexpected content in workspace file"
def test_checkout_label(server, tmpdir):
"""Test checking out at a specific label"""
repo = P4Repo(root=tmpdir)
with pytest.raises(Exception, match=r'Invalid changelist/client/label/date'):
repo.sync(revision="@nonexistent-label")
repo.sync(revision="@my-label")
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello World\n", "Unexpected content in workspace file"
def test_readonly_client(server, tmpdir):
"""Test creation of a readonly client"""
repo = P4Repo(root=tmpdir, client_type='readonly')
repo.sync()
assert "file.txt" in os.listdir(tmpdir), "Workspace file was not synced"
def test_partitioned_client(server, tmpdir):
"""Test creation of a partitioned client"""
repo = P4Repo(root=tmpdir, client_type='partitioned')
repo.sync()
assert "file.txt" in os.listdir(tmpdir), "Workspace file was not synced"
def test_modify_client_type(server, tmpdir):
"""Test modifying a clients type"""
repo = P4Repo(root=tmpdir, client_type='writeable')
repo.sync()
with pytest.raises(Exception, match=r'Client storage type cannot be changed after client is created'):
repo = P4Repo(root=tmpdir, client_type='readonly')
repo.sync()
def test_workspace_recovery(server, tmpdir):
"""Test that we can detect and recover from various workspace snafus"""
repo = P4Repo(
root=tmpdir,
# allow unit test to delete otherwise readonly files from workspace
client_options='allwrite'
)
# clobber writeable file
# partially synced writeable files may be left in the workspace if a machine was shutdown mid-sync
with open(os.path.join(tmpdir, "file.txt"), 'w') as depotfile:
depotfile.write("Overwrite this file")
repo.sync() # By default, would raise 'cannot clobber writable file'
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello World\n", "Unexpected content in workspace file"
# p4 clean
os.remove(os.path.join(tmpdir, "file.txt"))
open(os.path.join(tmpdir, "added.txt"), 'a').close()
repo.clean()
assert sorted(os.listdir(tmpdir)) == sorted([
"file.txt", "p4config"]), "Failed to restore workspace file with repo.clean()"
os.remove(os.path.join(tmpdir, "file.txt"))
os.remove(os.path.join(tmpdir, "p4config"))
repo = P4Repo(root=tmpdir) # Open a fresh tmpdir, as if this was a different job
repo.sync() # Normally: "You already have file.txt", but since p4config is missing it will restore the workspace
assert sorted(os.listdir(tmpdir)) == sorted([
"file.txt", "p4config"]), "Failed to restore corrupt workspace due to missing p4config"
def test_p4print_unshelve(server, tmpdir):
"""Test unshelving a pending changelist by p4printing content into a file"""
repo = P4Repo(root=tmpdir)
repo.sync()
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello World\n", "Unexpected content in workspace file"
repo.p4print_unshelve('3') # Modify a file
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Goodbye World\n", "Unexpected content in workspace file"
repo.p4print_unshelve('4') # Delete a file
assert not os.path.exists(os.path.join(tmpdir, "file.txt"))
repo.p4print_unshelve('5') # Add a file
assert os.path.exists(os.path.join(tmpdir, "newfile.txt"))
with pytest.raises(Exception, match=r'Changelist 999 does not contain any shelved files.'):
repo.p4print_unshelve('999')
assert len(repo._read_patched()) == 2 # changes to file.txt and newfile.txt
# Unshelved changes are removed in following syncs
repo.sync()
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello World\n", "Unexpected content in workspace file"
assert not os.path.exists(os.path.join(tmpdir, "newfile.txt")), "File unshelved for add was not deleted"
# Shelved changes containing files not selected for sync are skipped
repo = P4Repo(root=tmpdir, sync=['//depot/fake-dir/...'])
repo.sync()
repo.p4print_unshelve('3') # Modify file.txt
assert not os.path.exists(os.path.join(tmpdir, "file.txt"))
# Shelved changes containing files not mapped into this workspace do not throw an exception
repo = P4Repo(root=tmpdir, stream='//stream-depot/main')
repo.p4print_unshelve('3') # Modify a file
def copytree(src, dst):
"""Shim to get around shutil.copytree requiring root dir to not exist"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d)
else:
shutil.copy2(s, d)
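# Note: on Python 3.8+ the same effect is available directly via
# shutil.copytree(src, dst, dirs_exist_ok=True); the shim above is kept for
# older interpreters.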
def test_client_migration(server, tmpdir):
"""Test re-use of workspace data when moved to another host"""
repo = P4Repo(root=tmpdir)
assert os.listdir(tmpdir) == [], "Workspace should be empty"
synced = repo.sync()
assert len(synced) > 0, "Didn't sync any files"
with tempfile.TemporaryDirectory(prefix="bk-p4-test-") as second_client:
copytree(tmpdir, second_client)
# Client names include path on disk, so this creates a new unique client
repo = P4Repo(root=second_client)
synced = repo.sync() # Flushes to match previous client, since p4config is there on disk
assert synced == [], "Should not have synced any files in second client"
def test_stream_switching(server, tmpdir):
"""Test stream-switching within the same depot"""
repo = P4Repo(root=tmpdir, stream='//stream-depot/main')
synced = repo.sync()
assert len(synced) > 0, "Didn't sync any files"
assert set(os.listdir(tmpdir)) == set([
"file.txt", "file_2.txt", "p4config"])
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello Stream World\n", "Unexpected content in workspace file"
# Re-use the same checkout directory, but switch streams
repo = P4Repo(root=tmpdir, stream='//stream-depot/dev')
repo.sync()
assert len(synced) > 0, "Didn't sync any files"
assert set(os.listdir(tmpdir)) == set([
"file.txt", "p4config"]) # file_2.txt was de-synced
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello Stream World (dev)\n", "Unexpected content in workspace file"
def test_stream_switching_migration(server, tmpdir):
"""Test stream-switching and client migration simultaneously"""
repo = P4Repo(root=tmpdir, stream='//stream-depot/main')
synced = repo.sync()
assert len(synced) > 0, "Didn't sync any files"
assert set(os.listdir(tmpdir)) == set([
"file.txt", "file_2.txt", "p4config"])
with open(os.path.join(tmpdir, "file.txt")) as content:
assert content.read() == "Hello Stream World\n", "Unexpected content in workspace file"
with tempfile.TemporaryDirectory(prefix="bk-p4-test-") as second_client:
copytree(tmpdir, second_client)
# Client names include path on disk, so this creates a new unique client
# Re-use the same checkout directory and switch streams at the same time
repo = P4Repo(root=second_client, stream='//stream-depot/dev')
repo.sync()
assert len(synced) > 0, "Didn't sync any files"
assert set(os.listdir(second_client)) == set([
"file.txt", "p4config"]) # file_2.txt was de-synced
with open(os.path.join(second_client, "file.txt")) as content:
assert content.read() == "Hello Stream World (dev)\n", "Unexpected content in workspace file"
# fingerprint here matches to the cert in the test fixture directory, and you can check that with
# P4SSLDIR=$(pwd)/python/fixture/insecure-ssl p4d -Gf
__LEGIT_P4_FINGERPRINT__ = '7A:10:F6:00:95:87:5B:2E:D4:33:AB:44:42:05:85:94:1C:93:2E:A2'
def test_fingerprint_good(server, tmpdir):
"""Test supplying the correct fingerprint"""
os.environ['P4TRUST'] = os.path.join(tmpdir, 'trust.txt')
repo = P4Repo(root=tmpdir, fingerprint=__LEGIT_P4_FINGERPRINT__)
synced = repo.sync()
assert len(synced) > 0, "Didn't sync any files"
def test_fingerprint_bad(server, tmpdir):
"""Test supplying an incorrect fingerprint"""
os.environ['P4TRUST'] = os.path.join(tmpdir, 'trust.txt')
repo = P4Repo(root=tmpdir, fingerprint='FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF')
with pytest.raises(Exception, match=r"The authenticity of '.+' can't be established"):
repo.sync()
def test_fingerprint_changed(server, tmpdir):
"""Test updating a fingerprint"""
os.environ['P4TRUST'] = os.path.join(tmpdir, 'trust.txt')
repo = P4Repo(root=tmpdir, fingerprint='FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF')
with pytest.raises(Exception, match=r"The authenticity of '.*' can't be established"):
repo.sync()
repo = P4Repo(root=tmpdir, fingerprint=__LEGIT_P4_FINGERPRINT__)
synced = repo.sync()
assert len(synced) > 0, "Didn't sync any files"
# def test_live_server():
# """Reproduce production issues quickly by writing tests which run against a real server"""
# os.environ["P4USER"] = "carljohnson"
# os.environ["P4PORT"] = "ssl:live-server:1666"
# root = "/Users/carl/p4-test-client"
# repo = P4Repo(root=root)
# repo.p4print_unshelve("28859")
| 41.608315
| 128
| 0.649435
|
2d4d577d301e34486329580b18e4862431e81424
| 1,680
|
py
|
Python
|
hello.py
|
niechao136/python_add_json_to_execl
|
c2191e02aa1102f3b1515964de4f12698b2e6bbc
|
[
"MIT"
] | null | null | null |
hello.py
|
niechao136/python_add_json_to_execl
|
c2191e02aa1102f3b1515964de4f12698b2e6bbc
|
[
"MIT"
] | null | null | null |
hello.py
|
niechao136/python_add_json_to_execl
|
c2191e02aa1102f3b1515964de4f12698b2e6bbc
|
[
"MIT"
] | null | null | null |
import json
import xlwt
cn_value = []
tw_value = []
en_value = []
cn_key = {}
tw_key = {}
en_key = {}
def find_str(_json, n, s, value, key):
if isinstance(_json, dict):
for index in _json:
if isinstance(_json[index], str):
key[s + "." + index] = len(value)
value.append(_json[index])
else:
find_str(_json[index], n + 1, s + "." + index, value, key)
elif isinstance(_json, list):
for index in range(len(_json)):
if isinstance(_json[index], str):
key[s + "[" + str(index) + "]"] = len(value)
value.append(_json[index])
else:
find_str(_json[index], n + 1, s + "[" + str(index) + "]", value, key)
else:
key[s] = len(value)
value.append(_json)
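# Worked example (input is illustrative): for
# _json = {"a": {"b": "hi", "c": ["x", "y"]}} and s = "lang", find_str fills
#   key   -> {"lang.a.b": 0, "lang.a.c[0]": 1, "lang.a.c[1]": 2}
#   value -> ["hi", "x", "y"]
# so each translation string gets a stable row index that is shared across the
# cn/tw/en locale files further down.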
def read_file(file):
with open(file, 'r', encoding='utf8') as fr:
data = json.load(fr)  # use json.load to parse the JSON file into a dict
return data
tw = read_file('./zh-tw.json')
cn = read_file('./zh-cn.json')
en = read_file('./en-us.json')
find_str(cn, 0, "lang", cn_value, cn_key)
find_str(tw, 0, "lang", tw_value, tw_key)
find_str(en, 0, "lang", en_value, en_key)
book = xlwt.Workbook()  # create an Excel workbook object
sheet = book.add_sheet('Sheet1', cell_overwrite_ok=True)  # add a worksheet
title = ["string_id", "zh-CN", "zh-TW", "en-US"]
for i in range(len(title)):
sheet.write(0, i, title[i])  # write each title field into row 0, column i
for i in cn_key:
sheet.write(cn_key[i] + 1, 0, i)
sheet.write(cn_key[i] + 1, 1, cn_value[cn_key[i]])
sheet.write(cn_key[i] + 1, 2, tw_value[tw_key[i]])
sheet.write(cn_key[i] + 1, 3, en_value[en_key[i]])
book.save('demo.xls')
| 30.545455
| 85
| 0.572024
|
291c5f56a54c8ab792453de4503c89f52c02fd25
| 2,038
|
py
|
Python
|
pydrake_implementation/test/allegro_ik_test.py
|
JasonJZLiu/Kinematic-Tree-IK-Solver
|
5d434dea76542b4a4e94bb92139997317f727747
|
[
"MIT"
] | 3
|
2021-05-20T22:14:33.000Z
|
2022-03-16T02:07:04.000Z
|
pydrake_implementation/test/allegro_ik_test.py
|
JasonJZLiu/Kinematic-Tree-IK-Solver
|
5d434dea76542b4a4e94bb92139997317f727747
|
[
"MIT"
] | 1
|
2022-01-10T09:47:24.000Z
|
2022-01-10T09:47:24.000Z
|
pydrake_implementation/test/allegro_ik_test.py
|
JasonJZLiu/Kinematic-Tree-IK-Solver
|
5d434dea76542b4a4e94bb92139997317f727747
|
[
"MIT"
] | null | null | null |
from ik_solver import IKSolver
import numpy as np
if __name__ == "__main__":
AllegroIKSolver = IKSolver(urdf_file = "allegro_assets/allegro_hand_description_right.urdf",
model_name = "allegro_hand_right",
root_link_name = "palm_link",
visualize = True,
position_tolerance = 0.001)
AllegroIKSolver.end_effector_frames = ['link_3_tip', 'link_7_tip', 'link_11_tip', 'link_15_tip']
desired_EE_poses = [np.array([-0.17066993, -0.32635043, 0.77864744, 0, 0, 0]),
np.array([-0.18389982, -0.28697679, 0.76127432 , 0, 0, 0]),
np.array([-0.19232582, -0.24936115, 0.73991827, 0, 0, 0]),
np.array([-0.16467229, -0.24722305, 0.77046255, 0, 0, 0])]
desired_root_pose = np.array([-0.25287761, -0.25028728, 0.80106794, 0, 0, 0])
q_initial_guess = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
import time
start = time.time()
q_result = AllegroIKSolver.get_ik_solution(desired_EE_poses, desired_root_pose, q_initial_guess, verbose = True)
end = time.time()
print("time: ", end-start)
AllegroIKSolver.update_meshcat()
# input("Wait")
# desired_EE_poses = [np.array([-0.17066993, -0.32635043, 0.77864744, 0, 0, 0]),
# np.array([-0.18389982, -0.28697679, 0.76127432 , 0, 0, 0]),
# np.array([-0.19232582, -0.24936115, 0.73991827, 0, 0, 0]),
# np.array([-0.16467229, -0.24722305, 0.77046255, 0, 0, 0])]
# desired_root_pose = np.array([-0.25287761, -0.25028728, 0.80106794, 0, 0, 0])
# q_initial_guess = [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
# q_result = AllegroIKSolver.get_ik_solution(desired_EE_poses, desired_root_pose, q_initial_guess, verbose = True)
# AllegroIKSolver.update_meshcat()
while True:
pass
#EOF
| 45.288889
| 118
| 0.560353
|
9b3b99a693c75912658409974d10929aeb908e8f
| 852
|
py
|
Python
|
tests/test_homework.py
|
Majroch/vulcan-api
|
4448eeb64d2481b5deb643bcb32f2c9ee04463f5
|
[
"MIT"
] | null | null | null |
tests/test_homework.py
|
Majroch/vulcan-api
|
4448eeb64d2481b5deb643bcb32f2c9ee04463f5
|
[
"MIT"
] | null | null | null |
tests/test_homework.py
|
Majroch/vulcan-api
|
4448eeb64d2481b5deb643bcb32f2c9ee04463f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from .utils import PARAMS_HOMEWORK_LIST
@pytest.mark.private
@pytest.mark.parametrize("date, homework_expected_list", PARAMS_HOMEWORK_LIST)
class TestHomework:
@pytest.mark.online
def test(self, client, date, homework_expected_list):
homework_list = client.get_homework(date)
for homework in homework_list:
assert homework.date == date
def test_private(self, client, date, homework_expected_list):
homework_list = client.get_homework(date)
for i, homework in enumerate(homework_list):
homework_expected = homework_expected_list[i]
assert homework.id == homework_expected["Id"]
assert homework.subject.id == homework_expected["IdPrzedmiot"]
assert homework.teacher.id == homework_expected["IdPracownik"]
| 32.769231
| 78
| 0.703052
|
32d0c29b83169cc0186b100f48e75ff82f60a373
| 684
|
py
|
Python
|
VariablesNcomments/PyVariables.py
|
IMBLACK-CMD/Pythonister
|
801a8fef7ef67fa84df2496a7d93738ced25e9d8
|
[
"MIT"
] | null | null | null |
VariablesNcomments/PyVariables.py
|
IMBLACK-CMD/Pythonister
|
801a8fef7ef67fa84df2496a7d93738ced25e9d8
|
[
"MIT"
] | null | null | null |
VariablesNcomments/PyVariables.py
|
IMBLACK-CMD/Pythonister
|
801a8fef7ef67fa84df2496a7d93738ced25e9d8
|
[
"MIT"
] | null | null | null |
import constants
#Variables are named locations that store data in memory
#example
number = 10
print("This is Number ", number)
#assigning multiple values to multiple variables in one statement
a, b, c = 1, 2, "validate"
print(a)
print(b)
print(c)
#constants
print("..... constants......")
print(constants.PI)
print(constants.GRAVITY)
print("....Literal Collections.....")
officeEquip = ["laptops","desktop","files"]
print("this is a list :\n ", officeEquip)
print("turples:")
numbers=(1,2,3,4,5)
print("\n", numbers)
print("Dictionaries:")
bio = {'fname':'Nelson','lname':'Ongiti','subject':'python variables'}
print(bio)
print("set:")
tableFields = {'fname','lname','dob',}
print(sorted(tableFields))
| 22.064516
| 70
| 0.694444
|
5a69a118f7f45bdd7e68d688d92b580a0fd74897
| 693
|
py
|
Python
|
homework-4/handout/hw4/tests.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
homework-4/handout/hw4/tests.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
homework-4/handout/hw4/tests.py
|
neelpawarcmu/deep-learning-library
|
401483fce40e3a025054596cbec368ff4f647661
|
[
"MIT"
] | null | null | null |
import numpy as np
def log_softmax(x, axis):
ret = x - np.max(x, axis=axis, keepdims=True)
lsm = np.log(np.sum(np.exp(ret), axis=axis, keepdims=True))
return ret - lsm
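# Numerically stable log-softmax: the returned value is
#   x_i - max(x) - log(sum_j exp(x_j - max(x))) = log(exp(x_i) / sum_j exp(x_j)),
# which test_prediction below turns into a mean negative log-likelihood over
# the target indices.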
def array_to_str(arr, vocab):
return " ".join(vocab[a] for a in arr)
def test_prediction(out, targ):
out = log_softmax(out, 1)
nlls = out[np.arange(out.shape[0]), targ]
nll = -np.mean(nlls)
return nll
def test_generation(inp, pred, vocab):
outputs = u""
for i in range(inp.shape[0]):
w1 = array_to_str(inp[i], vocab)
w2 = array_to_str(pred[i], vocab)
outputs += u"Input | Output #{}: {} | {}\n".format(i, w1, w2)
return outputs
| 27.72
| 70
| 0.587302
|
a25307a90642531c26d1ee5abbab5412450da97f
| 5,583
|
py
|
Python
|
src/network-manager/azext_network_manager/vendored_sdks/aio/operations/_effective_connectivity_configurations_operations.py
|
ravithanneeru/azure-cli-extensions
|
e0de87f3563ae39525370e9912589aac33e7bded
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01_preview/aio/operations/_effective_connectivity_configurations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01_preview/aio/operations/_effective_connectivity_configurations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class EffectiveConnectivityConfigurationsOperations:
"""EffectiveConnectivityConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.QueryRequestOptions",
**kwargs: Any
) -> "_models.NetworkManagerEffectiveConnectivityConfigurationListResult":
"""List all effective connectivity configurations applied on a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to list correct page.
:type parameters: ~azure.mgmt.network.v2021_02_01_preview.models.QueryRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkManagerEffectiveConnectivityConfigurationListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01_preview.models.NetworkManagerEffectiveConnectivityConfigurationListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkManagerEffectiveConnectivityConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'QueryRequestOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkManagerEffectiveConnectivityConfigurationListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/listNetworkManagerEffectiveConnectivityConfigurations'} # type: ignore
| 51.694444
| 232
| 0.710013
|
a71b506208c2accac8d8d79bb0922e488b75f8c3
| 3,629
|
py
|
Python
|
setup.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 2
|
2019-09-13T20:27:39.000Z
|
2020-03-05T02:24:47.000Z
|
setup.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 46
|
2015-09-25T14:34:34.000Z
|
2020-10-05T22:04:36.000Z
|
setup.py
|
chop-dbhi/ehb-datasources
|
ff26dafa0a0919abbe53277e85e019c6df3f8f88
|
[
"BSD-2-Clause"
] | 5
|
2016-04-25T15:01:39.000Z
|
2016-08-29T20:40:31.000Z
|
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
main_package = 'ehb_datasources'
exclude_dirs = ['tests', 'fixtures']
class osx_install_data(install_data):
'''
On MacOS, the platform-specific lib dir is
/System/Library/Framework/Python/.../ which is wrong. Python 2.5 supplied
with MacOS 10.5 has an Apple-specific fix for this in
distutils.command.install_data#306. It fixes install_lib but not
install_data, which is why we roll our own install_data class.
'''
def finalize_options(self):
'''
By the time finalize_options is called, install.install_lib is set to
the fixed directory, so we set the installdir to install_lib. The
install_data class uses ('install_data', 'install_dir') instead.
'''
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
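# For example, fullsplit('ehb_datasources/drivers/redcap') returns
# ['ehb_datasources', 'drivers', 'redcap'] regardless of platform (the path is
# illustrative).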
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in list(INSTALL_SCHEMES.values()):
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk(main_package):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
elif dirname in exclude_dirs:
del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
files = [dirpath, [os.path.join(dirpath, f) for f in filenames]]
data_files.append(files)
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
version = __import__(main_package).get_version()
setup(
version=version,
name='ehb-datasources',
author='DBHi',
author_email='cbmisupport@email.chop.edu',
description='ehb datasource drivers',
license='GPL',
keywords='HTTP informatics bioinformatics REDCap',
url='https://github.com/chop-dbhi/ehb-datasources/',
install_requires=['jinja2>=2'],
packages=packages,
cmdclass=cmdclasses,
data_files=data_files,
classifiers=[
'Development Status :: 1 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: GPL License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Informatics :: Bioinformatics :: REDCap', # noqa
],
)
| 33.601852
| 104
| 0.680904
|
84b35fc6a814d5050b7509c2cb47576ae8334c07
| 3,775
|
py
|
Python
|
tools/viz/check_gt.py
|
XinGuoZJU/horizon_detection
|
50e35ae5027f92c169bc8b2cf2d6333258b60b1d
|
[
"FSFAP"
] | null | null | null |
tools/viz/check_gt.py
|
XinGuoZJU/horizon_detection
|
50e35ae5027f92c169bc8b2cf2d6333258b60b1d
|
[
"FSFAP"
] | null | null | null |
tools/viz/check_gt.py
|
XinGuoZJU/horizon_detection
|
50e35ae5027f92c169bc8b2cf2d6333258b60b1d
|
[
"FSFAP"
] | null | null | null |
import os
import json
import matplotlib.pyplot as plt
import numpy as np
def visualize(line_seg, pred_group, save_name, vp=None):
# line_seg: n_number x 3, pred_group: list vp: group_num x dim
fig = plt.figure()
if vp is not None:
axis_list = [1e8, -1e8, 1e8, -1e8]
for item in vp:
# x
if item[0] < axis_list[0]:
axis_list[0] = item[0]
if item[0] > axis_list[1]:
axis_list[1] = item[0]
# y
if item[1] < axis_list[2]:
axis_list[2] = item[1]
if item[1] > axis_list[3]:
axis_list[3] = item[1]
axis_list = [(-1) ** (i + 1) * 1 + int(axis_list[i]) for i in range(4)]
else:
axis_list = [-10, 10, -10, 10]
axis_list = [-5, 5, -5, 5]
plt.axis(axis_list)
if vp is not None:
# draw vp
for point in vp:
plt.scatter(point[0], point[1], c='k', s=10, zorder=2)
color_list = ['y', 'b', 'm', 'r', 'c', 'g', 'w', 'k']
# draw lines
#######
log = np.zeros(8)
#######
for i in range(len(line_seg)):
line = line_seg[i]
group = int(pred_group[i])
#######
threshold = 10
if log[group+1] > threshold:
continue
log[group+1] += 1
#######
if group == -1:
color = 'k--'
else:
color = color_list[group]
a, b, c = line
if b == 0:
y_r = np.arange(axis_list[0], axis_list[1] + 1, 0.1)
x_r = -np.ones(len(y_r)) * c / a
else:
x_r = np.arange(axis_list[0], axis_list[1] + 1, 0.1)
y_r = (-c - a * x_r) / b
idx_low = y_r > axis_list[2]
idx_up = y_r < axis_list[3]
idx = idx_low * idx_up
x_r = x_r[idx]
y_r = y_r[idx]
plt.plot(x_r, y_r, color, linewidth=0.5, zorder=1)
plt.savefig(save_name)
plt.close()
if __name__ == '__main__':
data_name = 'SUNCG' # 'YUD', 'ScanNet', 'SceneCityUrban3D', 'SUNCG'
if data_name == 'YUD':
image_size = [480, 640]
elif data_name == 'ScanNet':
image_size = [512, 512]
elif data_name == 'SceneCityUrban3D':
image_size = [512, 512]
elif data_name == 'SUNCG':
image_size = [480, 640]
org_path = '/n/fs/vl/xg5/workspace/baseline/horizon_detection/dataset/' + data_name + '/data/data.json'
save_path = '/n/fs/vl/xg5/workspace/baseline/horizon_detection/dataset/' + data_name + '/viz_line'
gt_file = '/n/fs/vl/xg5/Datasets/' + data_name + '/label/label.txt'
os.makedirs(save_path, exist_ok=True)
gt_dict = {}
with open(gt_file, 'r') as op:
content = op.readlines()
for line in content:
line_list = line.split()
image_name = line_list[0]
vps = [[(float(line_list[2*i+2]) - image_size[0] / 2) / (image_size[0] / 2),
(float(line_list[2*i+3]) - image_size[1] / 2) / (image_size[1] / 2)] for i in range(3)]
gt_dict[image_name] = vps
with open(org_path, 'r') as f:
lines = f.readlines()
for idx, line in enumerate(lines):
print(idx)
item = json.loads(line)
file_name = item['image_path']
group = np.array(item['group'])
line_seg = np.array(item['line']).tolist()
# vp = item['vp']
vp = gt_dict[file_name]
img_dir = file_name.split('/')[-2]
savepath = os.path.join(save_path, img_dir)
os.makedirs(savepath, exist_ok=True)
save_name = os.path.join(save_path, file_name)
visualize(line_seg, group, save_name, vp=vp)
| 30.691057
| 107
| 0.50649
|
df56791921ee626d8fd7cac54dd1a745955f9643
| 2,428
|
py
|
Python
|
fn_jira/fn_jira/components/jira_create_comment.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
fn_jira/fn_jira/components/jira_create_comment.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
fn_jira/fn_jira/components/jira_create_comment.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
"""Function implementation
Preprocessor script:
inputs.jira_url = incident.properties.jiraurl
inputs.jira_comment = note.text.content
"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from .jira_common import create_comment
from fn_jira.lib.resilient_common import validateFields, html2markdwn, parse_bool
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'jira_create_comment"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("jira", {})
self.log = logging.getLogger(__name__)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("jira", {})
@function("jira_create_comment")
def _jira_create_comment_function(self, event, *args, **kwargs):
"""Function: create a jira comment"""
try:
# Get the function parameters:
appDict = self._build_comment_appDict(kwargs)
yield StatusMessage("starting...")
resp = create_comment(self.log, appDict)
# Produce a FunctionResult with the return value
yield FunctionResult(resp)
except Exception as err:
yield FunctionError(err)
def _build_comment_appDict(self, kwargs):
"""
build the dictionary used to create a comment
:param kwargs:
:return: dictionary of values to use
"""
# test for required fields
validateFields(['jira_url', 'jira_comment'], kwargs)
jira_comment = html2markdwn(self.get_textarea_param(kwargs['jira_comment']))
if jira_comment is None or len(jira_comment.strip()) == 0:
raise FunctionError("comment is empty after rich text is removed")
appDict = {
'user': self.options['user'],
'password': self.options['password'],
'url': kwargs['jira_url'],
'verifyFlag': parse_bool(self.options.get('verify_cert', True)),
'comment': jira_comment
}
return appDict
| 35.705882
| 114
| 0.66145
|
7e3e5cdbb6eee328c30249cd40cd71b9d4e957c1
| 862
|
py
|
Python
|
Config Tool/page/examples/complex/complex_support.py
|
FrauBluher/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | 51
|
2015-01-17T16:08:08.000Z
|
2022-01-02T05:06:25.000Z
|
Config Tool/page/examples/complex/complex_support.py
|
hhintoglu/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | null | null | null |
Config Tool/page/examples/complex/complex_support.py
|
hhintoglu/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | 39
|
2016-06-18T05:43:14.000Z
|
2022-03-16T13:19:15.000Z
|
#! /usr/bin/env python
#
# Support module generated by PAGE version 4.2
# In conjunction with Tcl version 8.6
# Jan. 27, 2014 12:16:28 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
def set_Tk_var():
# These are Tk variables used passed to Tkinter and must be
# defined before the widgets using them are created.
global zzz
zzz = StringVar()
zzz.set('Buried')
def qqq():
print ('complex_support.qqq')
sys.stdout.flush()
def quit():
sys.exit()
def init(top, gui):
global w, top_level, root
w = gui
top_level = top
root = top
def destroy_window ():
# Function which closes the window.
global top_level
top_level.destroy()
top_level = None
| 17.24
| 63
| 0.645012
|
098ad9c36ed2cf30f9c5d1bfc87837203353e78e
| 6,035
|
py
|
Python
|
console.py
|
Shinoa-Fores/bitcoind-ncurses2
|
76577831877857f6d196da3532bf8d28e9c9780a
|
[
"MIT"
] | null | null | null |
console.py
|
Shinoa-Fores/bitcoind-ncurses2
|
76577831877857f6d196da3532bf8d28e9c9780a
|
[
"MIT"
] | null | null | null |
console.py
|
Shinoa-Fores/bitcoind-ncurses2
|
76577831877857f6d196da3532bf8d28e9c9780a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014-2017 esotericnonsense (Daniel Edgecumbe)
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/licenses/mit-license.php
import curses
import curses.textpad
import asyncio
import decimal
try:
import ujson as json
except ImportError:
import json
import view
from rpc import RPCError
class ConsoleView(view.View):
_mode_name = "console"
def __init__(self, client):
self._client = client
self._textbox_active = False
# TODO: implement history properly
self._command_history = [""]
self._response_history = []
self._response_history_strings = []
self._response_history_offset = 0
super().__init__()
async def _draw(self):
self._erase_init_pad()
CGREEN = curses.color_pair(1)
CRED = curses.color_pair(3)
CYELLOW = curses.color_pair(5)
CBOLD = curses.A_BOLD
CREVERSE = curses.A_REVERSE
self._pad.addstr(0, 63, "[UP/DOWN: browse, TAB: enter command]", CYELLOW)
offset = self._response_history_offset
if offset > 0:
self._pad.addstr(0, 36, "... ^ ...", CBOLD)
if offset < len(self._response_history_strings) - 17:
self._pad.addstr(17, 36, "... v ...", CBOLD)
for i, (t, string) in enumerate(self._response_history_strings):
if i < offset:
continue
if i > offset+15: # TODO
break
color = CBOLD + CGREEN if t == 0 else CBOLD
self._pad.addstr(1+i-offset, 1, string, color)
cmd = self._command_history[-1]
cmd2 = None
if len(cmd) > 97:
cmd2, cmd = cmd[97:], cmd[:97]
self._pad.addstr(18, 1, "> {}".format(cmd),
CRED + CBOLD + CREVERSE if self._textbox_active else 0)
if cmd2 is not None:
self._pad.addstr(19, 3, cmd2,
CRED + CBOLD + CREVERSE if self._textbox_active else 0)
self._draw_pad_to_screen()
@staticmethod
def _convert_reqresp_to_strings(request, response):
srequest = [
(0, request[i:i+95])
for i in range(0, len(request), 95)
]
srequest[0] = (0, ">>> " + srequest[0][1])
jresponse = json.dumps(response, indent=4, sort_keys=True).split("\n")
# TODO: if error, set 2 not 1
sresponse = [
(1, l[i:i+99])
for l in jresponse
for i in range(0, len(l), 99)
]
return srequest + sresponse + [(-1, "")]
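    # Hedged sketch of the shape this helper returns (the response value is
    # hypothetical): for request "getblockcount" and response 700000 the result
    # is roughly
    #   [(0, ">>> getblockcount"), (1, "700000"), (-1, "")]
    # where tag 0 marks request lines (drawn green in _draw), tag 1 marks
    # response lines, and the final tuple is a blank separator.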
async def _submit_command(self):
# TODO: parse, allow nested, use brackets etc
request = self._command_history[-1]
if len(request) == 0:
return
parts = request.split(" ")
for i in range(len(parts)):
# TODO: parse better.
if parts[i].isdigit():
parts[i] = int(parts[i])
elif parts[i] == "false" or parts[i] == "False":
parts[i] = False
elif parts[i] == "true" or parts[i] == "True":
parts[i] = True
else:
try:
parts[i] = decimal.Decimal(parts[i])
except:
pass
cmd = parts[0]
if len(parts) > 1:
params = parts[1:]
else:
params = None
try:
response = await self._client.request(cmd, params=params)
except RPCError as e:
response = str(e)
self._response_history.append(
(request, response),
)
self._response_history_strings.extend(
self._convert_reqresp_to_strings(request, response),
)
self._command_history.append("") # add a new, empty command
self._response_history_offset = len(self._response_history_strings) - 17
self._textbox_active = not self._textbox_active
await self._draw_if_visible()
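    # Hedged sketch of the naive parsing above (hypothetical RPC input): typing
    #   getblockhash 1000
    # yields cmd == "getblockhash" and params == [1000]; bare words that are not
    # digits, booleans or decimals are passed through as strings.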
async def _scroll_back_response_history(self):
if self._response_history_offset == 0:
return # At the beginning already.
self._response_history_offset -= 1
await self._draw_if_visible()
async def _scroll_forward_response_history(self):
if self._response_history_offset > len(self._response_history_strings) - 18:
return # At the end already.
self._response_history_offset += 1
await self._draw_if_visible()
async def handle_keypress(self, key):
if key == "\t" or key == "KEY_TAB":
self._textbox_active = not self._textbox_active
key = None
elif self._textbox_active:
if (len(key) == 1 and ord(key) == 127) or key == "KEY_BACKSPACE":
self._command_history[-1] = self._command_history[-1][:-1]
key = None
elif key == "KEY_RETURN" or key == "\n":
# We use ensure_future so as not to block the keypad loop on
# an RPC call
# asyncio.ensure_future(self._submit_command())
await self._submit_command()
return None
elif len(key) == 1:
# TODO: check if it's printable etc
if len(self._command_history[-1]) < 190:
self._command_history[-1] += key
key = None
else:
if key == "KEY_UP":
await self._scroll_back_response_history()
key = None
elif key == "KEY_DOWN":
await self._scroll_forward_response_history()
key = None
await self._draw_if_visible()
return key
async def on_mode_change(self, newmode):
""" Overrides view.View to set the textbox inactive. """
if newmode != self._mode_name:
self._textbox_active = False
self._visible = False
return
self._visible = True
await self._draw_if_visible()
| 31.26943
| 84
| 0.557746
|
5341cb810c77dc954b3807cbd06d90cc55b64abe
| 3,702
|
py
|
Python
|
tests/test_user_model.py
|
IgorIvkin/Children
|
a43bbfae3f9390b12df83099437ff6bde7bfcc5d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_user_model.py
|
IgorIvkin/Children
|
a43bbfae3f9390b12df83099437ff6bde7bfcc5d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_user_model.py
|
IgorIvkin/Children
|
a43bbfae3f9390b12df83099437ff6bde7bfcc5d
|
[
"Apache-2.0"
] | null | null | null |
"""
Author: Igor, Lena
Date: 2020.05.30
"""
from children import create_app
from models.user import User
from services.user_service import UserService
from services.base_service import BaseService
from exceptions.model_exceptions import ColumnValidationError
import pytest
@pytest.fixture(scope='module')
def app():
app = create_app(test_mode=True)
yield app
with app.app_context():
user_service = UserService(app)
user_service.delete_all()
def test_create_user(app):
with app.app_context():
user_service = UserService(app)
user = User()
user.login = 'test@example.com'
user.password = 'test'
user = user_service.create(user)
assert user.id is not None
def test_create_user_bad_login(app):
with app.app_context():
user = User()
with pytest.raises(ColumnValidationError, match=r"Wrong user login provided, an email is expected*"):
user.login = 'ololo login'
def test_create_user_bad_title(app):
with app.app_context():
user = User()
with pytest.raises(ColumnValidationError, match=r"User title is too short, at least 2 characters expected*"):
user.title = '1'
def test_create_user_fail_none_entity_provided(app):
with app.app_context():
user_service = UserService(app)
with pytest.raises(ValueError, match=r"None object was provided to create new entity.*"):
user = user_service.create(None)
def test_get_by_id(app):
with app.app_context():
user_service = UserService(app)
user = User()
user.login = 'test2@example.com'
user.password = 'test'
user = user_service.create(user)
assert user_service.get_by_id(user.id).id == user.id and user_service.get_by_id(user.id).login == user.login
def test_update_user_cannot_change_login(app):
with app.app_context():
user_service = UserService(app)
with pytest.raises(ValueError, match=r"Login cannot be presented to update user*"):
updated_user = user_service.update(id_entity=1, fields_to_update={'login': 'test3@example.com'})
def test_update_user_change_title_and_password(app):
with app.app_context():
user_service = UserService(app)
user = User()
user.login = 'test4@example.com'
user.password = 'test1'
user.title = 'First title'
user = user_service.create(user)
new_title = 'Test title!'
new_password = 'newpwd'
updated_user = user_service.update(user.id, {'title': new_title, 'password': new_password})
assert updated_user.title == new_title
assert user_service.check_password_hash(user.password, new_password)
def test_delete_by_id(app):
with app.app_context():
user_service = UserService(app)
user = User()
user.login = 'test5@example.com'
user.password = 'test1'
user = user_service.create(user)
user_service.delete_by_id(user.id)
assert user_service.get_by_id(user.id) is None
def test_update_user_cannot_assign_bad_column(app):
with app.app_context():
user_service = UserService(app)
user = User()
user.login = 'test6@example.com'
user.password = 'test1'
user = user_service.create(user)
with pytest.raises(ValueError, match=r"Model definition does not have such key*"):
updated_user = user_service.update(user.id, {'metadata': 'test'})
def test_create_base_service(app):
with app.app_context():
with pytest.raises(ValueError, match=r"You cannot instantiate an object of a class BaseService*"):
service = BaseService(app)
| 31.641026
| 117
| 0.67423
|
4dae71dfa955617daaee343e729b4cee90ba175d
| 2,670
|
py
|
Python
|
ironic_inspector/common/keystone.py
|
LinkleYping/ironic-inspector-vul
|
0a99b93fc36b2055ea668bff60d2a9a705b8411b
|
[
"Apache-2.0"
] | null | null | null |
ironic_inspector/common/keystone.py
|
LinkleYping/ironic-inspector-vul
|
0a99b93fc36b2055ea668bff60d2a9a705b8411b
|
[
"Apache-2.0"
] | null | null | null |
ironic_inspector/common/keystone.py
|
LinkleYping/ironic-inspector-vul
|
0a99b93fc36b2055ea668bff60d2a9a705b8411b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keystoneauth1 import loading
from oslo_config import cfg
CONF = cfg.CONF
DEFAULT_VALID_INTERFACES = ['internal', 'public']
# TODO(pas-ha) set default values in conf.opts.set_defaults()
def register_auth_opts(group, service_type):
loading.register_session_conf_options(CONF, group)
loading.register_auth_conf_options(CONF, group)
CONF.set_default('auth_type', default='password', group=group)
loading.register_adapter_conf_options(CONF, group)
CONF.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES,
group=group)
CONF.set_default('service_type', service_type, group=group)
def get_session(group):
auth = loading.load_auth_from_conf_options(CONF, group)
session = loading.load_session_from_conf_options(
CONF, group, auth=auth)
return session
def get_adapter(group, **adapter_kwargs):
return loading.load_adapter_from_conf_options(CONF, group,
**adapter_kwargs)
# TODO(pas-ha) set default values in conf.opts.set_defaults()
def add_auth_options(options, service_type):
def add_options(opts, opts_to_add):
for new_opt in opts_to_add:
for opt in opts:
if opt.name == new_opt.name:
break
else:
opts.append(new_opt)
opts = copy.deepcopy(options)
opts.insert(0, loading.get_auth_common_conf_options()[0])
# NOTE(dims): There are a lot of auth plugins, we just generate
# the config options for a few common ones
plugins = ['password', 'v2password', 'v3password']
for name in plugins:
plugin = loading.get_plugin_loader(name)
add_options(opts, loading.get_auth_plugin_conf_options(plugin))
add_options(opts, loading.get_session_conf_options())
adapter_opts = loading.get_adapter_conf_options(
include_deprecated=False)
cfg.set_defaults(adapter_opts, service_type=service_type,
valid_interfaces=DEFAULT_VALID_INTERFACES)
add_options(opts, adapter_opts)
opts.sort(key=lambda x: x.name)
return opts
| 36.575342
| 71
| 0.710487
|
22cb61c5aec70129da019213d28ed1f1055adba0
| 12,172
|
py
|
Python
|
src/statistics/sentences.py
|
rmjacobson/privacy-crawler-parser-tokenizer
|
73d4cf884000ed20341ea15458834022360d22c5
|
[
"MIT"
] | null | null | null |
src/statistics/sentences.py
|
rmjacobson/privacy-crawler-parser-tokenizer
|
73d4cf884000ed20341ea15458834022360d22c5
|
[
"MIT"
] | null | null | null |
src/statistics/sentences.py
|
rmjacobson/privacy-crawler-parser-tokenizer
|
73d4cf884000ed20341ea15458834022360d22c5
|
[
"MIT"
] | null | null | null |
"""
Privacy Policy Project
"""
import argparse, datetime, json, matplotlib, matplotlib.pyplot as plt, os, random, re, signal
from csv import reader, writer
from math import ceil, sqrt
from matplotlib.ticker import MaxNLocator
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, Lock, Value, cpu_count
from nltk.tokenize import sent_tokenize
from numpy import bincount, arange
from random import sample
from utils.utils import mkdir_clean, print_progress_bar, VerifyJsonExtension
class Policy:
def __init__(self, file, rule_dict):
self.file = file
self.lengths = []
self.sentences = []
self.rule_hits = rule_dict.copy()
self.rule_hits = self.rule_hits.fromkeys(self.rule_hits, 0)
self.rule_hits["GOOD"] = 0
# self.rule_hits["length"] = 0
def build_rule_dict(file):
"""
Build rule dictionary from input JSON file, then compile the regexs
contained in that file so they can be matched. Note that the input
JSON must be properly formatted for *reading* in from the JSON,
not necessarily properly formatted for native regex. Escaped
characters must be double-escaped with an extra backslash because
they will otherwise not be read in as correct JSON. If you make
changes to the rules.json file, please ensure that you have done
this or the Python json module will complain. Most good text
editors will notify you about this if you have the language set to
JSON when you edit the rules.json file.
In: string path to rules.json file.
Out: dict of compiled regexs with rule names as keys.
"""
with open(file, "r") as fp:
rule_dict = json.load(fp)
for name in rule_dict:
if name == "HEAD_FRAG" or name == "SHORT":
continue
rule_dict[name][0] = re.compile(rule_dict[name][0])
return rule_dict
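# A hedged example of the rules.json shape this loader expects (rule names and
# patterns are hypothetical; note the doubled backslash that JSON requires):
#   {
#       "SHORT": [5, "True"],
#       "HEAD_FRAG": [0.6, "True"],
#       "STARTS_LOWER": ["[a-z]", "True"],
#       "STARTS_WITH_BRACKET": ["\\[", "True"]
#   }
# SHORT and HEAD_FRAG carry numeric thresholds; every other entry's first field
# is compiled as a regex, and its second field ("True"/"False") selects whether a
# match or a non-match counts as a hit in apply_sentence_rules below.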
def is_short(sentence, threshold=5):
"""
Check if the text contains too few words to be a valid sentence.
The threshold is set to 5 by default but can be changed depending
on the input SHORT rule.
In: sentence text, threshold integer.
Out: Boolean.
"""
words = sentence.split()
return len(words) < threshold
def is_header_fragment(sentence, threshold=0.6):
"""
    More than the threshold fraction of the words contain a capital letter,
    which usually happens when text that belongs in <hX> heading tags ends
    up inside <p> paragraph tags.
In: sentence text, threshold float.
Out: Boolean.
"""
words = sentence.split()
ncaps = 0
for word in words:
caps = [l for l in word if l.isupper()]
if len(caps) > 0:
ncaps += 1
if (ncaps / len(words)) > threshold:
return True
else:
return False
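# Illustrative sketch (hypothetical sentences): "Information We Collect From You"
# has 5 of 5 words containing a capital letter (1.0 > 0.6), so it is flagged as a
# header fragment; "We collect your email address." yields 1/5 = 0.2 and is not.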
def apply_sentence_rules(sentence, rule_dict):
"""
Take in sentence rules from the rule_dict provided as an input to
this program. Match every rule against the regex in the rule_dict
(except HEAD_FRAG because it has its own function), and append
the name of the rule to the list to be returned.
In: sentence string, rule dictionary of regexs.
Out: list of rule names that apply to the sentence.
"""
# print(sentence)
rule_hits = []
for name, rule in rule_dict.items():
if name == "SHORT":
if is_short(sentence, rule_dict[name][0]):
rule_hits.append(name)
continue
if name == "HEAD_FRAG":
if is_header_fragment(sentence, rule_dict[name][0]):
rule_hits.append(name)
continue
if rule[1] == "True" and rule[0].match(sentence):
hit = True
rule_hits.append(name)
if rule[1] == "False" and not rule[0].match(sentence):
hit = True
rule_hits.append(name)
if len(rule_hits) == 0:
rule_hits.append("GOOD")
# rule_hits.append("length")
return rule_hits
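# Hedged sketch of the return value (hit names beyond SHORT/HEAD_FRAG/GOOD depend
# entirely on the rules.json supplied at runtime):
#   apply_sentence_rules("e.g.", rule_dict)   -> something like ["SHORT"]
#   apply_sentence_rules("We never sell your data to third parties.", rule_dict)
#       -> ["GOOD"] when no rule matches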
def generate_rule_bar_fig(rule_hits, outfile):
"""
Creates bar graph of policy's sentence rule hits, saves to file.
In: rule_hits (list of rule names as strings), output file.
Out: N/A
"""
plt.bar(range(len(rule_hits)), list(rule_hits.values()), align="center", color="blue")
plt.xticks(range(len(rule_hits)), list(rule_hits.keys()), rotation=30, fontsize=8)
plt.ylabel("# of Sentences in Policy")
plt.savefig(outfile)
def extract_sentences(file):
"""
Reads in csv file from pre-generated parser output and looks at
every line to gather sentences from it, then apply the input
ruleset on those sentences, and return statistics.
"""
policy_stats = Policy(file, rule_dict)
with open(parser_output_dir + file, "r") as fp:
csv_reader = reader(fp)
elements = list(csv_reader)
sentence_list = []
for elem in elements: # for every possible object
sentences = sent_tokenize(elem[-1])
for sentence in sentences: # for every sentence in that object
rule_hits = apply_sentence_rules(sentence, rule_dict)
policy_stats.lengths.append(len(sentence.split()))
sentence_list.append((len(sentence.split()), sentence, rule_hits))
for name in policy_stats.rule_hits.keys(): # loop through all the keys in the dict
if name in rule_hits: # and increment the policy_stats dict if that key is in the sentence's keys
policy_stats.rule_hits[name] += 1
policy_stats.sentences.append(rule_hits)
# write sentences to csv file
headings = ("Number of Words","Sentence Text","Rule Hits")
with open(output_folder + file + "_sentences.csv", "w") as fp:
csv_writer = writer(fp)
csv_writer.writerow(headings)
csv_writer.writerows(sentence_list)
# create bar graphs of policy's sentence rule hits
generate_rule_bar_fig(policy_stats.rule_hits, output_folder + file[:-4] + "_rule_bar.pdf")
# Update progress bar
with index.get_lock():
index.value += 1
print_progress_bar(index.value, len(random_files), prefix = "Sentence Statistics Progress:", suffix = "Complete", length = 50)
return policy_stats
def generate_rule_hist_figs(files, rule_hits, lengths, num_files, rule_dict, outfile):
"""
    Creates an aggregate representation of the rule_hits dictionaries
    collected from every successfully parsed file. Produces histograms
    of every sentence parsing rule and presents them as a single image.
    Does not include "GOOD" rules.
    In: files, the list of processed policy file names,
        rule_hits, the list of per-policy rule_hits dictionaries,
        lengths, the list of per-policy sentence-length lists,
        num_files, the number of files that were inspected for sentences,
        rule_dict, providing the names of rules as keys,
        outfile, the string filepath to output the figure to.
Out: figure containing histograms of all rules.
"""
num_files = len(rule_hits)
rows = ceil(sqrt(len(rule_dict)) + 1) + 1
cols = ceil(sqrt(len(rule_dict))) - 1
# fig = plt.figure(figsize=(rows*10,cols*10))
# i = 0
# for i, (name, rule) in enumerate(rule_dict.items(), start=1):
# count = [d[name] for d in rule_hits]
# subfig = fig.add_subplot(rows,cols,i)
# subfig.set_xlabel(name + " Rule Hit Count")
# subfig.set_ylabel("Number of Policies")
# subfig.xaxis.set_major_locator(MaxNLocator(integer=True))
# subfig.hist(count, num_files, rwidth=0.5)
# print(i)
# len_boxplot = fig.add_subplot(rows-1,1,5)
# len_boxplot.set_xlabel("Sentence Length per Policy")
# len_boxplot.set_ylabel("")
# filenames = ["_".join(i.split("_", 2)[:2]) for i in files]
# len_boxplot.boxplot(lengths)
# fig.tight_layout()
# fig.savefig(outfile)
fig = plt.figure(figsize=(rows*10,cols*10))
gs = fig.add_gridspec(rows, cols)
r = 0
c = 0
for i, (name, rule) in enumerate(rule_dict.items(), start=1):
count = [d[name] for d in rule_hits]
# standalone_fig = hist(count, bins=arange(num_files + 1) - 0.5)
standalone_fig = plt.figure()
plt.hist(count, bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400], rwidth=0.5, figure=standalone_fig)
plt.xlabel(name + " Rule Hit Count", figure=standalone_fig)
plt.ylabel("# of Policies", figure=standalone_fig)
standalone_fig.savefig(outfile[:-4] + "_" + name + ".pdf")
subfig = fig.add_subplot(gs[r, c])
subfig.set_xlabel(name + " Rule Hit Count")
subfig.set_ylabel("# of Policies")
# subfig.hist(count, bins=arange(num_files + 1) - 0.5)
subfig.hist(count, bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400], rwidth=0.5)
if c < cols-1:
c += 1
else:
c = 0
r += 1
print_progress_bar(i, len(rule_dict.items()) + 1, prefix = "Rule Histograms Progress:", suffix = "Complete", length = 50)
if c != 0:
r +=1
len_boxplot = fig.add_subplot(gs[r:, :])
len_boxplot.set_xlabel("Sentence Length per Policy")
len_boxplot.set_ylabel("")
len_boxplot.tick_params(bottom=False, labelbottom=False)
len_boxplot.boxplot(lengths)
print_progress_bar(i + 1, len(rule_dict.items()) + 1, prefix = "Rule Histograms Progress:", suffix = "Complete", length = 50)
fig.tight_layout()
fig.savefig(outfile)
def start_process(i):
"""
Set inter-process shared values to global so they can be accessed.
Ignore SIGINT in child workers, will be handled to enable restart.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
global index
index = i
if __name__ == '__main__':
timestamp = "_{0:%Y%m%d-%H%M%S}".format(datetime.datetime.now())
argparse = argparse.ArgumentParser(description="Read and apply sentence rules to contents of parser output.")
argparse.add_argument( "-n", "--num_samples",
type=int,
default=0,
required=False,
help="number of files this program should read from the directory.")
argparse.add_argument( "rules",
help="json file containing list of sentence rules.",
action=VerifyJsonExtension)
argparse.add_argument( "parser_output_dir",
help="directory containing html files to verify.")
argparse.add_argument( "-o", "--output_folder",
default="./sentence_stats_output" + timestamp + "/",
required=False,
help="directory to dump sentence stats output. Will be created if does not exist.")
args = argparse.parse_args()
parser_output_dir = args.parser_output_dir
output_folder = args.output_folder
rule_dict = build_rule_dict(args.rules)
mkdir_clean(output_folder)
files = [name for name in os.listdir(parser_output_dir) if name.endswith("paragraphs.csv")]
try:
random_files = sample(files, args.num_samples)
except ValueError:
print("ValueError: args.num_samples > # files in parser_output_dir, defaulting to all files in that directory.")
random_files = files
if args.num_samples == 0:
random_files = files
print("Tokenizing " + str(len(random_files)) + " files...")
index = Value("i",0) # shared val, index of current parsed file
pool_size = cpu_count() * 2
matplotlib.use("agg") # don't know why this works, but allows matplotlib to execute in child procs
pool = Pool(
processes=pool_size,
initializer=start_process,
initargs=[index]
)
policy_list = pool.map(extract_sentences, random_files) # map keeps domain_list order
pool.close() # no more tasks
pool.join() # merge all child processes
# print("Generating last rule histogram...")
rule_hits = [p.rule_hits for p in policy_list]
lengths = [p.lengths for p in policy_list]
generate_rule_hist_figs(random_files, rule_hits, lengths, len(rule_hits), rule_dict, output_folder + "rule_hists.pdf")
print("Done")
| 41.542662
| 154
| 0.644594
|
126049237ee4b20f001f0f00e6b7efcd499cb322
| 16,042
|
py
|
Python
|
edk2basetools/Workspace/MetaFileTable.py
|
matthewfcarlson/edk2-pytool-base
|
ddf78ca6e2110f03e020a5bd0ca32b2a463fecff
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
edk2basetools/Workspace/MetaFileTable.py
|
matthewfcarlson/edk2-pytool-base
|
ddf78ca6e2110f03e020a5bd0ca32b2a463fecff
|
[
"BSD-2-Clause-Patent"
] | 1
|
2020-04-14T22:23:01.000Z
|
2020-04-15T06:47:53.000Z
|
edk2basetools/Workspace/MetaFileTable.py
|
matthewfcarlson/edk2-pytool-base
|
ddf78ca6e2110f03e020a5bd0ca32b2a463fecff
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
## @file
# This file is used to create/update/query/erase a meta file table
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import uuid
import edk2basetools.Common.EdkLogger as EdkLogger
from edk2basetools.Common.BuildToolError import FORMAT_INVALID
from edk2basetools.CommonDataClass.DataClass import MODEL_FILE_DSC, MODEL_FILE_DEC, MODEL_FILE_INF, \
MODEL_FILE_OTHERS
from edk2basetools.Common.DataType import *
class MetaFileTable():
# TRICK: use file ID as the part before '.'
_ID_STEP_ = 1
_ID_MAX_ = 99999999
## Constructor
def __init__(self, DB, MetaFile, FileType, Temporary, FromItem=None):
self.MetaFile = MetaFile
self.TableName = ""
self.DB = DB
self._NumpyTab = None
self.CurrentContent = []
DB.TblFile.append([MetaFile.Name,
MetaFile.Ext,
MetaFile.Dir,
MetaFile.Path,
FileType,
MetaFile.TimeStamp,
FromItem])
self.FileId = len(DB.TblFile)
self.ID = self.FileId * 10**8
if Temporary:
self.TableName = "_%s_%s_%s" % (FileType, len(DB.TblFile), uuid.uuid4().hex)
else:
self.TableName = "_%s_%s" % (FileType, len(DB.TblFile))
def IsIntegrity(self):
Result = False
try:
TimeStamp = self.MetaFile.TimeStamp
if not self.CurrentContent:
Result = False
else:
Result = self.CurrentContent[-1][0] < 0
except Exception as Exc:
EdkLogger.debug(EdkLogger.DEBUG_5, str(Exc))
return False
return Result
def SetEndFlag(self):
self.CurrentContent.append(self._DUMMY_)
def GetAll(self):
return [item for item in self.CurrentContent if item[0] >= 0 and item[-1]>=0]
## Python class representation of table storing module data
class ModuleTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
BelongsToItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
    # used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====', -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Db, MetaFile, Temporary):
MetaFileTable.__init__(self, Db, MetaFile, MODEL_FILE_INF, Temporary)
## Insert a record into table Inf
#
# @param Model: Model of a Inf item
# @param Value1: Value1 of a Inf item
# @param Value2: Value2 of a Inf item
# @param Value3: Value3 of a Inf item
# @param Scope1: Arch of a Inf item
# @param Scope2 Platform os a Inf item
# @param BelongsToItem: The item belongs to which another item
# @param StartLine: StartLine of a Inf item
# @param StartColumn: StartColumn of a Inf item
# @param EndLine: EndLine of a Inf item
# @param EndColumn: EndColumn of a Inf item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON,
BelongsToItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0):
(Value1, Value2, Value3, Scope1, Scope2) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip())
self.ID = self.ID + self._ID_STEP_
if self.ID >= (MODEL_FILE_INF + self._ID_MAX_):
self.ID = MODEL_FILE_INF + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
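    ## Hedged usage sketch (the model constant and values below are placeholders,
    #  not taken from a real INF file):
    #
    #    RecordId = InfTable.Insert(SOME_MODEL, 'Value1', 'Value2', 'Value3',
    #                               Scope1='IA32', StartLine=10, StartColumn=1,
    #                               EndLine=10, EndColumn=40)
    #
    #  The returned ID encodes the owning file (FileId * 10**8) plus a running
    #  per-record step, and the new row is appended to CurrentContent.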
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
# @param Platform The Platform attribute of Record
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None, Platform=None, BelongsToItem=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>=0 ]
if Arch is not None and Arch != TAB_ARCH_COMMON:
ArchList = set(['COMMON'])
ArchList.add(Arch)
result = [item for item in result if item[5] in ArchList]
if Platform is not None and Platform != TAB_COMMON:
Platformlist = set( ['COMMON','DEFAULT'])
Platformlist.add(Platform)
result = [item for item in result if item[6] in Platformlist]
if BelongsToItem is not None:
result = [item for item in result if item[7] == BelongsToItem]
result = [ [r[2],r[3],r[4],r[5],r[6],r[0],r[9]] for r in result ]
return result
## Python class representation of table storing package data
class PackageTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
BelongsToItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
    # used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====', -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Cursor, MetaFile, Temporary):
MetaFileTable.__init__(self, Cursor, MetaFile, MODEL_FILE_DEC, Temporary)
## Insert table
#
# Insert a record into table Dec
#
# @param Model: Model of a Dec item
# @param Value1: Value1 of a Dec item
# @param Value2: Value2 of a Dec item
# @param Value3: Value3 of a Dec item
# @param Scope1: Arch of a Dec item
# @param Scope2: Module type of a Dec item
# @param BelongsToItem: The item belongs to which another item
# @param StartLine: StartLine of a Dec item
# @param StartColumn: StartColumn of a Dec item
# @param EndLine: EndLine of a Dec item
# @param EndColumn: EndColumn of a Dec item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON,
BelongsToItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0):
(Value1, Value2, Value3, Scope1, Scope2) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip())
self.ID = self.ID + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>=0 ]
if Arch is not None and Arch != TAB_ARCH_COMMON:
ArchList = set(['COMMON'])
ArchList.add(Arch)
result = [item for item in result if item[5] in ArchList]
return [[r[2], r[3], r[4], r[5], r[6], r[0], r[8]] for r in result]
def GetValidExpression(self, TokenSpaceGuid, PcdCName):
QueryTab = self.CurrentContent
result = [[item[2], item[8]] for item in QueryTab if item[3] == TokenSpaceGuid and item[4] == PcdCName]
validateranges = []
validlists = []
expressions = []
try:
for row in result:
comment = row[0]
LineNum = row[1]
comment = comment.strip("#")
comment = comment.strip()
oricomment = comment
if comment.startswith("@ValidRange"):
comment = comment.replace("@ValidRange", "", 1)
validateranges.append(comment.split("|")[1].strip())
if comment.startswith("@ValidList"):
comment = comment.replace("@ValidList", "", 1)
validlists.append(comment.split("|")[1].strip())
if comment.startswith("@Expression"):
comment = comment.replace("@Expression", "", 1)
expressions.append(comment.split("|")[1].strip())
except Exception as Exc:
ValidType = ""
if oricomment.startswith("@ValidRange"):
ValidType = "@ValidRange"
if oricomment.startswith("@ValidList"):
ValidType = "@ValidList"
if oricomment.startswith("@Expression"):
ValidType = "@Expression"
EdkLogger.error('Parser', FORMAT_INVALID, "The syntax for %s of PCD %s.%s is incorrect" % (ValidType, TokenSpaceGuid, PcdCName),
ExtraData=oricomment, File=self.MetaFile, Line=LineNum)
return set(), set(), set()
return set(validateranges), set(validlists), set(expressions)
## Python class representation of table storing platform data
class PlatformTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
Scope3 TEXT,
BelongsToItem REAL NOT NULL,
FromItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
    # used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====','====', -1, -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Cursor, MetaFile, Temporary, FromItem=0):
MetaFileTable.__init__(self, Cursor, MetaFile, MODEL_FILE_DSC, Temporary, FromItem)
## Insert table
#
# Insert a record into table Dsc
#
# @param Model: Model of a Dsc item
# @param Value1: Value1 of a Dsc item
# @param Value2: Value2 of a Dsc item
# @param Value3: Value3 of a Dsc item
# @param Scope1: Arch of a Dsc item
# @param Scope2: Module type of a Dsc item
# @param BelongsToItem: The item belongs to which another item
# @param FromItem: The item belongs to which dsc file
# @param StartLine: StartLine of a Dsc item
# @param StartColumn: StartColumn of a Dsc item
# @param EndLine: EndLine of a Dsc item
# @param EndColumn: EndColumn of a Dsc item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON, Scope3=TAB_DEFAULT_STORES_DEFAULT,BelongsToItem=-1,
FromItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=1):
(Value1, Value2, Value3, Scope1, Scope2, Scope3) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip(), Scope3.strip())
self.ID = self.ID + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
Scope3,
BelongsToItem,
FromItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
## Query table
#
# @param Model: The Model of Record
# @param Scope1: Arch of a Dsc item
# @param Scope2: Module type of a Dsc item
# @param BelongsToItem: The item belongs to which another item
# @param FromItem: The item belongs to which dsc file
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Scope1=None, Scope2=None, BelongsToItem=None, FromItem=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>0 ]
if Scope1 is not None and Scope1 != TAB_ARCH_COMMON:
Sc1 = set(['COMMON'])
Sc1.add(Scope1)
result = [item for item in result if item[5] in Sc1]
Sc2 = set( ['COMMON','DEFAULT'])
if Scope2 and Scope2 != TAB_COMMON:
if '.' in Scope2:
Index = Scope2.index('.')
NewScope = TAB_COMMON + Scope2[Index:]
Sc2.add(NewScope)
Sc2.add(Scope2)
result = [item for item in result if item[6] in Sc2]
if BelongsToItem is not None:
result = [item for item in result if item[8] == BelongsToItem]
else:
result = [item for item in result if item[8] < 0]
if FromItem is not None:
result = [item for item in result if item[9] == FromItem]
result = [ [r[2],r[3],r[4],r[5],r[6],r[7],r[0],r[10]] for r in result ]
return result
def DisableComponent(self,comp_id):
for item in self.CurrentContent:
if item[0] == comp_id or item[8] == comp_id:
item[-1] = -1
## Factory class to produce different storage for different type of meta-file
class MetaFileStorage(object):
_FILE_TABLE_ = {
MODEL_FILE_INF : ModuleTable,
MODEL_FILE_DEC : PackageTable,
MODEL_FILE_DSC : PlatformTable,
MODEL_FILE_OTHERS : MetaFileTable,
}
_FILE_TYPE_ = {
".inf" : MODEL_FILE_INF,
".dec" : MODEL_FILE_DEC,
".dsc" : MODEL_FILE_DSC,
}
_ObjectCache = {}
## Constructor
def __new__(Class, Cursor, MetaFile, FileType=None, Temporary=False, FromItem=None):
# no type given, try to find one
key = (MetaFile.Path, FileType,Temporary,FromItem)
if key in Class._ObjectCache:
return Class._ObjectCache[key]
if not FileType:
            if MetaFile.Type in Class._FILE_TYPE_:
FileType = Class._FILE_TYPE_[MetaFile.Type]
else:
FileType = MODEL_FILE_OTHERS
# don't pass the type around if it's well known
if FileType == MODEL_FILE_OTHERS:
Args = (Cursor, MetaFile, FileType, Temporary)
else:
Args = (Cursor, MetaFile, Temporary)
if FromItem:
Args = Args + (FromItem,)
# create the storage object and return it to caller
reval = Class._FILE_TABLE_[FileType](*Args)
if not Temporary:
Class._ObjectCache[key] = reval
return reval
| 37.220418
| 155
| 0.564206
|
99531e29a24c7933a40648b55ae2393c1acbc4d2
| 3,884
|
py
|
Python
|
zs3/dataloaders/custom_transforms.py
|
vaynelau/ZS3-voc12
|
106feb947d79e889032e8e9181a7353a4d22b3a3
|
[
"Apache-2.0"
] | null | null | null |
zs3/dataloaders/custom_transforms.py
|
vaynelau/ZS3-voc12
|
106feb947d79e889032e8e9181a7353a4d22b3a3
|
[
"Apache-2.0"
] | null | null | null |
zs3/dataloaders/custom_transforms.py
|
vaynelau/ZS3-voc12
|
106feb947d79e889032e8e9181a7353a4d22b3a3
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import torch
from PIL import Image, ImageOps, ImageFilter
class Normalize:
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)):
self.mean = mean
self.std = std
def __call__(self, sample):
img = sample["image"]
mask = sample["label"]
img = np.array(img).astype(np.float32)
mask = np.array(mask).astype(np.float32)
img /= 255.0
img -= self.mean
img /= self.std
return {"image": img, "label": mask}
class ToTensor:
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
img = sample["image"]
mask = sample["label"]
img = np.array(img).astype(np.float32).transpose((2, 0, 1))
mask = np.array(mask).astype(np.float32)
img = torch.from_numpy(img).float()
mask = torch.from_numpy(mask).float()
return {"image": img, "label": mask}
class RandomHorizontalFlip:
def __call__(self, sample):
img = sample["image"]
mask = sample["label"]
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
return {"image": img, "label": mask}
class RandomGaussianBlur:
def __call__(self, sample):
img = sample["image"]
mask = sample["label"]
if random.random() < 0.5:
img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))
return {"image": img, "label": mask}
class RandomScaleCrop:
def __init__(self, base_size, crop_size, fill=255):
self.base_size = base_size
self.crop_size = crop_size
self.fill = fill
def __call__(self, sample):
img = sample["image"]
mask = sample["label"]
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < self.crop_size:
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - self.crop_size)
y1 = random.randint(0, h - self.crop_size)
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {"image": img, "label": mask}
class FixScale:
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, sample):
img = sample["image"]
mask = sample["label"]
w, h = img.size
if w > h:
oh = self.crop_size
ow = int(1.0 * w * oh / h)
else:
ow = self.crop_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
return {"image": img, "label": mask}
| 31.322581
| 90
| 0.540937
|
0c3310e4dd863f8643cf81157f3d632dfc57cacd
| 11,376
|
py
|
Python
|
env/lib/python3.6/site-packages/retro/retro_env.py
|
boodahDEV/Soni-IA
|
c452c0b3df3a3ced4b5027c2abb4f3c22fd0f948
|
[
"Apache-2.0"
] | 2
|
2019-04-05T05:51:23.000Z
|
2019-04-07T04:21:47.000Z
|
env/lib/python3.6/site-packages/retro/retro_env.py
|
boodahDEV/Soni-IA
|
c452c0b3df3a3ced4b5027c2abb4f3c22fd0f948
|
[
"Apache-2.0"
] | null | null | null |
env/lib/python3.6/site-packages/retro/retro_env.py
|
boodahDEV/Soni-IA
|
c452c0b3df3a3ced4b5027c2abb4f3c22fd0f948
|
[
"Apache-2.0"
] | 1
|
2019-04-08T07:11:26.000Z
|
2019-04-08T07:11:26.000Z
|
import gc
import gym
import gzip
import gym.spaces
import json
import numpy as np
import os
import retro
import retro.data
#import pygame
#from pygame.locals import *
from gym.utils import seeding
gym_version = tuple(int(x) for x in gym.__version__.split('.'))
__all__ = ['RetroEnv']
class RetroEnv(gym.Env):
"""
Gym Retro environment class
Provides a Gym interface to classic video games
"""
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 60.0}
def __init__(self, game, state=retro.State.DEFAULT, scenario=None, info=None, use_restricted_actions=retro.Actions.FILTERED,
record=False, players=1, inttype=retro.data.Integrations.STABLE, obs_type=retro.Observations.IMAGE):
if not hasattr(self, 'spec'):
self.spec = None
self._obs_type = obs_type
self.img = None
self.ram = None
self.viewer = None
self.gamename = game
self.statename = state
self.initial_state = None
self.players = players
metadata = {}
rom_path = retro.data.get_romfile_path(game, inttype)
metadata_path = retro.data.get_file_path(game, 'metadata.json', inttype)
if state == retro.State.NONE:
self.statename = None
elif state == retro.State.DEFAULT:
self.statename = None
try:
with open(metadata_path) as f:
metadata = json.load(f)
if 'default_player_state' in metadata and self.players <= len(metadata['default_player_state']):
self.statename = metadata['default_player_state'][self.players - 1]
elif 'default_state' in metadata:
self.statename = metadata['default_state']
else:
self.statename = None
except (IOError, json.JSONDecodeError):
pass
if self.statename:
self.load_state(self.statename, inttype)
self.data = retro.data.GameData()
if info is None:
info = 'data'
if info.endswith('.json'):
# assume it's a path
info_path = info
else:
info_path = retro.data.get_file_path(game, info + '.json', inttype)
if scenario is None:
scenario = 'scenario'
if scenario.endswith('.json'):
# assume it's a path
scenario_path = scenario
print(str(scenario_path)) ##YO
else:
scenario_path = retro.data.get_file_path(game, scenario + '.json', inttype)
print(str(scenario_path)) ##YO
self.system = retro.get_romfile_system(rom_path)
# We can't have more than one emulator per process. Before creating an
# emulator, ensure that unused ones are garbage-collected
gc.collect()
self.em = retro.RetroEmulator(rom_path)
self.em.configure_data(self.data)
self.em.step()
core = retro.get_system_info(self.system)
self.buttons = core['buttons']
self.num_buttons = len(self.buttons)
self.button_combos = self.data.valid_actions()
try:
assert self.data.load(info_path, scenario_path), 'Failed to load info (%s) or scenario (%s)' % (info_path, scenario_path)
except Exception:
del self.em
raise
if use_restricted_actions == retro.Actions.DISCRETE:
combos = 1
for combo in self.button_combos:
combos *= len(combo)
self.action_space = gym.spaces.Discrete(combos ** players)
elif use_restricted_actions == retro.Actions.MULTI_DISCRETE:
self.action_space = gym.spaces.MultiDiscrete([len(combos) if gym_version >= (0, 9, 6) else (0, len(combos) - 1) for combos in self.button_combos] * players)
else:
self.action_space = gym.spaces.MultiBinary(self.num_buttons * players)
kwargs = {}
if gym_version >= (0, 9, 6):
kwargs['dtype'] = np.uint8
if self._obs_type == retro.Observations.RAM:
shape = self.get_ram().shape
else:
img = [self.get_screen(p) for p in range(players)]
shape = img[0].shape
self.observation_space = gym.spaces.Box(low=0, high=255, shape=shape, **kwargs)
self.use_restricted_actions = use_restricted_actions
self.movie = None
self.movie_id = 0
self.movie_path = None
if record is True:
self.auto_record()
elif record is not False:
self.auto_record(record)
self.seed()
if gym_version < (0, 9, 6):
self._seed = self.seed
self._step = self.step
self._reset = self.reset
self._render = self.render
self._close = self.close
def _update_obs(self):
if self._obs_type == retro.Observations.RAM:
self.ram = self.get_ram()
return self.ram
elif self._obs_type == retro.Observations.IMAGE:
self.img = self.get_screen()
return self.img
else:
raise ValueError('Unrecognized observation type: {}'.format(self._obs_type))
##########################################################################################################################
def action_to_array(self, a):
actions = []
for p in range(self.players):
action = 0
if self.use_restricted_actions == retro.Actions.DISCRETE:
'''for combo in self.button_combos:
current = a % len(combo)
a //= len(combo)
action |= combo[current] '''
elif self.use_restricted_actions == retro.Actions.MULTI_DISCRETE:
ap = a[self.num_buttons * p:self.num_buttons * (p + 0)] #es 1
for i in range(len(ap)):
                    buttons = self.button_combos[i]  # it does not enter this condition
action |= buttons[ap[i]]
# print ("\n",ap)
else:
ap = a[self.num_buttons * p:self.num_buttons * (p + 1)] #es 1
for i in range(len(ap)):
action |= int(ap[i]) << i
if self.use_restricted_actions == retro.Actions.FILTERED:
action = self.data.filter_action(action)
ap = np.zeros([self.num_buttons], np.uint8)
for i in range(self.num_buttons):
ap[i] = (action >> i) & 1 #es 1
#ap = [0,1,0,0,0,1,0,0,0,0,0,0]
#print ("\n",p)
actions.append(ap)
return actions
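    # Hedged sketch: with the default MultiBinary action space, one player and
    # self.num_buttons == 12 (a typical retro pad), an action such as
    #   a = [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
    # comes back as [array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=uint8)],
    # one uint8 button mask per player (FILTERED mode may zero out disallowed
    # button combinations first via data.filter_action).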
#############################################################################################################################
def step(self, a):
if self.img is None and self.ram is None:
raise RuntimeError('Please call env.reset() before env.step()')
for p, ap in enumerate(self.action_to_array(a)):
if self.movie:
for i in range(self.num_buttons):
self.movie.set_key(i, ap[i], p)
self.em.set_button_mask(ap, p)
if self.movie:
self.movie.step()
self.em.step()
self.data.update_ram()
ob = self._update_obs()
rew, done, info = self.compute_step()
return ob, rew, bool(done), dict(info)
def reset(self):
if self.initial_state:
self.em.set_state(self.initial_state)
for p in range(self.players):
self.em.set_button_mask(np.zeros([self.num_buttons], np.uint8), p)
self.em.step()
if self.movie_path is not None:
rel_statename = os.path.splitext(os.path.basename(self.statename))[0]
self.record_movie(os.path.join(self.movie_path, '%s-%s-%06d.bk2' % (self.gamename, rel_statename, self.movie_id)))
self.movie_id += 1
if self.movie:
self.movie.step()
self.data.reset()
self.data.update_ram()
return self._update_obs()
def seed(self, seed=None):
self.np_random, seed1 = seeding.np_random(seed)
# Derive a random seed. This gets passed as a uint, but gets
# checked as an int elsewhere, so we need to keep it below
# 2**31.
seed2 = seeding.hash_seed(seed1 + 1) % 2**31
return [seed1, seed2]
def render(self, mode='rgb_array', close=False):
if close:
if self.viewer:
self.viewer.close()
return
img = self.get_screen() if self.img is None else self.img
if mode == "rgb_array":
return img
elif mode == "human":
if self.viewer is None:
from gym.envs.classic_control.rendering import SimpleImageViewer
self.viewer = SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen ####################################################33img
def close(self):
if hasattr(self, 'em'):
del self.em
def get_action_meaning(self, act):
actions = []
for p, action in enumerate(self.action_to_array(act)):
actions.append([self.buttons[i] for i in np.extract(action, np.arange(len(action)))])
if self.players == 1:
return actions[0]
#print(actions)
return actions
def get_ram(self):
blocks = []
for offset in sorted(self.data.memory.blocks):
arr = np.frombuffer(self.data.memory.blocks[offset], dtype=np.uint8)
blocks.append(arr)
return np.concatenate(blocks)
def get_screen(self, player=0):
img = self.em.get_screen()
#print("IMAGEN: ",img)
x, y, w, h = self.data.crop_info(player)
if not w or x + w > img.shape[1]:
w = img.shape[1]
else:
w += x
if not h or y + h > img.shape[0]:
h = img.shape[0]
else:
h += y
if x == 0 and y == 0 and w == img.shape[1] and h == img.shape[0]:
return img
return img[y:500, x:600]
def load_state(self, statename, inttype=retro.data.Integrations.DEFAULT):
if not statename.endswith('.state'):
statename += '.state'
with gzip.open(retro.data.get_file_path(self.gamename, statename, inttype), 'rb') as fh:
self.initial_state = fh.read()
self.statename = statename
def compute_step(self):
if self.players > 1:
reward = [self.data.current_reward(p) for p in range(self.players)]
else:
reward = self.data.current_reward()
done = self.data.is_done()
return reward, done, self.data.lookup_all()
def record_movie(self, path):
self.movie = retro.Movie(path, True, self.players)
self.movie.configure(self.gamename, self.em)
if self.initial_state:
self.movie.set_state(self.initial_state)
def stop_record(self):
self.movie_path = None
self.movie_id = 0
if self.movie:
self.movie.close()
self.movie = None
def auto_record(self, path=None):
if not path:
path = os.getcwd()
self.movie_path = path
| 36.345048
| 168
| 0.551512
|
cc2f018859cf276b5b599749a81a0249b9d8a017
| 82,962
|
py
|
Python
|
src/tests/ftest/launch.py
|
berserk-fury/daos
|
e0a3249aa886962cef2345135b907b45f7109cae
|
[
"BSD-2-Clause-Patent"
] | 2
|
2021-07-14T12:21:50.000Z
|
2021-07-14T12:21:52.000Z
|
src/tests/ftest/launch.py
|
RyuGuo/daos
|
45a750c4e6db9e37a37146d2baa8b9c9c26b0a31
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
src/tests/ftest/launch.py
|
RyuGuo/daos
|
45a750c4e6db9e37a37146d2baa8b9c9c26b0a31
|
[
"BSD-2-Clause-Patent"
] | 1
|
2021-11-03T05:00:42.000Z
|
2021-11-03T05:00:42.000Z
|
#!/usr/bin/python3 -u
"""
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
# pylint: disable=too-many-lines
# this needs to be disabled as list_tests.py is still using python2
# pylint: disable=raise-missing-from
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
import json
import os
import re
import socket
import subprocess
import site
import sys
import time
import yaml
import errno
import xml.etree.ElementTree as ET
from xml.dom import minidom
from avocado.utils.distro import detect
from ClusterShell.NodeSet import NodeSet
from ClusterShell.Task import task_self
try:
# For python versions >= 3.2
from tempfile import TemporaryDirectory
except ImportError:
# Basic implementation of TemporaryDirectory for python versions < 3.2
from tempfile import mkdtemp
from shutil import rmtree
class TemporaryDirectory(object):
# pylint: disable=too-few-public-methods
"""Create a temporary directory.
When the last reference of this object goes out of scope the directory
and its contents are removed.
"""
def __init__(self):
"""Initialize a TemporaryDirectory object."""
self.name = mkdtemp()
def __del__(self):
"""Destroy a TemporaryDirectory object."""
rmtree(self.name)
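# Hedged usage sketch of the fallback above; unlike the stdlib class it defines
# no __enter__/__exit__, so it offers only the .name attribute and cleanup when
# the object is garbage collected:
#   tmp = TemporaryDirectory()
#   scratch = os.path.join(tmp.name, "some_file")   # hypothetical file name
#   del tmp   # the directory tree is removed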
DEFAULT_DAOS_TEST_LOG_DIR = "/var/tmp/daos_testing"
YAML_KEYS = {
"test_servers": "test_servers",
"test_clients": "test_clients",
"bdev_list": "nvme",
}
YAML_KEY_ORDER = ("test_servers", "test_clients", "bdev_list")
def display(args, message):
"""Display the message if verbosity is set.
Args:
args (argparse.Namespace): command line arguments for this program
message (str): message to display if verbosity is set
"""
if args.verbose > 0:
print(message)
def display_disk_space(path):
"""Display disk space of provided path destination.
Args:
path (str): path to directory to print disk space for.
"""
print("Current disk space usage of {}".format(path))
print(get_output(["df", "-h", path]))
def get_build_environment(args):
"""Obtain DAOS build environment variables from the .build_vars.json file.
Returns:
dict: a dictionary of DAOS build environment variable names and values
"""
build_vars_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../.build_vars.json")
try:
with open(build_vars_file) as vars_file:
return json.load(vars_file)
except ValueError:
if not args.list:
raise
return json.loads('{{"PREFIX": "{}"}}'.format(os.getcwd()))
except IOError as error:
if error.errno == errno.ENOENT:
if not args.list:
raise
return json.loads('{{"PREFIX": "{}"}}'.format(os.getcwd()))
def get_temporary_directory(args, base_dir=None):
"""Get the temporary directory used by functional tests.
Args:
base_dir (str, optional): base installation directory. Defaults to None.
Returns:
str: the full path of the temporary directory
"""
if base_dir is None:
base_dir = get_build_environment(args)["PREFIX"]
if base_dir == "/usr":
tmp_dir = os.getenv(
"DAOS_TEST_SHARED_DIR", os.path.expanduser("~/daos_test"))
else:
tmp_dir = os.path.join(base_dir, "tmp")
# Make sure the temporary directory exists to prevent pydaos import errors
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
return tmp_dir
def set_test_environment(args):
"""Set up the test environment.
Args:
args (argparse.Namespace): command line arguments for this program
Returns:
None
"""
base_dir = get_build_environment(args)["PREFIX"]
bin_dir = os.path.join(base_dir, "bin")
sbin_dir = os.path.join(base_dir, "sbin")
    # /usr/sbin is not in the PATH for non-root users on CI nodes, and the
    # SCM formatting tool mkfs.ext4 lives under /usr/sbin.
usr_sbin = os.path.sep + os.path.join("usr", "sbin")
path = os.environ.get("PATH")
if not args.list:
# Get the default interface to use if OFI_INTERFACE is not set
interface = os.environ.get("OFI_INTERFACE")
if interface is None:
# Find all the /sys/class/net interfaces on the launch node
# (excluding lo)
print("Detecting network devices - OFI_INTERFACE not set")
available_interfaces = {}
net_path = os.path.join(os.path.sep, "sys", "class", "net")
net_list = [dev for dev in os.listdir(net_path) if dev != "lo"]
for device in sorted(net_list):
if device == "bonding_masters":
continue
# Get the interface state - only include active (up) interfaces
device_operstate = os.path.join(net_path, device, "operstate")
with open(device_operstate, "r") as file_handle:
state = file_handle.read().strip()
# Only include interfaces that are up
if state.lower() == "up":
# Get the interface speed - used to select the fastest
# available
device_speed = os.path.join(net_path, device, "speed")
with open(device_speed, "r") as file_handle:
try:
speed = int(file_handle.read().strip())
# KVM/Qemu/libvirt returns an EINVAL
except IOError as ioerror:
if ioerror.errno == errno.EINVAL:
speed = 1000
else:
raise
print(
" - {0:<5} (speed: {1:>6} state: {2})".format(
device, speed, state))
# Only include the first active interface for each speed -
# first is determined by an alphabetic sort: ib0 will be
# checked before ib1
if speed not in available_interfaces:
available_interfaces[speed] = device
print("Available interfaces: {}".format(available_interfaces))
try:
# Select the fastest active interface available by sorting
# the speed
interface = \
available_interfaces[sorted(available_interfaces)[-1]]
except IndexError:
print(
"Error obtaining a default interface from: {}".format(
os.listdir(net_path)))
sys.exit(1)
print("Using {} as the default interface".format(interface))
# Update env definitions
os.environ["CRT_CTX_SHARE_ADDR"] = "0"
os.environ["OFI_INTERFACE"] = os.environ.get("OFI_INTERFACE", interface)
# Set the default location for daos log files written during testing
# if not already defined.
if "DAOS_TEST_LOG_DIR" not in os.environ:
os.environ["DAOS_TEST_LOG_DIR"] = DEFAULT_DAOS_TEST_LOG_DIR
os.environ["D_LOG_FILE"] = os.path.join(
os.environ["DAOS_TEST_LOG_DIR"], "daos.log")
# Assign the default value for transport configuration insecure mode
os.environ["DAOS_INSECURE_MODE"] = str(args.insecure_mode)
# Update PATH
os.environ["PATH"] = ":".join([bin_dir, sbin_dir, usr_sbin, path])
os.environ["COVFILE"] = "/tmp/test.cov"
# Python paths required for functional testing
set_python_environment()
if args.verbose > 0:
print("ENVIRONMENT VARIABLES")
for key in sorted(os.environ):
print(" {}: {}".format(key, os.environ[key]))
def set_python_environment():
"""Set up the test python environment."""
required_python_paths = [
os.path.abspath("util/apricot"),
os.path.abspath("util"),
os.path.abspath("cart/util"),
]
site_packages = site.getsitepackages()
# Including paths for pydaos shim - should be removed when shim is removed
additional_site_packages = []
for site_package in site_packages:
if "/lib64/python3." in site_package:
additional_site_packages.append(
re.sub(r"python[0-9.]+", "python3", site_package))
site_packages.extend(additional_site_packages)
# end of shim work around
required_python_paths.extend(site_packages)
# Check the PYTHONPATH env definition
python_path = os.environ.get("PYTHONPATH")
if python_path is None or python_path == "":
# Use the required paths to define the PYTHONPATH env if it is not set
os.environ["PYTHONPATH"] = ":".join(required_python_paths)
else:
# Append any missing required paths to the existing PYTHONPATH env
defined_python_paths = [
os.path.abspath(os.path.expanduser(path))
for path in python_path.split(":")]
for required_path in required_python_paths:
if required_path not in defined_python_paths:
python_path += ":" + required_path
os.environ["PYTHONPATH"] = python_path
print("Using PYTHONPATH={}".format(os.environ["PYTHONPATH"]))
def run_command(cmd):
"""Get the output of given command executed on this host.
Args:
cmd (list): command from which to obtain the output
Raises:
RuntimeError: if the command fails
Returns:
str: command output
"""
print("Running {}".format(" ".join(cmd)))
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
stdout, _ = process.communicate()
retcode = process.poll()
if retcode:
raise RuntimeError(
"Error executing '{}':\n\tOutput:\n{}".format(
" ".join(cmd), stdout))
return stdout
def get_output(cmd, check=True):
"""Get the output of given command executed on this host.
Args:
cmd (list): command from which to obtain the output
check (bool, optional): whether to emit an error and exit the
program if the exit status of the command is non-zero. Defaults
to True.
Returns:
str: command output
"""
try:
stdout = run_command(cmd)
except RuntimeError as error:
if check:
print(error)
sys.exit(1)
stdout = str(error)
return stdout
def time_command(cmd):
"""Execute the command on this host and display its duration.
Args:
cmd (list): command to time
Returns:
int: return code of the command
"""
print("Running: {}".format(" ".join(cmd)))
start_time = int(time.time())
return_code = subprocess.call(cmd)
end_time = int(time.time())
print("Total test time: {}s".format(end_time - start_time))
return return_code
def get_remote_output(host_list, command, timeout=120):
"""Run the command on each specified host in parallel.
Args:
host_list (list): list of hosts
command (str): command to run on each host
timeout (int, optional): number of seconds to wait for all jobs to
complete. Defaults to 120 seconds.
Returns:
Task: a Task object containing the result of the running the command on
the specified hosts
"""
# Create a ClusterShell Task to run the command in parallel on the hosts
if isinstance(host_list, list):
nodes = NodeSet.fromlist(host_list)
else:
nodes = NodeSet(host_list)
task = task_self()
# task.set_info('debug', True)
# Enable forwarding of the ssh authentication agent connection
task.set_info("ssh_options", "-oForwardAgent=yes")
print("Running on {}: {}".format(nodes, command))
task.run(command=command, nodes=nodes, timeout=timeout)
return task
def check_remote_output(task, command):
"""Check if a remote command completed successfully on all hosts.
Args:
task (Task): a Task object containing the command result
command (str): command run by the task
Returns:
bool: True if the command completed successfully (rc=0) on each
specified host; False otherwise
"""
# Create a dictionary of hosts for each unique return code
results = {code: hosts for code, hosts in task.iter_retcodes()}
# Determine if the command completed successfully across all the hosts
status = len(results) == 1 and 0 in results
if not status:
print(" Errors detected running \"{}\":".format(command))
# Display the command output
for code in sorted(results):
output_data = list(task.iter_buffers(results[code]))
if not output_data:
output_data = [["<NONE>", results[code]]]
for output, o_hosts in output_data:
n_set = NodeSet.fromlist(o_hosts)
                lines = list(output.splitlines())
if len(lines) > 1:
# Print the sub-header for multiple lines of output
print(" {}: rc={}, output:".format(n_set, code))
for number, line in enumerate(lines):
if isinstance(line, bytes):
line = line.decode("utf-8")
if len(lines) == 1:
# Print the sub-header and line for one line of output
print(" {}: rc={}, output: {}".format(n_set, code, line))
continue
try:
print(" {}".format(line))
except IOError:
# DAOS-5781 Jenkins doesn't like receiving large
# amounts of data in a short space of time so catch
# this and retry.
print(
"*** DAOS-5781: Handling IOError detected while "
"processing line {}/{} with retry ***".format(
number + 1, len(lines)))
time.sleep(5)
print(" {}".format(line))
# List any hosts that timed out
timed_out = [str(hosts) for hosts in task.iter_keys_timeout()]
if timed_out:
print(" {}: timeout detected".format(NodeSet.fromlist(timed_out)))
return status
def spawn_commands(host_list, command, timeout=120):
"""Run the command on each specified host in parallel.
Args:
host_list (list): list of hosts
command (str): command to run on each host
timeout (int, optional): number of seconds to wait for all jobs to
complete. Defaults to 120 seconds.
Returns:
bool: True if the command completed successfully (rc=0) on each
specified host; False otherwise
"""
    # Run the command on the hosts in parallel and collect the results
task = get_remote_output(host_list, command, timeout)
# Determine if the command completed successfully across all the hosts
return check_remote_output(task, command)
def find_values(obj, keys, key=None, val_type=list):
"""Find dictionary values of a certain type specified with certain keys.
Args:
obj (obj): a python object; initially the dictionary to search
keys (list): list of keys to find their matching list values
key (str, optional): key to check for a match. Defaults to None.
Returns:
dict: a dictionary of each matching key and its value
"""
def add_matches(found):
"""Add found matches to the match dictionary entry of the same key.
If a match does not already exist for this key add all the found values.
        When a match already exists for a key, extend the existing match with
        any newly found values.
For example:
Match Found Updated Match
--------- ------------ -------------
None [A, B] [A, B]
[A, B] [C] [A, B, C]
[A, B, C] [A, B, C, D] [A, B, C, D]
Args:
found (dict): dictionary of matches found for each key
"""
for found_key in found:
if found_key not in matches:
# Simply add the new value found for this key
matches[found_key] = found[found_key]
else:
is_list = isinstance(matches[found_key], list)
if not is_list:
matches[found_key] = [matches[found_key]]
if isinstance(found[found_key], list):
for found_item in found[found_key]:
if found_item not in matches[found_key]:
matches[found_key].append(found_item)
elif found[found_key] not in matches[found_key]:
matches[found_key].append(found[found_key])
if not is_list and len(matches[found_key]) == 1:
matches[found_key] = matches[found_key][0]
matches = {}
if isinstance(obj, val_type) and isinstance(key, str) and key in keys:
# Match found
matches[key] = obj
elif isinstance(obj, dict):
# Recursively look for matches in each dictionary entry
for obj_key, obj_val in list(obj.items()):
add_matches(find_values(obj_val, keys, obj_key, val_type))
elif isinstance(obj, list):
# Recursively look for matches in each list entry
for item in obj:
add_matches(find_values(item, keys, None, val_type))
return matches
def get_test_list(tags):
"""Generate a list of tests and avocado tag filter from a list of tags.
Args:
tags (list): a list of tag or test file names
Returns:
(list, list): a tuple of an avocado tag filter list and lists of tests
"""
test_tags = []
test_list = []
# Check if fault injection is enabled ( 0 return status)
faults_disabled = False
try:
faults_disabled = time_command(["fault_status"])
except OSError as error:
if error.errno == errno.ENOENT:
# Not built yet. Must be trying to figure out which tests are
# run for the given tag(s). Assume this is not a release run
# then and faults are enabled
pass
for tag in tags:
if ".py" in tag:
# Assume '.py' indicates a test and just add it to the list
test_list.append(tag)
fault_filter = "--filter-by-tags=-faults"
if faults_disabled and fault_filter not in test_tags:
test_tags.append(fault_filter)
else:
# Otherwise it is assumed that this is a tag
if faults_disabled:
tag = ",".join((tag, "-faults"))
test_tags.append("--filter-by-tags={}".format(tag))
# Update the list of tests with any test that match the specified tags.
# Exclude any specified tests that do not match the specified tags. If no
# tags and no specific tests have been specified then all of the functional
# tests will be added.
if test_tags or not test_list:
if not test_list:
test_list = ["./"]
version = float(get_output(["avocado", "-v"]).split()[-1])
print("Running with Avocado {}".format(version))
if version >= 83.0:
command = ["avocado", "list"]
elif version >= 82.0:
command = ["avocado", "--paginator=off", "list"]
else:
command = ["avocado", "list", "--paginator=off"]
for test_tag in test_tags:
command.append(str(test_tag))
command.extend(test_list if test_list else ["./"])
tagged_tests = re.findall(r"INSTRUMENTED\s+(.*):", get_output(command))
test_list = list(set(tagged_tests))
return test_tags, test_list
def get_test_files(test_list, args, yaml_dir):
"""Get a list of the test scripts to run and their yaml files.
Args:
test_list (list): list of test scripts to run
args (argparse.Namespace): command line arguments for this program
yaml_dir (str): directory in which to write the modified yaml files
Returns:
list: a list of dictionaries of each test script and yaml file; If
there was an issue replacing a yaml host placeholder the yaml
dictionary entry will be set to None.
"""
test_files = [{"py": test, "yaml": None} for test in test_list]
for test_file in test_files:
base, _ = os.path.splitext(test_file["py"])
test_file["yaml"] = replace_yaml_file(
"{}.yaml".format(base), args, yaml_dir)
return test_files
def get_nvme_replacement(args):
"""Determine the value to use for the '--nvme' command line argument.
Parse the lspci output for any NMVe devices, e.g.
$ lspci | grep 'Non-Volatile memory controller:'
5e:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [3DNAND, Beta Rock Controller]
5f:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [3DNAND, Beta Rock Controller]
81:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [Optane]
da:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [Optane]
Optionally filter the above output even further with a specified search
string (e.g. '--nvme=auto:Optane'):
$ lspci | grep 'Non-Volatile memory controller:' | grep 'Optane'
81:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [Optane]
da:00.0 Non-Volatile memory controller:
Intel Corporation NVMe Datacenter SSD [Optane]
Args:
args (argparse.Namespace): command line arguments for this program
Returns:
str: a comma-separated list of nvme device pci addresses available on
all of the specified test servers
"""
    # A list of server hosts is required to be able to auto-detect NVMe devices
if not args.test_servers:
print("ERROR: Missing a test_servers list to auto-detect NVMe devices")
sys.exit(1)
# Get a list of NVMe devices from each specified server host
host_list = list(args.test_servers)
command_list = [
"/sbin/lspci -D", "grep 'Non-Volatile memory controller:'"]
if ":" in args.nvme:
command_list.append("grep '{}'".format(args.nvme.split(":")[1]))
command = " | ".join(command_list)
task = get_remote_output(host_list, command)
# Verify the command was successful on each server host
if not check_remote_output(task, command):
print("ERROR: Issuing commands to detect NVMe PCI addresses.")
sys.exit(1)
# Verify each server host has the same NVMe PCI addresses
output_data = list(task.iter_buffers())
if len(output_data) > 1:
print("ERROR: Non-homogeneous NVMe PCI addresses.")
sys.exit(1)
# Get the list of NVMe PCI addresses found in the output
output_str = "\n".join([line.decode("utf-8") for line in output_data[0][0]])
devices = find_pci_address(output_str)
print("Auto-detected NVMe devices on {}: {}".format(host_list, devices))
return ",".join(devices)
def find_pci_address(value):
"""Find PCI addresses in the specified string.
Args:
value (str): string to search for PCI addresses
Returns:
list: a list of all the PCI addresses found in the string
"""
pattern = r"[{0}]{{4}}:[{0}]{{2}}:[{0}]{{2}}\.[{0}]".format("0-9a-fA-F")
return re.findall(pattern, str(value))
def replace_yaml_file(yaml_file, args, yaml_dir):
"""Create a temporary test yaml file with any requested values replaced.
Optionally replace the following test yaml file values if specified by the
user via the command line arguments:
test_servers: Use the list specified by the --test_servers (-ts)
argument to replace any host name placeholders listed
under "test_servers:"
test_clients Use the list specified by the --test_clients (-tc)
argument (or any remaining names in the --test_servers
list argument, if --test_clients is not specified) to
replace any host name placeholders listed under
"test_clients:".
bdev_list Use the list specified by the --nvme (-n) argument to
replace the string specified by the "bdev_list:" yaml
parameter. If multiple "bdev_list:" entries exist in
the yaml file, evenly divide the list when making the
replacements.
Any replacements are made in a copy of the original test yaml file. If no
replacements are specified return the original test yaml file.
Args:
yaml_file (str): test yaml file
args (argparse.Namespace): command line arguments for this program
yaml_dir (str): directory in which to write the modified yaml files
Returns:
str: the test yaml file; None if the yaml file contains placeholders
w/o replacements
"""
replacements = {}
if args.test_servers or args.nvme:
# Find the test yaml keys and values that match the replaceable fields
yaml_data = get_yaml_data(yaml_file)
yaml_keys = list(YAML_KEYS.keys())
yaml_find = find_values(yaml_data, yaml_keys)
# Generate a list of values that can be used as replacements
new_values = {}
for key, value in list(YAML_KEYS.items()):
args_value = getattr(args, value)
if isinstance(args_value, NodeSet):
new_values[key] = list(args_value)
elif args_value:
new_values[key] = args_value.split(",")
else:
new_values[key] = []
# Assign replacement values for the test yaml entries to be replaced
display(args, "Detecting replacements for {} in {}".format(
yaml_keys, yaml_file))
display(args, " Found values: {}".format(yaml_find))
display(args, " New values: {}".format(new_values))
for key in YAML_KEY_ORDER:
# If the user did not provide a specific list of replacement
# test_clients values, use the remaining test_servers values to
# replace test_clients placeholder values
if key == "test_clients" and not new_values[key]:
new_values[key] = new_values["test_servers"]
# Replace test yaml keys that were:
# - found in the test yaml
# - have a user-specified replacement
if key in yaml_find and new_values[key]:
if key.startswith("test_"):
# The entire server/client test yaml list entry is replaced
# by a new test yaml list entry, e.g.
# '- serverA' --> '- wolf-1'
value_format = "- {}"
values_to_replace = [
value_format.format(item) for item in yaml_find[key]]
else:
# Individual bdev_list NVMe PCI addresses in the test yaml
# file are replaced with the new NVMe PCI addresses in the
# order they are found, e.g.
# 0000:81:00.0 --> 0000:12:00.0
value_format = "\"{}\""
values_to_replace = [
value_format.format(item)
for item in find_pci_address(yaml_find[key])]
# Add the next user-specified value as a replacement for the key
for value in values_to_replace:
if value in replacements:
continue
try:
replacements[value] = value_format.format(
new_values[key].pop(0))
except IndexError:
replacements[value] = None
display(
args,
" - Replacement: {} -> {}".format(
value, replacements[value]))
if replacements:
# Read in the contents of the yaml file to retain the !mux entries
print("Reading {}".format(yaml_file))
with open(yaml_file) as yaml_buffer:
yaml_data = yaml_buffer.read()
# Apply the placeholder replacements
missing_replacements = []
display(args, "Modifying contents: {}".format(yaml_file))
for key in sorted(replacements):
value = replacements[key]
if value:
# Replace the host entries with their mapped values
display(args, " - Replacing: {} --> {}".format(key, value))
yaml_data = re.sub(key, value, yaml_data)
elif args.discard:
# Discard any host entries without a replacement value
display(args, " - Removing: {}".format(key))
yaml_data = re.sub(r"\s*[,]?{}".format(key), "", yaml_data)
else:
# Keep track of any placeholders without a replacement value
display(args, " - Missing: {}".format(key))
missing_replacements.append(key)
if missing_replacements:
# Report an error for all of the placeholders w/o a replacement
print(
"Error: Placeholders missing replacements in {}:\n {}".format(
yaml_file, ", ".join(missing_replacements)))
return None
# Write the modified yaml file into a temporary file. Use the path to
# ensure unique yaml files for tests with the same filename.
orig_yaml_file = yaml_file
yaml_name = get_test_category(yaml_file)
yaml_file = os.path.join(yaml_dir, "{}.yaml".format(yaml_name))
print("Creating copy: {}".format(yaml_file))
with open(yaml_file, "w") as yaml_buffer:
yaml_buffer.write(yaml_data)
# Optionally display the file
if args.verbose > 0:
cmd = ["diff", "-y", orig_yaml_file, yaml_file]
print(get_output(cmd, False))
# Return the untouched or modified yaml file
return yaml_file
def setup_test_directory(args, mode="all"):
"""Set up the common test directory on all hosts.
Ensure the common test directory exists on each possible test node.
Args:
args (argparse.Namespace): command line arguments for this program
mode (str, optional): setup mode. Defaults to "all".
"rm" = remove the directory
"mkdir" = create the directory
"chmod" = change the permissions of the directory (a+rw)
"list" = list the contents of the directory
"all" = execute all of the mode options
"""
host_list = NodeSet(socket.gethostname().split(".")[0])
host_list.update(args.test_clients)
host_list.update(args.test_servers)
test_dir = os.environ["DAOS_TEST_LOG_DIR"]
print(
"Setting up '{}' on {}:".format(
test_dir, str(NodeSet.fromlist(host_list))))
if mode in ["all", "rm"]:
spawn_commands(host_list, "sudo rm -fr {}".format(test_dir))
if mode in ["all", "mkdir"]:
spawn_commands(host_list, "mkdir -p {}".format(test_dir))
if mode in ["all", "chmod"]:
spawn_commands(host_list, "chmod a+wr {}".format(test_dir))
if mode in ["all", "list"]:
spawn_commands(host_list, "ls -al {}".format(test_dir))
def generate_certs():
"""Generate the certificates for the test."""
daos_test_log_dir = os.environ["DAOS_TEST_LOG_DIR"]
certs_dir = os.path.join(daos_test_log_dir, "daosCA")
subprocess.call(["/usr/bin/rm", "-rf", certs_dir])
subprocess.call(
["../../../../lib64/daos/certgen/gen_certificates.sh",
daos_test_log_dir])
def run_tests(test_files, tag_filter, args):
"""Run or display the test commands.
Args:
test_files (dict): a list of dictionaries of each test script/yaml file
tag_filter (list): the avocado tag filter command line argument
args (argparse.Namespace): command line arguments for this program
Returns:
int: a bitwise-or of all the return codes of each 'avocado run' command
"""
return_code = 0
# Determine the location of the avocado logs for archiving or renaming
data = get_output(["avocado", "config"]).strip()
avocado_logs_dir = re.findall(r"datadir\.paths\.logs_dir\s+(.*)", data)
avocado_logs_dir = os.path.expanduser(avocado_logs_dir[0])
print("Avocado logs stored in {}".format(avocado_logs_dir))
# Create the base avocado run command
version = float(get_output(["avocado", "-v"]).split()[-1])
print("Running with Avocado version {}".format(version))
command_list = ["avocado"]
if not args.sparse and version >= 82.0:
command_list.append("--show=test")
command_list.append("run")
if version >= 82.0:
command_list.append("--ignore-missing-references")
else:
command_list.extend(["--ignore-missing-references", "on"])
if version >= 83.0:
command_list.append("--disable-tap-job-result")
else:
command_list.extend(["--html-job-result", "on"])
command_list.extend(["--tap-job-result", "off"])
if not args.sparse and version < 82.0:
command_list.append("--show-job-log")
if tag_filter:
command_list.extend(tag_filter)
# Run each test
skip_reason = None
for loop in range(1, args.repeat + 1):
print("-" * 80)
print("Starting loop {}/{}".format(loop, args.repeat))
for test_file in test_files:
if skip_reason is not None:
# An error was detected running clean_logs for a previous test.
# As this is typically an indication of a communication issue
# with one of the hosts, do not attempt to run subsequent tests.
if not report_skipped_test(
test_file["py"], avocado_logs_dir, skip_reason):
return_code |= 64
continue
if not isinstance(test_file["yaml"], str):
# The test was not run due to an error replacing host
# placeholders in the yaml file. Treat this like a failed
# avocado command.
reason = "error replacing yaml file placeholders"
if not report_skipped_test(
test_file["py"], avocado_logs_dir, reason):
return_code |= 64
return_code |= 4
continue
# Optionally clean the log files before running this test on the
# servers and clients specified for this test
if args.clean:
if not clean_logs(test_file["yaml"], args):
# Report errors for this skipped test
skip_reason = (
"host communication error attempting to clean out "
"leftover logs from a previous test run prior to "
"running this test")
if not report_skipped_test(
test_file["py"], avocado_logs_dir, skip_reason):
return_code |= 64
return_code |= 128
continue
# Execute this test
test_command_list = list(command_list)
test_command_list.extend([
"--mux-yaml", test_file["yaml"], "--", test_file["py"]])
run_return_code = time_command(test_command_list)
if run_return_code != 0:
collect_crash_files(avocado_logs_dir)
return_code |= run_return_code
# Stop any agents or servers running via systemd
return_code |= stop_daos_agent_services(test_file["py"], args)
return_code |= stop_daos_server_service(test_file["py"], args)
# Optionally store all of the server and client config files
# and archive remote logs and report big log files, if any.
if args.archive:
test_hosts = get_hosts_from_yaml(test_file["yaml"], args)
test_log_dir = os.environ.get(
"DAOS_TEST_LOG_DIR", DEFAULT_DAOS_TEST_LOG_DIR)
# Archive local config files
return_code |= archive_files(
"local configuration files",
os.path.join(avocado_logs_dir, "latest", "daos_configs"),
socket.gethostname().split(".")[0:1],
"{}/*_*_*.yaml".format(
get_temporary_directory(
args, get_build_environment(args)["PREFIX"])),
args)
# Archive remote server configuration files
return_code |= archive_files(
"remote server config files",
os.path.join(avocado_logs_dir, "latest", "daos_configs"),
get_hosts_from_yaml(
test_file["yaml"], args, YAML_KEYS["test_servers"]),
"{}/daos_server*.yml".format(
os.path.join(os.sep, "etc", "daos")),
args)
# Archive remote client configuration files
return_code |= archive_files(
"remote client config files",
os.path.join(avocado_logs_dir, "latest", "daos_configs"),
get_hosts_from_yaml(
test_file["yaml"], args, YAML_KEYS["test_clients"]),
"{0}/daos_agent*.yml {0}/daos_control*.yml".format(
os.path.join(os.sep, "etc", "daos")),
args)
# Archive remote daos log files
return_code |= archive_files(
"daos log files",
os.path.join(avocado_logs_dir, "latest", "daos_logs"),
test_hosts,
"{}/*.log*".format(test_log_dir),
args,
avocado_logs_dir,
get_test_category(test_file["py"]))
# Archive remote ULTs stacks dump files
return_code |= archive_files(
"ULTs stacks dump files",
os.path.join(avocado_logs_dir, "latest", "daos_dumps"),
get_hosts_from_yaml(
test_file["yaml"], args, YAML_KEYS["test_servers"]),
"/tmp/daos_dump*.txt*",
args,
avocado_logs_dir,
get_test_category(test_file["py"]))
# Archive remote cart log files
return_code |= archive_files(
"cart log files",
os.path.join(avocado_logs_dir, "latest", "cart_logs"),
test_hosts,
"{}/*/*log*".format(test_log_dir),
args,
avocado_logs_dir,
get_test_category(test_file["py"]))
                # Compress any log files that haven't been remotely compressed.
compress_log_files(avocado_logs_dir, args)
# Optionally rename the test results directory for this test
if args.rename:
return_code |= rename_logs(
avocado_logs_dir, test_file["py"], loop, args)
# Optionally process core files
if args.process_cores:
if not process_the_cores(
avocado_logs_dir, test_file["yaml"], args):
return_code |= 256
if args.jenkinslog:
# Archive bullseye coverage logs
hosts = list(args.test_servers)
hosts += socket.gethostname().split(".")[0:1]
return_code |= archive_files(
"bullseye coverage logs",
os.path.join(avocado_logs_dir, "bullseye_coverage_logs"),
hosts,
"/tmp/test.cov*",
args)
return return_code
def get_yaml_data(yaml_file):
"""Get the contents of a yaml file as a dictionary.
Args:
yaml_file (str): yaml file to read
Raises:
Exception: if an error is encountered reading the yaml file
Returns:
dict: the contents of the yaml file
"""
yaml_data = {}
if os.path.isfile(yaml_file):
with open(yaml_file, "r") as open_file:
try:
file_data = open_file.read()
yaml_data = yaml.safe_load(file_data.replace("!mux", ""))
except yaml.YAMLError as error:
print("Error reading {}: {}".format(yaml_file, error))
sys.exit(1)
return yaml_data
def find_yaml_hosts(test_yaml):
"""Find the all the host values in the specified yaml file.
Args:
test_yaml (str): test yaml file
Returns:
dict: a dictionary of each host key and its host values
"""
return find_values(
get_yaml_data(test_yaml),
[YAML_KEYS["test_servers"], YAML_KEYS["test_clients"]])
def get_hosts_from_yaml(test_yaml, args, key_match=None):
"""Extract the list of hosts from the test yaml file.
    The local host will be included in the list if no clients are explicitly
    called out in the test's yaml file.
Args:
test_yaml (str): test yaml file
args (argparse.Namespace): command line arguments for this program
key_match (str, optional): test yaml key used to filter which hosts to
find. Defaults to None which will match all keys.
Returns:
list: a unique list of hosts specified in the test's yaml file
"""
display(
args,
"Extracting hosts from {} - matching key '{}'".format(
test_yaml, key_match))
host_set = set()
if args.include_localhost and key_match != YAML_KEYS["test_servers"]:
host_set.add(socket.gethostname().split(".")[0])
found_client_key = False
for key, value in list(find_yaml_hosts(test_yaml).items()):
display(args, " Found {}: {}".format(key, value))
if key_match is None or key == key_match:
display(args, " Adding {}".format(value))
host_set.update(value)
if key in YAML_KEYS["test_clients"]:
found_client_key = True
# Include this host as a client if no clients are specified
if not found_client_key and key_match != YAML_KEYS["test_servers"]:
local_host = socket.gethostname().split(".")[0]
display(args, " Adding the localhost: {}".format(local_host))
host_set.add(local_host)
return sorted(list(host_set))
def clean_logs(test_yaml, args):
"""Remove the test log files on each test host.
Args:
test_yaml (str): yaml file containing host names
args (argparse.Namespace): command line arguments for this program
"""
# Remove any log files from the DAOS_TEST_LOG_DIR directory
logs_dir = os.environ.get("DAOS_TEST_LOG_DIR", DEFAULT_DAOS_TEST_LOG_DIR)
host_list = get_hosts_from_yaml(test_yaml, args)
command = "sudo rm -fr {}".format(os.path.join(logs_dir, "*.log*"))
# also remove any ABT infos/stacks dumps
command += " /tmp/daos_dump*.txt*"
print("-" * 80)
print("Cleaning logs on {}".format(host_list))
if not spawn_commands(host_list, command):
print("Error cleaning logs, aborting")
return False
return True
def collect_crash_files(avocado_logs_dir):
"""Move any avocado crash files into job-results/latest/crashes.
Args:
avocado_logs_dir (str): path to the avocado log files.
"""
data_dir = avocado_logs_dir.replace("job-results", "data")
crash_dir = os.path.join(data_dir, "crashes")
if os.path.isdir(crash_dir):
crash_files = [
os.path.join(crash_dir, crash_file)
for crash_file in os.listdir(crash_dir)
if os.path.isfile(os.path.join(crash_dir, crash_file))]
if crash_files:
latest_dir = os.path.join(avocado_logs_dir, "latest")
latest_crash_dir = os.path.join(latest_dir, "crashes")
run_command(["mkdir", latest_crash_dir])
for crash_file in crash_files:
run_command(["mv", crash_file, latest_crash_dir])
else:
print("No avocado crash files found in {}".format(crash_dir))
def get_remote_file_command():
"""Get path to get_remote_files.sh script."""
return "{}/get_remote_files.sh".format(os.path.abspath(os.getcwd()))
def compress_log_files(avocado_logs_dir, args):
"""Compress log files.
Args:
avocado_logs_dir (str): path to the avocado log files
"""
print("-" * 80)
print("Compressing files in {}".format(socket.gethostname().split(".")[0]))
logs_dir = os.path.join(avocado_logs_dir, "latest", "daos_logs", "*.log*")
command = [
get_remote_file_command(), "-z", "-x", "-f {}".format(logs_dir)]
if args.verbose > 1:
command.append("-v")
print(get_output(command, check=False))
def archive_files(description, destination, hosts, source_files, args,
avocado_logs_dir=None, test_name=None):
"""Archive all of the remote files to a local directory.
Args:
description (str): string identifying the archiving operation
destination (str): path in which to archive files
hosts (list): hosts from which to archive files
source_files (str): remote files to archive
args (argparse.Namespace): command line arguments for this program
avocado_logs_dir (optional, str): path to the avocado log files.
Required for checking for large log files - see 'test_name'.
Defaults to None.
test_name (optional, str): current running testname. If specified the
cart_logtest.py will be run against each log file and the size of
each log file will be checked against the threshold (if enabled).
Defaults to None.
Returns:
int: status of archiving the files
"""
status = 0
if hosts:
print("-" * 80)
print(
"Archiving {} from {} in {}".format(
description, hosts, destination))
# Create the destination directory
if not os.path.exists(destination):
get_output(["mkdir", destination])
# Display available disk space prior to copy. Allow commands to fail
# w/o exiting this program. Any disk space issues preventing the
# creation of a directory will be caught in the archiving of the source
# files.
display_disk_space(destination)
this_host = socket.gethostname().split(".")[0]
command = [
get_remote_file_command(),
"-z",
"-a \"{}:{}\"".format(this_host, destination),
"-f \"{}\"".format(source_files),
]
if test_name is not None:
command.append("-c")
if args.logs_threshold:
command.append("-t \"{}\"".format(args.logs_threshold))
if args.verbose > 1:
command.append("-v")
task = get_remote_output(hosts, " ".join(command), 900)
# Determine if the command completed successfully across all the hosts
cmd_description = "archive_files command for {}".format(description)
if not check_remote_output(task, cmd_description):
status |= 16
if test_name is not None and args.logs_threshold:
if not check_big_files(avocado_logs_dir, task, test_name, args):
status |= 32
return status
def rename_logs(avocado_logs_dir, test_file, loop, args):
"""Append the test name to its avocado job-results directory name.
Args:
avocado_logs_dir (str): avocado job-results directory
test_file (str): the test python file
loop (int): test execution loop count
args (argparse.Namespace): command line arguments for this program
Returns:
int: status of renaming the avocado job-results directory name
"""
status = 0
test_name = get_test_category(test_file)
test_logs_lnk = os.path.join(avocado_logs_dir, "latest")
test_logs_dir = os.path.realpath(test_logs_lnk)
print("-" * 80)
print("Renaming the avocado job-results directory")
if args.jenkinslog:
if args.repeat > 1:
# When repeating tests ensure jenkins-style avocado log directories
# are unique by including the loop count in the path
new_test_logs_dir = os.path.join(
avocado_logs_dir, test_file, str(loop))
else:
new_test_logs_dir = os.path.join(avocado_logs_dir, test_file)
try:
os.makedirs(new_test_logs_dir)
except OSError as error:
print("Error mkdir {}: {}".format(new_test_logs_dir, error))
status |= 1024
else:
new_test_logs_dir = "{}-{}".format(test_logs_dir, test_name)
try:
os.rename(test_logs_dir, new_test_logs_dir)
os.remove(test_logs_lnk)
os.symlink(new_test_logs_dir, test_logs_lnk)
print("Renamed {} to {}".format(test_logs_dir, new_test_logs_dir))
except OSError as error:
print(
"Error renaming {} to {}: {}".format(
test_logs_dir, new_test_logs_dir, error))
if args.jenkinslog:
xml_file = os.path.join(new_test_logs_dir, "results.xml")
try:
with open(xml_file) as xml_buffer:
xml_data = xml_buffer.read()
except OSError as error:
print("Error reading {} : {}".format(xml_file, str(error)))
status |= 1024
return status
test_dir = os.path.split(os.path.dirname(test_file))[-1]
org_class = "classname=\""
new_class = "{}FTEST_{}.".format(org_class, test_dir)
xml_data = re.sub(org_class, new_class, xml_data)
try:
with open(xml_file, "w") as xml_buffer:
xml_buffer.write(xml_data)
except OSError as error:
print("Error writing {}: {}".format(xml_file, str(error)))
status |= 1024
return status
def check_big_files(avocado_logs_dir, task, test_name, args):
"""Check the contents of the task object, tag big files, create junit xml.
Args:
avocado_logs_dir (str): path to the avocado log files.
task (Task): a Task object containing the command result
test_name (str): current running testname
args (argparse.Namespace): command line arguments for this program
Returns:
bool: True if no errors occurred checking and creating junit file.
False, otherwise.
"""
status = True
hosts = NodeSet()
cdata = []
for output, nodelist in task.iter_buffers():
node_set = NodeSet.fromlist(nodelist)
hosts.update(node_set)
output_str = "\n".join([line.decode("utf-8") for line in output])
big_files = re.findall(r"Y:\s([0-9]+)", output_str)
if big_files:
cdata.append(
"The following log files on {} exceeded the {} "
"threshold:".format(node_set, args.logs_threshold))
cdata.extend([" {}".format(big_file) for big_file in big_files])
if cdata:
destination = os.path.join(avocado_logs_dir, "latest")
message = "Log size has exceed threshold for this test on: {}".format(
hosts)
status = create_results_xml(
message, test_name, "\n".join(cdata), destination)
else:
print("No log files found exceeding {}".format(args.logs_threshold))
return status
def report_skipped_test(test_file, avocado_logs_dir, reason):
"""Report an error for the skipped test.
Args:
test_file (str): the test python file
avocado_logs_dir (str): avocado job-results directory
reason (str): test skip reason
Returns:
bool: status of writing to junit file
"""
message = "The {} test was skipped due to {}".format(test_file, reason)
print(message)
# Generate a fake avocado results.xml file to report the skipped test.
# This file currently requires being placed in a job-* subdirectory.
test_name = get_test_category(test_file)
time_stamp = datetime.now().strftime("%Y-%m-%dT%H.%M")
destination = os.path.join(
avocado_logs_dir, "job-{}-da03911-{}".format(time_stamp, test_name))
try:
os.makedirs(destination)
except (OSError, FileExistsError) as error:
print(
"Warning: Continuing after failing to create {}: {}".format(
destination, error))
return create_results_xml(
message, test_name, "See launch.py command output for more details",
destination)
def create_results_xml(message, testname, output, destination):
"""Create JUnit xml file.
Args:
message (str): error summary message
testname (str): name of test
output (dict): result of the command.
destination (str): directory where junit xml will be created
Returns:
bool: status of writing to junit file
"""
status = True
# Define the test suite
testsuite_attributes = {
"name": str(testname),
"errors": "1",
"failures": "0",
"skipped": "0",
"test": "1",
"time": "0.0",
}
testsuite = ET.Element("testsuite", testsuite_attributes)
# Define the test case error
testcase_attributes = {"classname": testname, "name": "framework_results",
"time": "0.0"}
testcase = ET.SubElement(testsuite, "testcase", testcase_attributes)
ET.SubElement(testcase, "error", {"message": message})
system_out = ET.SubElement(testcase, "system-out")
system_out.text = output
# Get xml as string and write it to a file
rough_xml = ET.tostring(testsuite, "utf-8")
junit_xml = minidom.parseString(rough_xml)
results_xml = os.path.join(destination, "framework_results.xml")
print("Generating junit xml file {} ...".format(results_xml))
try:
with open(results_xml, "w") as xml_buffer:
xml_buffer.write(junit_xml.toprettyxml())
except IOError as error:
print("Failed to create xml file: {}".format(error))
status = False
return status
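# When True (the default), debug symbols are installed via
# "dnf debuginfo-install"; the False branch keeps the older per-package
# "-debuginfo" resolution path for reference.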
USE_DEBUGINFO_INSTALL = True
def resolve_debuginfo(pkg):
"""Return the debuginfo package for a given package name.
Args:
pkg (str): a package name
Returns:
dict: dictionary of debug package information
"""
# pylint: disable=import-error,import-outside-toplevel,unused-import
try:
import dnf
return resolve_debuginfo_dnf(pkg)
except ImportError:
try:
import yum
return resolve_debuginfo_yum(pkg)
except ImportError:
return resolve_debuginfo_rpm(pkg)
def resolve_debuginfo_rpm(pkg):
"""Return the debuginfo package for a given package name.
Args:
pkg (str): a package name
Returns:
dict: dictionary of debug package information
"""
package_info = None
rpm_query = get_output(["rpm", "-qa"])
regex = r"({})-([0-9a-z~\.]+)-([0-9a-z~\.]+)\.x".format(pkg)
matches = re.findall(regex, rpm_query)
if matches:
debuginfo_map = {"glibc": "glibc-debuginfo-common"}
try:
debug_pkg = debuginfo_map[matches[0][0]]
except KeyError:
debug_pkg = matches[0][0] + "-debuginfo"
package_info = {
"name": debug_pkg,
"version": matches[0][1],
"release": matches[0][2],
}
else:
print("Package {} not installed, skipping debuginfo".format(pkg))
return package_info
def resolve_debuginfo_yum(pkg):
"""Return the debuginfo package for a given package name.
Args:
pkg (str): a package name
Returns:
dict: dictionary of debug package information
"""
import yum # pylint: disable=import-error,import-outside-toplevel
yum_base = yum.YumBase()
yum_base.conf.assumeyes = True
yum_base.setCacheDir(force=True, reuse=True)
yum_base.repos.enableRepo('*debug*')
debuginfo_map = {'glibc': 'glibc-debuginfo-common'}
try:
debug_pkg = debuginfo_map[pkg]
except KeyError:
debug_pkg = pkg + "-debuginfo"
try:
pkg_data = yum_base.rpmdb.returnNewestByName(name=pkg)[0]
except yum.Errors.PackageSackError as expn:
if expn.__str__().rstrip() == "No Package Matching " + pkg:
print("Package {} not installed, "
"skipping debuginfo".format(pkg))
return None
raise
return {'name': debug_pkg,
'version': pkg_data['version'],
'release': pkg_data['release'],
'epoch': pkg_data['epoch']}
def resolve_debuginfo_dnf(pkg):
"""Return the debuginfo package for a given package name.
Args:
pkg (str): a package name
Returns:
dict: dictionary of debug package information
"""
import dnf # pylint: disable=import-error,import-outside-toplevel
dnf_base = dnf.Base()
dnf_base.conf.assumeyes = True
dnf_base.read_all_repos()
try:
dnf_base.fill_sack()
except OSError as error:
print("Got an OSError trying to fill_sack(): ", error)
raise RuntimeError("resolve_debuginfo_dnf() "
"failed: ", error)
query = dnf_base.sack.query()
latest = query.latest()
latest_info = latest.filter(name=pkg)
debuginfo = None
try:
package = list(latest_info)[0]
    except IndexError:
raise RuntimeError("Could not find package info for "
"{}".format(pkg))
if package:
debuginfo_map = {"glibc": "glibc-debuginfo-common"}
try:
debug_pkg = debuginfo_map[pkg]
except KeyError:
debug_pkg = "{}-debuginfo".format(package.name)
debuginfo = {
"name": debug_pkg,
"version": package.version,
"release": package.release,
"epoch": package.epoch
}
else:
print("Package {} not installed, skipping debuginfo".format(pkg))
return debuginfo
def install_debuginfos():
"""Install debuginfo packages."""
distro_info = detect()
if "centos" in distro_info.name.lower():
install_pkgs = [{'name': 'gdb'}, {'name': 'python3-debuginfo'}]
else:
install_pkgs = []
cmds = []
# -debuginfo packages that don't get installed with debuginfo-install
for pkg in ['systemd', 'ndctl', 'mercury', 'hdf5']:
try:
debug_pkg = resolve_debuginfo(pkg)
except RuntimeError as error:
print("Failed trying to install_debuginfos(): ", error)
raise
if debug_pkg and debug_pkg not in install_pkgs:
install_pkgs.append(debug_pkg)
# remove any "source tree" test hackery that might interfere with RPM
# installation
path = os.path.sep + os.path.join('usr', 'share', 'spdk', 'include')
if os.path.islink(path):
cmds.append(["sudo", "rm", "-f", path])
if USE_DEBUGINFO_INSTALL:
dnf_args = ["--exclude", "ompi-debuginfo"]
if os.getenv("TEST_RPMS", 'false') == 'true':
if "suse" in distro_info.name.lower():
dnf_args.extend(["libpmemobj1", "python3", "openmpi3"])
elif "centos" in distro_info.name.lower() and \
distro_info.version == "7":
dnf_args.extend(["--enablerepo=*-debuginfo", "--exclude",
"nvml-debuginfo", "libpmemobj",
"python36", "openmpi3", "gcc"])
elif "centos" in distro_info.name.lower() and \
distro_info.version == "8":
dnf_args.extend(["--enablerepo=*-debuginfo", "libpmemobj",
"python3", "openmpi", "gcc"])
else:
raise RuntimeError(
"install_debuginfos(): Unsupported distro: {}".format(
distro_info))
cmds.append(["sudo", "dnf", "-y", "install"] + dnf_args)
cmds.append(
["sudo", "dnf", "debuginfo-install",
"-y"] + dnf_args + ["daos-server"])
else:
# We're not using the yum API to install packages
# See the comments below.
# kwarg = {'name': 'gdb'}
# yum_base.install(**kwarg)
        for debug_pkg in install_pkgs:
            # This is how you actually use the API to add a package
            # But since we need sudo to do it, we need to call out to yum
            # kwarg = debug_pkg
            # yum_base.install(**kwarg)
            # Note: do not append debug_pkg back onto install_pkgs here;
            # growing the list while iterating over it would never terminate.
            pass
# This is how you normally finish up a yum transaction, but
# again, we need to employ sudo
# yum_base.resolveDeps()
# yum_base.buildTransaction()
# yum_base.processTransaction(rpmDisplay=yum.rpmtrans.NoOutputCallBack())
# Now install a few pkgs that debuginfo-install wouldn't
cmd = ["sudo", "dnf", "-y"]
if "centos" in distro_info.name.lower():
cmd.append("--enablerepo=*debug*")
cmd.append("install")
for pkg in install_pkgs:
try:
cmd.append(
"{}-{}-{}".format(pkg['name'], pkg['version'], pkg['release']))
except KeyError:
cmd.append(pkg['name'])
cmds.append(cmd)
retry = False
for cmd in cmds:
try:
print(run_command(cmd))
except RuntimeError as error:
# got an error, so abort this list of commands and re-run
# it with a dnf clean, makecache first
print(error)
retry = True
break
if retry:
print("Going to refresh caches and try again")
cmd_prefix = ["sudo", "dnf"]
if "centos" in distro_info.name.lower():
cmd_prefix.append("--enablerepo=*debug*")
cmds.insert(0, cmd_prefix + ["clean", "all"])
cmds.insert(1, cmd_prefix + ["makecache"])
for cmd in cmds:
print(run_command(cmd))
def process_the_cores(avocado_logs_dir, test_yaml, args):
"""Copy all of the host test log files to the avocado results directory.
Args:
avocado_logs_dir (str): location of the avocado job logs
test_yaml (str): yaml file containing host names
args (argparse.Namespace): command line arguments for this program
Returns:
bool: True if everything was done as expected, False if there were
any issues processing core files
"""
import fnmatch # pylint: disable=import-outside-toplevel
return_status = True
this_host = socket.gethostname().split(".")[0]
host_list = get_hosts_from_yaml(test_yaml, args)
daos_cores_dir = os.path.join(avocado_logs_dir, "latest", "stacktraces")
# Create a subdirectory in the avocado logs directory for this test
print("-" * 80)
print("Processing cores from {} in {}".format(host_list, daos_cores_dir))
get_output(["mkdir", daos_cores_dir])
# Copy any core files that exist on the test hosts and remove them from the
# test host if the copy is successful. Attempt all of the commands and
# report status at the end of the loop. Include a listing of the file
# related to any failed command.
commands = [
"set -eu",
"rc=0",
"copied=()",
"for file in /var/tmp/core.*",
"do if [ -e $file ]",
"then if [ ! -s $file ]",
"then ((rc++))",
"ls -al $file",
"else if sudo chmod 644 $file && "
"scp $file {}:{}/${{file##*/}}-$(hostname -s)".format(
this_host, daos_cores_dir),
"then copied+=($file)",
"if ! sudo rm -fr $file",
"then ((rc++))",
"ls -al $file",
"fi",
"else ((rc++))",
"ls -al $file",
"fi",
"fi",
"fi",
"done",
"echo Copied ${copied[@]:-no files}",
"exit $rc",
]
if not spawn_commands(host_list, "; ".join(commands), timeout=1800):
# we might have still gotten some core files, so don't return here
# but save a False return status for later
return_status = False
cores = os.listdir(daos_cores_dir)
if not cores:
return True
try:
install_debuginfos()
except RuntimeError as error:
print(error)
print("Removing core files to avoid archiving them")
for corefile in cores:
os.remove(os.path.join(daos_cores_dir, corefile))
return False
for corefile in cores:
if not fnmatch.fnmatch(corefile, 'core.*[0-9]'):
continue
corefile_fqpn = os.path.join(daos_cores_dir, corefile)
# can't use the file python magic binding here due to:
# https://bugs.astron.com/view.php?id=225, fixed in:
# https://github.com/file/file/commit/6faf2eba2b8c65fbac7acd36602500d757614d2f
# but not available to us until that is in a released version
# revert the commit this comment is in to see use python magic instead
try:
gdb_output = run_command(["gdb", "-c", corefile_fqpn, "-ex",
"info proc exe", "-ex",
"quit"])
last_line = gdb_output.splitlines()[-1]
cmd = last_line[7:-1]
# assume there are no arguments on cmd
find_char = "'"
if cmd.find(" ") > -1:
# there are arguments on cmd
find_char = " "
exe_name = cmd[0:cmd.find(find_char)]
except RuntimeError:
exe_name = None
if exe_name:
cmd = [
"gdb", "-cd={}".format(daos_cores_dir),
"-ex", "set pagination off",
"-ex", "thread apply all bt full",
"-ex", "detach",
"-ex", "quit",
exe_name, corefile
]
stack_trace_file = os.path.join(
daos_cores_dir, "{}.stacktrace".format(corefile))
try:
with open(stack_trace_file, "w") as stack_trace:
stack_trace.writelines(get_output(cmd))
except IOError as error:
print("Error writing {}: {}".format(stack_trace_file, error))
return_status = False
else:
print(
"Unable to determine executable name from gdb output: '{}'\n"
"Not creating stacktrace".format(gdb_output))
return_status = False
print("Removing {}".format(corefile_fqpn))
os.unlink(corefile_fqpn)
return return_status
def get_test_category(test_file):
"""Get a category for the specified test using its path and name.
Args:
test_file (str): the test python file
Returns:
str: concatenation of the test path and base filename joined by dashes
"""
file_parts = os.path.split(test_file)
return "-".join(
[os.path.splitext(os.path.basename(part))[0] for part in file_parts])
def stop_daos_agent_services(test_file, args):
"""Stop any daos_agent.service running on the hosts running servers.
Args:
test_file (str): the test python file
args (argparse.Namespace): command line arguments for this program
Returns:
int: status code: 0 = success, 512 = failure
"""
service = "daos_agent.service"
print("-" * 80)
print("Verifying {} after running '{}'".format(service, test_file))
if args.test_clients:
hosts = list(args.test_clients)
else:
hosts = list(args.test_servers)
local_host = socket.gethostname().split(".")[0]
if local_host not in hosts:
hosts.append(local_host)
return stop_service(hosts, service)
def stop_daos_server_service(test_file, args):
"""Stop any daos_server.service running on the hosts running servers.
Args:
test_file (str): the test python file
args (argparse.Namespace): command line arguments for this program
Returns:
int: status code: 0 = success, 512 = failure
"""
service = "daos_server.service"
print("-" * 80)
print("Verifying {} after running '{}'".format(service, test_file))
return stop_service(list(args.test_servers), service)
def stop_service(hosts, service):
"""Stop any daos_server.service running on the hosts running servers.
Args:
hosts (list): list of hosts on which to stop the service.
service (str): name of the service
Returns:
int: status code: 0 = success, 512 = failure
"""
result = {"status": 0}
status_keys = ["reset-failed", "stop", "disable"]
mapping = {"stop": "active", "disable": "enabled", "reset-failed": "failed"}
check_hosts = NodeSet.fromlist(hosts)
loop = 1
# Reduce 'max_loops' to 2 once https://jira.hpdd.intel.com/browse/DAOS-7809
# has been resolved
max_loops = 3
while check_hosts:
# Check the status of the service on each host
result = get_service_status(check_hosts, service)
check_hosts = NodeSet()
for key in status_keys:
if result[key]:
if loop == max_loops:
# Exit the while loop if the service is still running
print(
" - Error {} still {} on {}".format(
service, mapping[key], result[key]))
result["status"] = 512
else:
# Issue the appropriate systemctl command to remedy the
# detected state, e.g. 'stop' for 'active'.
command = "sudo systemctl {} {}".format(key, service)
get_remote_output(str(result[key]), command)
# Run the status check again on this group of hosts
check_hosts.add(result[key])
loop += 1
return result["status"]
def get_service_status(host_list, service):
"""Get the status of the daos_server.service.
Args:
host_list (list): list of hosts on which to get the service state
service (str): name of the service
Returns:
dict: a dictionary with the following keys:
- "status": status code: 0 = success, 512 = failure
- "stop": NodeSet where to stop the daos_server.service
- "disable": NodeSet where to disable the daos_server.service
- "reset-failed": NodeSet where to reset the daos_server.service
"""
status = {
"status": 0,
"stop": NodeSet(),
"disable": NodeSet(),
"reset-failed": NodeSet()}
status_states = {
"stop": ["active", "activating", "deactivating"],
"disable": ["active", "activating", "deactivating"],
"reset-failed": ["failed"]}
command = "systemctl is-active {}".format(service)
task = get_remote_output(host_list, command)
for output, nodelist in task.iter_buffers():
output_lines = [line.decode("utf-8") for line in output]
nodeset = NodeSet.fromlist(nodelist)
print(" {}: {}".format(nodeset, "\n".join(output_lines)))
for key in status_states:
for line in output_lines:
if line in status_states[key]:
status[key].add(nodeset)
break
if task.num_timeout() > 0:
nodeset = NodeSet.fromlist(task.iter_keys_timeout())
status["status"] = 512
status["stop"].add(nodeset)
status["disable"].add(nodeset)
status["reset-failed"].add(nodeset)
print(" {}: TIMEOUT".format(nodeset))
return status
def indent_text(indent, text):
"""Prepend the specified number of spaces to the specified text.
Args:
indent (int): the number of spaces to use as an indentation
text (object): text to indent. lists will be converted into a
newline-separated str with spaces prepended to each line
Returns:
str: indented text
"""
if isinstance(text, (list, tuple)):
return "\n".join(["{}{}".format(" " * indent, line) for line in text])
return " " * indent + str(text)
def main():
"""Launch DAOS functional tests."""
# Parse the command line arguments
description = [
"DAOS functional test launcher",
"",
"Launches tests by specifying a test tag. For example:",
"\tbadconnect --run pool connect tests that pass NULL ptrs, etc.",
"\tbadevict --run pool client evict tests that pass NULL ptrs, "
"etc.",
"\tbadexclude --run pool target exclude tests that pass NULL ptrs, "
"etc.",
"\tbadparam --run tests that pass NULL ptrs, etc.",
"\tbadquery --run pool query tests that pass NULL ptrs, etc.",
"\tmulticreate --run tests that create multiple pools at once",
"\tmultitarget --run tests that create pools over multiple servers",
"\tpool --run all pool related tests",
"\tpoolconnect --run all pool connection related tests",
"\tpooldestroy --run all pool destroy related tests",
"\tpoolevict --run all client pool eviction related tests",
"\tpoolinfo --run all pool info retrieval related tests",
"\tquick --run tests that complete quickly, with minimal "
"resources",
"",
"Multiple tags can be specified:",
"\ttag_a,tag_b -- run all tests with both tag_a and tag_b",
"\ttag_a tag_b -- run all tests with either tag_a or tag_b",
"",
"Specifying no tags will run all of the available tests.",
"",
"Tests can also be launched by specifying a path to the python script "
"instead of its tag.",
"",
"The placeholder server and client names in the yaml file can also be "
"replaced with the following options:",
"\tlaunch.py -ts node1,node2 -tc node3 <tag>",
"\t - Use node[1-2] to run the daos server in each test",
"\t - Use node3 to run the daos client in each test",
"\tlaunch.py -ts node1,node2 <tag>",
"\t - Use node[1-2] to run the daos server or client in each test",
"\tlaunch.py -ts node1,node2 -d <tag>",
"\t - Use node[1-2] to run the daos server or client in each test",
"\t - Discard of any additional server or client placeholders for "
"each test",
"",
"You can also specify the sparse flag -s to limit output to "
"pass/fail.",
"\tExample command: launch.py -s pool"
]
parser = ArgumentParser(
prog="launcher.py",
formatter_class=RawDescriptionHelpFormatter,
description="\n".join(description))
parser.add_argument(
"-a", "--archive",
action="store_true",
help="archive host log files in the avocado job-results directory")
parser.add_argument(
"-c", "--clean",
action="store_true",
help="remove daos log files from the test hosts prior to the test")
parser.add_argument(
"-d", "--discard",
action="store_true",
help="when replacing server/client yaml file placeholders, discard "
"any placeholders that do not end up with a replacement value")
parser.add_argument(
"-i", "--include_localhost",
action="store_true",
help="include the local host when cleaning and archiving")
parser.add_argument(
"-l", "--list",
action="store_true",
help="list the python scripts that match the specified tags")
parser.add_argument(
"-m", "--modify",
action="store_true",
help="modify the test yaml files but do not run the tests")
parser.add_argument(
"-n", "--nvme",
action="store",
help="comma-separated list of NVMe device PCI addresses to use as "
"replacement values for the bdev_list in each test's yaml file. "
"Using the 'auto[:<filter>]' keyword will auto-detect the NVMe "
"PCI address list on each of the '--test_servers' hosts - the "
"optional '<filter>' can be used to limit auto-detected "
"addresses, e.g. 'auto:Optane' for Intel Optane NVMe devices.")
parser.add_argument(
"-r", "--rename",
action="store_true",
help="rename the avocado test logs directory to include the test name")
parser.add_argument(
"-re", "--repeat",
action="store",
default=1,
type=int,
help="number of times to repeat test execution")
parser.add_argument(
"-p", "--process_cores",
action="store_true",
help="process core files from tests")
parser.add_argument(
"-th", "--logs_threshold",
action="store",
help="collect log sizes and report log sizes that go past provided"
"threshold. e.g. '-th 5M'"
"Valid threshold units are: B, K, M, G, T")
parser.add_argument(
"-s", "--sparse",
action="store_true",
help="limit output to pass/fail")
parser.add_argument(
"-ins", "--insecure_mode",
action="store_true",
help="Launch test with insecure-mode")
parser.add_argument(
"tags",
nargs="*",
type=str,
help="test category or file to run")
parser.add_argument(
"-tc", "--test_clients",
action="store",
help="comma-separated list of hosts to use as replacement values for "
"client placeholders in each test's yaml file")
parser.add_argument(
"-ts", "--test_servers",
action="store",
help="comma-separated list of hosts to use as replacement values for "
"server placeholders in each test's yaml file. If the "
"'--test_clients' argument is not specified, this list of hosts "
"will also be used to replace client placeholders.")
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
help="verbosity output level. Specify multiple times (e.g. -vv) for "
"additional output")
parser.add_argument(
"-j", "--jenkinslog",
action="store_true",
help="rename the avocado test logs directory for publishing in Jenkins")
parser.add_argument(
"-y", "--yaml_directory",
action="store",
default=None,
help="directory in which to write the modified yaml files. A temporary "
"directory - which only exists for the duration of the launch.py "
"command - is used by default.")
args = parser.parse_args()
print("Arguments: {}".format(args))
# Convert host specifications into NodeSets
args.test_servers = NodeSet(args.test_servers)
args.test_clients = NodeSet(args.test_clients)
# Setup the user environment
set_test_environment(args)
# Auto-detect nvme test yaml replacement values if requested
if args.nvme and args.nvme.startswith("auto"):
args.nvme = get_nvme_replacement(args)
# Process the tags argument to determine which tests to run
tag_filter, test_list = get_test_list(args.tags)
# Verify at least one test was requested
if not test_list:
print("ERROR: No tests or tags found via {}".format(args.tags))
sys.exit(1)
# Display a list of the tests matching the tags
print("Detected tests: \n{}".format(" \n".join(test_list)))
if args.list:
sys.exit(0)
# Create a temporary directory
if args.yaml_directory is None:
temp_dir = TemporaryDirectory()
yaml_dir = temp_dir.name
else:
yaml_dir = args.yaml_directory
if not os.path.exists(yaml_dir):
os.mkdir(yaml_dir)
# Create a dictionary of test and their yaml files
test_files = get_test_files(test_list, args, yaml_dir)
if args.modify:
sys.exit(0)
# Setup (clean/create/list) the common test directory
setup_test_directory(args)
# Generate certificate files
generate_certs()
# Run all the tests
status = run_tests(test_files, tag_filter, args)
# Process the avocado run return codes and only treat job and command
# failures as errors.
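    # Bit flags set by run_tests(): 1 = test failure, 2 = job failure,
    # 4 = failed avocado command, 8 = interrupted job, 16 = archiving failure,
    # 32 = unreported big logs, 64 = junit xml creation failure,
    # 128 = log cleaning failure, 256 = core file stack trace failure,
    # 512 = daos_server.service stop failure, 1024 = Jenkins rename failure.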
ret_code = 0
if status == 0:
print("All avocado tests passed!")
else:
if status & 1 == 1:
print("Detected one or more avocado test failures!")
if status & 8 == 8:
print("Detected one or more interrupted avocado jobs!")
if status & 2 == 2:
print("ERROR: Detected one or more avocado job failures!")
ret_code = 1
if status & 4 == 4:
print("ERROR: Detected one or more failed avocado commands!")
ret_code = 1
if status & 16 == 16:
print("ERROR: Detected one or more tests that failed archiving!")
ret_code = 1
if status & 32 == 32:
print("ERROR: Detected one or more tests with unreported big logs!")
ret_code = 1
if status & 64 == 64:
print("ERROR: Failed to create a junit xml test error file!")
if status & 128 == 128:
print("ERROR: Failed to clean logs in preparation for test run!")
ret_code = 1
if status & 256 == 256:
print("ERROR: Detected one or more tests with failure to create "
"stack traces from core files!")
ret_code = 1
if status & 512 == 512:
print("ERROR: Detected stopping daos_server.service after one or "
"more tests!")
ret_code = 1
if status & 1024 == 1024:
print("ERROR: Detected one or more failures in renaming logs and "
"results for Jenkins!")
ret_code = 1
sys.exit(ret_code)
if __name__ == "__main__":
main()
avg_line_length: 37.119463 | max_line_length: 86 | alphanum_fraction: 0.593549

hexsha: 2b2ce80c5c625ac0377646825840104607b4abc0 | size: 313,494 | ext: py | lang: Python
max_stars_repo_path: tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py | max_stars_repo_name: SinaChavoshi/python-aiplatform | max_stars_repo_head_hexsha: 384785bd06b39784e02a0afffa92612a70628946 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py | max_issues_repo_name: SinaChavoshi/python-aiplatform | max_issues_repo_head_hexsha: 384785bd06b39784e02a0afffa92612a70628946 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-10-15T18:51:09.000Z | max_issues_repo_issues_event_max_datetime: 2020-10-16T22:02:32.000Z
max_forks_repo_path: tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py | max_forks_repo_name: SinaChavoshi/python-aiplatform | max_forks_repo_head_hexsha: 384785bd06b39784e02a0afffa92612a70628946 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.metadata_service import (
MetadataServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.metadata_service import (
MetadataServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers
from google.cloud.aiplatform_v1beta1.services.metadata_service import transports
from google.cloud.aiplatform_v1beta1.services.metadata_service.transports.base import (
_API_CORE_VERSION,
)
from google.cloud.aiplatform_v1beta1.services.metadata_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1beta1.types import artifact
from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact
from google.cloud.aiplatform_v1beta1.types import context
from google.cloud.aiplatform_v1beta1.types import context as gca_context
from google.cloud.aiplatform_v1beta1.types import encryption_spec
from google.cloud.aiplatform_v1beta1.types import event
from google.cloud.aiplatform_v1beta1.types import execution
from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
from google.cloud.aiplatform_v1beta1.types import metadata_schema
from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
from google.cloud.aiplatform_v1beta1.types import metadata_service
from google.cloud.aiplatform_v1beta1.types import metadata_store
from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
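# Illustrative note (not part of the generated tests): the skipif markers above
# are applied as decorators on version-dependent test cases, e.g.
#
#   @requires_google_auth_gte_1_25_0
#   def test_uses_newer_google_auth_api():  # hypothetical test name
#       ...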
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert MetadataServiceClient._get_default_mtls_endpoint(None) is None
assert (
MetadataServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [MetadataServiceClient, MetadataServiceAsyncClient,]
)
def test_metadata_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [MetadataServiceClient, MetadataServiceAsyncClient,]
)
def test_metadata_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_metadata_service_client_get_transport_class():
transport = MetadataServiceClient.get_transport_class()
available_transports = [
transports.MetadataServiceGrpcTransport,
]
assert transport in available_transports
transport = MetadataServiceClient.get_transport_class("grpc")
assert transport == transports.MetadataServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
(
MetadataServiceAsyncClient,
transports.MetadataServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
MetadataServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(MetadataServiceClient),
)
@mock.patch.object(
MetadataServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(MetadataServiceAsyncClient),
)
def test_metadata_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
MetadataServiceClient,
transports.MetadataServiceGrpcTransport,
"grpc",
"true",
),
(
MetadataServiceAsyncClient,
transports.MetadataServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
MetadataServiceClient,
transports.MetadataServiceGrpcTransport,
"grpc",
"false",
),
(
MetadataServiceAsyncClient,
transports.MetadataServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
MetadataServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(MetadataServiceClient),
)
@mock.patch.object(
MetadataServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(MetadataServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_metadata_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
(
MetadataServiceAsyncClient,
transports.MetadataServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_metadata_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
(
MetadataServiceAsyncClient,
transports.MetadataServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_metadata_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_metadata_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = MetadataServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_metadata_store(
transport: str = "grpc", request_type=metadata_service.CreateMetadataStoreRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_metadata_store_from_dict():
test_create_metadata_store(request_type=dict)
def test_create_metadata_store_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
client.create_metadata_store()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataStoreRequest()
@pytest.mark.asyncio
async def test_create_metadata_store_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.CreateMetadataStoreRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_metadata_store_async_from_dict():
await test_create_metadata_store_async(request_type=dict)
def test_create_metadata_store_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateMetadataStoreRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_metadata_store_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateMetadataStoreRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_metadata_store_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_metadata_store(
parent="parent_value",
metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
metadata_store_id="metadata_store_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].metadata_store == gca_metadata_store.MetadataStore(
name="name_value"
)
assert args[0].metadata_store_id == "metadata_store_id_value"
def test_create_metadata_store_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_metadata_store(
metadata_service.CreateMetadataStoreRequest(),
parent="parent_value",
metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
metadata_store_id="metadata_store_id_value",
)
@pytest.mark.asyncio
async def test_create_metadata_store_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_metadata_store(
parent="parent_value",
metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
metadata_store_id="metadata_store_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].metadata_store == gca_metadata_store.MetadataStore(
name="name_value"
)
assert args[0].metadata_store_id == "metadata_store_id_value"
@pytest.mark.asyncio
async def test_create_metadata_store_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_metadata_store(
metadata_service.CreateMetadataStoreRequest(),
parent="parent_value",
metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
metadata_store_id="metadata_store_id_value",
)
def test_get_metadata_store(
transport: str = "grpc", request_type=metadata_service.GetMetadataStoreRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_store.MetadataStore(
name="name_value", description="description_value",
)
response = client.get_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_store.MetadataStore)
assert response.name == "name_value"
assert response.description == "description_value"
def test_get_metadata_store_from_dict():
test_get_metadata_store(request_type=dict)
def test_get_metadata_store_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
client.get_metadata_store()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataStoreRequest()
@pytest.mark.asyncio
async def test_get_metadata_store_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.GetMetadataStoreRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_store.MetadataStore(
name="name_value", description="description_value",
)
)
response = await client.get_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_store.MetadataStore)
assert response.name == "name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_metadata_store_async_from_dict():
await test_get_metadata_store_async(request_type=dict)
def test_get_metadata_store_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetMetadataStoreRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
call.return_value = metadata_store.MetadataStore()
client.get_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_metadata_store_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetMetadataStoreRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_store.MetadataStore()
)
await client.get_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_metadata_store_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_store.MetadataStore()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_metadata_store(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_metadata_store_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_metadata_store(
metadata_service.GetMetadataStoreRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_metadata_store_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_store.MetadataStore()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_store.MetadataStore()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_metadata_store(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_metadata_store_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_metadata_store(
metadata_service.GetMetadataStoreRequest(), name="name_value",
)
def test_list_metadata_stores(
transport: str = "grpc", request_type=metadata_service.ListMetadataStoresRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataStoresResponse(
next_page_token="next_page_token_value",
)
response = client.list_metadata_stores(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataStoresRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMetadataStoresPager)
assert response.next_page_token == "next_page_token_value"
def test_list_metadata_stores_from_dict():
test_list_metadata_stores(request_type=dict)
def test_list_metadata_stores_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
client.list_metadata_stores()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataStoresRequest()
@pytest.mark.asyncio
async def test_list_metadata_stores_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.ListMetadataStoresRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataStoresResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_metadata_stores(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataStoresRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMetadataStoresAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_metadata_stores_async_from_dict():
await test_list_metadata_stores_async(request_type=dict)
def test_list_metadata_stores_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListMetadataStoresRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
call.return_value = metadata_service.ListMetadataStoresResponse()
client.list_metadata_stores(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_metadata_stores_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListMetadataStoresRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataStoresResponse()
)
await client.list_metadata_stores(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_metadata_stores_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataStoresResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_metadata_stores(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_metadata_stores_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_metadata_stores(
metadata_service.ListMetadataStoresRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_metadata_stores_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataStoresResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataStoresResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_metadata_stores(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_metadata_stores_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_metadata_stores(
metadata_service.ListMetadataStoresRequest(), parent="parent_value",
)
def test_list_metadata_stores_pager():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
next_page_token="abc",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[], next_page_token="def",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[metadata_store.MetadataStore(),],
next_page_token="ghi",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_metadata_stores(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, metadata_store.MetadataStore) for i in results)
def test_list_metadata_stores_pages():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
next_page_token="abc",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[], next_page_token="def",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[metadata_store.MetadataStore(),],
next_page_token="ghi",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
),
RuntimeError,
)
pages = list(client.list_metadata_stores(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_metadata_stores_async_pager():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
next_page_token="abc",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[], next_page_token="def",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[metadata_store.MetadataStore(),],
next_page_token="ghi",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
),
RuntimeError,
)
async_pager = await client.list_metadata_stores(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, metadata_store.MetadataStore) for i in responses)
@pytest.mark.asyncio
async def test_list_metadata_stores_async_pages():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_stores),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
next_page_token="abc",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[], next_page_token="def",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[metadata_store.MetadataStore(),],
next_page_token="ghi",
),
metadata_service.ListMetadataStoresResponse(
metadata_stores=[
metadata_store.MetadataStore(),
metadata_store.MetadataStore(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_metadata_stores(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_metadata_store(
transport: str = "grpc", request_type=metadata_service.DeleteMetadataStoreRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_metadata_store_from_dict():
test_delete_metadata_store(request_type=dict)
def test_delete_metadata_store_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
client.delete_metadata_store()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteMetadataStoreRequest()
@pytest.mark.asyncio
async def test_delete_metadata_store_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.DeleteMetadataStoreRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteMetadataStoreRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_metadata_store_async_from_dict():
await test_delete_metadata_store_async(request_type=dict)
def test_delete_metadata_store_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.DeleteMetadataStoreRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_metadata_store_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.DeleteMetadataStoreRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_metadata_store(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_metadata_store_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_metadata_store(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_metadata_store_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_metadata_store(
metadata_service.DeleteMetadataStoreRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_metadata_store_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_metadata_store), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_metadata_store(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_metadata_store_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_metadata_store(
metadata_service.DeleteMetadataStoreRequest(), name="name_value",
)
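# A minimal, non-test sketch of driving delete_metadata_store with a fully populated
# request and a mocked transport, mirroring the tests above. The resource name is a
# hypothetical placeholder; real callers would wait on the returned long-running
# operation (e.g. lro.result()), which is skipped here because the transport is mocked.
def _sketch_delete_metadata_store_usage():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    request = metadata_service.DeleteMetadataStoreRequest(
        name="projects/p/locations/l/metadataStores/m",  # hypothetical resource name
    )
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        lro = client.delete_metadata_store(request)
    # The client wraps the proto Operation in an operation future.
    assert isinstance(lro, future.Future)
    return lro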
def test_create_artifact(
transport: str = "grpc", request_type=metadata_service.CreateArtifactRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=gca_artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.create_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == gca_artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_create_artifact_from_dict():
test_create_artifact(request_type=dict)
def test_create_artifact_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
client.create_artifact()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateArtifactRequest()
@pytest.mark.asyncio
async def test_create_artifact_async(
transport: str = "grpc_asyncio", request_type=metadata_service.CreateArtifactRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=gca_artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.create_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == gca_artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_artifact_async_from_dict():
await test_create_artifact_async(request_type=dict)
def test_create_artifact_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateArtifactRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
call.return_value = gca_artifact.Artifact()
client.create_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_artifact_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateArtifactRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_artifact.Artifact()
)
await client.create_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_artifact_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_artifact.Artifact()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_artifact(
parent="parent_value",
artifact=gca_artifact.Artifact(name="name_value"),
artifact_id="artifact_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].artifact == gca_artifact.Artifact(name="name_value")
assert args[0].artifact_id == "artifact_id_value"
def test_create_artifact_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_artifact(
metadata_service.CreateArtifactRequest(),
parent="parent_value",
artifact=gca_artifact.Artifact(name="name_value"),
artifact_id="artifact_id_value",
)
@pytest.mark.asyncio
async def test_create_artifact_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_artifact(
parent="parent_value",
artifact=gca_artifact.Artifact(name="name_value"),
artifact_id="artifact_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].artifact == gca_artifact.Artifact(name="name_value")
assert args[0].artifact_id == "artifact_id_value"
@pytest.mark.asyncio
async def test_create_artifact_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_artifact(
metadata_service.CreateArtifactRequest(),
parent="parent_value",
artifact=gca_artifact.Artifact(name="name_value"),
artifact_id="artifact_id_value",
)
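# A minimal, non-test sketch of the flattened create_artifact form exercised above.
# The parent path, display name, and artifact_id are hypothetical placeholders; the
# transport is mocked so no RPC is sent.
def _sketch_create_artifact_flattened_usage():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
        call.return_value = gca_artifact.Artifact(name="name_value")
        created = client.create_artifact(
            parent="projects/p/locations/l/metadataStores/m",  # hypothetical parent
            artifact=gca_artifact.Artifact(display_name="my-artifact"),
            artifact_id="my-artifact-id",
        )
    return created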
def test_get_artifact(
transport: str = "grpc", request_type=metadata_service.GetArtifactRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.get_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_get_artifact_from_dict():
test_get_artifact(request_type=dict)
def test_get_artifact_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
client.get_artifact()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetArtifactRequest()
@pytest.mark.asyncio
async def test_get_artifact_async(
transport: str = "grpc_asyncio", request_type=metadata_service.GetArtifactRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.get_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_artifact_async_from_dict():
await test_get_artifact_async(request_type=dict)
def test_get_artifact_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetArtifactRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
call.return_value = artifact.Artifact()
client.get_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_artifact_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetArtifactRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
await client.get_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_artifact_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = artifact.Artifact()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_artifact(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_artifact_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_artifact(
metadata_service.GetArtifactRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_artifact_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_artifact(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_artifact_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_artifact(
metadata_service.GetArtifactRequest(), name="name_value",
)
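# A minimal, non-test sketch of the async client surface exercised above: the stub
# is replaced with a FakeUnaryUnaryCall so the awaited RPC resolves immediately.
# Must be awaited inside an event loop (the @pytest.mark.asyncio tests above get one
# from the test runner); the resource name is a hypothetical placeholder.
async def _sketch_get_artifact_async_usage():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            artifact.Artifact(name="name_value", display_name="display_name_value")
        )
        found = await client.get_artifact(
            name="projects/p/locations/l/metadataStores/m/artifacts/a",  # hypothetical
        )
    return found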
def test_list_artifacts(
transport: str = "grpc", request_type=metadata_service.ListArtifactsRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListArtifactsResponse(
next_page_token="next_page_token_value",
)
response = client.list_artifacts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListArtifactsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListArtifactsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_artifacts_from_dict():
test_list_artifacts(request_type=dict)
def test_list_artifacts_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
client.list_artifacts()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListArtifactsRequest()
@pytest.mark.asyncio
async def test_list_artifacts_async(
transport: str = "grpc_asyncio", request_type=metadata_service.ListArtifactsRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListArtifactsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_artifacts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListArtifactsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListArtifactsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_artifacts_async_from_dict():
await test_list_artifacts_async(request_type=dict)
def test_list_artifacts_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListArtifactsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
call.return_value = metadata_service.ListArtifactsResponse()
client.list_artifacts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_artifacts_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListArtifactsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListArtifactsResponse()
)
await client.list_artifacts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_artifacts_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListArtifactsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_artifacts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_artifacts_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_artifacts(
metadata_service.ListArtifactsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_artifacts_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListArtifactsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_artifacts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_artifacts_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_artifacts(
metadata_service.ListArtifactsRequest(), parent="parent_value",
)
def test_list_artifacts_pager():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListArtifactsResponse(
artifacts=[
artifact.Artifact(),
artifact.Artifact(),
artifact.Artifact(),
],
next_page_token="abc",
),
metadata_service.ListArtifactsResponse(
artifacts=[], next_page_token="def",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(),], next_page_token="ghi",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(), artifact.Artifact(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_artifacts(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, artifact.Artifact) for i in results)
def test_list_artifacts_pages():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListArtifactsResponse(
artifacts=[
artifact.Artifact(),
artifact.Artifact(),
artifact.Artifact(),
],
next_page_token="abc",
),
metadata_service.ListArtifactsResponse(
artifacts=[], next_page_token="def",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(),], next_page_token="ghi",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(), artifact.Artifact(),],
),
RuntimeError,
)
pages = list(client.list_artifacts(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_artifacts_async_pager():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListArtifactsResponse(
artifacts=[
artifact.Artifact(),
artifact.Artifact(),
artifact.Artifact(),
],
next_page_token="abc",
),
metadata_service.ListArtifactsResponse(
artifacts=[], next_page_token="def",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(),], next_page_token="ghi",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(), artifact.Artifact(),],
),
RuntimeError,
)
async_pager = await client.list_artifacts(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, artifact.Artifact) for i in responses)
@pytest.mark.asyncio
async def test_list_artifacts_async_pages():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListArtifactsResponse(
artifacts=[
artifact.Artifact(),
artifact.Artifact(),
artifact.Artifact(),
],
next_page_token="abc",
),
metadata_service.ListArtifactsResponse(
artifacts=[], next_page_token="def",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(),], next_page_token="ghi",
),
metadata_service.ListArtifactsResponse(
artifacts=[artifact.Artifact(), artifact.Artifact(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_artifacts(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
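# A minimal, non-test sketch of consuming the pager returned by list_artifacts,
# mirroring the paging behaviour verified above: iterating the pager yields Artifact
# messages across pages (pager.pages would yield the raw ListArtifactsResponse pages
# instead). The two fake pages are placeholders; the transport is mocked.
def _sketch_list_artifacts_pager_usage():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[artifact.Artifact(), artifact.Artifact()],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(artifacts=[artifact.Artifact()]),
        )
        items = list(client.list_artifacts(request={}))
    assert len(items) == 3
    return items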
def test_update_artifact(
transport: str = "grpc", request_type=metadata_service.UpdateArtifactRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=gca_artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.update_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == gca_artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_update_artifact_from_dict():
test_update_artifact(request_type=dict)
def test_update_artifact_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
client.update_artifact()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateArtifactRequest()
@pytest.mark.asyncio
async def test_update_artifact_async(
transport: str = "grpc_asyncio", request_type=metadata_service.UpdateArtifactRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_artifact.Artifact(
name="name_value",
display_name="display_name_value",
uri="uri_value",
etag="etag_value",
state=gca_artifact.Artifact.State.PENDING,
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.update_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateArtifactRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_artifact.Artifact)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.uri == "uri_value"
assert response.etag == "etag_value"
assert response.state == gca_artifact.Artifact.State.PENDING
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_artifact_async_from_dict():
await test_update_artifact_async(request_type=dict)
def test_update_artifact_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateArtifactRequest()
request.artifact.name = "artifact.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
call.return_value = gca_artifact.Artifact()
client.update_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_artifact_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateArtifactRequest()
request.artifact.name = "artifact.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_artifact.Artifact()
)
await client.update_artifact(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "artifact.name=artifact.name/value",) in kw[
"metadata"
]
def test_update_artifact_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_artifact.Artifact()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_artifact(
artifact=gca_artifact.Artifact(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].artifact == gca_artifact.Artifact(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_artifact_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_artifact(
metadata_service.UpdateArtifactRequest(),
artifact=gca_artifact.Artifact(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_artifact_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_artifact(
artifact=gca_artifact.Artifact(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].artifact == gca_artifact.Artifact(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_artifact_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_artifact(
metadata_service.UpdateArtifactRequest(),
artifact=gca_artifact.Artifact(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
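# A minimal, non-test sketch of a partial update via the flattened update_artifact
# form exercised above: update_mask restricts which Artifact fields are written.
# The artifact name, description, and mask path are hypothetical placeholders; the
# transport is mocked.
def _sketch_update_artifact_field_mask_usage():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        call.return_value = gca_artifact.Artifact(name="name_value")
        updated = client.update_artifact(
            artifact=gca_artifact.Artifact(
                name="projects/p/locations/l/metadataStores/m/artifacts/a",
                description="new description",
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["description"]),
        )
    return updated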
def test_create_context(
transport: str = "grpc", request_type=metadata_service.CreateContextRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.create_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_create_context_from_dict():
test_create_context(request_type=dict)
def test_create_context_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
client.create_context()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateContextRequest()
@pytest.mark.asyncio
async def test_create_context_async(
transport: str = "grpc_asyncio", request_type=metadata_service.CreateContextRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.create_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_context_async_from_dict():
await test_create_context_async(request_type=dict)
def test_create_context_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateContextRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
call.return_value = gca_context.Context()
client.create_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_context_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateContextRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
await client.create_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_context_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_context.Context()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_context(
parent="parent_value",
context=gca_context.Context(name="name_value"),
context_id="context_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].context == gca_context.Context(name="name_value")
assert args[0].context_id == "context_id_value"
def test_create_context_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_context(
metadata_service.CreateContextRequest(),
parent="parent_value",
context=gca_context.Context(name="name_value"),
context_id="context_id_value",
)
@pytest.mark.asyncio
async def test_create_context_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_context), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_context(
parent="parent_value",
context=gca_context.Context(name="name_value"),
context_id="context_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].context == gca_context.Context(name="name_value")
assert args[0].context_id == "context_id_value"
@pytest.mark.asyncio
async def test_create_context_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_context(
metadata_service.CreateContextRequest(),
parent="parent_value",
context=gca_context.Context(name="name_value"),
context_id="context_id_value",
)
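# A minimal, non-test sketch of the request-object form of create_context, plus the
# restriction verified above that a request object cannot be combined with flattened
# keyword arguments. The parent path, display name, and context_id are hypothetical
# placeholders; the transport is mocked.
def _sketch_create_context_usage():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    request = metadata_service.CreateContextRequest(
        parent="projects/p/locations/l/metadataStores/m",  # hypothetical parent
        context=gca_context.Context(display_name="my-context"),
        context_id="my-context-id",
    )
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = gca_context.Context(name="name_value")
        created = client.create_context(request)
        # Mixing the request object with flattened fields raises before any RPC.
        with pytest.raises(ValueError):
            client.create_context(request, parent="parent_value")
    return created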
def test_get_context(
transport: str = "grpc", request_type=metadata_service.GetContextRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.get_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_get_context_from_dict():
test_get_context(request_type=dict)
def test_get_context_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
client.get_context()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetContextRequest()
@pytest.mark.asyncio
async def test_get_context_async(
transport: str = "grpc_asyncio", request_type=metadata_service.GetContextRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.get_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_context_async_from_dict():
await test_get_context_async(request_type=dict)
def test_get_context_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
call.return_value = context.Context()
client.get_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_context_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
await client.get_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_context_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = context.Context()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_context(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_context_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_context(
metadata_service.GetContextRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_context_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_context), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_context(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_context_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_context(
metadata_service.GetContextRequest(), name="name_value",
)
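# A minimal, non-test sketch of passing the request as a plain dict, the form the
# *_from_dict tests above exercise with an empty mapping: field names in the dict
# are coerced into a GetContextRequest. The resource name is a hypothetical
# placeholder; the transport is mocked.
def _sketch_get_context_request_as_dict():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        call.return_value = context.Context(name="name_value")
        found = client.get_context(
            request={"name": "projects/p/locations/l/metadataStores/m/contexts/c"}
        )
    return found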
def test_list_contexts(
transport: str = "grpc", request_type=metadata_service.ListContextsRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListContextsResponse(
next_page_token="next_page_token_value",
)
response = client.list_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListContextsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListContextsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_contexts_from_dict():
test_list_contexts(request_type=dict)
def test_list_contexts_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
client.list_contexts()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListContextsRequest()
@pytest.mark.asyncio
async def test_list_contexts_async(
transport: str = "grpc_asyncio", request_type=metadata_service.ListContextsRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListContextsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListContextsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListContextsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_contexts_async_from_dict():
await test_list_contexts_async(request_type=dict)
def test_list_contexts_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListContextsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
call.return_value = metadata_service.ListContextsResponse()
client.list_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_contexts_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListContextsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListContextsResponse()
)
await client.list_contexts(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_contexts_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListContextsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_contexts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_contexts_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_contexts(
metadata_service.ListContextsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_contexts_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListContextsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_contexts(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_contexts_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_contexts(
metadata_service.ListContextsRequest(), parent="parent_value",
)
def test_list_contexts_pager():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(), context.Context(),],
next_page_token="abc",
),
metadata_service.ListContextsResponse(contexts=[], next_page_token="def",),
metadata_service.ListContextsResponse(
contexts=[context.Context(),], next_page_token="ghi",
),
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(),],
),
RuntimeError,
)
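        # Expected per-call metadata: the routing header for the (empty) parent
        # field, which the pager should retain for subsequent page requests.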
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_contexts(request={})
assert pager._metadata == metadata
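        # Iterating the pager transparently fetches every page and yields the
        # six Context items configured above.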
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, context.Context) for i in results)
def test_list_contexts_pages():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(), context.Context(),],
next_page_token="abc",
),
metadata_service.ListContextsResponse(contexts=[], next_page_token="def",),
metadata_service.ListContextsResponse(
contexts=[context.Context(),], next_page_token="ghi",
),
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(),],
),
RuntimeError,
)
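        # Each page's raw response should surface the next_page_token from the
        # corresponding fake response, ending with an empty token on the last page.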
pages = list(client.list_contexts(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_contexts_async_pager():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(), context.Context(),],
next_page_token="abc",
),
metadata_service.ListContextsResponse(contexts=[], next_page_token="def",),
metadata_service.ListContextsResponse(
contexts=[context.Context(),], next_page_token="ghi",
),
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(),],
),
RuntimeError,
)
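        # The first page is fetched when the call is awaited, so its token is
        # available immediately; async iteration then yields items from every page.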
async_pager = await client.list_contexts(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, context.Context) for i in responses)
@pytest.mark.asyncio
async def test_list_contexts_async_pages():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(), context.Context(),],
next_page_token="abc",
),
metadata_service.ListContextsResponse(contexts=[], next_page_token="def",),
metadata_service.ListContextsResponse(
contexts=[context.Context(),], next_page_token="ghi",
),
metadata_service.ListContextsResponse(
contexts=[context.Context(), context.Context(),],
),
RuntimeError,
)
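        # Walk the async page iterator and verify each page's raw
        # next_page_token, with the final page reporting an empty token.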
pages = []
async for page_ in (await client.list_contexts(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_update_context(
transport: str = "grpc", request_type=metadata_service.UpdateContextRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.update_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_update_context_from_dict():
test_update_context(request_type=dict)
def test_update_context_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
client.update_context()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateContextRequest()
@pytest.mark.asyncio
async def test_update_context_async(
transport: str = "grpc_asyncio", request_type=metadata_service.UpdateContextRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_context.Context(
name="name_value",
display_name="display_name_value",
etag="etag_value",
parent_contexts=["parent_contexts_value"],
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.update_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_context.Context)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
assert response.parent_contexts == ["parent_contexts_value"]
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_context_async_from_dict():
await test_update_context_async(request_type=dict)
def test_update_context_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateContextRequest()
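    # Routing header keys for nested fields use dotted notation ("context.name").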
request.context.name = "context.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
call.return_value = gca_context.Context()
client.update_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context.name=context.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_context_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateContextRequest()
request.context.name = "context.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
await client.update_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context.name=context.name/value",) in kw[
"metadata"
]
def test_update_context_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_context.Context()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_context(
context=gca_context.Context(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].context == gca_context.Context(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_context_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_context(
metadata_service.UpdateContextRequest(),
context=gca_context.Context(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_context_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_context), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_context(
context=gca_context.Context(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].context == gca_context.Context(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_context_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_context(
metadata_service.UpdateContextRequest(),
context=gca_context.Context(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_delete_context(
transport: str = "grpc", request_type=metadata_service.DeleteContextRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteContextRequest()
# Establish that the response is the type that we expect.
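    # delete_context is a long-running operation, so the transport returns an
    # operations_pb2.Operation and the client wraps it in an operation future.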
assert isinstance(response, future.Future)
def test_delete_context_from_dict():
test_delete_context(request_type=dict)
def test_delete_context_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
client.delete_context()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteContextRequest()
@pytest.mark.asyncio
async def test_delete_context_async(
transport: str = "grpc_asyncio", request_type=metadata_service.DeleteContextRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.DeleteContextRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_context_async_from_dict():
await test_delete_context_async(request_type=dict)
def test_delete_context_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.DeleteContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_context_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.DeleteContextRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_context(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_context_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_context(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_context_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_context(
metadata_service.DeleteContextRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_context_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_context(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_context_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_context(
metadata_service.DeleteContextRequest(), name="name_value",
)
def test_add_context_artifacts_and_executions(
transport: str = "grpc",
request_type=metadata_service.AddContextArtifactsAndExecutionsRequest,
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
response = client.add_context_artifacts_and_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, metadata_service.AddContextArtifactsAndExecutionsResponse
)
def test_add_context_artifacts_and_executions_from_dict():
test_add_context_artifacts_and_executions(request_type=dict)
def test_add_context_artifacts_and_executions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
client.add_context_artifacts_and_executions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.AddContextArtifactsAndExecutionsRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddContextArtifactsAndExecutionsResponse()
)
response = await client.add_context_artifacts_and_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, metadata_service.AddContextArtifactsAndExecutionsResponse
)
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_async_from_dict():
await test_add_context_artifacts_and_executions_async(request_type=dict)
def test_add_context_artifacts_and_executions_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddContextArtifactsAndExecutionsRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
client.add_context_artifacts_and_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddContextArtifactsAndExecutionsRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddContextArtifactsAndExecutionsResponse()
)
await client.add_context_artifacts_and_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
def test_add_context_artifacts_and_executions_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.add_context_artifacts_and_executions(
context="context_value",
artifacts=["artifacts_value"],
executions=["executions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
assert args[0].artifacts == ["artifacts_value"]
assert args[0].executions == ["executions_value"]
def test_add_context_artifacts_and_executions_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.add_context_artifacts_and_executions(
metadata_service.AddContextArtifactsAndExecutionsRequest(),
context="context_value",
artifacts=["artifacts_value"],
executions=["executions_value"],
)
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_artifacts_and_executions), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextArtifactsAndExecutionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.add_context_artifacts_and_executions(
context="context_value",
artifacts=["artifacts_value"],
executions=["executions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
assert args[0].artifacts == ["artifacts_value"]
assert args[0].executions == ["executions_value"]
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.add_context_artifacts_and_executions(
metadata_service.AddContextArtifactsAndExecutionsRequest(),
context="context_value",
artifacts=["artifacts_value"],
executions=["executions_value"],
)
def test_add_context_children(
transport: str = "grpc", request_type=metadata_service.AddContextChildrenRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddContextChildrenResponse()
response = client.add_context_children(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextChildrenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_service.AddContextChildrenResponse)
def test_add_context_children_from_dict():
test_add_context_children(request_type=dict)
def test_add_context_children_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
client.add_context_children()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextChildrenRequest()
@pytest.mark.asyncio
async def test_add_context_children_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.AddContextChildrenRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddContextChildrenResponse()
)
response = await client.add_context_children(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddContextChildrenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_service.AddContextChildrenResponse)
@pytest.mark.asyncio
async def test_add_context_children_async_from_dict():
await test_add_context_children_async(request_type=dict)
def test_add_context_children_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddContextChildrenRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
call.return_value = metadata_service.AddContextChildrenResponse()
client.add_context_children(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_add_context_children_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddContextChildrenRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddContextChildrenResponse()
)
await client.add_context_children(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
def test_add_context_children_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddContextChildrenResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.add_context_children(
context="context_value", child_contexts=["child_contexts_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
assert args[0].child_contexts == ["child_contexts_value"]
def test_add_context_children_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.add_context_children(
metadata_service.AddContextChildrenRequest(),
context="context_value",
child_contexts=["child_contexts_value"],
)
@pytest.mark.asyncio
async def test_add_context_children_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_context_children), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextChildrenResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.add_context_children(
context="context_value", child_contexts=["child_contexts_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
assert args[0].child_contexts == ["child_contexts_value"]
@pytest.mark.asyncio
async def test_add_context_children_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.add_context_children(
metadata_service.AddContextChildrenRequest(),
context="context_value",
child_contexts=["child_contexts_value"],
)
def test_query_context_lineage_subgraph(
transport: str = "grpc",
request_type=metadata_service.QueryContextLineageSubgraphRequest,
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
response = client.query_context_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_context_lineage_subgraph_from_dict():
test_query_context_lineage_subgraph(request_type=dict)
def test_query_context_lineage_subgraph_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
client.query_context_lineage_subgraph()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.QueryContextLineageSubgraphRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
response = await client.query_context_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_async_from_dict():
await test_query_context_lineage_subgraph_async(request_type=dict)
def test_query_context_lineage_subgraph_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryContextLineageSubgraphRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
call.return_value = lineage_subgraph.LineageSubgraph()
client.query_context_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryContextLineageSubgraphRequest()
request.context = "context/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
await client.query_context_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "context=context/value",) in kw["metadata"]
def test_query_context_lineage_subgraph_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.query_context_lineage_subgraph(context="context_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
def test_query_context_lineage_subgraph_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.query_context_lineage_subgraph(
metadata_service.QueryContextLineageSubgraphRequest(),
context="context_value",
)
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_context_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.query_context_lineage_subgraph(context="context_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].context == "context_value"
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.query_context_lineage_subgraph(
metadata_service.QueryContextLineageSubgraphRequest(),
context="context_value",
)
def test_create_execution(
transport: str = "grpc", request_type=metadata_service.CreateExecutionRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_execution.Execution(
name="name_value",
display_name="display_name_value",
state=gca_execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.create_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == gca_execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_create_execution_from_dict():
test_create_execution(request_type=dict)
def test_create_execution_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
client.create_execution()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateExecutionRequest()
@pytest.mark.asyncio
async def test_create_execution_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.CreateExecutionRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_execution.Execution(
name="name_value",
display_name="display_name_value",
state=gca_execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.create_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == gca_execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_execution_async_from_dict():
await test_create_execution_async(request_type=dict)
def test_create_execution_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateExecutionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
call.return_value = gca_execution.Execution()
client.create_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_execution_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateExecutionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_execution.Execution()
)
await client.create_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_execution_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_execution.Execution()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_execution(
parent="parent_value",
execution=gca_execution.Execution(name="name_value"),
execution_id="execution_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].execution == gca_execution.Execution(name="name_value")
assert args[0].execution_id == "execution_id_value"
def test_create_execution_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_execution(
metadata_service.CreateExecutionRequest(),
parent="parent_value",
execution=gca_execution.Execution(name="name_value"),
execution_id="execution_id_value",
)
@pytest.mark.asyncio
async def test_create_execution_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_execution(
parent="parent_value",
execution=gca_execution.Execution(name="name_value"),
execution_id="execution_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].execution == gca_execution.Execution(name="name_value")
assert args[0].execution_id == "execution_id_value"
@pytest.mark.asyncio
async def test_create_execution_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_execution(
metadata_service.CreateExecutionRequest(),
parent="parent_value",
execution=gca_execution.Execution(name="name_value"),
execution_id="execution_id_value",
)
def test_get_execution(
transport: str = "grpc", request_type=metadata_service.GetExecutionRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = execution.Execution(
name="name_value",
display_name="display_name_value",
state=execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.get_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_get_execution_from_dict():
test_get_execution(request_type=dict)
def test_get_execution_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
client.get_execution()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetExecutionRequest()
@pytest.mark.asyncio
async def test_get_execution_async(
transport: str = "grpc_asyncio", request_type=metadata_service.GetExecutionRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
execution.Execution(
name="name_value",
display_name="display_name_value",
state=execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.get_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_execution_async_from_dict():
await test_get_execution_async(request_type=dict)
def test_get_execution_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetExecutionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
call.return_value = execution.Execution()
client.get_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_execution_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetExecutionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
await client.get_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_execution_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = execution.Execution()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_execution(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_execution_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_execution(
metadata_service.GetExecutionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_execution_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_execution(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_execution_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_execution(
metadata_service.GetExecutionRequest(), name="name_value",
)
def test_list_executions(
transport: str = "grpc", request_type=metadata_service.ListExecutionsRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListExecutionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListExecutionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListExecutionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_executions_from_dict():
test_list_executions(request_type=dict)
def test_list_executions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
client.list_executions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListExecutionsRequest()
@pytest.mark.asyncio
async def test_list_executions_async(
transport: str = "grpc_asyncio", request_type=metadata_service.ListExecutionsRequest
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListExecutionsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListExecutionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListExecutionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_executions_async_from_dict():
await test_list_executions_async(request_type=dict)
def test_list_executions_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListExecutionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
call.return_value = metadata_service.ListExecutionsResponse()
client.list_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_executions_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListExecutionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListExecutionsResponse()
)
await client.list_executions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_executions_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListExecutionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_executions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_executions_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_executions(
metadata_service.ListExecutionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_executions_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListExecutionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_executions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_executions_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_executions(
metadata_service.ListExecutionsRequest(), parent="parent_value",
)
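# The pager tests feed four pages through call.side_effect (3 + 0 + 1 + 2 executions)
# and verify that the pager iterates across page boundaries transparently; the
# trailing RuntimeError would surface if an unexpected extra page request were made.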
def test_list_executions_pager():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListExecutionsResponse(
executions=[
execution.Execution(),
execution.Execution(),
execution.Execution(),
],
next_page_token="abc",
),
metadata_service.ListExecutionsResponse(
executions=[], next_page_token="def",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(),], next_page_token="ghi",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(), execution.Execution(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
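        # The pager is expected to retain the routing-header metadata (asserted via
        # pager._metadata below) so that subsequent page requests carry the same header.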
pager = client.list_executions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, execution.Execution) for i in results)
def test_list_executions_pages():
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListExecutionsResponse(
executions=[
execution.Execution(),
execution.Execution(),
execution.Execution(),
],
next_page_token="abc",
),
metadata_service.ListExecutionsResponse(
executions=[], next_page_token="def",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(),], next_page_token="ghi",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(), execution.Execution(),],
),
RuntimeError,
)
pages = list(client.list_executions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_executions_async_pager():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListExecutionsResponse(
executions=[
execution.Execution(),
execution.Execution(),
execution.Execution(),
],
next_page_token="abc",
),
metadata_service.ListExecutionsResponse(
executions=[], next_page_token="def",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(),], next_page_token="ghi",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(), execution.Execution(),],
),
RuntimeError,
)
async_pager = await client.list_executions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, execution.Execution) for i in responses)
@pytest.mark.asyncio
async def test_list_executions_async_pages():
client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListExecutionsResponse(
executions=[
execution.Execution(),
execution.Execution(),
execution.Execution(),
],
next_page_token="abc",
),
metadata_service.ListExecutionsResponse(
executions=[], next_page_token="def",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(),], next_page_token="ghi",
),
metadata_service.ListExecutionsResponse(
executions=[execution.Execution(), execution.Execution(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_executions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_update_execution(
transport: str = "grpc", request_type=metadata_service.UpdateExecutionRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_execution.Execution(
name="name_value",
display_name="display_name_value",
state=gca_execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
response = client.update_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == gca_execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
def test_update_execution_from_dict():
test_update_execution(request_type=dict)
def test_update_execution_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
client.update_execution()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateExecutionRequest()
@pytest.mark.asyncio
async def test_update_execution_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.UpdateExecutionRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_execution.Execution(
name="name_value",
display_name="display_name_value",
state=gca_execution.Execution.State.NEW,
etag="etag_value",
schema_title="schema_title_value",
schema_version="schema_version_value",
description="description_value",
)
)
response = await client.update_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.UpdateExecutionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_execution.Execution)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == gca_execution.Execution.State.NEW
assert response.etag == "etag_value"
assert response.schema_title == "schema_title_value"
assert response.schema_version == "schema_version_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_execution_async_from_dict():
await test_update_execution_async(request_type=dict)
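# UpdateExecution routes on the nested execution.name field, so the field-header
# tests below expect an "execution.name=..." entry in x-goog-request-params instead
# of a top-level resource name.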
def test_update_execution_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateExecutionRequest()
request.execution.name = "execution.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
call.return_value = gca_execution.Execution()
client.update_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_execution_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.UpdateExecutionRequest()
request.execution.name = "execution.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_execution.Execution()
)
await client.update_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution.name=execution.name/value",) in kw[
"metadata"
]
def test_update_execution_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_execution.Execution()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_execution(
execution=gca_execution.Execution(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].execution == gca_execution.Execution(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_execution_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_execution(
metadata_service.UpdateExecutionRequest(),
execution=gca_execution.Execution(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_execution_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_execution(
execution=gca_execution.Execution(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].execution == gca_execution.Execution(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_execution_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_execution(
metadata_service.UpdateExecutionRequest(),
execution=gca_execution.Execution(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
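# AddExecutionEvents routes on the "execution" field and accepts a repeated "events"
# field, which the flattened tests pass as a list of event.Event messages.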
def test_add_execution_events(
transport: str = "grpc", request_type=metadata_service.AddExecutionEventsRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddExecutionEventsResponse()
response = client.add_execution_events(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddExecutionEventsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_service.AddExecutionEventsResponse)
def test_add_execution_events_from_dict():
test_add_execution_events(request_type=dict)
def test_add_execution_events_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
client.add_execution_events()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddExecutionEventsRequest()
@pytest.mark.asyncio
async def test_add_execution_events_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.AddExecutionEventsRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddExecutionEventsResponse()
)
response = await client.add_execution_events(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.AddExecutionEventsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_service.AddExecutionEventsResponse)
@pytest.mark.asyncio
async def test_add_execution_events_async_from_dict():
await test_add_execution_events_async(request_type=dict)
def test_add_execution_events_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddExecutionEventsRequest()
request.execution = "execution/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
call.return_value = metadata_service.AddExecutionEventsResponse()
client.add_execution_events(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_add_execution_events_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.AddExecutionEventsRequest()
request.execution = "execution/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.AddExecutionEventsResponse()
)
await client.add_execution_events(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"]
def test_add_execution_events_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.AddExecutionEventsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.add_execution_events(
execution="execution_value",
events=[event.Event(artifact="artifact_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].execution == "execution_value"
assert args[0].events == [event.Event(artifact="artifact_value")]
def test_add_execution_events_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.add_execution_events(
metadata_service.AddExecutionEventsRequest(),
execution="execution_value",
events=[event.Event(artifact="artifact_value")],
)
@pytest.mark.asyncio
async def test_add_execution_events_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_execution_events), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddExecutionEventsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.add_execution_events(
execution="execution_value",
events=[event.Event(artifact="artifact_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].execution == "execution_value"
assert args[0].events == [event.Event(artifact="artifact_value")]
@pytest.mark.asyncio
async def test_add_execution_events_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.add_execution_events(
metadata_service.AddExecutionEventsRequest(),
execution="execution_value",
events=[event.Event(artifact="artifact_value")],
)
def test_query_execution_inputs_and_outputs(
transport: str = "grpc",
request_type=metadata_service.QueryExecutionInputsAndOutputsRequest,
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
response = client.query_execution_inputs_and_outputs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_execution_inputs_and_outputs_from_dict():
test_query_execution_inputs_and_outputs(request_type=dict)
def test_query_execution_inputs_and_outputs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
client.query_execution_inputs_and_outputs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.QueryExecutionInputsAndOutputsRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
response = await client.query_execution_inputs_and_outputs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_async_from_dict():
await test_query_execution_inputs_and_outputs_async(request_type=dict)
def test_query_execution_inputs_and_outputs_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryExecutionInputsAndOutputsRequest()
request.execution = "execution/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
call.return_value = lineage_subgraph.LineageSubgraph()
client.query_execution_inputs_and_outputs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryExecutionInputsAndOutputsRequest()
request.execution = "execution/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
await client.query_execution_inputs_and_outputs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "execution=execution/value",) in kw["metadata"]
def test_query_execution_inputs_and_outputs_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.query_execution_inputs_and_outputs(execution="execution_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].execution == "execution_value"
def test_query_execution_inputs_and_outputs_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.query_execution_inputs_and_outputs(
metadata_service.QueryExecutionInputsAndOutputsRequest(),
execution="execution_value",
)
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_execution_inputs_and_outputs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.query_execution_inputs_and_outputs(
execution="execution_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].execution == "execution_value"
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.query_execution_inputs_and_outputs(
metadata_service.QueryExecutionInputsAndOutputsRequest(),
execution="execution_value",
)
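# The MetadataSchema tests repeat the same matrix (basic call, from_dict, empty call,
# async, field headers, flattened) and additionally assert the enum-valued
# schema_type field on the response.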
def test_create_metadata_schema(
transport: str = "grpc", request_type=metadata_service.CreateMetadataSchemaRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_metadata_schema.MetadataSchema(
name="name_value",
schema_version="schema_version_value",
schema="schema_value",
schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
description="description_value",
)
response = client.create_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataSchemaRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_metadata_schema.MetadataSchema)
assert response.name == "name_value"
assert response.schema_version == "schema_version_value"
assert response.schema == "schema_value"
assert (
response.schema_type
== gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
)
assert response.description == "description_value"
def test_create_metadata_schema_from_dict():
test_create_metadata_schema(request_type=dict)
def test_create_metadata_schema_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
client.create_metadata_schema()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataSchemaRequest()
@pytest.mark.asyncio
async def test_create_metadata_schema_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.CreateMetadataSchemaRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_metadata_schema.MetadataSchema(
name="name_value",
schema_version="schema_version_value",
schema="schema_value",
schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
description="description_value",
)
)
response = await client.create_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.CreateMetadataSchemaRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_metadata_schema.MetadataSchema)
assert response.name == "name_value"
assert response.schema_version == "schema_version_value"
assert response.schema == "schema_value"
assert (
response.schema_type
== gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
)
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_metadata_schema_async_from_dict():
await test_create_metadata_schema_async(request_type=dict)
def test_create_metadata_schema_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateMetadataSchemaRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
call.return_value = gca_metadata_schema.MetadataSchema()
client.create_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_metadata_schema_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.CreateMetadataSchemaRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_metadata_schema.MetadataSchema()
)
await client.create_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_metadata_schema_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_metadata_schema.MetadataSchema()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_metadata_schema(
parent="parent_value",
metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
metadata_schema_id="metadata_schema_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(
name="name_value"
)
assert args[0].metadata_schema_id == "metadata_schema_id_value"
def test_create_metadata_schema_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_metadata_schema(
metadata_service.CreateMetadataSchemaRequest(),
parent="parent_value",
metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
metadata_schema_id="metadata_schema_id_value",
)
@pytest.mark.asyncio
async def test_create_metadata_schema_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_metadata_schema.MetadataSchema()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_metadata_schema(
parent="parent_value",
metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
metadata_schema_id="metadata_schema_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(
name="name_value"
)
assert args[0].metadata_schema_id == "metadata_schema_id_value"
@pytest.mark.asyncio
async def test_create_metadata_schema_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_metadata_schema(
metadata_service.CreateMetadataSchemaRequest(),
parent="parent_value",
metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
metadata_schema_id="metadata_schema_id_value",
)
def test_get_metadata_schema(
transport: str = "grpc", request_type=metadata_service.GetMetadataSchemaRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_schema.MetadataSchema(
name="name_value",
schema_version="schema_version_value",
schema="schema_value",
schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
description="description_value",
)
response = client.get_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataSchemaRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_schema.MetadataSchema)
assert response.name == "name_value"
assert response.schema_version == "schema_version_value"
assert response.schema == "schema_value"
assert (
response.schema_type
== metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
)
assert response.description == "description_value"
def test_get_metadata_schema_from_dict():
test_get_metadata_schema(request_type=dict)
def test_get_metadata_schema_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
client.get_metadata_schema()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataSchemaRequest()
@pytest.mark.asyncio
async def test_get_metadata_schema_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.GetMetadataSchemaRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_schema.MetadataSchema(
name="name_value",
schema_version="schema_version_value",
schema="schema_value",
schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
description="description_value",
)
)
response = await client.get_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.GetMetadataSchemaRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, metadata_schema.MetadataSchema)
assert response.name == "name_value"
assert response.schema_version == "schema_version_value"
assert response.schema == "schema_value"
assert (
response.schema_type
== metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
)
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_metadata_schema_async_from_dict():
await test_get_metadata_schema_async(request_type=dict)
def test_get_metadata_schema_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetMetadataSchemaRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
call.return_value = metadata_schema.MetadataSchema()
client.get_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_metadata_schema_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.GetMetadataSchemaRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_schema.MetadataSchema()
)
await client.get_metadata_schema(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_metadata_schema_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_schema.MetadataSchema()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_metadata_schema(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_metadata_schema_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_metadata_schema(
metadata_service.GetMetadataSchemaRequest(), name="name_value",
)
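# Illustrative sketch (not part of the generated tests): "flattened" calls such as the
# ones above let callers pass individual fields instead of a request message, and the
# client assembles the request internally. The resource name below is a made-up value;
# the two invocations are equivalent, and supplying a request object *and* a flattened
# field at the same time raises ValueError, as asserted above.
#
#   client.get_metadata_schema(name="projects/p/locations/l/metadataStores/s/metadataSchemas/m")
#   client.get_metadata_schema(
#       metadata_service.GetMetadataSchemaRequest(
#           name="projects/p/locations/l/metadataStores/s/metadataSchemas/m"
#       )
#   )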
@pytest.mark.asyncio
async def test_get_metadata_schema_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_metadata_schema), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_schema.MetadataSchema()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_schema.MetadataSchema()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_metadata_schema(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_metadata_schema_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_metadata_schema(
metadata_service.GetMetadataSchemaRequest(), name="name_value",
)
def test_list_metadata_schemas(
transport: str = "grpc", request_type=metadata_service.ListMetadataSchemasRequest
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataSchemasResponse(
next_page_token="next_page_token_value",
)
response = client.list_metadata_schemas(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataSchemasRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMetadataSchemasPager)
assert response.next_page_token == "next_page_token_value"
def test_list_metadata_schemas_from_dict():
test_list_metadata_schemas(request_type=dict)
def test_list_metadata_schemas_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
client.list_metadata_schemas()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataSchemasRequest()
@pytest.mark.asyncio
async def test_list_metadata_schemas_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.ListMetadataSchemasRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataSchemasResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_metadata_schemas(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.ListMetadataSchemasRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMetadataSchemasAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_from_dict():
await test_list_metadata_schemas_async(request_type=dict)
def test_list_metadata_schemas_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListMetadataSchemasRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
call.return_value = metadata_service.ListMetadataSchemasResponse()
client.list_metadata_schemas(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_metadata_schemas_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.ListMetadataSchemasRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataSchemasResponse()
)
await client.list_metadata_schemas(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_metadata_schemas_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataSchemasResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_metadata_schemas(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_metadata_schemas_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_metadata_schemas(
metadata_service.ListMetadataSchemasRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_metadata_schemas_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = metadata_service.ListMetadataSchemasResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
metadata_service.ListMetadataSchemasResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_metadata_schemas(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_metadata_schemas_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_metadata_schemas(
metadata_service.ListMetadataSchemasRequest(), parent="parent_value",
)
def test_list_metadata_schemas_pager():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
next_page_token="abc",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[], next_page_token="def",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[metadata_schema.MetadataSchema(),],
next_page_token="ghi",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_metadata_schemas(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, metadata_schema.MetadataSchema) for i in results)
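# Illustrative sketch (not part of the generated tests): the pager exercised above
# flattens the mocked pages by following next_page_token until a page no longer has
# one. The helper below mimics that behaviour with a plain callable; the function name
# and the fetch_page parameter are hypothetical and it is never invoked here.
def _example_flatten_pages(fetch_page):
    """Yield items from successive pages returned by ``fetch_page(token)``."""
    token = ""
    while True:
        page = fetch_page(token)
        for item in page.metadata_schemas:
            yield item
        token = page.next_page_token
        if not token:
            break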
def test_list_metadata_schemas_pages():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
next_page_token="abc",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[], next_page_token="def",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[metadata_schema.MetadataSchema(),],
next_page_token="ghi",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
),
RuntimeError,
)
pages = list(client.list_metadata_schemas(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pager():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
next_page_token="abc",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[], next_page_token="def",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[metadata_schema.MetadataSchema(),],
next_page_token="ghi",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
),
RuntimeError,
)
async_pager = await client.list_metadata_schemas(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, metadata_schema.MetadataSchema) for i in responses)
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pages():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_metadata_schemas),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
next_page_token="abc",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[], next_page_token="def",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[metadata_schema.MetadataSchema(),],
next_page_token="ghi",
),
metadata_service.ListMetadataSchemasResponse(
metadata_schemas=[
metadata_schema.MetadataSchema(),
metadata_schema.MetadataSchema(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_metadata_schemas(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_query_artifact_lineage_subgraph(
transport: str = "grpc",
request_type=metadata_service.QueryArtifactLineageSubgraphRequest,
):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
response = client.query_artifact_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_artifact_lineage_subgraph_from_dict():
test_query_artifact_lineage_subgraph(request_type=dict)
def test_query_artifact_lineage_subgraph_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
client.query_artifact_lineage_subgraph()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async(
transport: str = "grpc_asyncio",
request_type=metadata_service.QueryArtifactLineageSubgraphRequest,
):
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
response = await client.query_artifact_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async_from_dict():
await test_query_artifact_lineage_subgraph_async(request_type=dict)
def test_query_artifact_lineage_subgraph_field_headers():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryArtifactLineageSubgraphRequest()
request.artifact = "artifact/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
call.return_value = lineage_subgraph.LineageSubgraph()
client.query_artifact_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_field_headers_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = metadata_service.QueryArtifactLineageSubgraphRequest()
request.artifact = "artifact/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
await client.query_artifact_lineage_subgraph(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "artifact=artifact/value",) in kw["metadata"]
def test_query_artifact_lineage_subgraph_flattened():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.query_artifact_lineage_subgraph(artifact="artifact_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].artifact == "artifact_value"
def test_query_artifact_lineage_subgraph_flattened_error():
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.query_artifact_lineage_subgraph(
metadata_service.QueryArtifactLineageSubgraphRequest(),
artifact="artifact_value",
)
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.query_artifact_lineage_subgraph), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = lineage_subgraph.LineageSubgraph()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
lineage_subgraph.LineageSubgraph()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.query_artifact_lineage_subgraph(
artifact="artifact_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].artifact == "artifact_value"
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_error_async():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.query_artifact_lineage_subgraph(
metadata_service.QueryArtifactLineageSubgraphRequest(),
artifact="artifact_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.MetadataServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.MetadataServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MetadataServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.MetadataServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MetadataServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.MetadataServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = MetadataServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.MetadataServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.MetadataServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.MetadataServiceGrpcTransport,)
def test_metadata_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.MetadataServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_metadata_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.MetadataServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_metadata_store",
"get_metadata_store",
"list_metadata_stores",
"delete_metadata_store",
"create_artifact",
"get_artifact",
"list_artifacts",
"update_artifact",
"create_context",
"get_context",
"list_contexts",
"update_context",
"delete_context",
"add_context_artifacts_and_executions",
"add_context_children",
"query_context_lineage_subgraph",
"create_execution",
"get_execution",
"list_executions",
"update_execution",
"add_execution_events",
"query_execution_inputs_and_outputs",
"create_metadata_schema",
"get_metadata_schema",
"list_metadata_schemas",
"query_artifact_lineage_subgraph",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_metadata_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MetadataServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_metadata_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MetadataServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_metadata_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MetadataServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_metadata_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MetadataServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_metadata_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MetadataServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_metadata_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_metadata_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.MetadataServiceGrpcTransport, grpc_helpers),
(transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_gte_1_26_0
def test_metadata_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.MetadataServiceGrpcTransport, grpc_helpers),
(transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_metadata_service_transport_create_channel_old_api_core(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.MetadataServiceGrpcTransport, grpc_helpers),
(transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
@requires_api_core_lt_1_26_0
def test_metadata_service_transport_create_channel_user_scopes(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
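# Illustrative sketch (not part of the generated tests): client_cert_source_for_mtls is
# a zero-argument callable returning a (certificate_chain_bytes, private_key_bytes)
# pair, which the transport forwards to grpc.ssl_channel_credentials() as asserted
# above. The helper below is a hypothetical example of such a callback.
#
#   def my_cert_source():
#       with open("client.crt", "rb") as crt, open("client.key", "rb") as key:
#           return crt.read(), key.read()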
def test_metadata_service_host_no_port():
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_metadata_service_host_with_port():
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_metadata_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.MetadataServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_metadata_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.MetadataServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
def test_metadata_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.MetadataServiceGrpcTransport,
transports.MetadataServiceGrpcAsyncIOTransport,
],
)
def test_metadata_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_metadata_service_grpc_lro_client():
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_metadata_service_grpc_lro_async_client():
client = MetadataServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_artifact_path():
project = "squid"
location = "clam"
metadata_store = "whelk"
artifact = "octopus"
expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(
project=project,
location=location,
metadata_store=metadata_store,
artifact=artifact,
)
actual = MetadataServiceClient.artifact_path(
project, location, metadata_store, artifact
)
assert expected == actual
def test_parse_artifact_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"metadata_store": "cuttlefish",
"artifact": "mussel",
}
path = MetadataServiceClient.artifact_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_artifact_path(path)
assert expected == actual
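# Illustrative sketch (not part of the generated tests): the path helpers above are a
# plain round-trip between keyword components and a resource name string. The function
# below is a hypothetical, uncollected example using made-up component values.
def _example_artifact_path_round_trip():
    path = MetadataServiceClient.artifact_path("p", "loc", "store", "a")
    assert path == "projects/p/locations/loc/metadataStores/store/artifacts/a"
    assert MetadataServiceClient.parse_artifact_path(path) == {
        "project": "p",
        "location": "loc",
        "metadata_store": "store",
        "artifact": "a",
    }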
def test_context_path():
project = "winkle"
location = "nautilus"
metadata_store = "scallop"
context = "abalone"
expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(
project=project,
location=location,
metadata_store=metadata_store,
context=context,
)
actual = MetadataServiceClient.context_path(
project, location, metadata_store, context
)
assert expected == actual
def test_parse_context_path():
expected = {
"project": "squid",
"location": "clam",
"metadata_store": "whelk",
"context": "octopus",
}
path = MetadataServiceClient.context_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_context_path(path)
assert expected == actual
def test_execution_path():
project = "oyster"
location = "nudibranch"
metadata_store = "cuttlefish"
execution = "mussel"
expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(
project=project,
location=location,
metadata_store=metadata_store,
execution=execution,
)
actual = MetadataServiceClient.execution_path(
project, location, metadata_store, execution
)
assert expected == actual
def test_parse_execution_path():
expected = {
"project": "winkle",
"location": "nautilus",
"metadata_store": "scallop",
"execution": "abalone",
}
path = MetadataServiceClient.execution_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_execution_path(path)
assert expected == actual
def test_metadata_schema_path():
project = "squid"
location = "clam"
metadata_store = "whelk"
metadata_schema = "octopus"
expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(
project=project,
location=location,
metadata_store=metadata_store,
metadata_schema=metadata_schema,
)
actual = MetadataServiceClient.metadata_schema_path(
project, location, metadata_store, metadata_schema
)
assert expected == actual
def test_parse_metadata_schema_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"metadata_store": "cuttlefish",
"metadata_schema": "mussel",
}
path = MetadataServiceClient.metadata_schema_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_metadata_schema_path(path)
assert expected == actual
def test_metadata_store_path():
project = "winkle"
location = "nautilus"
metadata_store = "scallop"
expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(
project=project, location=location, metadata_store=metadata_store,
)
actual = MetadataServiceClient.metadata_store_path(
project, location, metadata_store
)
assert expected == actual
def test_parse_metadata_store_path():
expected = {
"project": "abalone",
"location": "squid",
"metadata_store": "clam",
}
path = MetadataServiceClient.metadata_store_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_metadata_store_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = MetadataServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = MetadataServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = MetadataServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = MetadataServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = MetadataServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = MetadataServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = MetadataServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = MetadataServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = MetadataServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = MetadataServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = MetadataServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.MetadataServiceTransport, "_prep_wrapped_messages"
) as prep:
client = MetadataServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.MetadataServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = MetadataServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| 38.793961
| 130
| 0.692833
|
6336817a1caee5582f682fa9c142c6452805661f
| 2,863
|
py
|
Python
|
tests/integration/modules/linux_acl.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 3
|
2015-04-16T18:42:35.000Z
|
2017-10-30T16:57:49.000Z
|
tests/integration/modules/linux_acl.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 16
|
2015-11-18T00:44:03.000Z
|
2018-10-29T20:48:27.000Z
|
tests/integration/modules/linux_acl.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 1
|
2018-04-19T16:57:27.000Z
|
2018-04-19T16:57:27.000Z
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import shutil
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath, skip_if_binaries_missing
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
# from salt.modules import linux_acl as acl
# Acl package should be installed to test linux_acl module
@skip_if_binaries_missing(['getfacl'])
# Doesn't work. Why?
# @requires_salt_modules('acl')
# @requires_salt_modules('linux_acl')
class LinuxAclModuleTest(integration.ModuleCase,
integration.AdaptedConfigurationTestCaseMixIn):
'''
Validate the linux_acl module
'''
def setUp(self):
# Blindly copied from tests.integration.modules.file; Refactoring?
self.myfile = os.path.join(integration.TMP, 'myfile')
with salt.utils.fopen(self.myfile, 'w+') as fp:
fp.write('Hello\n')
self.mydir = os.path.join(integration.TMP, 'mydir/isawesome')
if not os.path.isdir(self.mydir):
# left behind... Don't fail because of this!
os.makedirs(self.mydir)
self.mysymlink = os.path.join(integration.TMP, 'mysymlink')
if os.path.islink(self.mysymlink):
os.remove(self.mysymlink)
os.symlink(self.myfile, self.mysymlink)
self.mybadsymlink = os.path.join(integration.TMP, 'mybadsymlink')
if os.path.islink(self.mybadsymlink):
os.remove(self.mybadsymlink)
os.symlink('/nonexistentpath', self.mybadsymlink)
super(LinuxAclModuleTest, self).setUp()
def tearDown(self):
if os.path.isfile(self.myfile):
os.remove(self.myfile)
if os.path.islink(self.mysymlink):
os.remove(self.mysymlink)
if os.path.islink(self.mybadsymlink):
os.remove(self.mybadsymlink)
shutil.rmtree(self.mydir, ignore_errors=True)
super(LinuxAclModuleTest, self).tearDown()
def test_version(self):
self.assertRegexpMatches(self.run_function('acl.version'), r'\d+\.\d+\.\d+')
def test_getfacl_w_single_file_without_acl(self):
ret = self.run_function('acl.getfacl', arg=[self.myfile])
self.assertEqual(
ret,
{self.myfile: {'other': {'octal': 4, 'permissions': {'read': True, 'write': False, 'execute': False}},
'user': [{'root': {'octal': 6, 'permissions': {'read': True, 'write': True, 'execute': False}}}],
'group': [{'root': {'octal': 4, 'permissions': {'read': True, 'write': False, 'execute': False}}}],
'comment': {'owner': 'root', 'group': 'root', 'file': self.myfile}}}
)
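    # Illustrative note (not part of the original test): the expected dictionary above
    # corresponds to plain ``getfacl`` output for a file with no extended ACL entries,
    # roughly:
    #
    #   # file: <myfile>
    #   # owner: root
    #   # group: root
    #   user::rw-
    #   group::r--
    #   other::r--
    #
    # Each permission triplet maps to the octal value and the read/write/execute flags
    # asserted in the expected result.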
if __name__ == '__main__':
from integration import run_tests
run_tests(LinuxAclModuleTest)
| 38.173333
| 126
| 0.634998
|
43e9f43a566ab322c89d9269372c4fadd299c576
| 5,712
|
py
|
Python
|
goLDAP.py
|
MD-AlHowsa/GoLDAP
|
4dfef9e010b77b9a16bf8d542eae530fe368f204
|
[
"MIT"
] | 10
|
2020-11-06T20:24:11.000Z
|
2022-02-10T11:07:54.000Z
|
goLDAP.py
|
MD-AlHowsa/GoLDAP
|
4dfef9e010b77b9a16bf8d542eae530fe368f204
|
[
"MIT"
] | 3
|
2020-12-21T15:36:20.000Z
|
2022-02-22T07:50:44.000Z
|
goLDAP.py
|
MD-AlHowsa/GoLDAP
|
4dfef9e010b77b9a16bf8d542eae530fe368f204
|
[
"MIT"
] | 3
|
2021-03-12T09:22:52.000Z
|
2022-02-22T06:42:46.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Name: goLDAP
Author : Mohammed AlHowsa
Brief: An addon for gophish to import users via LDAP
Github : https://github.com/md-howsa
'''
#====================== Configuration ==========================
ldap_server = 'ldap://ldap_IP'
us = 'us'
pw = 'pw'
base_dn = 'ou=Users,dc=example,dc=com'
col_names = 'Email, First Name, Last Name,position'
gophish_server = 'localhost'
gophish_port = '3333'
gophish_api_key = 'api_key'
group_name = "example group"
update_group = 0
#=================================================================
import ldap
import csv
import requests
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from ldap.controls import SimplePagedResultsControl
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
filter = 'objectClass=user'
attrs = ['mail','givenname','sn','position']
csv_output_file = 'users.csv'
#============= Ldap Connection & search =======================
def ldap_search(ldap_server,us,pw,base_dn,attrs,filter):
connect = ldap.initialize(ldap_server)
connect.set_option(ldap.OPT_REFERRALS, 0)
connect.simple_bind_s(us, pw)
page_control = SimplePagedResultsControl(True, size=1000, cookie='')
response = connect.search_ext(base_dn,ldap.SCOPE_SUBTREE,filter,attrs,0,serverctrls=[page_control])
result = []
pages = 0
while True:
pages += 1
rtype, rdata, rmsgid, serverctrls = connect.result3(response)
result.extend(rdata)
        print(len(result))
controls = [control for control in serverctrls
if control.controlType == SimplePagedResultsControl.controlType]
if not controls:
print('The server ignores RFC 2696 control')
break
if not controls[0].cookie:
break
page_control.cookie = controls[0].cookie
response = connect.search_ext(base_dn,ldap.SCOPE_SUBTREE,filter,attrs,0,serverctrls=[page_control])
return result
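# Illustrative sketch (not part of the original script): a minimal call to the paged
# search helper above. The server URL, bind DN, password and base DN are placeholder
# values; each returned entry is a (dn, {attribute: [values]}) tuple from python-ldap.
#
#   entries = ldap_search('ldap://ldap.example.com',
#                         'cn=reader,dc=example,dc=com', 'secret',
#                         'ou=Users,dc=example,dc=com',
#                         ['mail', 'givenname', 'sn'], 'objectClass=user')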
#========= organize response for Gophish =============
# == unwrap the one-element lists python-ldap returns for each attribute ==
def gophish_format(search_result):
for i in search_result:
temp_m = i[1].get('mail')
temp_g = i[1].get('givenName')
temp_s = i[1].get('sn')
temp_p = i[1].get('position')
if temp_m:
i[1]['mail'] = temp_m[0]
if temp_g:
i[1]['givenName'] = temp_g[0]
if temp_s:
i[1]['sn'] = temp_s[0]
        if temp_p:
            i[1]['position'] = temp_p[0]
return search_result
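# Illustrative example (not part of the original script): gophish_format() turns a
# python-ldap entry such as
#   ('cn=jdoe,ou=Users,dc=example,dc=com',
#    {'mail': ['jdoe@example.com'], 'givenName': ['John'], 'sn': ['Doe']})
# into
#   ('cn=jdoe,ou=Users,dc=example,dc=com',
#    {'mail': 'jdoe@example.com', 'givenName': 'John', 'sn': 'Doe'})
# so each attribute holds a single value instead of a one-element list, which is the
# shape the CSV writer below expects.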
#========= write the LDAP result to a CSV file =====
def result_to_csv(result,col_names,csv_output_file):
dest_file_name = csv_output_file
with open(dest_file_name, 'w') as file:
users = [x[1] for x in result]
file.write(col_names)
file.write("\n")
w = csv.DictWriter(file,users[0].keys())
w.writerows(users)
#=========== convert CSV to JSON, then upload it through the API =====
def upload_csv(group_name,gophish_api_key, csv_output_file,update_group):
fileObj = {'file': open(csv_output_file,'rb')}
response_toJson = requests.post("https://"+gophish_server+":"+gophish_port+"/api/import/group?api_key="+gophish_api_key,files=fileObj,verify=False)
    if response_toJson.status_code == 200:
        print("Step 1: CSV file was successfully transformed to JSON format")
    else:
        print("Step 1: Error, status code " + str(response_toJson.status_code))
group = {}
group['name'] = group_name
group['targets'] = response_toJson.json()
if update_group:
group['id'] = update_group
json_group = json.dumps(group)
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    if update_group:
        print("Trying to update group with ID " + str(update_group))
        response_toUpload = requests.put("https://"+gophish_server+":"+gophish_port+"/api/groups/"+str(update_group)+"?api_key="+gophish_api_key,data=json_group,verify=False)
    else:
        print("Trying to create new group with name " + group_name)
        response_toUpload = requests.post("https://"+gophish_server+":"+gophish_port+"/api/groups/?api_key="+gophish_api_key,data=json_group,headers=headers,verify=False)
    if response_toUpload.status_code == 201:
        print("Step 2: Done, total number of users is " + str(len(response_toUpload.json()['targets'])))
    elif response_toUpload.status_code == 409:
        print("Step 2: Group already exists; set update_group in the configuration section to the group's ID instead of 0")
        print("Status code = " + str(response_toUpload.status_code))
    elif response_toUpload.status_code == 200:
        print("Step 2: Done, total number of users is " + str(len(response_toUpload.json()['targets'])))
    else:
        print("Step 2: Error, status code " + str(response_toUpload.status_code))
def main():
global ldap_server
global us
global pw
global attrs
global base_dn
global filter
global csv_output_file
global col_names
global update_group
search_result =ldap_search(ldap_server,us,pw,base_dn,attrs,filter)
result = gophish_format(search_result)
result_to_csv(result,col_names,csv_output_file)
upload_csv(group_name,gophish_api_key,csv_output_file,update_group)
if __name__ == "__main__":
main()
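# Run sketch (deployment-specific values not shown here): fill in the configuration
# section of the code (ldap_server, us, pw, base_dn, filter, attrs, col_names,
# csv_output_file, gophish_server, gophish_port, gophish_api_key, group_name,
# update_group), then execute this script with Python 2.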
| 35.042945
| 182
| 0.617997
|
6c0488d391a62375437bd2383b307543ce8ae7e0
| 12,535
|
py
|
Python
|
libs/numpy/tests/test_ctypeslib.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
libs/numpy/tests/test_ctypeslib.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
libs/numpy/tests/test_ctypeslib.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
import sys
import pytest
import weakref
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
import ctypes
except ImportError:
ctypes = None
else:
cdll = None
test_cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
except OSError:
pass
try:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
except OSError:
pass
if cdll is None:
cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
if test_cdll is None:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
c_forward_pointer = test_cdll.forward_pointer
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
class TestLoadLibrary:
def test_basic(self):
try:
# Should succeed
load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
try:
try:
so = get_shared_lib_extension(is_python_ext=True)
# Should succeed
load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
except ImportError:
print("No distutils available, skipping test.")
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
class TestNdpointer:
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
assert_(p.from_param(np.array(1)))
assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
assert_raises(TypeError, p.from_param, np.array(1))
assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
assert_(p.from_param(np.array([[1, 2]])))
assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
assert_(p.from_param(x))
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
# shapes are normalized
assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
# 1.12 <= v < 1.16 had a bug that made these fail
assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestNdpointerCFunc:
def test_arguments(self):
""" Test that arguments are coerced from arrays """
c_forward_pointer.restype = ctypes.c_void_p
c_forward_pointer.argtypes = (ndpointer(ndim=2),)
c_forward_pointer(np.zeros((2, 3)))
# too many dimensions
assert_raises(
ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
@pytest.mark.parametrize(
'dt', [
float,
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
], ids=[
'float',
'overlapping-fields'
]
)
def test_return(self, dt):
""" Test that return values are coerced to arrays """
arr = np.zeros((2, 3), dt)
ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
# check that the arrays are equivalent views on the same data
arr2 = c_forward_pointer(arr)
assert_equal(arr2.dtype, arr.dtype)
assert_equal(arr2.shape, arr.shape)
assert_equal(
arr2.__array_interface__['data'],
arr.__array_interface__['data']
)
def test_vague_return_value(self):
""" Test that vague ndpointer return values do not promote to arrays """
arr = np.zeros((2, 3))
ptr_type = ndpointer(dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
ret = c_forward_pointer(arr)
assert_(isinstance(ret, ptr_type))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsArray:
def test_array(self):
from ctypes import c_int
pair_t = c_int * 2
a = as_array(pair_t(1, 2))
assert_equal(a.shape, (2,))
assert_array_equal(a, np.array([1, 2]))
a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
assert_equal(a.shape, (3, 2))
assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
def test_pointer(self):
from ctypes import c_int, cast, POINTER
p = cast((c_int * 10)(*range(10)), POINTER(c_int))
a = as_array(p, shape=(10,))
assert_equal(a.shape, (10,))
assert_array_equal(a, np.arange(10))
a = as_array(p, shape=(2, 5))
assert_equal(a.shape, (2, 5))
assert_array_equal(a, np.arange(10).reshape((2, 5)))
# shape argument is required
assert_raises(TypeError, as_array, p)
def test_struct_array_pointer(self):
from ctypes import c_int16, Structure, pointer
class Struct(Structure):
_fields_ = [('a', c_int16)]
Struct3 = 3 * Struct
c_array = (2 * Struct3)(
Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
)
expected = np.array([
[(1,), (2,), (3,)],
[(4,), (5,), (6,)],
], dtype=[('a', np.int16)])
def check(x):
assert_equal(x.dtype, expected.dtype)
assert_equal(x, expected)
# all of these should be equivalent
check(as_array(c_array))
check(as_array(pointer(c_array), shape=()))
check(as_array(pointer(c_array[0]), shape=(2,)))
check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
def test_reference_cycles(self):
# related to gh-6511
import ctypes
# create array to work with
# don't use int/long to avoid running into bpo-10746
N = 100
a = np.arange(N, dtype=np.short)
# get pointer to array
pnt = np.ctypeslib.as_ctypes(a)
with np.testing.assert_no_gc_cycles():
# decay the array above to a pointer to its first element
newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
# and construct an array using this data
b = np.ctypeslib.as_array(newpnt, (N,))
# now delete both, which should cleanup both objects
del newpnt, b
def test_segmentation_fault(self):
arr = np.zeros((224, 224, 3))
c_arr = np.ctypeslib.as_ctypes(arr)
arr_ref = weakref.ref(arr)
del arr
# check the reference wasn't cleaned up
assert_(arr_ref() is not None)
# check we avoid the segfault
c_arr[0][0][0]
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsCtypesType:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_le__)
dt = np.dtype('>u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_be__)
dt = np.dtype('u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16)
def test_subarray(self):
dt = np.dtype((np.int32, (2, 3)))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, 2 * (3 * ctypes.c_int32))
def test_structure(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
])
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_structure_aligned(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
], align=True)
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('', ctypes.c_char * 2), # padding
('b', ctypes.c_uint32),
])
def test_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32]
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_padded_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32],
itemsize=5,
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
('', ctypes.c_char * 5), # padding
])
def test_overlapping(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 2],
formats=[np.uint32, np.uint32]
))
assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
| 34.248634
| 94
| 0.558197
|
0729ad525e6a6f9bd72910a13837fffd0e7e47c5
| 5,613
|
py
|
Python
|
my_optimizer.py
|
LIJUNYI95/MAML-Pytorch
|
d8f55b43ae7aec0e99cccba78b71c90a1a0bf79f
|
[
"MIT"
] | null | null | null |
my_optimizer.py
|
LIJUNYI95/MAML-Pytorch
|
d8f55b43ae7aec0e99cccba78b71c90a1a0bf79f
|
[
"MIT"
] | null | null | null |
my_optimizer.py
|
LIJUNYI95/MAML-Pytorch
|
d8f55b43ae7aec0e99cccba78b71c90a1a0bf79f
|
[
"MIT"
] | null | null | null |
import math
import torch
from torch.optim.optimizer import Optimizer
import pdb
from collections import defaultdict
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
#super(Adam, self).__init__(params, self.defaults)
self.state = defaultdict(dict)
self.params = params
# def __setstate__(self, state):
# super(Adam, self).__setstate__(state)
# for group in self.param_groups:
# group.setdefault('amsgrad', False)
def step(self, loss=None, weights= None, create_graph=True):
"""Performs a single optimization step.
Arguments:
loss (Tensor): loss whose gradient w.r.t. ``weights`` drives the update.
weights (iterable of Tensor): parameters the update deltas are computed for.
create_graph (bool, optional): retain the graph of the update so it can be
differentiated through (useful for meta-learning). Default: True.
"""
gradient = torch.autograd.grad(outputs=loss, inputs=weights,create_graph=create_graph)
norm = 0
#pdb.set_trace()
norm = torch.norm(gradient[0]).data.cpu().numpy()
#pdb.set_trace()
new_params = []
#pdb.set_trace()
for name, p, grad in zip(self.params, weights,gradient):
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[name]
eps = self.defaults["eps"]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# if amsgrad:
# # Maintains max of all exp. moving avg. of sq. grad. values
# state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if self.defaults["amsgrad"]:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = self.defaults["betas"]
#print(state['step'])
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if self.defaults["weight_decay"] != 0:
grad.add_(self.defaults["weight_decay"], p.data)
# Decay the first and second moment running average coefficient
#pdb.set_trace()
exp_avg = torch.add(torch.mul(exp_avg, beta1), grad, alpha=1 - beta1)
#pdb.set_trace()
exp_avg_sq = torch.addcmul(torch.mul(exp_avg_sq,beta2),\
1 - beta2, grad, grad)
state['exp_avg'], state['exp_avg_sq'] = exp_avg, exp_avg_sq
# exp_avg.mul_(beta1).add_(1 - beta1, grad)
# exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
#pdb.set_trace()
if self.defaults["amsgrad"]:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (torch.sqrt(exp_avg_sq+eps) / math.sqrt(bias_correction2)).add_(eps)
step_size = self.defaults["lr"] / bias_correction1
#p.data.addcdiv_(-step_size, exp_avg, denom)
#pdb.set_trace()
#self.param_groups[i]["params"][j] = \
# torch.addcdiv(p, -step_size, exp_avg, denom)
#pdb.set_trace()
new_params.append(-step_size * exp_avg / denom)
new_params.append(norm)
return new_params
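# Usage sketch (hypothetical names; assumes `weights` is a list of tensors kept in
# the autograd graph, in the same order as the `names` passed as params):
#   inner_opt = Adam(params=names, lr=1e-3)
#   out = inner_opt.step(loss=inner_loss, weights=weights, create_graph=True)
#   deltas, grad_norm = out[:-1], out[-1]
#   fast_weights = [w + d for w, d in zip(weights, deltas)]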
| 44.19685
| 113
| 0.577766
|
c56d0a50576d823026e1b45f04d20317573c08b0
| 1,038
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_7_12_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_7_12_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_7_12_sol_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
import sqlite, cPickle
class Blob(object):
''' automatic converter for binary strings '''
def __init__(self, s): self.s = s
def _quote(self): return "'%s'" % sqlite.encode(self.s)
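# Recipe note (mechanism assumed from the old pysqlite 1.x API this recipe targets):
# when a parameter object defines a _quote method, pysqlite calls it to obtain the
# SQL literal, so Blob instances are inlined as sqlite.encode()-escaped strings.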
# make a test database in memory, get a cursor on it, and make a table
connection = sqlite.connect(':memory:')
cursor = connection.cursor()
cursor.execute("CREATE TABLE justatest (name TEXT, ablob BLOB)")
# Prepare some BLOBs to insert in the table
names = 'aramis', 'athos', 'porthos'
data = {}
for name in names:
datum = list(name)
datum.sort()
data[name] = cPickle.dumps(datum, 2)
# Perform the insertions
sql = 'INSERT INTO justatest VALUES(%s, %s)'
for name in names:
cursor.execute(sql, (name, Blob(data[name])) )
# Recover the data so you can check back
sql = 'SELECT name, ablob FROM justatest ORDER BY name'
cursor.execute(sql)
for name, blob in cursor.fetchall():
print name, cPickle.loads(blob), cPickle.loads(data[name])
# Done, close the connection (would be no big deal if you didn't, but...)
connection.close()
| 37.071429
| 73
| 0.699422
|
7d52aa2a5a9a9ee52041f07899b41340795010d7
| 1,329
|
py
|
Python
|
sagas/listings/listings_servant.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 3
|
2020-01-11T13:55:38.000Z
|
2020-08-25T22:34:15.000Z
|
sagas/listings/listings_servant.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | null | null | null |
sagas/listings/listings_servant.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:21:44.000Z
|
2021-01-01T05:21:44.000Z
|
from sanic import Sanic
from sanic.response import json
from sanic import Blueprint
from sagas.listings.listings_cli import ListingsCli, listings
bp = Blueprint('root_blueprint')
@bp.route('/')
async def bp_root(request):
return json({'mod': 'listings'})
@bp.post("/proc/<target>/<item>")
async def proc(request, target, item):
"""
$ curl -s -d '{"sentence":"Hugging Face is a technology company based in New York and Paris"}' \
-H "Content-Type: application/json" -X POST \
localhost:1755/proc/t5/T5_de | json
$ curl -s -d '{"sentence":"Hugging Face is a technology company"}' \
-H "Content-Type: application/json" -X POST \
localhost:1755/proc/simple/Simple | json
:param request:
:return:
"""
from mode.utils.compat import want_bytes, want_str
rd = request.json
result=listings.proc(target, item, rd)
return json(result, dumps=lambda c: want_str(c.dumps(serializer='json')))
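# Equivalent call from Python instead of curl (sketch, assuming the servant is
# running locally on port 1755 as in the docstring above):
#   import requests
#   requests.post("http://localhost:1755/proc/simple/Simple",
#                 json={"sentence": "Hugging Face is a technology company"}).json()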
app = Sanic(__name__)
app.blueprint(bp)
class ListingsServant(object):
def run(self, port=1755, debug=True):
"""
$ python -m sagas.listings.listings_servant run 1755 False
$ curl localhost:1755
"""
app.run(host='0.0.0.0', port=port, debug=debug)
if __name__ == '__main__':
import fire
fire.Fire(ListingsServant)
| 27.6875
| 100
| 0.659895
|
39a3ada8d76d1c63bf306b1ebd69eee03a1c5cb2
| 1,309
|
py
|
Python
|
school_backend/api/models.py
|
zaebumbatt/manatal-django-test
|
c178de8718c6b58fbb5c6b819bfbe84a0dc3b207
|
[
"MIT"
] | null | null | null |
school_backend/api/models.py
|
zaebumbatt/manatal-django-test
|
c178de8718c6b58fbb5c6b819bfbe84a0dc3b207
|
[
"MIT"
] | null | null | null |
school_backend/api/models.py
|
zaebumbatt/manatal-django-test
|
c178de8718c6b58fbb5c6b819bfbe84a0dc3b207
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.db import models
SCHOOL_TYPE = [
("PRI", 'Primary School'),
("SEC", 'Secondary School'),
("UNI", 'University'),
("COL", 'College'),
("HS", 'High School'),
]
def restrict_amount(value):
if Student.objects.filter(school=value).count() >= value.max_students:
raise ValidationError(
('School already has maximal amount of students '
+ str(value.max_students)), code='invalid'
)
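# Note (assumption about intended use): Django runs field validators such as
# restrict_amount during full_clean() or form/serializer validation (e.g. a DRF
# ModelSerializer), not on a bare Model.save(); in that flow `value` is the related
# School instance, which is what value.max_students above relies on.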
class School(models.Model):
name = models.CharField(max_length=20, unique=True)
school_type = models.CharField(choices=SCHOOL_TYPE, max_length=3)
max_students = models.PositiveIntegerField()
def __str__(self):
return self.name
class Student(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
school = models.ForeignKey(
School,
on_delete=models.CASCADE,
validators=(restrict_amount,)
)
class Log(models.Model):
datetime = models.DateTimeField(auto_now_add=True)
model = models.CharField(max_length=20)
username = models.CharField(max_length=20)
action = models.CharField(max_length=20)
status_code = models.PositiveIntegerField()
data = models.JSONField()
| 27.851064
| 74
| 0.679908
|
fd03b3b621b574abf67f47d91ab6f28dbba45a1c
| 72,830
|
py
|
Python
|
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'EnvironmentContainerImageArgs',
'EnvironmentVmImageArgs',
'InstanceAcceleratorConfigArgs',
'InstanceContainerImageArgs',
'InstanceIamBindingConditionArgs',
'InstanceIamMemberConditionArgs',
'InstanceReservationAffinityArgs',
'InstanceShieldedInstanceConfigArgs',
'InstanceVmImageArgs',
'RuntimeAccessConfigArgs',
'RuntimeIamBindingConditionArgs',
'RuntimeIamMemberConditionArgs',
'RuntimeMetricArgs',
'RuntimeSoftwareConfigArgs',
'RuntimeVirtualMachineArgs',
'RuntimeVirtualMachineVirtualMachineConfigArgs',
'RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs',
'RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs',
'RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs',
'RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs',
'RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs',
'RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs',
]
@pulumi.input_type
class EnvironmentContainerImageArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
tag: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
"""
pulumi.set(__self__, "repository", repository)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag of the container image. If not specified, this defaults to the latest tag.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class EnvironmentVmImageArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
image_family: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
:param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
:param pulumi.Input[str] image_name: Use VM image name to find the image.
"""
pulumi.set(__self__, "project", project)
if image_family is not None:
pulumi.set(__self__, "image_family", image_family)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="imageFamily")
def image_family(self) -> Optional[pulumi.Input[str]]:
"""
Use this VM image family to find the image; the newest image in this family will be used.
"""
return pulumi.get(self, "image_family")
@image_family.setter
def image_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_family", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Use VM image name to find the image.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@pulumi.input_type
class InstanceAcceleratorConfigArgs:
def __init__(__self__, *,
core_count: pulumi.Input[int],
type: pulumi.Input[str]):
"""
:param pulumi.Input[int] core_count: Count of cores of this accelerator.
:param pulumi.Input[str] type: Type of this accelerator.
Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `NVIDIA_TESLA_A100`, `TPU_V2`, and `TPU_V3`.
"""
pulumi.set(__self__, "core_count", core_count)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="coreCount")
def core_count(self) -> pulumi.Input[int]:
"""
Count of cores of this accelerator.
"""
return pulumi.get(self, "core_count")
@core_count.setter
def core_count(self, value: pulumi.Input[int]):
pulumi.set(self, "core_count", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of this accelerator.
Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `NVIDIA_TESLA_A100`, `TPU_V2`, and `TPU_V3`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class InstanceContainerImageArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
tag: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
"""
pulumi.set(__self__, "repository", repository)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag of the container image. If not specified, this defaults to the latest tag.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class InstanceIamBindingConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceIamMemberConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceReservationAffinityArgs:
def __init__(__self__, *,
consume_reservation_type: pulumi.Input[str],
key: Optional[pulumi.Input[str]] = None,
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] consume_reservation_type: The type of Compute Reservation.
Possible values are `NO_RESERVATION`, `ANY_RESERVATION`, and `SPECIFIC_RESERVATION`.
:param pulumi.Input[str] key: Corresponds to the label key of reservation resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of reservation resource.
"""
pulumi.set(__self__, "consume_reservation_type", consume_reservation_type)
if key is not None:
pulumi.set(__self__, "key", key)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter(name="consumeReservationType")
def consume_reservation_type(self) -> pulumi.Input[str]:
"""
The type of Compute Reservation.
Possible values are `NO_RESERVATION`, `ANY_RESERVATION`, and `SPECIFIC_RESERVATION`.
"""
return pulumi.get(self, "consume_reservation_type")
@consume_reservation_type.setter
def consume_reservation_type(self, value: pulumi.Input[str]):
pulumi.set(self, "consume_reservation_type", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Corresponds to the label key of reservation resource.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Corresponds to the label values of reservation resource.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class InstanceShieldedInstanceConfigArgs:
def __init__(__self__, *,
enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
enable_secure_boot: Optional[pulumi.Input[bool]] = None,
enable_vtpm: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the
boot integrity of the instance. The attestation is performed against the integrity policy baseline.
This baseline is initially derived from the implicitly trusted boot image when the instance is created.
Enabled by default.
:param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs
authentic software by verifying the digital signature of all boot components, and halting the boot process
if signature verification fails.
Disabled by default.
:param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled.
Enabled by default.
"""
if enable_integrity_monitoring is not None:
pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
if enable_secure_boot is not None:
pulumi.set(__self__, "enable_secure_boot", enable_secure_boot)
if enable_vtpm is not None:
pulumi.set(__self__, "enable_vtpm", enable_vtpm)
@property
@pulumi.getter(name="enableIntegrityMonitoring")
def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the
boot integrity of the instance. The attestation is performed against the integrity policy baseline.
This baseline is initially derived from the implicitly trusted boot image when the instance is created.
Enabled by default.
"""
return pulumi.get(self, "enable_integrity_monitoring")
@enable_integrity_monitoring.setter
def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_integrity_monitoring", value)
@property
@pulumi.getter(name="enableSecureBoot")
def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs
authentic software by verifying the digital signature of all boot components, and halting the boot process
if signature verification fails.
Disabled by default.
"""
return pulumi.get(self, "enable_secure_boot")
@enable_secure_boot.setter
def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_secure_boot", value)
@property
@pulumi.getter(name="enableVtpm")
def enable_vtpm(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has the vTPM enabled.
Enabled by default.
"""
return pulumi.get(self, "enable_vtpm")
@enable_vtpm.setter
def enable_vtpm(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_vtpm", value)
@pulumi.input_type
class InstanceVmImageArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
image_family: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
:param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
:param pulumi.Input[str] image_name: Use VM image name to find the image.
"""
pulumi.set(__self__, "project", project)
if image_family is not None:
pulumi.set(__self__, "image_family", image_family)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="imageFamily")
def image_family(self) -> Optional[pulumi.Input[str]]:
"""
Use this VM image family to find the image; the newest image in this family will be used.
"""
return pulumi.get(self, "image_family")
@image_family.setter
def image_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_family", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Use VM image name to find the image.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@pulumi.input_type
class RuntimeAccessConfigArgs:
def __init__(__self__, *,
access_type: Optional[pulumi.Input[str]] = None,
proxy_uri: Optional[pulumi.Input[str]] = None,
runtime_owner: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] access_type: The type of access mode for this instance. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#RuntimeAccessType`.
:param pulumi.Input[str] proxy_uri: -
The proxy endpoint that is used to access the runtime.
:param pulumi.Input[str] runtime_owner: The owner of this runtime after creation. Format: `alias@example.com`.
Currently supports one owner only.
"""
if access_type is not None:
pulumi.set(__self__, "access_type", access_type)
if proxy_uri is not None:
pulumi.set(__self__, "proxy_uri", proxy_uri)
if runtime_owner is not None:
pulumi.set(__self__, "runtime_owner", runtime_owner)
@property
@pulumi.getter(name="accessType")
def access_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of access mode for this instance. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#RuntimeAccessType`.
"""
return pulumi.get(self, "access_type")
@access_type.setter
def access_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_type", value)
@property
@pulumi.getter(name="proxyUri")
def proxy_uri(self) -> Optional[pulumi.Input[str]]:
"""
-
The proxy endpoint that is used to access the runtime.
"""
return pulumi.get(self, "proxy_uri")
@proxy_uri.setter
def proxy_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proxy_uri", value)
@property
@pulumi.getter(name="runtimeOwner")
def runtime_owner(self) -> Optional[pulumi.Input[str]]:
"""
The owner of this runtime after creation. Format: `alias@example.com`.
Currently supports one owner only.
"""
return pulumi.get(self, "runtime_owner")
@runtime_owner.setter
def runtime_owner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_owner", value)
@pulumi.input_type
class RuntimeIamBindingConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class RuntimeIamMemberConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class RuntimeMetricArgs:
def __init__(__self__, *,
system_metrics: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
if system_metrics is not None:
pulumi.set(__self__, "system_metrics", system_metrics)
@property
@pulumi.getter(name="systemMetrics")
def system_metrics(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "system_metrics")
@system_metrics.setter
def system_metrics(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "system_metrics", value)
@pulumi.input_type
class RuntimeSoftwareConfigArgs:
def __init__(__self__, *,
custom_gpu_driver_path: Optional[pulumi.Input[str]] = None,
enable_health_monitoring: Optional[pulumi.Input[bool]] = None,
idle_shutdown: Optional[pulumi.Input[bool]] = None,
idle_shutdown_timeout: Optional[pulumi.Input[int]] = None,
install_gpu_driver: Optional[pulumi.Input[bool]] = None,
notebook_upgrade_schedule: Optional[pulumi.Input[str]] = None,
post_startup_script: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] custom_gpu_driver_path: Specify a custom Cloud Storage path where the GPU driver is stored.
If not specified, we'll automatically choose from official GPU drivers.
:param pulumi.Input[bool] enable_health_monitoring: Verifies core internal services are running. Default: True.
:param pulumi.Input[bool] idle_shutdown: Runtime will automatically shut down after idle_shutdown_time.
Default: True
:param pulumi.Input[int] idle_shutdown_timeout: Time in minutes to wait before shutting down the runtime.
Default: 180 minutes
:param pulumi.Input[bool] install_gpu_driver: Install Nvidia Driver automatically.
:param pulumi.Input[str] notebook_upgrade_schedule: Cron expression in UTC timezone for schedule instance auto upgrade.
Please follow the [cron format](https://en.wikipedia.org/wiki/Cron).
:param pulumi.Input[str] post_startup_script: Path to a Bash script that automatically runs after a notebook instance
fully boots up. The path must be a URL or
Cloud Storage path (gs://path-to-file/file-name).
"""
if custom_gpu_driver_path is not None:
pulumi.set(__self__, "custom_gpu_driver_path", custom_gpu_driver_path)
if enable_health_monitoring is not None:
pulumi.set(__self__, "enable_health_monitoring", enable_health_monitoring)
if idle_shutdown is not None:
pulumi.set(__self__, "idle_shutdown", idle_shutdown)
if idle_shutdown_timeout is not None:
pulumi.set(__self__, "idle_shutdown_timeout", idle_shutdown_timeout)
if install_gpu_driver is not None:
pulumi.set(__self__, "install_gpu_driver", install_gpu_driver)
if notebook_upgrade_schedule is not None:
pulumi.set(__self__, "notebook_upgrade_schedule", notebook_upgrade_schedule)
if post_startup_script is not None:
pulumi.set(__self__, "post_startup_script", post_startup_script)
@property
@pulumi.getter(name="customGpuDriverPath")
def custom_gpu_driver_path(self) -> Optional[pulumi.Input[str]]:
"""
Specify a custom Cloud Storage path where the GPU driver is stored.
If not specified, we'll automatically choose from official GPU drivers.
"""
return pulumi.get(self, "custom_gpu_driver_path")
@custom_gpu_driver_path.setter
def custom_gpu_driver_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_gpu_driver_path", value)
@property
@pulumi.getter(name="enableHealthMonitoring")
def enable_health_monitoring(self) -> Optional[pulumi.Input[bool]]:
"""
Verifies core internal services are running. Default: True.
"""
return pulumi.get(self, "enable_health_monitoring")
@enable_health_monitoring.setter
def enable_health_monitoring(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_health_monitoring", value)
@property
@pulumi.getter(name="idleShutdown")
def idle_shutdown(self) -> Optional[pulumi.Input[bool]]:
"""
Runtime will automatically shut down after idle_shutdown_time.
Default: True
"""
return pulumi.get(self, "idle_shutdown")
@idle_shutdown.setter
def idle_shutdown(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "idle_shutdown", value)
@property
@pulumi.getter(name="idleShutdownTimeout")
def idle_shutdown_timeout(self) -> Optional[pulumi.Input[int]]:
"""
Time in minutes to wait before shutting down the runtime.
Default: 180 minutes
"""
return pulumi.get(self, "idle_shutdown_timeout")
@idle_shutdown_timeout.setter
def idle_shutdown_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_shutdown_timeout", value)
@property
@pulumi.getter(name="installGpuDriver")
def install_gpu_driver(self) -> Optional[pulumi.Input[bool]]:
"""
Install Nvidia Driver automatically.
"""
return pulumi.get(self, "install_gpu_driver")
@install_gpu_driver.setter
def install_gpu_driver(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "install_gpu_driver", value)
@property
@pulumi.getter(name="notebookUpgradeSchedule")
def notebook_upgrade_schedule(self) -> Optional[pulumi.Input[str]]:
"""
Cron expression in UTC timezone for schedule instance auto upgrade.
Please follow the [cron format](https://en.wikipedia.org/wiki/Cron).
"""
return pulumi.get(self, "notebook_upgrade_schedule")
@notebook_upgrade_schedule.setter
def notebook_upgrade_schedule(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_upgrade_schedule", value)
@property
@pulumi.getter(name="postStartupScript")
def post_startup_script(self) -> Optional[pulumi.Input[str]]:
"""
Path to a Bash script that automatically runs after a notebook instance
fully boots up. The path must be a URL or
Cloud Storage path (gs://path-to-file/file-name).
"""
return pulumi.get(self, "post_startup_script")
@post_startup_script.setter
def post_startup_script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "post_startup_script", value)
@pulumi.input_type
class RuntimeVirtualMachineArgs:
def __init__(__self__, *,
instance_id: Optional[pulumi.Input[str]] = None,
instance_name: Optional[pulumi.Input[str]] = None,
virtual_machine_config: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigArgs']] = None):
"""
:param pulumi.Input[str] instance_id: -
The unique identifier of the Managed Compute Engine instance.
:param pulumi.Input[str] instance_name: -
The user-friendly name of the Managed Compute Engine instance.
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigArgs'] virtual_machine_config: Virtual Machine configuration settings.
Structure is documented below.
"""
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if instance_name is not None:
pulumi.set(__self__, "instance_name", instance_name)
if virtual_machine_config is not None:
pulumi.set(__self__, "virtual_machine_config", virtual_machine_config)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
-
The unique identifier of the Managed Compute Engine instance.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="instanceName")
def instance_name(self) -> Optional[pulumi.Input[str]]:
"""
-
The user-friendly name of the Managed Compute Engine instance.
"""
return pulumi.get(self, "instance_name")
@instance_name.setter
def instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_name", value)
@property
@pulumi.getter(name="virtualMachineConfig")
def virtual_machine_config(self) -> Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigArgs']]:
"""
Virtual Machine configuration settings.
Structure is documented below.
"""
return pulumi.get(self, "virtual_machine_config")
@virtual_machine_config.setter
def virtual_machine_config(self, value: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigArgs']]):
pulumi.set(self, "virtual_machine_config", value)
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigArgs:
def __init__(__self__, *,
data_disk: pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs'],
machine_type: pulumi.Input[str],
accelerator_config: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs']] = None,
container_images: Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs']]]] = None,
encryption_config: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs']] = None,
guest_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
internal_ip_only: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
network: Optional[pulumi.Input[str]] = None,
nic_type: Optional[pulumi.Input[str]] = None,
shielded_instance_config: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs']] = None,
subnet: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs'] data_disk: Data disk option configuration settings.
Structure is documented below.
:param pulumi.Input[str] machine_type: The Compute Engine machine type used for runtimes.
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs'] accelerator_config: The Compute Engine accelerator configuration for this runtime.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs']]] container_images: Use a list of container images to start the notebook instance.
Structure is documented below.
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs'] encryption_config: Encryption settings for virtual machine data disk.
Structure is documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] guest_attributes: -
The Compute Engine guest attributes. (see [Project and instance
guest attributes](https://cloud.google.com/compute/docs/
storing-retrieving-metadata#guest_attributes)).
:param pulumi.Input[bool] internal_ip_only: If true, runtime will only have internal IP addresses. By default,
runtimes are not restricted to internal IP addresses, and will
have ephemeral external IP addresses assigned to each vm. This
`internal_ip_only` restriction can only be enabled for subnetwork
enabled networks, and all dependencies must be configured to be
accessible without external IP addresses.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. These can be later modified
by the disks.setLabels method. This field is only
applicable for persistent disks.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The Compute Engine metadata entries to add to virtual machine.
(see [Project and instance metadata](https://cloud.google.com
/compute/docs/storing-retrieving-metadata#project_and_instance
_metadata)).
:param pulumi.Input[str] network: The Compute Engine network to be used for machine communications.
Cannot be specified with subnetwork. If neither `network` nor
`subnet` is specified, the "default" network of the project is
used, if it exists. A full URL or partial URI. Examples:
* `https://www.googleapis.com/compute/v1/projects/[project_id]/
regions/global/default`
* `projects/[project_id]/regions/global/default`
Runtimes are managed resources inside Google Infrastructure.
Runtimes support the following network configurations:
* Google Managed Network (Network & subnet are empty)
* Consumer Project VPC (network & subnet are required). Requires
configuring Private Service Access.
* Shared VPC (network & subnet are required). Requires
configuring Private Service Access.
:param pulumi.Input[str] nic_type: The type of vNIC to be used on this interface. This may be gVNIC
or VirtioNet.
Possible values are `UNSPECIFIED_NIC_TYPE`, `VIRTIO_NET`, and `GVNIC`.
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded VM Instance configuration settings.
Structure is documented below.
:param pulumi.Input[str] subnet: The Compute Engine subnetwork to be used for machine
communications. Cannot be specified with network. A full URL or
partial URI are valid. Examples:
* `https://www.googleapis.com/compute/v1/projects/[project_id]/
regions/us-east1/subnetworks/sub0`
* `projects/[project_id]/regions/us-east1/subnetworks/sub0`
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The Compute Engine tags to add to runtime (see [Tagging instances]
(https://cloud.google.com/compute/docs/
label-or-tag-resources#tags)).
:param pulumi.Input[str] zone: -
The zone where the virtual machine is located.
"""
pulumi.set(__self__, "data_disk", data_disk)
pulumi.set(__self__, "machine_type", machine_type)
if accelerator_config is not None:
pulumi.set(__self__, "accelerator_config", accelerator_config)
if container_images is not None:
pulumi.set(__self__, "container_images", container_images)
if encryption_config is not None:
pulumi.set(__self__, "encryption_config", encryption_config)
if guest_attributes is not None:
pulumi.set(__self__, "guest_attributes", guest_attributes)
if internal_ip_only is not None:
pulumi.set(__self__, "internal_ip_only", internal_ip_only)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if network is not None:
pulumi.set(__self__, "network", network)
if nic_type is not None:
pulumi.set(__self__, "nic_type", nic_type)
if shielded_instance_config is not None:
pulumi.set(__self__, "shielded_instance_config", shielded_instance_config)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="dataDisk")
def data_disk(self) -> pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs']:
"""
Data disk option configuration settings.
Structure is documented below.
"""
return pulumi.get(self, "data_disk")
@data_disk.setter
def data_disk(self, value: pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs']):
pulumi.set(self, "data_disk", value)
@property
@pulumi.getter(name="machineType")
def machine_type(self) -> pulumi.Input[str]:
"""
The Compute Engine machine type used for runtimes.
"""
return pulumi.get(self, "machine_type")
@machine_type.setter
def machine_type(self, value: pulumi.Input[str]):
pulumi.set(self, "machine_type", value)
@property
@pulumi.getter(name="acceleratorConfig")
def accelerator_config(self) -> Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs']]:
"""
The Compute Engine accelerator configuration for this runtime.
Structure is documented below.
"""
return pulumi.get(self, "accelerator_config")
@accelerator_config.setter
def accelerator_config(self, value: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs']]):
pulumi.set(self, "accelerator_config", value)
@property
@pulumi.getter(name="containerImages")
def container_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs']]]]:
"""
Use a list of container images to start the notebook instance.
Structure is documented below.
"""
return pulumi.get(self, "container_images")
@container_images.setter
def container_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs']]]]):
pulumi.set(self, "container_images", value)
@property
@pulumi.getter(name="encryptionConfig")
def encryption_config(self) -> Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs']]:
"""
Encryption settings for virtual machine data disk.
Structure is documented below.
"""
return pulumi.get(self, "encryption_config")
@encryption_config.setter
def encryption_config(self, value: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs']]):
pulumi.set(self, "encryption_config", value)
@property
@pulumi.getter(name="guestAttributes")
def guest_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
-
The Compute Engine guest attributes. (see [Project and instance
guest attributes](https://cloud.google.com/compute/docs/
storing-retrieving-metadata#guest_attributes)).
"""
return pulumi.get(self, "guest_attributes")
@guest_attributes.setter
def guest_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "guest_attributes", value)
@property
@pulumi.getter(name="internalIpOnly")
def internal_ip_only(self) -> Optional[pulumi.Input[bool]]:
"""
If true, runtime will only have internal IP addresses. By default,
runtimes are not restricted to internal IP addresses, and will
have ephemeral external IP addresses assigned to each vm. This
`internal_ip_only` restriction can only be enabled for subnetwork
enabled networks, and all dependencies must be configured to be
accessible without external IP addresses.
"""
return pulumi.get(self, "internal_ip_only")
@internal_ip_only.setter
def internal_ip_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "internal_ip_only", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Labels to apply to this disk. These can be later modified
by the disks.setLabels method. This field is only
applicable for persistent disks.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The Compute Engine metadata entries to add to virtual machine.
(see [Project and instance metadata](https://cloud.google.com
/compute/docs/storing-retrieving-metadata#project_and_instance
_metadata)).
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
The Compute Engine network to be used for machine communications.
Cannot be specified with subnetwork. If neither `network` nor
`subnet` is specified, the "default" network of the project is
used, if it exists. A full URL or partial URI. Examples:
* `https://www.googleapis.com/compute/v1/projects/[project_id]/
regions/global/default`
* `projects/[project_id]/regions/global/default`
Runtimes are managed resources inside Google Infrastructure.
Runtimes support the following network configurations:
* Google Managed Network (Network & subnet are empty)
* Consumer Project VPC (network & subnet are required). Requires
configuring Private Service Access.
* Shared VPC (network & subnet are required). Requires
configuring Private Service Access.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="nicType")
def nic_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of vNIC to be used on this interface. This may be gVNIC
or VirtioNet.
Possible values are `UNSPECIFIED_NIC_TYPE`, `VIRTIO_NET`, and `GVNIC`.
"""
return pulumi.get(self, "nic_type")
@nic_type.setter
def nic_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nic_type", value)
@property
@pulumi.getter(name="shieldedInstanceConfig")
def shielded_instance_config(self) -> Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs']]:
"""
Shielded VM Instance configuration settings.
Structure is documented below.
"""
return pulumi.get(self, "shielded_instance_config")
@shielded_instance_config.setter
def shielded_instance_config(self, value: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs']]):
pulumi.set(self, "shielded_instance_config", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input[str]]:
"""
The Compute Engine subnetwork to be used for machine
communications. Cannot be specified with network. A full URL or
partial URI is valid. Examples:
* `https://www.googleapis.com/compute/v1/projects/[project_id]/
regions/us-east1/subnetworks/sub0`
* `projects/[project_id]/regions/us-east1/subnetworks/sub0`
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The Compute Engine tags to add to runtime (see [Tagging instances]
(https://cloud.google.com/compute/docs/
label-or-tag-resources#tags)).
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
-
The zone where the virtual machine is located.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
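# Illustrative sketch (not part of the generated provider code): only `data_disk`
# and `machine_type` are required above; every other field is optional. The
# machine type and disk size below are example values, not defaults.
#
#   vm_config = RuntimeVirtualMachineVirtualMachineConfigArgs(
#       machine_type="n1-standard-4",
#       data_disk=RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(
#           initialize_params=RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs(
#               disk_size_gb=100,
#           ),
#       ),
#   )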
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigArgs:
def __init__(__self__, *,
core_count: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] core_count: Count of cores of this accelerator.
:param pulumi.Input[str] type: Accelerator model. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#AcceleratorType`
"""
if core_count is not None:
pulumi.set(__self__, "core_count", core_count)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="coreCount")
def core_count(self) -> Optional[pulumi.Input[int]]:
"""
Count of cores of this accelerator.
"""
return pulumi.get(self, "core_count")
@core_count.setter
def core_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "core_count", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Accelerator model. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#AcceleratorType`
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
tag: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
"""
pulumi.set(__self__, "repository", repository)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag of the container image. If not specified, this defaults to the latest tag.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
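# Illustrative sketch (not part of the generated provider code): `repository` is
# required and `tag` falls back to "latest" when omitted. The repository path is
# a made-up example of the gcr.io/{project_id}/{imageName} form described above.
#
#   custom_image = RuntimeVirtualMachineVirtualMachineConfigContainerImageArgs(
#       repository="gcr.io/my-project/my-notebook-image",
#       tag="v1",
#   )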
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs:
def __init__(__self__, *,
auto_delete: Optional[pulumi.Input[bool]] = None,
boot: Optional[pulumi.Input[bool]] = None,
device_name: Optional[pulumi.Input[str]] = None,
guest_os_features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
index: Optional[pulumi.Input[int]] = None,
initialize_params: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs']] = None,
interface: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
licenses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mode: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[bool] auto_delete: -
Optional. Specifies whether the disk will be auto-deleted
when the instance is deleted (but not when the disk is
detached from the instance).
:param pulumi.Input[bool] boot: -
Optional. Indicates that this is a boot disk. The virtual
machine will use the first partition of the disk for its
root filesystem.
:param pulumi.Input[str] device_name: -
Optional. Specifies a unique device name of your choice
that is reflected into the /dev/disk/by-id/google-* tree
of a Linux operating system running within the instance.
This name can be used to reference the device for mounting,
resizing, and so on, from within the instance.
If not specified, the server chooses a default device name
to apply to this disk, in the form persistent-disk-x, where
x is a number assigned by Google Compute Engine. This field
is only applicable for persistent disks.
:param pulumi.Input[Sequence[pulumi.Input[str]]] guest_os_features: -
Indicates a list of features to enable on the guest operating
system. Applicable only for bootable images. To see a list of
available features, read `https://cloud.google.com/compute/docs/
images/create-delete-deprecate-private-images#guest-os-features`
options.
:param pulumi.Input[int] index: -
Output only. A zero-based index to this disk, where 0 is
reserved for the boot disk. If you have many disks attached
to an instance, each disk would have a unique index number.
:param pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs'] initialize_params: Input only. Specifies the parameters for a new disk that will
be created alongside the new instance. Use initialization
parameters to create boot disks or local SSDs attached to the
new instance. This property is mutually exclusive with the
source property; you can only define one or the other, but not
both.
Structure is documented below.
:param pulumi.Input[str] interface: "Specifies the disk interface to use for attaching this disk,
which is either SCSI or NVME. The default is SCSI. Persistent
disks must always use SCSI and the request will fail if you attempt
to attach a persistent disk in any other format than SCSI. Local SSDs
can use either NVME or SCSI. For performance characteristics of SCSI
over NVMe, see Local SSD performance. Valid values: * NVME * SCSI".
:param pulumi.Input[str] kind: -
Type of the resource. Always compute#attachedDisk for attached
disks.
:param pulumi.Input[Sequence[pulumi.Input[str]]] licenses: -
Output only. Any valid publicly visible licenses.
:param pulumi.Input[str] mode: The mode in which to attach this disk, either READ_WRITE
or READ_ONLY. If not specified, the default is to attach
the disk in READ_WRITE mode.
:param pulumi.Input[str] source: Specifies a valid partial or full URL to an existing
Persistent Disk resource.
:param pulumi.Input[str] type: Accelerator model. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#AcceleratorType`
"""
if auto_delete is not None:
pulumi.set(__self__, "auto_delete", auto_delete)
if boot is not None:
pulumi.set(__self__, "boot", boot)
if device_name is not None:
pulumi.set(__self__, "device_name", device_name)
if guest_os_features is not None:
pulumi.set(__self__, "guest_os_features", guest_os_features)
if index is not None:
pulumi.set(__self__, "index", index)
if initialize_params is not None:
pulumi.set(__self__, "initialize_params", initialize_params)
if interface is not None:
pulumi.set(__self__, "interface", interface)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if licenses is not None:
pulumi.set(__self__, "licenses", licenses)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if source is not None:
pulumi.set(__self__, "source", source)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="autoDelete")
def auto_delete(self) -> Optional[pulumi.Input[bool]]:
"""
-
Optional. Specifies whether the disk will be auto-deleted
when the instance is deleted (but not when the disk is
detached from the instance).
"""
return pulumi.get(self, "auto_delete")
@auto_delete.setter
def auto_delete(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_delete", value)
@property
@pulumi.getter
def boot(self) -> Optional[pulumi.Input[bool]]:
"""
-
Optional. Indicates that this is a boot disk. The virtual
machine will use the first partition of the disk for its
root filesystem.
"""
return pulumi.get(self, "boot")
@boot.setter
def boot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "boot", value)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> Optional[pulumi.Input[str]]:
"""
-
Optional. Specifies a unique device name of your choice
that is reflected into the /dev/disk/by-id/google-* tree
of a Linux operating system running within the instance.
This name can be used to reference the device for mounting,
resizing, and so on, from within the instance.
If not specified, the server chooses a default device name
to apply to this disk, in the form persistent-disk-x, where
x is a number assigned by Google Compute Engine. This field
is only applicable for persistent disks.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="guestOsFeatures")
def guest_os_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
-
Indicates a list of features to enable on the guest operating
system. Applicable only for bootable images. To see a list of
available features, read `https://cloud.google.com/compute/docs/
images/create-delete-deprecate-private-images#guest-os-features`
options.
"""
return pulumi.get(self, "guest_os_features")
@guest_os_features.setter
def guest_os_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "guest_os_features", value)
@property
@pulumi.getter
def index(self) -> Optional[pulumi.Input[int]]:
"""
-
Output only. A zero-based index to this disk, where 0 is
reserved for the boot disk. If you have many disks attached
to an instance, each disk would have a unique index number.
"""
return pulumi.get(self, "index")
@index.setter
def index(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "index", value)
@property
@pulumi.getter(name="initializeParams")
def initialize_params(self) -> Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs']]:
"""
Input only. Specifies the parameters for a new disk that will
be created alongside the new instance. Use initialization
parameters to create boot disks or local SSDs attached to the
new instance. This property is mutually exclusive with the
source property; you can only define one or the other, but not
both.
Structure is documented below.
"""
return pulumi.get(self, "initialize_params")
@initialize_params.setter
def initialize_params(self, value: Optional[pulumi.Input['RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs']]):
pulumi.set(self, "initialize_params", value)
@property
@pulumi.getter
def interface(self) -> Optional[pulumi.Input[str]]:
"""
"Specifies the disk interface to use for attaching this disk,
which is either SCSI or NVME. The default is SCSI. Persistent
disks must always use SCSI and the request will fail if you attempt
to attach a persistent disk in any other format than SCSI. Local SSDs
can use either NVME or SCSI. For performance characteristics of SCSI
over NVMe, see Local SSD performance. Valid values: * NVME * SCSI".
"""
return pulumi.get(self, "interface")
@interface.setter
def interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interface", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
-
Type of the resource. Always compute#attachedDisk for attached
disks.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def licenses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
-
Output only. Any valid publicly visible licenses.
"""
return pulumi.get(self, "licenses")
@licenses.setter
def licenses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "licenses", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
The mode in which to attach this disk, either READ_WRITE
or READ_ONLY. If not specified, the default is to attach
the disk in READ_WRITE mode.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a valid partial or full URL to an existing
Persistent Disk resource.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Accelerator model. For valid values, see
`https://cloud.google.com/vertex-ai/docs/workbench/reference/
rest/v1/projects.locations.runtimes#AcceleratorType`
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
disk_name: Optional[pulumi.Input[str]] = None,
disk_size_gb: Optional[pulumi.Input[int]] = None,
disk_type: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] description: Provide this property when creating the disk.
:param pulumi.Input[str] disk_name: Specifies the disk name. If not specified, the default is
to use the name of the instance. If the disk with the
instance name exists already in the given zone/region, a
new name will be automatically generated.
:param pulumi.Input[int] disk_size_gb: Specifies the size of the disk in base-2 GB. If not
specified, the disk will be the same size as the image
(usually 10GB). If specified, the size must be equal to
or larger than 10GB. Default 100 GB.
:param pulumi.Input[str] disk_type: The type of the boot disk attached to this runtime,
defaults to standard persistent disk. For valid values,
see `https://cloud.google.com/vertex-ai/docs/workbench/
reference/rest/v1/projects.locations.runtimes#disktype`
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. These can be later modified
by the disks.setLabels method. This field is only
applicable for persistent disks.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if disk_name is not None:
pulumi.set(__self__, "disk_name", disk_name)
if disk_size_gb is not None:
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if disk_type is not None:
pulumi.set(__self__, "disk_type", disk_type)
if labels is not None:
pulumi.set(__self__, "labels", labels)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Provide this property when creating the disk.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="diskName")
def disk_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the disk name. If not specified, the default is
to use the name of the instance. If the disk with the
instance name exists already in the given zone/region, a
new name will be automatically generated.
"""
return pulumi.get(self, "disk_name")
@disk_name.setter
def disk_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_name", value)
@property
@pulumi.getter(name="diskSizeGb")
def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the size of the disk in base-2 GB. If not
specified, the disk will be the same size as the image
(usually 10GB). If specified, the size must be equal to
or larger than 10GB. Default 100 GB.
"""
return pulumi.get(self, "disk_size_gb")
@disk_size_gb.setter
def disk_size_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size_gb", value)
@property
@pulumi.getter(name="diskType")
def disk_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the boot disk attached to this runtime,
defaults to standard persistent disk. For valid values,
see `https://cloud.google.com/vertex-ai/docs/workbench/
reference/rest/v1/projects.locations.runtimes#disktype`
"""
return pulumi.get(self, "disk_type")
@disk_type.setter
def disk_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "disk_type", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Labels to apply to this disk. These can be later modified
by the disks.setLabels method. This field is only
applicable for persistent disks.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs:
def __init__(__self__, *,
kms_key: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] kms_key: The Cloud KMS resource identifier of the customer-managed
encryption key used to protect a resource, such as a disk.
It has the following format:
`projects/{PROJECT_ID}/locations/{REGION}/keyRings/
{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
"""
if kms_key is not None:
pulumi.set(__self__, "kms_key", kms_key)
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> Optional[pulumi.Input[str]]:
"""
The Cloud KMS resource identifier of the customer-managed
encryption key used to protect a resource, such as a disk.
It has the following format:
`projects/{PROJECT_ID}/locations/{REGION}/keyRings/
{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
"""
return pulumi.get(self, "kms_key")
@kms_key.setter
def kms_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key", value)
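# Illustrative sketch (not part of the generated provider code): the key below is
# a placeholder that only follows the documented resource-identifier format.
#
#   encryption = RuntimeVirtualMachineVirtualMachineConfigEncryptionConfigArgs(
#       kms_key="projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
#   )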
@pulumi.input_type
class RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs:
def __init__(__self__, *,
enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
enable_secure_boot: Optional[pulumi.Input[bool]] = None,
enable_vtpm: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled.
Enables monitoring and attestation of the boot integrity of
the instance. The attestation is performed against the
integrity policy baseline. This baseline is initially derived
from the implicitly trusted boot image when the instance is
created. Enabled by default.
:param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Secure
Boot helps ensure that the system only runs authentic software
by verifying the digital signature of all boot components, and
halting the boot process if signature verification fails.
Disabled by default.
:param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. Enabled by
default.
"""
if enable_integrity_monitoring is not None:
pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
if enable_secure_boot is not None:
pulumi.set(__self__, "enable_secure_boot", enable_secure_boot)
if enable_vtpm is not None:
pulumi.set(__self__, "enable_vtpm", enable_vtpm)
@property
@pulumi.getter(name="enableIntegrityMonitoring")
def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has integrity monitoring enabled.
Enables monitoring and attestation of the boot integrity of
the instance. The attestation is performed against the
integrity policy baseline. This baseline is initially derived
from the implicitly trusted boot image when the instance is
created. Enabled by default.
"""
return pulumi.get(self, "enable_integrity_monitoring")
@enable_integrity_monitoring.setter
def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_integrity_monitoring", value)
@property
@pulumi.getter(name="enableSecureBoot")
def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has Secure Boot enabled. Secure
Boot helps ensure that the system only runs authentic software
by verifying the digital signature of all boot components, and
halting the boot process if signature verification fails.
Disabled by default.
"""
return pulumi.get(self, "enable_secure_boot")
@enable_secure_boot.setter
def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_secure_boot", value)
@property
@pulumi.getter(name="enableVtpm")
def enable_vtpm(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has the vTPM enabled. Enabled by
default.
"""
return pulumi.get(self, "enable_vtpm")
@enable_vtpm.setter
def enable_vtpm(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_vtpm", value)
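# Illustrative sketch (not part of the generated provider code): a shielded-VM
# block wired into the virtual machine config defined earlier in this module.
# How the resulting config is attached to a gcp.notebooks.Runtime resource goes
# through wrapper args classes defined elsewhere in this file and is not shown.
#
#   shielded = RuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigArgs(
#       enable_secure_boot=True,
#       enable_vtpm=True,
#       enable_integrity_monitoring=True,
#   )
#   vm_config = RuntimeVirtualMachineVirtualMachineConfigArgs(
#       machine_type="n1-standard-4",
#       data_disk=RuntimeVirtualMachineVirtualMachineConfigDataDiskArgs(),
#       shielded_instance_config=shielded,
#   )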
| 42.146991
| 282
| 0.656048
|
1d2abb331ee3c23c25e5730632aed1cbfa38cae8
| 2,797
|
py
|
Python
|
agents/utils/mcts.py
|
huiwy/UNO-Agents
|
e6ede8e66309beb3eae7848cdfed9dc0b6f89d09
|
[
"MIT"
] | 1
|
2021-11-17T05:33:22.000Z
|
2021-11-17T05:33:22.000Z
|
agents/utils/mcts.py
|
huiwy/UNO-Agents
|
e6ede8e66309beb3eae7848cdfed9dc0b6f89d09
|
[
"MIT"
] | null | null | null |
agents/utils/mcts.py
|
huiwy/UNO-Agents
|
e6ede8e66309beb3eae7848cdfed9dc0b6f89d09
|
[
"MIT"
] | null | null | null |
from random import shuffle  # used in MCTS.search to re-randomise the deck before each rollout
from utils.constants import TUPLEACT2INT, INTACT2TUPLE
from game import *
from agents.greedyAgent import GreedyAgent
from copy import deepcopy
import numpy as np
import math
c = 5
class MCTSNode:
def __init__(self, a, parent, id):
self.action = a
self.visited_times = 0
self.value = 0
self.parent = parent
self.id = id
self.children = []
if a == 60:
self.value = -10
self.visited_times = 5
def UCBscore(self, t):
if self.visited_times == 0:
return float("inf")
return self.value + c*(math.log(t) / self.visited_times)**0.5
def select(self, game, t):
valid_actions = game.get_valid_actions()
valid_actions = set((TUPLEACT2INT[(a, i)] for i in range(4) for a in valid_actions))
valid_children = [self.children[a] for a in valid_actions]
selected = max(valid_children, key=lambda x: x.UCBscore(t))
reward = till_next_turn(game, INTACT2TUPLE[selected.action], self.id)
if reward == 1:
self.backprop(1)
elif reward == -1:
self.backprop(-1)
if len(selected.children) == 0:
selected.expand(game)
else:
selected.select(game, t)
def expand(self, game):
if len(self.children) == 0:
for i in range(61):
self.children.append(MCTSNode(i, self, self.id))
game.agents[self.id] = GreedyAgent()
result = (play(game) == self.id)
result = result*2-1
self.backprop(result)
def backprop(self, result):
self.visited_times += 1
self.update(result)
if self.parent == None:
return
self.parent.backprop(result)
def update(self, value):
self.value = self.value + (value - self.value) / (self.visited_times)
class MCTS:
def __init__(self, game, id):
self.game = deepcopy(game)
self.root = MCTSNode(None, None, id)  # the root node has no action of its own
self.root.expand(deepcopy(game))
valid_actions = game.get_valid_actions()
valid_actions = set((TUPLEACT2INT[(a, i)] for i in range(4) for a in valid_actions))
valid_children = [self.root.children[a] for a in valid_actions]
for child in self.root.children:
if not child in valid_children:
child.value = float("-inf")
def search(self, iterations):
for t in range(1, iterations+1):
shuffle(self.game.deck)
self.root.select(deepcopy(self.game), t)
return np.array([s.value for s in self.root.children])
def till_next_turn(game, action, id):
if not game.apply_action(action):
return 0
if game.current_win():
return 1
game.next_player()
game.penalize()
while game.current_player != id:
while True:
action = game.get_action()
if game.apply_action(action):
break
if game.current_win():
return -1
game.next_player()
game.penalize()
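# Usage sketch (hypothetical `game` and `player_id`; the game/play helpers come
# from the wildcard `game` import above): run a fixed number of simulations and
# map the best-scoring integer action back to its (action, colour) tuple.
#
#   tree = MCTS(game, player_id)
#   values = tree.search(iterations=1000)
#   best_action = INTACT2TUPLE[int(np.argmax(values))]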
| 25.898148
| 88
| 0.6532
|
fefebf5ede7d4bed157bd17b2ec69d683ed0e41c
| 362
|
py
|
Python
|
maker/users/apps.py
|
H7777777/maker
|
2c84d7bf13eb330ab62ad281bc5228fe0c56e0f6
|
[
"MIT"
] | null | null | null |
maker/users/apps.py
|
H7777777/maker
|
2c84d7bf13eb330ab62ad281bc5228fe0c56e0f6
|
[
"MIT"
] | null | null | null |
maker/users/apps.py
|
H7777777/maker
|
2c84d7bf13eb330ab62ad281bc5228fe0c56e0f6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = "maker.users"
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
try:
import users.signals # noqa F401
except ImportError:
pass
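# Note: with the app registered as "maker.users", its signals module would be
# importable as maker.users.signals; the bare "users.signals" import above only
# resolves if a top-level "users" package is also on the import path.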
| 21.294118
| 45
| 0.569061
|
7f65014d070112fb39f9c1d529cc3f117dc7be7c
| 4,691
|
py
|
Python
|
examples/radius_of_gyration.py
|
ASVG/NeuroM
|
77a4e1a4d33acc09f0a25d7c42d3f5f01807ba6c
|
[
"BSD-3-Clause"
] | 1
|
2016-10-25T09:23:16.000Z
|
2016-10-25T09:23:16.000Z
|
examples/radius_of_gyration.py
|
ASVG/NeuroM
|
77a4e1a4d33acc09f0a25d7c42d3f5f01807ba6c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-16T20:53:48.000Z
|
2021-02-16T20:53:48.000Z
|
examples/radius_of_gyration.py
|
ASVG/NeuroM
|
77a4e1a4d33acc09f0a25d7c42d3f5f01807ba6c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-16T20:50:02.000Z
|
2021-02-16T20:50:02.000Z
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Calculate radius of gyration of neurites."""
import neurom as nm
from neurom import morphmath as mm
from neurom.core.dataformat import COLS
import numpy as np
def segment_centre_of_mass(seg):
"""Calculate and return centre of mass of a segment.
Calculated as the centre of mass of a conical frustum."""
h = mm.segment_length(seg)
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1
denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1)
centre_of_mass_z_loc = num / denom
return seg[0][COLS.XYZ] + (centre_of_mass_z_loc / h) * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
def neurite_centre_of_mass(neurite):
"""Calculate and return centre of mass of a neurite."""
centre_of_mass = np.zeros(3)
total_volume = 0
# list(...) keeps this working under Python 3, where map() returns an iterator
seg_vol = np.array(list(map(mm.segment_volume, nm.iter_segments(neurite))))
seg_centre_of_mass = np.array(list(map(segment_centre_of_mass, nm.iter_segments(neurite))))
# multiply array of scalars with array of arrays
# http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors
seg_centre_of_mass = seg_centre_of_mass * seg_vol[:, np.newaxis]
centre_of_mass = np.sum(seg_centre_of_mass, axis=0)
total_volume = np.sum(seg_vol)
return centre_of_mass / total_volume
def distance_sqr(point, seg):
"""Calculate and return square Euclidian distance from given point to
centre of mass of given segment."""
centre_of_mass = segment_centre_of_mass(seg)
return sum(pow(np.subtract(point, centre_of_mass), 2))
def radius_of_gyration(neurite):
"""Calculate and return radius of gyration of a given neurite."""
centre_mass = neurite_centre_of_mass(neurite)
sum_sqr_distance = 0
N = 0
dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)]
sum_sqr_distance = np.sum(dist_sqr)
N = len(dist_sqr)
return np.sqrt(sum_sqr_distance / N)
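# In symbols: R_g = sqrt((1/N) * sum_i ||c_i - c_com||^2), where the c_i are the
# per-segment centres of mass and c_com is the volume-weighted centre of mass of
# the whole neurite computed above.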
def mean_rad_of_gyration(neurites):
"""Calculate mean radius of gyration for set of neurites."""
return np.mean([radius_of_gyration(n) for n in neurites])
if __name__ == '__main__':
# load a neuron from an SWC file
filename = 'test_data/swc/Neuron.swc'
nrn = nm.load_neuron(filename)
# for every neurite, print (number of segments, radius of gyration, neurite type)
print([(sum(len(s.points) - 1 for s in nrte.iter_sections()),
radius_of_gyration(nrte), nrte.type) for nrte in nrn.neurites])
# print mean radius of gyration per neurite type
print('Mean radius of gyration for axons: ',
mean_rad_of_gyration(n for n in nrn.neurites if n.type == nm.AXON))
print('Mean radius of gyration for basal dendrites: ',
mean_rad_of_gyration(n for n in nrn.neurites if n.type == nm.BASAL_DENDRITE))
print('Mean radius of gyration for apical dendrites: ',
mean_rad_of_gyration(n for n in nrn.neurites
if n.type == nm.APICAL_DENDRITE))
| 43.841121
| 100
| 0.721595
|
8f84e6bdfde7acb2e82c0a9ac7f5f39f997dc112
| 5,566
|
py
|
Python
|
opentelekom/tests/unit/vpc/v1/test_vpc.py
|
tsdicloud/python-opentelekom-sdk
|
809f3796dba48ad0535990caf7519bb9afa71d2d
|
[
"Apache-2.0"
] | null | null | null |
opentelekom/tests/unit/vpc/v1/test_vpc.py
|
tsdicloud/python-opentelekom-sdk
|
809f3796dba48ad0535990caf7519bb9afa71d2d
|
[
"Apache-2.0"
] | null | null | null |
opentelekom/tests/unit/vpc/v1/test_vpc.py
|
tsdicloud/python-opentelekom-sdk
|
809f3796dba48ad0535990caf7519bb9afa71d2d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import requests
from unittest import mock
from openstack import exceptions
from opentelekom.vpc.vpc_service import VpcService
from opentelekom.tests.functional import base
from opentelekom.tests.unit.vpc.v1 import fixture_vpc
from opentelekom.tests.unit.otc_mockservice import OtcMockService, OtcMockResponse
class TestVpc(base.BaseFunctionalTest):
def setUp(self):
super().setUp()
self.prefix = "rbe-sdkunit-vpc"
self.vpcFixture = self.useFixture(fixture_vpc.VpcFixture(self.user_cloud))
self.vpcFixture.createTestVpc(self.prefix)
class MockVpcList(OtcMockService):
responses = [
OtcMockResponse(method="GET",
url_match="vpc",
path="/v1/0391e4486e864c26be5654c522f440f2/vpcs",
status_code=200,
json={"vpcs":[{"id":"7f4d8a07-df6c-4c86-919f-4fa201463d65","name":"rbe-sdkunit-vpc-vpc","cidr":"10.248.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"8865cc93-36d5-410e-9865-57333f370e53","name":"vpc-poc-admin","cidr":"10.19.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"aa188bb4-465e-4b35-9d12-72d8ecfe7d1c","name":"vpc-poc-admin5","cidr":"10.13.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"dd586f4d-2490-450f-9afe-77f19e44c490","name":"rbe-vpc-profidata-test","cidr":"172.16.0.0/12","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"}]}
)
]
@mock.patch.object(requests.Session, "request", side_effect=MockVpcList().request)
def test_list_vpcs(self, mock):
vpcs = list(self.user_cloud.vpc.vpcs())
self.assertGreater(len(vpcs), 0)
vpcfound = list(filter(lambda x: x['name'] == self.prefix + "-vpc", vpcs ))
self.assertEqual(len(vpcfound), 1)
class MockVpcFind(OtcMockService):
responses = [
# detect name or id type by trying value as id first
OtcMockResponse(method="GET",
url_match="vpc",
path="/v1/0391e4486e864c26be5654c522f440f2/vpcs/rbe-sdkunit-vpc-vpc",
status_code=400,
json={"code":"VPC.0101","message":"getVpc error vpcId is invalid."}),
# name handling is done by search in the list query
OtcMockResponse(method="GET",
url_match="vpc",
path="/v1/0391e4486e864c26be5654c522f440f2/vpcs",
status_code=200,
json={"vpcs":[{"id":"7f4d8a07-df6c-4c86-919f-4fa201463d65","name":"rbe-sdkunit-vpc-vpc","cidr":"10.248.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"8865cc93-36d5-410e-9865-57333f370e53","name":"vpc-poc-admin","cidr":"10.19.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"aa188bb4-465e-4b35-9d12-72d8ecfe7d1c","name":"vpc-poc-admin5","cidr":"10.13.0.0/16","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"},
{"id":"dd586f4d-2490-450f-9afe-77f19e44c490","name":"rbe-vpc-profidata-test","cidr":"172.16.0.0/12","status":"OK","routes":[],"enable_shared_snat":False,"enterprise_project_id":"0"}]}
)
]
@mock.patch.object(requests.Session, "request", side_effect=MockVpcFind().request)
def test_find_by_name(self, mock):
#self.MockVpcFind.assertServicesCalled()
vpcfound2 = self.user_cloud.vpc.find_vpc(self.prefix + "-vpc")
self.assertFalse(vpcfound2 is None)
self.assertEqual(vpcfound2.id, self.vpcFixture.vpc.id)
class MockVpcUpdate(OtcMockService):
responses = [
# update
OtcMockResponse(method="PUT",
url_match="vpc",
path="/v1/0391e4486e864c26be5654c522f440f2/vpcs/7f4d8a07-df6c-4c86-919f-4fa201463d65",
status_code=200,
json={"vpc":{"id":"7f4d8a07-df6c-4c86-919f-4fa201463d65","name":"rbe-sdktest-vpc-vpc","cidr":"10.248.0.0/16","status":"OK","routes":[],"enable_shared_snat": True,"enterprise_project_id":"0"}}),
]
@mock.patch.object(requests.Session, "request", side_effect=MockVpcUpdate().request)
def test_update(self, mock):
vpc = self.user_cloud.vpc.update_vpc(self.vpcFixture.vpc.id, enable_shared_snat=True)
self.assertTrue(vpc)
self.assertEqual(vpc.id, self.vpcFixture.vpc.id)
self.assertEqual(vpc.enable_shared_snat, True)
def tearDown(self):
super().tearDown()
| 52.018692
| 221
| 0.617499
|
8ba8b37864064186ea0c618f41e3fa912ef2c8f8
| 9,948
|
py
|
Python
|
docs/conf.py
|
dlshriver/intercepts
|
e17768129ac8a8781c2db0a12756ba308ed655aa
|
[
"MIT"
] | 12
|
2018-04-22T01:54:47.000Z
|
2022-01-22T04:52:29.000Z
|
docs/conf.py
|
dlshriver/intercepts
|
e17768129ac8a8781c2db0a12756ba308ed655aa
|
[
"MIT"
] | 3
|
2019-10-28T08:09:08.000Z
|
2020-03-10T10:07:20.000Z
|
docs/conf.py
|
dlshriver/intercepts
|
e17768129ac8a8781c2db0a12756ba308ed655aa
|
[
"MIT"
] | 1
|
2020-01-06T04:24:00.000Z
|
2020-01-06T04:24:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# intercepts documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 15 21:18:44 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath(".."))
import intercepts
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"fmttypehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "intercepts"
copyright = "2019, David Shriver"
author = "David Shriver"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = intercepts.__version__
# The full version, including alpha/beta/rc tags.
release = intercepts.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Intercept any call in Python",
"github_user": "dlshriver",
"github_repo": "intercepts",
"github_button": True,
"travis_button": True,
"fixed_sidebar": True
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "interceptsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"intercepts.tex",
"intercepts Documentation",
"David Shriver",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "intercepts", "intercepts Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"intercepts",
"intercepts Documentation",
author,
"intercepts",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
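# Newer Sphinx releases prefer the named form of the mapping, e.g.:
#
#   intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}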
| 31.987138
| 81
| 0.70577
|
da0c23f4a9e992b0e3db644f087f15679a2ab37a
| 3,589
|
py
|
Python
|
extras/pipPublish.py
|
fire17/xo-gd
|
30259e6502d91b9462931ee4a2bd019ba15d5b73
|
[
"BSD-3-Clause"
] | 1
|
2021-12-11T20:40:49.000Z
|
2021-12-11T20:40:49.000Z
|
extras/pipPublish.py
|
fire17/xo-gd
|
30259e6502d91b9462931ee4a2bd019ba15d5b73
|
[
"BSD-3-Clause"
] | null | null | null |
extras/pipPublish.py
|
fire17/xo-gd
|
30259e6502d91b9462931ee4a2bd019ba15d5b73
|
[
"BSD-3-Clause"
] | null | null | null |
# Clean Structure for Pip Publish
'''
0 : Preparations:
a. open an account over on PyPi: https://pypi.org/account/register/
b. open an account over on TestPyPi: https://test.pypi.org/account/register/
c. open a repo
d. have correct ~.pypirc file
'''
'''
### XObject + GlobalData - Version 3.2.8 - Starlight✨
# _xObject + Global Data_ <br>Use Python Like You Never Have Before! <br>
### _Easy Access To:_
## 🔔 Events and Triggers <br> 💛 Realtime MultiProcessing <br> 🏃 Instant Dynamic DB <br> 📁 Filesystem & Web Watchdog <br> 🌐 Sockets, API Server <br> ⚡ Supports Fast Prototyping To Large Scale Systems <br><br>
'''
# COPY THIS:
'''
#####################################################################
''' '''
[distutils] # this tells distutils what package indexes you can push to
index-servers = pypitest
[pypi]
repository: https://upload.pypi.org/legacy/
username: xxx
password: xxx
[pypitest]
repository: https://test.pypi.org/legacy/
username: xxx
password: xxx
''' '''
############################################################
''' '''
######################################################
######################################################
######################################################
##################### Update package #################
######################################################
######################################################
######################################################
######################################################
Commit
python3.7 setup.py sdist
Commit
git tag 3.2.8 -m 'Subscribe to file/folder, mqtt mpub clean, reqs updated'
git push
git push --tags
twine upload -r pypitest dist/* && twine upload dist/*
######################################################
######################################################
######################################################
######################################################
######################################################
######################################################
######################################################
######################################################
python setup.py sdist
twine upload -r pypitest dist/*
twine upload dist/*
######################################################
git add .
git commit -m 'first commit'
git push
git tag {{version}} -m 'adds the version you entered in cookiecutter as the first tag for release'
git push --tags origin master
######################################################
1.
cd to the folder that hosts the project
2.
pip install cookiecutter
cookiecutter https://github.com/wdm0006/cookiecutter-pipproject.git
3.
cd xo #or project_name
4. #git init - skip if merge with existing
git init
git add -A
git commit -m 'Use Python Like You Never Have Before, Easy Acces To: Events and Triggers, Realtime MultiProcessing, Instant Dynamic DB, Filesytem & Web Watchdog, Sockets, API Server, Supports Fast Prototyping To Large Scale Systems'
git remote add origin https://github.com:fire17/xo-gd.git
git push -u origin master
git tag 3.1.5.02 -m 'GlobalData and XObject - Version 3 - Starlight'
git push --tags origin master
5.
cd ~
touch .pypirc
nano .pypirc
6. copy from below
7. (add snippet from below)
8.
cd -
python setup.py register -r pypitest
python setup.py sdist upload -r pypitest
xo versions
1 - wish - channels, triggers, multiprocessing
2 - rubicon - expando, selfNamed
3 - starlight - generic, env aware, sockets, api
4 - sunrise - os assistant, keyboard, mouse, easy vis, ai-kite
5 - agi
'''
| 32.627273
| 232
| 0.492059
|
5285f9a3e46cff603768dcd0d7ea0f1740d1efce
| 2,276
|
py
|
Python
|
player.py
|
porlov/poker-player-always-all-in
|
3b02fa6af040cfa65de5ade6a1db3b14c133daf9
|
[
"MIT"
] | null | null | null |
player.py
|
porlov/poker-player-always-all-in
|
3b02fa6af040cfa65de5ade6a1db3b14c133daf9
|
[
"MIT"
] | 2
|
2018-02-04T06:56:08.000Z
|
2018-02-04T09:56:21.000Z
|
player.py
|
porlov/poker-player-always-all-in
|
3b02fa6af040cfa65de5ade6a1db3b14c133daf9
|
[
"MIT"
] | null | null | null |
import json
import requests
from card_converter import card_converter
from shouldBet import shouldBet
from strategy import Strategy
from post_flop import getPostFlopBet
from pre_flop import getPreFlopBet
class Player:
VERSION = "Version_22"
def get_player(self):
player_index = self.game_state['in_action']
return self.game_state['players'][player_index]
def get_post_flop_rank(self):
cards = self.game_state['community_cards'] + self.player['hole_cards']
print 'POST FLOP CARDS: ', cards
payload = json.dumps(cards)
response = requests.get('http://rainman.leanpoker.org/rank', params='cards=%s' % payload).json()
rank = response['rank']
print 'RANK: ', rank
return rank
def log_state(self):
print 'STATE: ', self.game_state
print 'STACK: ', self.player['stack']
def betRequest(self, game_state):
self.game_state = game_state
self.player = self.get_player()
self.log_state()
cardsOnFlop = len(game_state['community_cards'])
isPostFlop = cardsOnFlop > 0
if not isPostFlop:
strategy = Strategy(player=self.player, game_state=self.game_state)
bet = strategy.get_bet()
if bet > 0:
return bet
probability = strategy.player_hand.get_probability(2)
return getPreFlopBet(probability, self.game_state, self.player)
# post flop
if isPostFlop:
rank = self.get_post_flop_rank()
if rank >= 2:
return min(self.player['stack'], max(self.game_state['pot'], self.player['stack'] / 8))
if rank == 0 and cardsOnFlop >= 5:
return 0
return getPostFlopBet(game_state, self.player)
return 0
# bet = self.get_bet_v1()
# print 'BET: ', bet
def showdown(self, game_state):
pass
def get_bet_v1(self):
my_stack = self.player['stack']
my_cards = self.player['hole_cards']
converted_cards = card_converter(my_cards)
is_betting = shouldBet(converted_cards)
if not is_betting:
return 0
return my_stack
| 28.810127
| 104
| 0.605888
|
68f446522cff91a10b4ef65f35670c891fd47f0e
| 53,473
|
py
|
Python
|
ocs_ci/ocs/resources/pod.py
|
romayalon/ocs-ci
|
b40428cae0f0766ffb0c2441041744821562c8b5
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/resources/pod.py
|
romayalon/ocs-ci
|
b40428cae0f0766ffb0c2441041744821562c8b5
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/resources/pod.py
|
romayalon/ocs-ci
|
b40428cae0f0766ffb0c2441041744821562c8b5
|
[
"MIT"
] | null | null | null |
"""
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from ocs_ci.helpers import helpers
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed,
NonUpgradedImagesFoundError,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableResourceException,
)
from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.utils import run_cmd, check_timeout_reached, TimeoutSampler
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)
FIO_TIMEOUT = 600
TEXT_CONTENT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna "
"aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
"ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit "
"esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
"occaecat cupidatat non proident, sunt in culpa qui officia "
"deserunt mollit anim id est laborum."
)
TEST_FILE = "/var/lib/www/html/test"
FEDORA_TEST_FILE = "/mnt/test"
class Pod(OCS):
"""
Handles per pod related context
"""
def __init__(self, **kwargs):
"""
Initializer function
kwargs:
Copy of ocs/defaults.py::<some pod> dictionary
"""
self.pod_data = kwargs
super(Pod, self).__init__(**kwargs)
with tempfile.NamedTemporaryFile(
mode="w+", prefix="POD_", delete=False
) as temp_info:
self.temp_yaml = temp_info.name
self._name = self.pod_data.get("metadata").get("name")
self._labels = self.get_labels()
self._roles = []
self.ocp = OCP(
api_version=defaults.API_VERSION,
kind=constants.POD,
namespace=self.namespace,
)
self.fio_thread = None
# TODO: get backend config !!
self.wl_obj = None
self.wl_setup_done = False
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
@property
def roles(self):
return self._roles
@property
def labels(self):
return self._labels
@property
def restart_count(self):
return self.get().get("status").get("containerStatuses")[0].get("restartCount")
def __setattr__(self, key, val):
self.__dict__[key] = val
def add_role(self, role):
"""
Adds a new role for this pod
Args:
role (str): New role to be assigned for this pod
"""
self._roles.append(role)
def get_fio_results(self, timeout=FIO_TIMEOUT):
"""
Get FIO execution results
Returns:
dict: Dictionary represents the FIO execution results
Raises:
Exception: In case of exception from FIO
"""
logger.info(f"Waiting for FIO results from pod {self.name}")
try:
result = self.fio_thread.result(timeout)
if result:
return yaml.safe_load(result)
raise CommandFailed(f"FIO execution results: {result}.")
except CommandFailed as ex:
logger.exception(f"FIO failed: {ex}")
raise
except Exception as ex:
logger.exception(f"Found Exception: {ex}")
raise
def exec_cmd_on_pod(
self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
"""
Execute a command on a pod (e.g. oc rsh)
Args:
command (str): The command to execute on the given pod
out_yaml_format (bool): whether to return yaml loaded python
object OR to return raw output
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds
Returns:
Munch Obj: This object represents a returned yaml file
"""
rsh_cmd = f"rsh {self.name} "
rsh_cmd += command
return self.ocp.exec_oc_cmd(
rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
)
def exec_s3_cmd_on_pod(self, command, mcg_obj=None):
"""
Execute an S3 command on a pod
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
command (str): The command to execute on the given pod
Returns:
Munch Obj: This object represents a returned yaml file
"""
return self.exec_cmd_on_pod(
craft_s3_command(command, mcg_obj),
out_yaml_format=False,
secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
if mcg_obj
else None,
)
def exec_sh_cmd_on_pod(self, command, sh="bash"):
"""
        Execute a plain bash command on a pod via oc exec, where you can use
        bash syntax such as &&, ||, ;, for loops and so on.
Args:
command (str): The command to execute on the given pod
Returns:
str: stdout of the command
"""
cmd = f'exec {self.name} -- {sh} -c "{command}"'
return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False)
def get_labels(self):
"""
Get labels from pod
Raises:
NotFoundError: If resource not found
Returns:
dict: All the openshift labels on a given pod
"""
return self.pod_data.get("metadata").get("labels")
def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"):
"""
Execute a Ceph command on the Ceph tools pod
Args:
ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
format (str): The returning output format of the Ceph command
Returns:
dict: Ceph command output
Raises:
CommandFailed: In case the pod is not a toolbox pod
"""
if "rook-ceph-tools" not in self.labels.values():
raise CommandFailed("Ceph commands can be executed only on toolbox pod")
ceph_cmd = ceph_cmd
if format:
ceph_cmd += f" --format {format}"
out = self.exec_cmd_on_pod(ceph_cmd)
# For some commands, like "ceph fs ls", the returned output is a list
if isinstance(out, list):
return [item for item in out if item]
return out
def get_storage_path(self, storage_type="fs"):
"""
Get the pod volume mount path or device path
Returns:
str: The mount path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs
else device path of raw block pv
"""
# TODO: Allow returning a path of a specified volume of a specified
# container
if storage_type == "block":
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeDevices")[0]
.get("devicePath")
)
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("mountPath")
)
def workload_setup(self, storage_type, jobs=1):
"""
Do setup on pod for running FIO
Args:
storage_type (str): 'fs' or 'block'
jobs (int): Number of jobs to execute FIO
"""
work_load = "fio"
name = f"test_workload_{work_load}"
path = self.get_storage_path(storage_type)
# few io parameters for Fio
self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
self.wl_setup_done = True
def run_io(
self,
storage_type,
size,
io_direction="rw",
rw_ratio=75,
jobs=1,
runtime=60,
depth=4,
rate="1m",
rate_process="poisson",
fio_filename=None,
bs="4K",
end_fsync=0,
):
"""
Execute FIO on a pod
        This operation will run in the background and will store the results in
        'self.fio_thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.fio_thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
storage_type (str): 'fs' or 'block'
size (str): Size in MB, e.g. '200M'
io_direction (str): Determines the operation:
'ro', 'wo', 'rw' (default: 'rw')
            rw_ratio (int): Determines the reads and writes using a
                <rw_ratio>%/100-<rw_ratio>% split
                (e.g. the default of 75 means 75%/25%, which is equivalent
                to 3 reads for every 1 write)
jobs (int): Number of jobs to execute FIO
runtime (int): Number of seconds IO should run for
depth (int): IO depth
rate (str): rate of IO default 1m, e.g. 16k
rate_process (str): kind of rate process default poisson, e.g. poisson
fio_filename(str): Name of fio file created on app pod's mount point
bs (str): Block size, e.g. 4K
end_fsync (int): If 1, fio will sync file contents when a write
stage has completed. Fio default is 0
"""
if not self.wl_setup_done:
self.workload_setup(storage_type=storage_type, jobs=jobs)
if io_direction == "rw":
self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
self.io_params["rwmixread"] = rw_ratio
else:
self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
self.io_params["runtime"] = runtime
size = size if isinstance(size, str) else f"{size}G"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.io_params["iodepth"] = depth
self.io_params["rate"] = rate
self.io_params["rate_process"] = rate_process
self.io_params["bs"] = bs
if end_fsync:
self.io_params["end_fsync"] = end_fsync
self.fio_thread = self.wl_obj.run(**self.io_params)
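    # Minimal usage sketch (editor's addition, not part of the original class): how
    # run_io() is typically paired with get_fio_results(), which blocks on the
    # background FIO thread. The size and file name below are illustrative values,
    # not defaults taken from this module.
    def _example_run_io_and_wait(self):
        self.run_io(storage_type="fs", size="1G", fio_filename="example-io-file")
        # Block until FIO finishes and return the parsed result dictionary.
        return self.get_fio_results()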
def fillup_fs(self, size, fio_filename=None):
"""
Execute FIO on a pod to fillup a file
        This will run sequential IO with a 1MB block size to fill up the file with data.
        This operation will run in the background and will store the results in
        'self.fio_thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.fio_thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
size (str): Size in MB, e.g. '200M'
fio_filename(str): Name of fio file created on app pod's mount point
"""
if not self.wl_setup_done:
self.workload_setup(storage_type="fs", jobs=1)
self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
size = size if isinstance(size, str) else f"{size}M"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.fio_thread = self.wl_obj.run(**self.io_params)
def run_git_clone(self):
"""
Execute git clone on a pod to simulate a Jenkins user
"""
name = "test_workload"
work_load = "jenkins"
wl = workload.WorkLoad(
name=name, work_load=work_load, pod=self, path=self.get_storage_path()
)
assert wl.setup(), "Setup up for git failed"
wl.run()
def install_packages(self, packages):
"""
Install packages in a Pod
Args:
packages (list): List of packages to install
"""
if isinstance(packages, list):
packages = " ".join(packages)
cmd = f"yum install {packages} -y"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
"""
Upload a file from pod to server
Args:
server (str): Name of the server to upload
authkey (str): Authentication file (.pem file)
localpath (str): Local file/dir in pod to upload
remotepath (str): Target path on the remote server
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = (
f'scp -i {authkey} -o "StrictHostKeyChecking no"'
f" -r {localpath} {user}@{server}:{remotepath}"
)
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
"""
Run command on a remote server from pod
Args:
server (str): Name of the server to run the command
authkey (str): Authentication file (.pem file)
cmd (str): command to run on server from pod
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_memory(self):
"""
Get the pod memory size
Returns:
dict: The names of the pod's containers (str) as keys and their memory
size (str) as values
"""
containers = self.pod_data.get("spec").get("containers")
container_names_and_memory = {
container.get("name"): container.get("resources")
.get("limits")
.get("memory")
for container in containers
}
return container_names_and_memory
# Helper functions for Pods
def get_all_pods(
namespace=None,
selector=None,
selector_label="app",
exclude_selector=False,
wait=False,
):
"""
Get all pods in a namespace.
Args:
namespace (str): Name of the namespace
If namespace is None - get all pods
selector (list) : List of the resource selector to search with.
Example: ['alertmanager','prometheus']
        selector_label (str): Label key of the selector (default: app).
        exclude_selector (bool): If True, exclude pods matching the selector
            instead of selecting them.
        wait (bool): If True, wait before listing pods so they can stabilize.
Returns:
list: List of Pod objects
"""
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
# In case of >4 worker nodes node failures automatic failover of pods to
# other nodes will happen.
# So, we are waiting for the pods to come up on new node
if wait:
wait_time = 180
logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
time.sleep(wait_time)
pods = ocp_pod_obj.get()["items"]
if selector:
if exclude_selector:
pods_new = [
pod
for pod in pods
if pod["metadata"].get("labels", {}).get(selector_label) not in selector
]
else:
pods_new = [
pod
for pod in pods
if pod["metadata"].get("labels", {}).get(selector_label) in selector
]
pods = pods_new
pod_objs = [Pod(**pod) for pod in pods]
return pod_objs
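# Usage sketch (editor's addition): selecting pods by label with get_all_pods().
# The namespace default and label values are illustrative; each value listed in
# `selector` is matched against the `selector_label` key of the pod's labels.
def _example_get_mon_and_osd_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    return get_all_pods(
        namespace=namespace,
        selector=["rook-ceph-mon", "rook-ceph-osd"],
        selector_label="app",
    )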
def get_ceph_tools_pod():
"""
Get the Ceph tools pod
Returns:
Pod object: The Ceph tools pod object
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
if not ct_pod_items:
# setup ceph_toolbox pod if the cluster has been setup by some other CI
setup_ceph_toolbox()
ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
assert ct_pod_items, "No Ceph tools pod found"
# In the case of node failure, the CT pod will be recreated with the old
# one in status Terminated. Therefore, need to filter out the Terminated pod
running_ct_pods = list()
for pod in ct_pod_items:
if (
ocp_pod_obj.get_resource_status(pod.get("metadata").get("name"))
== constants.STATUS_RUNNING
):
running_ct_pods.append(pod)
assert running_ct_pods, "No running Ceph tools pod found"
ceph_pod = Pod(**running_ct_pods[0])
return ceph_pod
def get_csi_provisioner_pod(interface):
"""
Get the provisioner pod based on interface
Returns:
        tuple: Names of the two provisioner pods for the given interface
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
selector = (
"app=csi-rbdplugin-provisioner"
if (interface == constants.CEPHBLOCKPOOL)
else "app=csi-cephfsplugin-provisioner"
)
provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
assert provision_pod_items, f"No {interface} provisioner pod found"
provisioner_pod = (
Pod(**provision_pod_items[0]).name,
Pod(**provision_pod_items[1]).name,
)
return provisioner_pod
def get_csi_snapshoter_pod():
"""
Get the csi snapshot controller pod
Returns:
Pod object: csi snapshot controller pod
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace="openshift-cluster-storage-operator"
)
selector = "app=csi-snapshot-controller"
snapshotner_pod = ocp_pod_obj.get(selector=selector)["items"]
snapshotner_pod = Pod(**snapshotner_pod[0]).name
return snapshotner_pod
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
"""
Fetches info about rgw pods in the cluster
Args:
rgw_label (str): label associated with rgw pods
(default: defaults.RGW_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
list: Pod objects of rgw pods
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
rgws = get_pods_having_label(rgw_label, namespace)
return [Pod(**rgw) for rgw in rgws]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
"""
    Fetches info about the ocs-operator pod in the cluster
Args:
ocs_label (str): label associated with ocs_operator pod
(default: defaults.OCS_OPERATOR_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
Pod object: ocs_operator pod object
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
ocs_operator = get_pods_having_label(ocs_label, namespace)
ocs_operator_pod = Pod(**ocs_operator[0])
return ocs_operator_pod
def list_ceph_images(pool_name="rbd"):
"""
Args:
pool_name (str): Name of the pool to get the ceph images
Returns (List): List of RBD images in the pool
"""
ct_pod = get_ceph_tools_pod()
return ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
"""
Check if file exists inside the pod
Args:
pod_obj (Pod): The object of the pod
file_path (str): The full path of the file to look for inside
the pod
Returns:
bool: True if the file exist, False otherwise
"""
try:
check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
except CommandFailed:
pod_obj.install_packages("findutils")
ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
if re.search(file_path, ret):
return True
return False
def get_file_path(pod_obj, file_name):
"""
Get the full path of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which path to get
Returns:
str: The full path of the file
"""
path = (
pod_obj.get()
.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("mountPath")
)
file_path = os.path.join(path, file_name)
return file_path
def cal_md5sum(pod_obj, file_name, block=False):
"""
Calculates the md5sum of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
block (bool): True if the volume mode of PVC used on pod is 'Block'.
file_name will be the devicePath in this case.
Returns:
str: The md5sum of the file
"""
file_path = file_name if block else get_file_path(pod_obj, file_name)
md5sum_cmd_out = pod_obj.exec_cmd_on_pod(
command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
)
md5sum = md5sum_cmd_out.split()[0]
logger.info(f"md5sum of file {file_name}: {md5sum}")
return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
"""
Verifies existence and md5sum of file created from first pod
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
original_md5sum (str): The original md5sum of the file
block (bool): True if the volume mode of PVC used on pod is 'Block'.
file_name will be the devicePath in this case.
Returns:
bool: True if the file exists and md5sum matches
Raises:
AssertionError: If file doesn't exist or md5sum mismatch
"""
file_path = file_name if block else get_file_path(pod_obj, file_name)
    assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exist"
current_md5sum = cal_md5sum(pod_obj, file_name, block)
logger.info(f"Original md5sum of file: {original_md5sum}")
logger.info(f"Current md5sum of file: {current_md5sum}")
assert current_md5sum == original_md5sum, "Data corruption found"
logger.info(f"File {file_name} exists and md5sum matches")
return True
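# Usage sketch (editor's addition): a typical write / checksum / verify cycle built
# from run_io(), cal_md5sum() and verify_data_integrity(). The pod object is assumed
# to come from the calling test and the file name is an illustrative value.
def _example_check_data_integrity(pod_obj, file_name="integrity-check-file"):
    pod_obj.run_io(storage_type="fs", size="1G", fio_filename=file_name)
    pod_obj.get_fio_results()
    original_md5 = cal_md5sum(pod_obj, file_name)
    return verify_data_integrity(pod_obj, file_name, original_md5)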
def get_fio_rw_iops(pod_obj):
"""
Execute FIO on a pod
Args:
pod_obj (Pod): The object of the pod
"""
fio_result = pod_obj.get_fio_results()
logging.info(f"FIO output: {fio_result}")
logging.info("IOPs after FIO:")
logging.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
logging.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
"""
Run I/O in the background
Args:
pod_obj (Pod): The object of the pod
expect_to_fail (bool): True for the command to be expected to fail
(disruptive operations), False otherwise
fedora_dc (bool): set to False by default. If set to True, it runs IO in
background on a fedora dc pod.
Returns:
Thread: A thread of the I/O execution
"""
logger.info(f"Running I/O on pod {pod_obj.name}")
def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
"""
Execute I/O
"""
try:
# Writing content to a new file every 0.01 seconds.
# Without sleep, the device will run out of space very quickly -
# 5-10 seconds for a 5GB device
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
pod_obj.exec_cmd_on_pod(
command=f'bash -c "let i=0; while true; do echo '
f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
timeout=2400,
)
# Once the pod gets deleted, the I/O execution will get terminated.
# Hence, catching this exception
except CommandFailed as ex:
if expect_to_fail:
if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))):
logger.info("I/O command got terminated as expected")
return
raise ex
thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
thread.start()
time.sleep(2)
# Checking file existence
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
test_file = FILE + "1"
# Check I/O started
try:
for sample in TimeoutSampler(
timeout=20,
sleep=1,
func=check_file_existence,
pod_obj=pod_obj,
file_path=test_file,
):
if sample:
break
logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
except TimeoutExpiredError:
logger.error(
f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
"Collect file list."
)
parent_dir = os.path.join(TEST_FILE, os.pardir)
pod_obj.exec_cmd_on_pod(
command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False
)
raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
return thread
def get_admin_key_from_ceph_tools():
"""
Fetches admin key secret from ceph
Returns:
admin keyring encoded with base64 as a string
"""
tools_pod = get_ceph_tools_pod()
out = tools_pod.exec_ceph_cmd(ceph_cmd="ceph auth get-key client.admin")
base64_output = base64.b64encode(out["key"].encode()).decode()
return base64_output
def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
"""
Run I/O on mount point
Args:
pod_obj (Pod): The object of the pod
bs (str): Read and write up to bytes at a time
count (str): Copy only N input blocks
Returns:
used_percentage (str): Used percentage on mount point
"""
pod_obj.exec_cmd_on_pod(
command=f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
)
    # Verify data is written to the mount point
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index("/var/lib/www/html") - 1]
return used_percentage
def get_pods_having_label(label, namespace):
"""
Fetches pod resources with given label in given namespace
Args:
label (str): label which pods might have
namespace (str): Namespace in which to be looked up
Return:
list: of pods info
"""
ocp_pod = OCP(kind=constants.POD, namespace=namespace)
pods = ocp_pod.get(selector=label).get("items")
return pods
def get_deployments_having_label(label, namespace):
"""
Fetches deployment resources with given label in given namespace
Args:
label (str): label which deployments might have
namespace (str): Namespace in which to be looked up
Return:
list: deployment OCP instances
"""
ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
pods = ocp_deployment.get(selector=label).get("items")
return pods
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
"""
Fetches info about mds pods in the cluster
Args:
mds_label (str): label associated with mds pods
(default: defaults.MDS_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mds pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mdss = get_pods_having_label(mds_label, namespace)
mds_pods = [Pod(**mds) for mds in mdss]
return mds_pods
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
"""
Fetches info about mon pods in the cluster
Args:
mon_label (str): label associated with mon pods
(default: defaults.MON_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mon pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mons = get_pods_having_label(mon_label, namespace)
mon_pods = [Pod(**mon) for mon in mons]
return mon_pods
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
"""
Fetches info about mgr pods in the cluster
Args:
mgr_label (str): label associated with mgr pods
(default: defaults.MGR_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mgr pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mgrs = get_pods_having_label(mgr_label, namespace)
mgr_pods = [Pod(**mgr) for mgr in mgrs]
return mgr_pods
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd pods in the cluster
Args:
osd_label (str): label associated with osd pods
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of osd pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_pods_having_label(osd_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_prepare_pods(
osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
"""
Fetches info about osd prepare pods in the cluster
Args:
osd_prepare_label (str): label associated with osd prepare pods
(default: constants.OSD_PREPARE_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD prepare pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_pods_having_label(osd_prepare_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd deployments in the cluster
Args:
osd_label (str): label associated with osd deployments
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD deployment OCS instances
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_deployments_having_label(osd_label, namespace)
osd_deployments = [OCS(**osd) for osd in osds]
return osd_deployments
def get_pod_count(label, namespace=None):
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(label=label, namespace=namespace)
return len(pods)
def get_cephfsplugin_provisioner_pods(
cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
namespace=None,
):
"""
Fetches info about CSI Cephfs plugin provisioner pods in the cluster
Args:
cephfsplugin_provisioner_label (str): label associated with cephfs
provisioner pods
(default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-cephfsplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(cephfsplugin_provisioner_label, namespace)
fs_plugin_pods = [Pod(**pod) for pod in pods]
return fs_plugin_pods
def get_rbdfsplugin_provisioner_pods(
rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
namespace=None,
):
"""
Fetches info about CSI Cephfs plugin provisioner pods in the cluster
Args:
rbdplugin_provisioner_label (str): label associated with RBD
provisioner pods
(default: defaults.CSI_RBDPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-rbdplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
ebd_plugin_pods = [Pod(**pod) for pod in pods]
return ebd_plugin_pods
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
name (str): Name of the resources
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj
def get_pod_logs(
pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
"""
    Get logs from a given pod
    Args:
        pod_name (str): Name of the pod
        container (str): Name of the container
        namespace (str): Namespace of the pod
        previous (bool): True if the pod's previous log is required, False otherwise.
    Returns:
        str: Output of the 'oc logs <pod_name>' command
"""
pod = OCP(kind=constants.POD, namespace=namespace)
cmd = f"logs {pod_name}"
if container:
cmd += f" -c {container}"
if previous:
cmd += " --previous"
return pod.exec_oc_cmd(cmd, out_yaml_format=False)
def get_pod_node(pod_obj):
"""
Get the node that the pod is running on
Args:
pod_obj (OCS): The pod object
Returns:
ocs_ci.ocs.ocp.OCP: The node object
"""
node_name = pod_obj.get().get("spec").get("nodeName")
return node.get_node_objs(node_names=node_name)[0]
def delete_pods(pod_objs, wait=True):
"""
Deletes list of the pod objects
Args:
pod_objs (list): List of the pod objects to be deleted
wait (bool): Determines if the delete command should wait for
completion
"""
for pod in pod_objs:
pod.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
"""
Verifies the list of the pods are respinned and in running state
Args:
pod_objs_list (list): List of the pods obj
Returns:
bool : True if the pods are respinned and running, False otherwise
Raises:
        ResourceWrongStatusException: In case a resource hasn't
            reached the Running state
"""
for pod in pod_objs_list:
helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)
for pod in pod_objs_list:
pod_obj = pod.get()
start_time = pod_obj["status"]["startTime"]
ts = time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ")
ts = calendar.timegm(ts)
current_time_utc = time.time()
sec = current_time_utc - ts
if (sec / 3600) >= 1:
logger.error(
f"Pod {pod.name} is not respinned, the age of the pod is {start_time}"
)
return False
return True
def verify_node_name(pod_obj, node_name):
"""
Verifies that the pod is running on a particular node
Args:
pod_obj (Pod): The pod object
node_name (str): The name of node to check
Returns:
bool: True if the pod is running on a particular node, False otherwise
"""
logger.info(
f"Checking whether the pod {pod_obj.name} is running on " f"node {node_name}"
)
actual_node = pod_obj.get().get("spec").get("nodeName")
if actual_node == node_name:
logger.info(
f"The pod {pod_obj.name} is running on the specified node " f"{actual_node}"
)
return True
else:
logger.info(
f"The pod {pod_obj.name} is not running on the specified node "
f"specified node: {node_name}, actual node: {actual_node}"
)
return False
def get_pvc_name(pod_obj):
"""
Function to get pvc_name from pod_obj
Args:
        pod_obj (Pod): The pod object
Returns:
str: The pvc name of a given pod_obj,
Raises:
UnavailableResourceException: If no pvc attached
"""
pvc = pod_obj.get().get("spec").get("volumes")[0].get("persistentVolumeClaim")
if not pvc:
raise UnavailableResourceException
return pvc.get("claimName")
def get_used_space_on_mount_point(pod_obj):
"""
Get the used space on a mount point
Args:
pod_obj (POD): The pod object
Returns:
int: Percentage represent the used space on the mount point
"""
    # Verify data is written to the mount point
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index(constants.MOUNT_POINT) - 1]
return used_percentage
def get_plugin_pods(interface, namespace=None):
"""
Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
Returns:
list : csi-cephfsplugin pod objects or csi-rbdplugin pod objects
"""
if interface == constants.CEPHFILESYSTEM:
plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
if interface == constants.CEPHBLOCKPOOL:
plugin_label = constants.CSI_RBDPLUGIN_LABEL
namespace = namespace or config.ENV_DATA["cluster_namespace"]
plugins_info = get_pods_having_label(plugin_label, namespace)
plugin_pods = [Pod(**plugin) for plugin in plugins_info]
return plugin_pods
def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"):
"""
Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
leader_type (str): Parameter to check the lease. eg: 'snapshotter' to
select external-snapshotter leader holder
Returns:
Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
pod
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
leader_types = {
"provisioner": namespace,
"snapshotter": f"external-snapshotter-leader-{namespace}",
"resizer": f"external-resizer-{namespace}",
"attacher": f"external-attacher-{namespace}",
}
if interface == constants.CEPHBLOCKPOOL:
lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml"
elif interface == constants.CEPHFILESYSTEM:
lease_cmd = (
f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml"
)
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
lease = ocp_obj.exec_oc_cmd(command=lease_cmd)
leader = lease.get("spec").get("holderIdentity").strip()
assert leader, "Couldn't identify plugin provisioner leader pod."
logger.info(f"Plugin provisioner leader pod is {leader}")
ocp_obj._resource_name = leader
leader_pod = Pod(**ocp_obj.get())
return leader_pod
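# Usage sketch (editor's addition): resolving the RBD provisioner leader and the
# external-snapshotter lease holder with get_plugin_provisioner_leader(). The
# interface constant comes from ocs_ci.ocs.constants, as used elsewhere in this
# module; the pair of calls is illustrative, not a required sequence.
def _example_get_rbd_leaders():
    provisioner_leader = get_plugin_provisioner_leader(constants.CEPHBLOCKPOOL)
    snapshotter_leader = get_plugin_provisioner_leader(
        constants.CEPHBLOCKPOOL, leader_type="snapshotter"
    )
    return provisioner_leader, snapshotter_leader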
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
"""
Fetches info about rook-ceph-operator pods in the cluster
Args:
operator_label (str): Label associated with rook-ceph-operator pod
namespace (str): Namespace in which ceph cluster lives
Returns:
list : of rook-ceph-operator pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
operators = get_pods_having_label(operator_label, namespace)
operator_pods = [Pod(**operator) for operator in operators]
return operator_pods
def upload(pod_name, localpath, remotepath, namespace=None):
"""
Upload a file to pod
Args:
pod_name (str): Name of the pod
localpath (str): Local file to upload
remotepath (str): Target path on the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = (
f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}"
)
run_cmd(cmd)
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
"""
Download a file from a pod
Args:
pod_name (str): Name of the pod
remotepath (str): Target path on the pod
localpath (str): Local file to upload
namespace (str): The namespace of the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = (
f"oc -n {namespace} cp {pod_name}:{remotepath} {os.path.expanduser(localpath)}"
)
run_cmd(cmd)
def wait_for_storage_pods(timeout=200):
"""
Check all OCS pods status, they should be in Running or Completed state
Args:
timeout (int): Number of seconds to wait for pods to get into correct
state
"""
all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
# Ignoring pods with "app=rook-ceph-detect-version" app label
all_pod_obj = [
pod
for pod in all_pod_obj
if pod.get_labels()
and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels()
]
for pod_obj in all_pod_obj:
state = constants.STATUS_RUNNING
if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]):
state = constants.STATUS_COMPLETED
try:
helpers.wait_for_resource_state(
resource=pod_obj, state=state, timeout=timeout
)
except ResourceWrongStatusException:
            # 'rook-ceph-crashcollector' on the failed node gets stuck in
            # Pending state. BZ 1810014 tracks it.
# Ignoring 'rook-ceph-crashcollector' pod health check as
# WA and deleting its deployment so that the pod
# disappears. Will revert this WA once the BZ is fixed
if "rook-ceph-crashcollector" in pod_obj.name:
ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
pod_name = pod_obj.name
deployment_name = "-".join(pod_name.split("-")[:-2])
command = f"delete deployment {deployment_name}"
ocp_obj.exec_oc_cmd(command=command)
logger.info(f"Deleted deployment for pod {pod_obj.name}")
else:
raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
"""
Verify that all pods do not have old image.
Args:
old_images (set): Set with old images.
selector (str): Selector (e.g. app=ocs-osd)
count (int): Number of resources for selector.
timeout (int): Timeout in seconds to wait for pods to be upgraded.
Raises:
TimeoutException: If the pods didn't get upgraded till the timeout.
"""
namespace = config.ENV_DATA["cluster_namespace"]
pod = OCP(
kind=constants.POD,
namespace=namespace,
)
info_message = (
f"Waiting for {count} pods with selector: {selector} to be running "
f"and upgraded."
)
logger.info(info_message)
start_time = time.time()
selector_label, selector_value = selector.split("=")
while True:
pod_count = 0
try:
pods = get_all_pods(namespace, [selector_value], selector_label)
pods_len = len(pods)
logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
if pods_len != count:
logger.warning(
f"Number of found pods {pods_len} is not as expected: " f"{count}"
)
for pod in pods:
verify_images_upgraded(old_images, pod.get())
pod_count += 1
except CommandFailed as ex:
logger.warning(
f"Failed when getting pods with selector {selector}." f"Error: {ex}"
)
except NonUpgradedImagesFoundError as ex:
logger.warning(ex)
check_timeout_reached(start_time, timeout, info_message)
if pods_len != count:
logger.error(f"Found pods: {pods_len} but expected: {count}!")
elif pod_count == count:
return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
"""
Fetches info about noobaa pods in the cluster
Args:
noobaa_label (str): label associated with osd pods
(default: defaults.NOOBAA_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of noobaa pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
noobaas = get_pods_having_label(noobaa_label, namespace)
noobaa_pods = [Pod(**noobaa) for noobaa in noobaas]
return noobaa_pods
def wait_for_dc_app_pods_to_reach_running_state(
dc_pod_obj, timeout=120, exclude_state=None
):
"""
Wait for DC app pods to reach running state
Args:
dc_pod_obj (list): list of dc app pod objects
timeout (int): Timeout in seconds to wait for pods to be in Running
state.
exclude_state (str): A resource state to ignore
"""
for pod_obj in dc_pod_obj:
name = pod_obj.get_labels().get("name")
dpod_list = get_all_pods(selector_label=f"name={name}", wait=True)
for dpod in dpod_list:
if "-1-deploy" not in dpod.name and dpod.status != exclude_state:
helpers.wait_for_resource_state(
dpod, constants.STATUS_RUNNING, timeout=timeout
)
def delete_deploymentconfig_pods(pod_obj):
"""
Delete a DeploymentConfig pod and all the pods that are controlled by it
Args:
pod_obj (Pod): Pod object
"""
dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
pod_data_list = dc_ocp_obj.get().get("items")
if pod_data_list:
for pod_data in pod_data_list:
if pod_obj.get_labels().get("name") == pod_data.get("metadata").get("name"):
dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get("name"))
dc_ocp_obj.wait_for_delete(
resource_name=pod_obj.get_labels().get("name")
)
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
status_options = ["Init:1/4", "Init:2/4", "Init:3/4", "PodInitializing", "Running"]
try:
for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods):
# Check if the new osd pods has started to come up
new_osd_pods = osd_pods[number_of_osd_pods_before:]
new_osd_pods_come_up = [
pod.status() in status_options for pod in new_osd_pods
]
if any(new_osd_pods_come_up):
logging.info("One or more of the new osd pods has started to come up")
break
except TimeoutExpiredError:
logging.warning("None of the new osd pods reached the desired status")
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Gets the dictionary of pod and its restart count for all the pods in a given namespace
Returns:
dict: dictionary of pod name and its corresponding restart count
"""
list_of_pods = get_all_pods(namespace)
restart_dict = {}
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
        # We don't want to compare osd-prepare and drain-canary pods, as they get created freshly when an OSD needs to be added.
if (
"rook-ceph-osd-prepare" not in p.name
and "rook-ceph-drain-canary" not in p.name
):
restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, "RESTARTS"))
logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
return restart_dict
def check_pods_in_running_state(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
checks whether all the pods in a given namespace are in Running state or not
Returns:
Boolean: True, if all pods in Running state. False, otherwise
"""
ret_val = True
list_of_pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
        # We don't want to compare osd-prepare, drain-canary and debug pods, as
        # they get created freshly when an OSD needs to be added.
        if (
            "rook-ceph-osd-prepare" not in p.name
            and "rook-ceph-drain-canary" not in p.name
            and "debug" not in p.name
        ):
            status = ocp_pod_obj.get_resource(p.name, "STATUS")
            if status not in "Running":
                logging.error(
                    f"The pod {p.name} is in {status} state. Expected = Running"
                )
                ret_val = False
return ret_val
def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Checks the running state pods in a given namespace.
Returns:
List: all the pod objects that are in running state only
"""
list_of_pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
running_pods_object = list()
for pod in list_of_pods:
status = ocp_pod_obj.get_resource(pod.name, "STATUS")
if "Running" in status:
running_pods_object.append(pod)
return running_pods_object
def wait_for_pods_to_be_running(timeout=200, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Wait for all the pods in a specific namespace to be running.
Args:
timeout (int): time to wait for pods to be running
        namespace (str): the namespace of the pods
Returns:
bool: True, if all pods in Running state. False, otherwise
"""
try:
for pods_running in TimeoutSampler(
timeout=timeout,
sleep=10,
func=check_pods_in_running_state,
namespace=namespace,
):
# Check if all the pods in running state
if pods_running:
logging.info("All the pods reached status running!")
return True
except TimeoutExpiredError:
logging.warning(
f"Not all the pods reached status running " f"after {timeout} seconds"
)
return False
def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
The function returns the list of nodes for the given selector
Args:
selector (str): The resource selector to search with
Returns:
list: a list of nodes that runs the given selector pods
"""
pod_obj_list = get_all_pods(namespace=namespace, selector=[selector])
pods_running_nodes = [get_pod_node(pod) for pod in pod_obj_list]
logger.info(f"{selector} running on nodes {pods_running_nodes}")
return list(set(pods_running_nodes))
def get_osd_removal_pod_name(osd_id, timeout=60):
"""
Get the osd removal pod name
Args:
osd_id (int): The osd's id to get the osd removal pod name
timeout (int): The time to wait for getting the osd removal pod name
Returns:
str: The osd removal pod name
"""
try:
for osd_removal_pod_names in TimeoutSampler(
timeout=timeout,
sleep=5,
func=get_pod_name_by_pattern,
pattern=f"ocs-osd-removal-{osd_id}",
):
if osd_removal_pod_names:
osd_removal_pod_name = osd_removal_pod_names[0]
logging.info(f"Found pod {osd_removal_pod_name}")
return osd_removal_pod_name
except TimeoutExpiredError:
logger.warning(f"Failed to get pod ocs-osd-removal-{osd_id}")
return None
| 32.270972
| 120
| 0.635536
|
a0997726912e790b303795cb5fa3a1edda6519f5
| 7,337
|
py
|
Python
|
workspace/CsvWrite/CsvWrite.py
|
rshinden/NAOTrainingRTC
|
f7f33ae8f0790d797cd4bdbda2c761c92ac42683
|
[
"BSD-3-Clause"
] | null | null | null |
workspace/CsvWrite/CsvWrite.py
|
rshinden/NAOTrainingRTC
|
f7f33ae8f0790d797cd4bdbda2c761c92ac42683
|
[
"BSD-3-Clause"
] | 3
|
2019-12-02T00:25:20.000Z
|
2019-12-10T04:48:13.000Z
|
workspace/CsvWrite/CsvWrite.py
|
rshinden/training_guide_RTC
|
f7f33ae8f0790d797cd4bdbda2c761c92ac42683
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
@file CsvWrite.py
@brief ModuleDescription
@date $Date$
"""
import sys
import time
import re
import csv
import datetime
import math
pattern = re.compile('\d+\,\d+\,\d+')
sys.path.append(".")
# Import RTM module
import RTC
import OpenRTM_aist
# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>
# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>
# This module's specification
# <rtc-template block="module_spec">
CsvWrite_spec = ["implementation_id", "CsvWrite",
"type_name", "CsvWrite",
"description", "ModuleDescription",
"version", "1.0.0",
"vendor", "shinden",
"category", "Category",
"activity_type", "STATIC",
"max_instance", "1",
"language", "Python",
"lang_type", "SCRIPT",
""]
# </rtc-template>
##
# @class CsvWrite
# @brief ModuleDescription
#
#
class CsvWrite(OpenRTM_aist.DataFlowComponentBase):
##
# @brief constructor
    # @param manager Manager Object
#
def __init__(self, manager):
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
sign_arg = [None] * ((len(RTC._d_TimedString) - 4) / 2)
self._d_sign = RTC.TimedString(RTC.Time(0,0), "")
"""
"""
self._signIn = OpenRTM_aist.InPort("sign", self._d_sign)
sensor_arg = [None] * ((len(RTC._d_TimedFloatSeq) - 4) / 2)
self._d_sensor = RTC.TimedFloatSeq(RTC.Time(0,0), [])
"""
"""
self._sensorIn = OpenRTM_aist.InPort("sensor", self._d_sensor)
fin_arg = [None] * ((len(RTC._d_TimedString) - 4) / 2)
self._d_fin = RTC.TimedString(RTC.Time(0,0), "")
"""
"""
self._finIn = OpenRTM_aist.InPort("fin", self._d_fin)
self._switch = "on"
self._first = "true"
self._fin = []
self._start = 0
self._nexttime = 0
self._today = datetime.date.today()
self._today_str = str(self._today)
self._t = 0
# initialize of configuration-data.
# <rtc-template block="init_conf_param">
# </rtc-template>
##
#
# The initialize action (on CREATED->ALIVE transition)
    # former rtc_init_entry()
#
# @return RTC::ReturnCode_t
#
#
def onInitialize(self):
# Bind variables and configuration variable
# Set InPort buffers
self.addInPort("sign",self._signIn)
self.addInPort("sensor",self._sensorIn)
self.addInPort("fin",self._finIn)
# Set OutPort buffers
# Set service provider to Ports
# Set service consumers to Ports
# Set CORBA Service Ports
return RTC.RTC_OK
# ##
# #
# # The finalize action (on ALIVE->END transition)
    # # former rtc_exiting_entry()
# #
# # @return RTC::ReturnCode_t
#
# #
def onActivated(self, ec_id):
print("activate CSVWrite")
return RTC.RTC_OK
##
#
# The deactivated action (Active state exit action)
# former rtc_active_exit()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onDeactivated(self, ec_id):
print("deactivate CsvWrite")
return RTC.RTC_OK
##
#
# The execution action that is invoked periodically
# former rtc_active_do()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onExecute(self, ec_id):
        # Read the sensor values
self._d_sensor = self._sensorIn.read()
line = self._d_sensor.data
FSR_dic = {}
i = 0
        # Store the readings in a dictionary
for i in range (1,15,1):
FSR_dic.setdefault(("FSR" + str(i)), line[i-1])
        # Read the start signal
if self._signIn.isNew():
self._d_sign = self._signIn.read()
sign = self._d_sign.data
if sign == "kaishi":
self._switch = "on"
print("GO")
        # Create the CSV file and write the header row
if self._switch == "on":
print("switch is on")
if self._first == "true":
self._start = time.time()
self._nexttime = time.time() - self._start
with open ("./data/" + self._today_str + ".csv", "w") as f:
writer = csv.writer(f, lineterminator = '\n')
writer.writerow(["time","No,1","No,2","No,3","No,4","No,5","No,6","No,7","No,8","No,9","No,10","No,11","No,12","No,13","No,14"])
print("csv open")
self._first = "false"
self._nexttime +=0.1
self._t =time.time() - self._start
if (self._t >= self._nexttime and self._first == "false"):
#print(">>>>>>")
            # Write a data row
with open ("./data/" + self._today_str + ".csv", "a") as f:
print("csv_write")
print(self._t)
writer = csv.writer(f, lineterminator='\n')
writer.writerow([self._t,FSR_dic["FSR1" ], FSR_dic["FSR2" ], FSR_dic["FSR3" ], FSR_dic["FSR4" ], FSR_dic["FSR5" ], FSR_dic["FSR6" ], FSR_dic["FSR7" ], FSR_dic["FSR8" ], FSR_dic["FSR9" ], FSR_dic["FSR10" ], FSR_dic["FSR11" ], FSR_dic["FSR12" ], FSR_dic["FSR13" ], FSR_dic["FSR14" ]])
self._nexttime +=0.1
        # Read the finish signal
if self._finIn.isNew():
self._d_fin = self._finIn.read()
self._fin = self._d_fin.data
print("receive: ", self._fin)
if self._fin == "fin":
with open ("./data/" + self._today_str + ".csv", "a") as f:
#print("csv_write")
writer = csv.writer(f, lineterminator='\n')
writer.writerow([self._t,FSR_dic["FSR1" ], FSR_dic["FSR2" ], FSR_dic["FSR3" ], FSR_dic["FSR4" ], FSR_dic["FSR5" ], FSR_dic["FSR6" ], FSR_dic["FSR7" ], FSR_dic["FSR8" ], FSR_dic["FSR9" ], FSR_dic["FSR10" ], FSR_dic["FSR11" ], FSR_dic["FSR12" ], FSR_dic["FSR13" ], FSR_dic["FSR14" ]])
f.close()
self._first = "end"
print("close csv")
return RTC.RTC_OK
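    # Illustrative sketch (editor's addition, not part of the original component):
    # the row layout produced by onExecute() above for one 14-element FSR sample.
    # The sample values and arguments are made up; the real readings come from the
    # "sensor" InPort and the elapsed time from self._t.
    def _example_csv_row(self, elapsed_time, sample):
        # sample is expected to hold 14 FSR readings, matching the header
        # ["time", "No,1", ..., "No,14"] written when the file is created.
        fsr_dic = {}
        for i in range(1, 15, 1):
            fsr_dic.setdefault("FSR" + str(i), sample[i - 1])
        return [elapsed_time] + [fsr_dic["FSR" + str(i)] for i in range(1, 15)]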
# ##
# #
# # The aborting action when main logic error occurred.
# # former rtc_aborting_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onAborting(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The error action in ERROR state
# # former rtc_error_do()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onError(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The reset action that is invoked resetting
    # # This is similar to, but different from, the former rtc_init_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onReset(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The state update action that is invoked after onExecute() action
# # no corresponding operation exists in OpenRTm-aist-0.2.0
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onStateUpdate(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The action that is invoked when execution context's rate is changed
# # no corresponding operation exists in OpenRTm-aist-0.2.0
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onRateChanged(self, ec_id):
#
# return RTC.RTC_OK
def CsvWriteInit(manager):
profile = OpenRTM_aist.Properties(defaults_str=CsvWrite_spec)
manager.registerFactory(profile,
CsvWrite,
OpenRTM_aist.Delete)
def MyModuleInit(manager):
CsvWriteInit(manager)
# Create a component
comp = manager.createComponent("CsvWrite")
def main():
mgr = OpenRTM_aist.Manager.init(sys.argv)
mgr.setModuleInitProc(MyModuleInit)
mgr.activateManager()
mgr.runManager()
if __name__ == "__main__":
main()
| 23.516026
| 288
| 0.627641
|
302a452bbda22080b0836ca788bb7a4cb0f0d8f2
| 252
|
py
|
Python
|
manage.py
|
irvanjit/quaterloo
|
38cc4431220c4aae2f3f0903e684d85d05fea80a
|
[
"MIT"
] | null | null | null |
manage.py
|
irvanjit/quaterloo
|
38cc4431220c4aae2f3f0903e684d85d05fea80a
|
[
"MIT"
] | null | null | null |
manage.py
|
irvanjit/quaterloo
|
38cc4431220c4aae2f3f0903e684d85d05fea80a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quaterloo.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.909091
| 73
| 0.77381
|
59bd0f1f30f6f4a26d11ae05c4616235988bff4d
| 140
|
py
|
Python
|
rdt/transformers/addons/identity/__init__.py
|
nhsx-mirror/SynthVAE
|
64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651
|
[
"MIT"
] | 2
|
2022-03-22T13:59:28.000Z
|
2022-03-24T07:02:54.000Z
|
rdt/transformers/addons/identity/__init__.py
|
nhsx-mirror/SynthVAE
|
64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651
|
[
"MIT"
] | 12
|
2022-02-17T16:29:31.000Z
|
2022-03-29T15:06:42.000Z
|
rdt/transformers/addons/identity/__init__.py
|
nhsx-mirror/SynthVAE
|
64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651
|
[
"MIT"
] | 3
|
2021-11-18T10:13:00.000Z
|
2022-03-23T10:25:55.000Z
|
"""Identity addons module."""
from rdt.transformers.addons.identity.identity import IdentityTransformer
__all__ = ["IdentityTransformer"]
| 23.333333
| 73
| 0.8
|
2575b29b6d7f3d546ca133a8d5061eaf3e9d645d
| 16,159
|
py
|
Python
|
src/resource_debloater/CResourceDebloater.py
|
michaelbrownuc/CARVE
|
91e4b9232791c7d0686db6d5ec65d680d0bbe3a7
|
[
"MIT"
] | 2
|
2020-06-15T21:23:40.000Z
|
2021-02-16T17:55:54.000Z
|
src/resource_debloater/CResourceDebloater.py
|
michaelbrownuc/CARVE
|
91e4b9232791c7d0686db6d5ec65d680d0bbe3a7
|
[
"MIT"
] | null | null | null |
src/resource_debloater/CResourceDebloater.py
|
michaelbrownuc/CARVE
|
91e4b9232791c7d0686db6d5ec65d680d0bbe3a7
|
[
"MIT"
] | null | null | null |
"""
C Resource Debloater
"""
# Standard Library Imports
import logging
import re
import sys
# Third Party Imports
# Local Imports
from resource_debloater.ResourceDebloater import ResourceDebloater
class CResourceDebloater(ResourceDebloater):
"""
This class implements a resource debloater for the C language. It currently supports debloating the following types
of explicit mappings (which are actually language agnostic):
Debloating the entire file (file still exists, but contains no code)
Debloating specified segments of code, with or without replacement
It also supports the following C-specific implicit mappings:
If / else if / else
function definition
statement
cases in a switch statement (aware of fall through mechanics)
"""
def __init__(self, location, target_features):
"""
CResourceDebloater constructor
:param str location: Filepath of the file on disk to debloat.
:param set target_features: List of features to be debloated from the file.
"""
super(CResourceDebloater, self).__init__(location, target_features)
# If you desire to use a different mapping sequence, it can be adjusted here.
self.annotation_sequence = "///["
@staticmethod
def get_features(line):
"""
Returns a set of features specified in the annotation.
:param str line: line of code containing an annotation.
:return: A set of the features specified in the annotation.
"""
feature_list = line.split("][")
first_trim_point = feature_list[0].find("[") + 1
feature_list[0] = feature_list[0][first_trim_point:]
last_trim_point = feature_list[len(feature_list)-1].find("]")
feature_list[len(feature_list)-1] = feature_list[len(feature_list)-1][:last_trim_point]
return set(feature_list)
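    # Illustrative sketch (editor's addition): what get_features() extracts from a
    # two-feature annotation using the default "///[" sequence. The feature names
    # are made-up examples.
    @staticmethod
    def _example_get_features():
        annotation = "///[Encryption][Logging]\n"
        # Expected result: {"Encryption", "Logging"}
        return CResourceDebloater.get_features(annotation)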
@staticmethod
def get_construct(line):
"""
Returns a string detailing the construct found at the line number. Currently supports identifying the following
constructs:
Function definitions
Switch cases
Execution branches (if, else if, else)
Individual statements
:param str line: line of code immediately following an annotation
        :return: A string naming the identified construct ("Case", "ElseIfBranch",
            "IfBranch", "ElseBranch", "FunctionStructDefinition", or "Statement").
"""
if re.search("case\s\s*\w\w*\s*:\w*", line.strip()) is not None:
return "Case"
elif re.search("\selse\s\s*if\s*(\s*\S*\s*)", " " + line.strip()) is not None:
return "ElseIfBranch"
elif re.search("\sif\s*(\s*\S*\s*)", " " + line.strip()) is not None:
return "IfBranch"
elif re.search("\selse\s*{*", " " + line.strip()) is not None:
return "ElseBranch"
elif re.search("\w\w*\s\s*\w\w*\s*(\s*\S*\s*)\s*{*", line.strip()) is not None:
return "FunctionStructDefinition"
else:
return "Statement"
def process_annotation(self, annotation_line):
"""
Processes an implicit or explicit (! and ~) debloating operation annotated at the specified line.
This debloating module operates largely on a line by line basis. It does NOT support all types of source code
authoring styles. Many code authoring styles are not supported and will cause errors in debloating.
Not Supported: Implicit annotations must be immediately preceding the construct to debloat. There cannot
be empty lines, comments, or block comments between the annotation and the construct.
Not Supported: This processor assumes that all single statement debloating operations can remove the line
wholesale. Multi-line statements will not be completely debloated.
Not Supported: This processor assumes all execution branch statements have the entire condition expressed on
the same line as the keyword.
Not Supported: This processor expects all branches to be enclosed in braces, single statement blocks will cause
errors.
:param int annotation_line: Line where annotation to be processed is located.
:return: None
"""
# Check the annotation line for explicit cues ! and ~
last_char = self.lines[annotation_line].strip()[-1]
if last_char == "!":
self.lines = []
self.lines.append("///File Debloated.\n")
self.lines.append("\n")
elif last_char == "~":
segment_end = None
search_line = annotation_line + 1
replacement_code = []
# Check for replacement code following the segment debloat annotation. If found, remove and store for later
if self.lines[search_line].find("///^") > -1:
self.lines.pop(search_line)
while self.lines[search_line].find("///^") < 0:
replacement_code.append(self.lines.pop(search_line).replace("///", ""))
self.lines.pop(search_line)
while search_line < len(self.lines):
if self.lines[search_line].find("///~") > -1:
segment_end = search_line
break
else:
search_line += 1
if segment_end is None:
logging.error("No termination annotation found for segment annotation on line " + str(annotation_line) +
". Marking location and skipping this annotation.")
self.lines.insert(annotation_line+1, "/// Segment NOT removed due to lack of termination annotation.\n")
else:
while segment_end != annotation_line:
self.lines.pop(segment_end)
segment_end -= 1
self.lines[annotation_line] = "/// Segment Debloated.\n"
self.lines.insert(annotation_line + 1, "\n")
# Insert replacement code if it exists
if len(replacement_code) > 0:
insert_point = 2
self.lines.insert(annotation_line + insert_point, "/// Code Inserted:\n")
insert_point += 1
for replacement_line in replacement_code:
self.lines.insert(annotation_line + insert_point, replacement_line)
insert_point += 1
self.lines.insert(annotation_line + insert_point, "\n")
# If not explicit, look at next line to determine the implicit cue
else:
construct_line = annotation_line + 1
construct = CResourceDebloater.get_construct(self.lines[construct_line])
# Process implicit annotation based on construct identified
if construct == "FunctionStructDefinition" or construct == "ElseBranch":
# Function definitions, struct definitions, else branches are simple block removals.
search_line = construct_line
open_brace_counted = False
brace_count = 0
block_end = None
while search_line < len(self.lines):
brace_count += self.lines[search_line].count("{")
if open_brace_counted is False and brace_count > 0:
open_brace_counted = True
brace_count -= self.lines[search_line].count("}")
if open_brace_counted is True and brace_count == 0:
block_end = search_line
break
else:
search_line += 1
if block_end is None:
logging.error("Error finding end of code block annotated on line " + str(annotation_line) +
". Marking location and skipping this annotation.")
self.lines.insert(annotation_line + 1,
"/// Block NOT removed due to lack of termination brace.\n")
else:
while block_end != annotation_line:
self.lines.pop(block_end)
block_end -= 1
self.lines[annotation_line] = "/// Code Block Debloated.\n"
self.lines.insert(annotation_line + 1, " \n")
elif construct == "IfBranch" or construct == "ElseIfBranch":
# Removing an If or an Else If branch entirely can result in inadvertent execution of a following
# else block. To debloat these constructs, the condition check should remain in the source code
# to ensure sound operation of the debloated code. Ultimately, the condition check will more than likely
# be eliminated by the compiler, so modifying the conditions in source is unnecessarily dangerous.
# Unlike function/struct definitions and else branches, only the body of the block is removed here.
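# Illustrative effect (hypothetical C): "if (use_x) { ... }" keeps its "if (use_x) {" line and
# its closing "}" so any later "else" still behaves correctly; only the body is debloated.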
search_line = construct_line
open_brace_counted = False
brace_count = 0
block_end = None
open_brace_line = None
while search_line < len(self.lines):
brace_count += self.lines[search_line].count("{")
if open_brace_counted is False and brace_count > 0:
open_brace_counted = True
open_brace_line = search_line
brace_count -= self.lines[search_line].count("}")
if open_brace_counted is True and brace_count == 0:
block_end = search_line
break
else:
search_line += 1
if block_end is None:
logging.error("Error finding end of code block annotated on line " + str(annotation_line) +
". Marking location and skipping this annotation.")
self.lines.insert(annotation_line + 1,
"/// Block NOT removed due to lack of termination brace.\n")
else:
# Remove the code on the line after the open curly brace and before the closing curly brace.
# Needed in case the braces aren't on their own lines.
# Remove the lines in between the open and close curly braces.
block_end -= 1
open_brace_line += 1
while block_end != open_brace_line:
self.lines.pop(block_end)
block_end -= 1
self.lines[annotation_line] = "/// If / Else If Code Block Debloated.\n"
elif construct == "Case":
# Removing a case statement requires checking for fall through logic:
# If the previous case has a break, the case can be removed.
# If the previous case doesn't have a break, then only the case label can be removed.
# Search backwards from the annotation, see if a break or a case is found first.
search_line = annotation_line - 1
previous_break = None
while search_line >= 0:
if re.search("\sbreak\s*;", " " + self.lines[search_line].strip()) is not None or \
re.search("\sswitch\s*(\s*\S*\s*)\s*{*", " " + self.lines[search_line].strip()) is not None:
previous_break = True
break
elif re.search("case\s\s*\w\w*\s*:\w*", self.lines[search_line].strip()) is not None:
previous_break = False
break
else:
search_line -= 1
# Log an error and skip if switch statement behavior cannot be determined
if previous_break is None:
logging.error("Error finding previous case or switch for case on line " + str(annotation_line) +
". Marking location and skipping this annotation.")
self.lines.insert(annotation_line + 1,
"/// Case NOT removed due to lack of switch or previous case.\n")
# If previous case has fall through logic, only the case label can be deleted.
elif previous_break is False:
self.lines[annotation_line] = "/// Case Label Debloated.\n"
self.lines[construct_line] = "\n"
# If the previous case does not have fall through logic, then search for next break, case, or default
elif previous_break is True:
case_end = None
search_line = construct_line + 1
brace_count = self.lines[construct_line].count("{")
while search_line < len(self.lines):
brace_count += self.lines[search_line].count("{")
brace_count -= self.lines[search_line].count("}")
if re.search("case\s\s*\w\w*\s*:\w*", self.lines[search_line].strip()) is not None or \
re.search("default\s\s*\w\w*\s*:\w*", self.lines[search_line].strip()) is not None or \
brace_count < 0:
case_end = search_line - 1
# Check that the line before the next case (or default) isn't a debloating annotation.
if self.lines[case_end].find(self.annotation_sequence) > -1:
case_end -= 1
break
elif re.search("\sbreak\s*;", " " + self.lines[search_line].strip()) is not None:
case_end = search_line
break
else:
search_line += 1
if case_end is None:
logging.error("No end of switch block found for case annotation on line " + str(annotation_line)
+ ". Marking location and skipping this annotation.")
self.lines.insert(annotation_line + 1,
"/// Case block NOT removed due to failure to identify end of block.\n")
else:
while case_end != annotation_line:
self.lines.pop(case_end)
case_end -= 1
self.lines[annotation_line] = "/// Case Block Debloated.\n"
self.lines.insert(annotation_line + 1, " \n")
elif construct == "Statement":
self.lines[annotation_line] = "/// Statement Debloated.\n"
self.lines[construct_line] = "\n"
else:
# Log error and exit
logging.error("Unexpected construct encountered when processing implicit annotation. Exiting.")
sys.exit("Unexpected construct encountered when processing implicit annotation. Exiting.")
def debloat(self):
"""
Iterates through the file and debloats the selected features subject to dependency constraints
:return: None
"""
logging.info("Beginning debloating pass on " + self.location)
# Search the source code for debloater annotations, and process them.
current_line = 0
while current_line < len(self.lines):
if self.lines[current_line].find(self.annotation_sequence) > -1:
logging.info("Annotation found on line " + str(current_line))
feature_set = CResourceDebloater.get_features(self.lines[current_line])
if self.target_features.issuperset(feature_set):
logging.info("Processing annotation found on line " + str(current_line))
self.process_annotation(current_line)
current_line += 1
| 47.949555
| 120
| 0.560802
|
579c3b1381100a4e796e5b4b361fcbd15cd20f53
| 4,238
|
py
|
Python
|
google/cloud/bigquery_v2/types/standard_sql.py
|
jaykdoe/python-bigquery
|
02b13b60647b979d6e527ce723971da77cff059d
|
[
"Apache-2.0"
] | 1
|
2022-03-02T20:40:03.000Z
|
2022-03-02T20:40:03.000Z
|
google/cloud/bigquery_v2/types/standard_sql.py
|
jaykdoe/python-bigquery
|
02b13b60647b979d6e527ce723971da77cff059d
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery_v2/types/standard_sql.py
|
jaykdoe/python-bigquery
|
02b13b60647b979d6e527ce723971da77cff059d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.v2",
manifest={
"StandardSqlDataType",
"StandardSqlField",
"StandardSqlStructType",
"StandardSqlTableType",
},
)
class StandardSqlDataType(proto.Message):
r"""The type of a variable, e.g., a function argument. Examples: INT64:
{type_kind="INT64"} ARRAY<STRING>: {type_kind="ARRAY",
array_element_type="STRING"} STRUCT<x STRING, y ARRAY<DATE>>:
{type_kind="STRUCT", struct_type={fields=[ {name="x",
type={type_kind="STRING"}}, {name="y", type={type_kind="ARRAY",
array_element_type="DATE"}} ]}}
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
type_kind (google.cloud.bigquery_v2.types.StandardSqlDataType.TypeKind):
Required. The top level type of this field.
Can be any standard SQL data type (e.g.,
"INT64", "DATE", "ARRAY").
array_element_type (google.cloud.bigquery_v2.types.StandardSqlDataType):
The type of the array's elements, if type_kind = "ARRAY".
This field is a member of `oneof`_ ``sub_type``.
struct_type (google.cloud.bigquery_v2.types.StandardSqlStructType):
The fields of this struct, in order, if type_kind =
"STRUCT".
This field is a member of `oneof`_ ``sub_type``.
"""
class TypeKind(proto.Enum):
r""""""
TYPE_KIND_UNSPECIFIED = 0
INT64 = 2
BOOL = 5
FLOAT64 = 7
STRING = 8
BYTES = 9
TIMESTAMP = 19
DATE = 10
TIME = 20
DATETIME = 21
INTERVAL = 26
GEOGRAPHY = 22
NUMERIC = 23
BIGNUMERIC = 24
JSON = 25
ARRAY = 16
STRUCT = 17
type_kind = proto.Field(proto.ENUM, number=1, enum=TypeKind,)
array_element_type = proto.Field(
proto.MESSAGE, number=2, oneof="sub_type", message="StandardSqlDataType",
)
struct_type = proto.Field(
proto.MESSAGE, number=3, oneof="sub_type", message="StandardSqlStructType",
)
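# A minimal usage sketch (this helper is hypothetical and not part of the generated API):
# it builds the ARRAY<STRING> example from the StandardSqlDataType docstring above.
def _example_array_of_string_type():
    return StandardSqlDataType(
        type_kind=StandardSqlDataType.TypeKind.ARRAY,
        array_element_type=StandardSqlDataType(
            type_kind=StandardSqlDataType.TypeKind.STRING,
        ),
    )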
class StandardSqlField(proto.Message):
r"""A field or a column.
Attributes:
name (str):
Optional. The name of this field. Can be
absent for struct fields.
type (google.cloud.bigquery_v2.types.StandardSqlDataType):
Optional. The type of this parameter. Absent
if not explicitly specified (e.g., CREATE
FUNCTION statement can omit the return type; in
this case the output parameter does not have
this "type" field).
"""
name = proto.Field(proto.STRING, number=1,)
type = proto.Field(proto.MESSAGE, number=2, message="StandardSqlDataType",)
class StandardSqlStructType(proto.Message):
r"""
Attributes:
fields (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
"""
fields = proto.RepeatedField(proto.MESSAGE, number=1, message="StandardSqlField",)
class StandardSqlTableType(proto.Message):
r"""A table type
Attributes:
columns (Sequence[google.cloud.bigquery_v2.types.StandardSqlField]):
The columns in this table type
"""
columns = proto.RepeatedField(proto.MESSAGE, number=1, message="StandardSqlField",)
__all__ = tuple(sorted(__protobuf__.manifest))
| 32.106061
| 110
| 0.65243
|
9d6c4c923d783e73e51a0482b4b561e00b03b3ad
| 1,726
|
py
|
Python
|
Incident-Response/Tools/cyphon/cyphon/cyphon/tests/test_fieldsets.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/cyphon/cyphon/cyphon/tests/test_fieldsets.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/cyphon/cyphon/cyphon/tests/test_fieldsets.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
"""
# standard library
from unittest import TestCase
# local
from cyphon.fieldsets import QueryFieldset
class QueryFieldsetTestCase(TestCase):
"""
Tests the QueryFieldset class.
"""
fieldset = QueryFieldset(
field_name='foo',
field_type='CharField',
operator='eq',
value='foobar'
)
def test_str(self):
"""
Tests the __str__ method on a QueryFieldset.
"""
actual = str(self.fieldset)
expected = ("QueryFieldset: {'field_name': 'foo', 'field_type': "
"'CharField', 'operator': 'eq', 'value': 'foobar'}")
self.assertEqual(actual, expected)
def test_vars(self):
"""
Tests the outcome of the vars() function on a QueryFieldset.
"""
actual = vars(self.fieldset)
expected = {
'field_name': 'foo',
'field_type': 'CharField',
'operator': 'eq',
'value': 'foobar'
}
self.assertEqual(actual, expected)
| 28.766667
| 73
| 0.633256
|
c7d5b565c1b433e63948976955add10efcdd89a8
| 2,261
|
py
|
Python
|
localstack/services/sts/provider.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/sts/provider.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/sts/provider.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
import xmltodict
from localstack import config
from localstack.aws.api import RequestContext
from localstack.aws.api.sts import GetCallerIdentityResponse, StsApi
from localstack.aws.proxy import AwsApiListener
from localstack.constants import APPLICATION_JSON
from localstack.http import Request, Response
from localstack.services.moto import MotoFallbackDispatcher, call_moto
from localstack.services.plugins import ServiceLifecycleHook
from localstack.utils.strings import to_str
from localstack.utils.time import parse_timestamp
from localstack.utils.xml import strip_xmlns
LOG = logging.getLogger(__name__)
class StsProvider(StsApi, ServiceLifecycleHook):
def get_caller_identity(self, context: RequestContext) -> GetCallerIdentityResponse:
result = call_moto(context)
username = config.TEST_IAM_USER_NAME or "localstack"
result["Arn"] = result["Arn"].replace("user/moto", f"user/{username}")
if config.TEST_IAM_USER_ID:
result["UserId"] = config.TEST_IAM_USER_ID
return result
class StsAwsApiListener(AwsApiListener):
def __init__(self):
self.provider = StsProvider()
super().__init__("sts", MotoFallbackDispatcher(self.provider))
def request(self, request: Request) -> Response:
response = super().request(request)
if request.headers.get("Accept") == APPLICATION_JSON:
# convert "Expiration" to int for JSON response format (tested against AWS)
# TODO: introduce a proper/generic approach that works across arbitrary date fields in JSON
def _replace(match):
timestamp = parse_timestamp(match.group(1).strip())
return f"<Expiration>{int(timestamp.timestamp())}</Expiration>"
def _replace_response_content(_pattern, _replacement):
content = to_str(response.data or "")
data = re.sub(_pattern, _replacement, content)
content = xmltodict.parse(data)
stripped_content = strip_xmlns(content)
response.set_json(stripped_content)
pattern = r"<Expiration>([^<]+)</Expiration>"
_replace_response_content(pattern, _replace)
return response
| 38.982759
| 103
| 0.707651
|
1a6da605b44d04e4dd83a5427b233bb9f05e007b
| 1,723
|
py
|
Python
|
antNRE/src/seq_encoder.py
|
yanqiuxia/AntNRE
|
cd13446589c0f096cf3e8165ad3efa8e77466232
|
[
"Apache-2.0"
] | 42
|
2019-03-04T04:36:49.000Z
|
2022-03-27T23:15:13.000Z
|
antNRE/src/seq_encoder.py
|
yanqiuxia/AntNRE
|
cd13446589c0f096cf3e8165ad3efa8e77466232
|
[
"Apache-2.0"
] | 2
|
2019-07-19T03:47:24.000Z
|
2020-11-30T10:46:40.000Z
|
antNRE/src/seq_encoder.py
|
yanqiuxia/AntNRE
|
cd13446589c0f096cf3e8165ad3efa8e77466232
|
[
"Apache-2.0"
] | 11
|
2019-05-06T06:34:10.000Z
|
2021-09-14T14:06:53.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 18/10/10 21:30:45
@author: Changzhi Sun
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Dict, Any, Optional
from antNRE.src.word_encoder import WordCharEncoder
class BiLSTMEncoder(nn.Module):
def __init__(self,
word_encoder_size: int,
hidden_size: int,
num_layers: int = 1,
bidirectional: bool = True,
dropout: float = 0.5) -> None:
super(BiLSTMEncoder, self).__init__()
self.word_encoder_size = word_encoder_size
self.hidden_size = hidden_size
self.bilstm = nn.LSTM(word_encoder_size,
hidden_size // 2,
num_layers=num_layers,
bidirectional=bidirectional,
batch_first=True,
dropout=dropout)
self.dropout = nn.Dropout(dropout)
def forward(self,
batch_seq_encoder_input: torch.Tensor,
batch_seq_len: List) -> torch.Tensor:
batch_size, seq_size, word_encoder_size = batch_seq_encoder_input.size()
assert word_encoder_size == self.word_encoder_size
batch_seq_encoder_input_pack = nn.utils.rnn.pack_padded_sequence(
batch_seq_encoder_input,
batch_seq_len,
batch_first=True)
batch_seq_encoder_output, _ = self.bilstm(batch_seq_encoder_input_pack)
batch_seq_encoder_output, _ = nn.utils.rnn.pad_packed_sequence(
batch_seq_encoder_output, batch_first=True)
return self.dropout(batch_seq_encoder_output)
| 35.163265
| 81
| 0.612304
|
0f8f012e5b43d4a4236d6f208507118d9e7fef89
| 13,277
|
py
|
Python
|
qcodes/data/format.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | 1
|
2019-10-12T04:54:30.000Z
|
2019-10-12T04:54:30.000Z
|
qcodes/data/format.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | null | null | null |
qcodes/data/format.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from traceback import format_exc
from operator import attrgetter
import logging
log = logging.getLogger(__name__)
class Formatter:
"""
Data file formatters
Formatters translate between DataSets and data files.
Each Formatter is expected to implement writing methods:
- ``write``: to write the ``DataArrays``
- ``write_metadata``: to write the metadata structure
Optionally, if this Formatter keeps the data file(s) open
between write calls, it may implement:
- ``close_file``: to perform any final cleanup and release the
file and any other resources.
and reading methods:
- ``read`` or ``read_one_file`` to reconstruct the ``DataArrays``, either
all at once (``read``) or one file at a time, supplied by the base class
``read`` method that loops over all data files at the correct location.
- ``read_metadata``: to reload saved metadata. If a subclass overrides
``read``, this method should call ``read_metadata``, but keep it also
as a separate method because it occasionally gets called independently.
All of these methods accept a ``data_set`` argument, which should be a
``DataSet`` object. Even if you are loading a new data set from disk, this
object should already have attributes:
- io: an IO manager (see qcodes.data.io)
- location: a string, like a file path, that identifies the DataSet and
tells the IO manager where to store it
- arrays: a dict of ``{array_id:DataArray}`` to read into.
- read will create entries that don't yet exist.
- write will write ALL DataArrays in the DataSet, using
last_saved_index and modified_range, as well as whether or not
it found the specified file, to determine how much to write.
"""
ArrayGroup = namedtuple('ArrayGroup', 'shape set_arrays data name')
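# A minimal subclass sketch (hypothetical, for illustration only; not a formatter shipped with
# QCoDeS):
#
#     class NullFormatter(Formatter):
#         def write(self, data_set, io_manager, location, write_metadata=True,
#                   force_write=False, only_complete=True):
#             pass  # discard all data
#
#         def write_metadata(self, data_set, io_manager, location, read_first=True):
#             pass
#
#         def read_metadata(self, data_set):
#             pass
#
#         def read_one_file(self, data_set, f, ids_read):
#             raise ValueError('NullFormatter stores nothing, so there is nothing to read')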
def write(self, data_set, io_manager, location, write_metadata=True,
force_write=False, only_complete=True):
"""
Write the DataSet to storage.
Subclasses must override this method.
It is up to the Formatter to decide when to overwrite completely,
and when to just append or otherwise update the file(s).
Args:
data_set (DataSet): the data we are writing.
io_manager (io_manager): base physical location to write to.
location (str): the file location within the io_manager.
write_metadata (bool): if True, then the metadata is written to disk
force_write (bool): if True, then the data is written to disk
only_complete (bool): Used only by the gnuplot formatter's
overridden version of this method
"""
raise NotImplementedError
def read(self, data_set):
"""
Read the entire ``DataSet``.
Find all files matching ``data_set.location`` (using io_manager.list)
and call ``read_one_file`` on each. Subclasses may either override
this method (if they use only one file or want to do their own
searching) or override ``read_one_file`` to use the search and
initialization functionality defined here.
Args:
data_set (DataSet): the data to read into. Should already have
attributes ``io`` (an io manager), ``location`` (string),
and ``arrays`` (dict of ``{array_id: array}``, can be empty
or can already have some or all of the arrays present, they
expect to be overwritten)
"""
io_manager = data_set.io
location = data_set.location
data_files = io_manager.list(location)
if not data_files:
raise IOError('no data found at ' + location)
# in case the DataArrays exist but haven't been initialized
for array in data_set.arrays.values():
if array.ndarray is None:
array.init_data()
self.read_metadata(data_set)
ids_read = set()
for fn in data_files:
with io_manager.open(fn, 'r') as f:
try:
self.read_one_file(data_set, f, ids_read)
except ValueError:
log.warning('error reading file ' + fn)
log.warning(format_exc())
def write_metadata(self, data_set, io_manager, location, read_first=True):
"""
Write the metadata for this DataSet to storage.
Subclasses must override this method.
Args:
data_set (DataSet): the data we are writing.
io_manager (io_manager): base physical location to write to.
location (str): the file location within the io_manager.
read_first (bool, optional): whether to first look for previously
saved metadata that may contain more information than the local
copy.
"""
raise NotImplementedError
def read_metadata(self, data_set):
"""
Read the metadata from this DataSet from storage.
Subclasses must override this method.
Args:
data_set (DataSet): the data to read metadata into
"""
raise NotImplementedError
def read_one_file(self, data_set, f, ids_read):
"""
Read data from a single file into a ``DataSet``.
Formatter subclasses that break a DataSet into multiple data files may
choose to override either this method, which handles one file at a
time, or ``read`` which finds matching files on its own.
Args:
data_set (DataSet): the data we are reading into.
f (file-like): a file-like object to read from, as provided by
``io_manager.open``.
ids_read (set): ``array_ids`` that we have already read.
When you read an array, check that it's not in this set (except
setpoints, which can be in several files with different inner
loops) then add it to the set so other files know it should not
be read again.
Raises:
ValueError: if a duplicate array_id of measured data is found
"""
raise NotImplementedError
def match_save_range(self, group, file_exists, only_complete=True):
"""
Find the save range that will joins all changes in an array group.
Matches all full-sized arrays: the data arrays plus the inner loop
setpoint array.
Note: if an outer loop has changed values (without the inner
loop or measured data changing) we won't notice it here. We assume
that before an iteration of the inner loop starts, the outer loop
setpoint gets set and then does not change later.
Args:
group (Formatter.ArrayGroup): a ``namedtuple`` containing the
arrays that go together in one file, as tuple ``group.data``.
file_exists (bool): Does this file already exist? If True, and
all arrays in the group agree on ``last_saved_index``, we
assume the file has been written up to this index and we can
append to it. Otherwise we will set the returned range to start
from zero (so if the file does exist, it gets completely
overwritten).
only_complete (bool): Should we write all available new data,
or only complete rows? If True, we write only the range of
array indices which all arrays in the group list as modified,
so that future writes will be able to do a clean append to
the data file as more data arrives.
Default True.
Returns:
Tuple(int, int): the first and last raveled indices that should
be saved. Returns None if:
* no data is present
* no new data can be found
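For example (hypothetical indices): if the file exists, every array in the group has
``last_saved_index == 9``, and each reports ``modified_range == (10, 19)``, the incremental
path below returns ``(10, 19)``, so only the new rows are appended to the file.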
"""
inner_setpoint = group.set_arrays[-1]
full_dim_data = (inner_setpoint, ) + group.data
# always return None if there are no modifications,
# even if there are last_saved_index inconsistencies
# so we don't do extra writing just to reshape the file
for array in full_dim_data:
if array.modified_range:
break
else:
return None
last_saved_index = inner_setpoint.last_saved_index
if last_saved_index is None or not file_exists:
if last_saved_index is None and file_exists:
log.warning("Inconsistent file information. "
"last_save_index is None but file exists. "
"Will overwrite")
if last_saved_index is not None and not file_exists:
log.warning("Inconsistent file information. "
"last_save_index is not None but file does not "
"exist. Will rewrite from scratch")
return self._match_save_range_whole_file(
full_dim_data, only_complete)
# force overwrite if inconsistent last_saved_index
for array in group.data:
if array.last_saved_index != last_saved_index:
return self._match_save_range_whole_file(
full_dim_data, only_complete)
return self._match_save_range_incremental(
full_dim_data, last_saved_index, only_complete)
@staticmethod
def _match_save_range_whole_file(arrays, only_complete):
max_save = None
agg = (min if only_complete else max)
for array in arrays:
array_max = array.last_saved_index
if array_max is None:
array_max = -1
mr = array.modified_range
if mr:
array_max = max(array_max, mr[1])
max_save = (array_max if max_save is None else
agg(max_save, array_max))
if max_save >= 0:
return (0, max_save)
else:
return None
@staticmethod
def _match_save_range_incremental(arrays, last_saved_index, only_complete):
mod_ranges = []
for array in arrays:
mr = array.modified_range
if not mr:
if only_complete:
return None
else:
continue
mod_ranges.append(mr)
mod_range = mod_ranges[0]
agg = (min if only_complete else max)
for mr in mod_ranges[1:]:
mod_range = (min(mod_range[0], mr[0]),
agg(mod_range[1], mr[1]))
if last_saved_index >= mod_range[1]:
return (0, last_saved_index)
elif last_saved_index >= mod_range[0]:
return (0, mod_range[1])
else:
return (last_saved_index + 1, mod_range[1])
def group_arrays(self, arrays):
"""
Find the sets of arrays which share all the same setpoint arrays.
Some Formatters use this grouping to determine which arrays to save
together in one file.
Args:
arrays (Dict[DataArray]): all the arrays in a DataSet
Returns:
List[Formatter.ArrayGroup]: namedtuples giving:
- shape (Tuple[int]): dimensions as in numpy
- set_arrays (Tuple[DataArray]): the setpoints of this group
- data (Tuple[DataArray]): measured arrays in this group
- name (str): a unique name of this group, obtained by joining
the setpoint array ids.
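For example (hypothetical array_ids): a 1D array measured over setpoints ``(x,)`` and a 2D
array measured over ``(x, y)`` end up in two separate groups, named ``'x'`` and ``'x_y'``.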
"""
set_array_sets = tuple(set(array.set_arrays
for array in arrays.values()))
all_set_arrays = set()
for set_array_set in set_array_sets:
all_set_arrays.update(set_array_set)
grouped_data = [[] for _ in set_array_sets]
for array in arrays.values():
i = set_array_sets.index(array.set_arrays)
if array not in all_set_arrays: # array.set_arrays[-1] != array:
# don't include the setpoint array itself in the data
grouped_data[i].append(array)
out = []
id_getter = attrgetter('array_id')
for set_arrays, data in zip(set_array_sets, grouped_data):
leni = len(set_arrays)
if not data and any(1 for other_set_arrays in set_array_sets if
len(other_set_arrays) > leni and
other_set_arrays[:leni] == set_arrays):
# this is an outer loop that doesn't have any data of its own,
# so skip it.
# Inner-loop setpoints with no data is weird (we set values
# but didn't measure anything there?) but we should keep it.
continue
group_name = '_'.join(sai.array_id for sai in set_arrays)
out.append(self.ArrayGroup(shape=set_arrays[-1].shape,
set_arrays=set_arrays,
data=tuple(sorted(data, key=id_getter)),
name=group_name))
return out
| 39.990964
| 80
| 0.602621
|
dda56420630e25eddf97c9703b57bbc85d848f37
| 2,078
|
py
|
Python
|
VideoMix/makeNumberedVideos.py
|
ctralie/SlidingWindowVideoTDA
|
d707a0c4727e068778d5c805f938556c91d6f1ce
|
[
"Apache-2.0"
] | 6
|
2017-05-09T12:21:04.000Z
|
2021-07-29T10:14:23.000Z
|
VideoMix/makeNumberedVideos.py
|
ctralie/SlidingWindowVideoTDA
|
d707a0c4727e068778d5c805f938556c91d6f1ce
|
[
"Apache-2.0"
] | null | null | null |
VideoMix/makeNumberedVideos.py
|
ctralie/SlidingWindowVideoTDA
|
d707a0c4727e068778d5c805f938556c91d6f1ce
|
[
"Apache-2.0"
] | 4
|
2017-05-23T07:00:33.000Z
|
2021-05-11T11:32:36.000Z
|
import sys
sys.path.append("../")
from VideoTools import *
import subprocess
import os
import scipy.misc
MAXHEIGHT = 160
MINWIDTH = 120
def saveVideoID(I, IDims, fileprefix, ID, FrameRate = 30, NumberFrames = 30):
N = I.shape[0]
print(I.shape)
if I.shape[0] > FrameRate*5:
I = I[0:FrameRate*5, :]
N = I.shape[0]
frame = np.array([])
print("IDims = ", IDims)
for i in range(N):
frame = np.reshape(I[i, :], IDims)
frame[frame < 0] = 0
frame[frame > 1] = 1
if IDims[0] > MAXHEIGHT:
fac1 = MAXHEIGHT/float(IDims[0])
fac2 = MINWIDTH/float(IDims[1])
fac = max(fac1, fac2)
if i == 0:
print("Resizing by %g"%fac)
frame = scipy.misc.imresize(frame, fac)
mpimage.imsave("%s%i.png"%(TEMP_STR, i+1), frame)
PS = 60
if frame.shape[1] > MINWIDTH*1.5:
PS = int(30.0*frame.shape[1]/MINWIDTH)
for i in range(NumberFrames):
command = ["convert", "%s%i.png"%(TEMP_STR, N), "-fill", "red", "-pointsize", "%i"%PS, "-draw", 'text 20,60 %s%.3i%s'%("'", ID, "'"), "%s%i.png"%(TEMP_STR, N+i+1)]
print(command)
subprocess.call(command)
print(N + i + 1)
#Convert to video using avconv
for t in ["avi", "webm", "ogg"]:
filename = "%s.%s"%(fileprefix, t)
#Overwrite by default
if os.path.exists(filename):
os.remove(filename)
command = [AVCONV_BIN,
'-r', "%i"%FrameRate,
'-i', TEMP_STR + '%d.png',
'-r', "%i"%FrameRate,
'-b', '30000k',
filename]
subprocess.call(command)
#Clean up
for i in range(N+NumberFrames):
os.remove("%s%i.png"%(TEMP_STR, i+1))
np.random.seed(100)
IDs = np.random.permutation(999)
i = 0
Videos = ["OrigVideos/%s"%v for v in os.listdir("OrigVideos")]
for V in Videos:
print("Saving %s..."%V)
(I, IDims) = loadVideo(V)
saveVideoID(I, IDims, "NumberedVideos/%i"%i, IDs[i])
i = i + 1
| 31.014925
| 171
| 0.525987
|
1c32c6ccaac73c2b146765c6a92a1e99e7b49e0f
| 4,484
|
py
|
Python
|
windcraft/text.py
|
marchdf/windcraft
|
9334864171f10c7a2fa1f0da71938551cc45465a
|
[
"Apache-2.0"
] | null | null | null |
windcraft/text.py
|
marchdf/windcraft
|
9334864171f10c7a2fa1f0da71938551cc45465a
|
[
"Apache-2.0"
] | 2
|
2018-03-12T23:41:26.000Z
|
2018-03-16T16:19:28.000Z
|
windcraft/text.py
|
marchdf/windcraft
|
9334864171f10c7a2fa1f0da71938551cc45465a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 National Renewable Energy Laboratory. This software
# is released under the license detailed in the file, LICENSE, which
# is located in the top-level directory structure.
# ========================================================================
#
# Imports
#
# ========================================================================
import pygame
import windcraft.colors as colors
import windcraft.fonts as fonts
# ========================================================================
#
# Class definitions
#
# ========================================================================
class Text():
"""This class displays the turbine text."""
def __init__(self):
"""Constructor for Text."""
self.colors = colors.Colors()
self.fonts = fonts.Fonts()
self.yoffset = 0.03 * pygame.display.get_surface().get_height()
def display(self, screen, num_turbines, max_turbines, power):
"""Display the text on the screen.
:param screen: pygame screen
:type screen: screen
:param num_turbines: number of turbines in farm
:type num_turbines: int
:param max_turbines: maximum number of turbines in farm
:type max_turbines: int
:param power: power generated by the farm
:type power: float
"""
# Title
xstart = 0.5 * pygame.display.get_surface().get_width()
ystart = 0.04 * pygame.display.get_surface().get_height()
text = self.fonts.types['large'].render(
"Build turbines to maximize power!", True, self.colors.black)
textpos = text.get_rect(centerx=xstart,
top=ystart)
screen.blit(text, textpos)
# # Counter
# xstart = 0.01 * pygame.display.get_surface().get_width()
# ystart = 0.12 * pygame.display.get_surface().get_height()
# text = self.fonts.types['medium'].render(
# "Turbines in farm: {0:d}".format(num_turbines), True, self.colors.black)
# textpos = text.get_rect(left=xstart,
# top=ystart)
# screen.blit(text, textpos)
# if num_turbines >= max_turbines:
# text = self.fonts.types['medium'].render("All turbines placed!",
# True,
# self.colors.black)
# screen.blit(text, [textpos[0], textpos[1] + self.yoffset])
# Power
xstart = 0.5 * pygame.display.get_surface().get_width()
ystart = 0.15 * pygame.display.get_surface().get_height()
scaling_factor = 100
text = self.fonts.types['medium'].render(
"Power produced: {0:.2f} kW".format(scaling_factor * power),
True,
self.colors.black)
textpos = text.get_rect(centerx=xstart,
top=ystart)
screen.blit(text, textpos)
# Instructions
xstart = 0.97 * pygame.display.get_surface().get_width()
ystart = 0.9 * pygame.display.get_surface().get_height()
text = self.fonts.types['medium'].render("[t] to toggle",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, textpos)
text = self.fonts.types['medium'].render("[u] to undo ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + self.yoffset])
text = self.fonts.types['medium'].render("[r] to reset ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + 2 * self.yoffset])
text = self.fonts.types['medium'].render("[q] to quit ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + 3 * self.yoffset])
| 41.906542
| 86
| 0.48595
|