| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
import optparse
from melange import ipv4
from melange import mac
from melange import version
from melange.common import config
from melange.db import db_api
from melange.db.sqlalchemy import session
def upgrade(migrate_engine):
_db_connect()
interface = session.get_session().execute(
"SELECT COUNT(1) as count FROM interfaces "
"WHERE device_id NOT REGEXP '.*-.*' AND device_id IS NOT NULL")
print(interface)
if interface.fetchone().count > 0:
print """
---------------------------------------------------------
You have instance IDs stored in your interfaces table. You need to run this
migration with a connection url for your Nova database. It will extract the
proper UUIDs from the Nova DB and update this table. Using devstack this would
look like:
$ python melange/db/sqlalchemy/migrate_repo/versions/002_device_id_to_uuid.py\\
-vd --config-file=/opt/stack/melange/etc/melange/melange.conf\\
mysql://root:password@localhost/nova
---------------------------------------------------------
"""
def downgrade(migrate_engine):
pass
def _db_connect():
# If you really need to do another migration before all of this goes into
# quantum, and you need to access the DB, this is what you need:
oparser = optparse.OptionParser(version="%%prog %s"
% version.version_string())
create_options(oparser)
(options, args) = config.parse_options(oparser)
conf, app = config.Config.load_paste_app('melange', options, args)
db_api.configure_db(conf, ipv4.plugin(), mac.plugin())
def create_options(parser):
"""Sets up the CLI and config-file options.
:param parser: The option parser
:returns: None
"""
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int, default=9898,
help="Port the Melange API host listens on. "
"Default: %default")
config.add_common_options(parser)
config.add_log_options(parser)
if __name__ == '__main__':
import gettext
import optparse
import os
import sys
gettext.install('melange', unicode=1)
possible_topdir = os.path.normpath(os.path.join(
os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir,
os.pardir,
os.pardir,
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'melange', '__init__.py')):
sys.path.insert(0, possible_topdir)
from melange import ipv4
from melange import mac
from melange.common import config
from melange.db import db_api
from melange.ipam import models
from melange.db.sqlalchemy import session
from melange.openstack.common import config as openstack_config
oparser = optparse.OptionParser()
openstack_config.add_common_options(oparser)
openstack_config.add_log_options(oparser)
(options, args) = openstack_config.parse_options(oparser)
if len(args) < 1:
sys.exit("Please include the connection string for the nova DB")
try:
conf = config.load_app_environment(optparse.OptionParser())
db_api.configure_db(conf, ipv4.plugin(), mac.plugin())
nova_engine = session._create_engine({'sql_connection': args[0]})
instances = nova_engine.execute("select id,uuid from instances")
melange_session = session.get_session()
print "-----"
for instance in instances:
print "updating %(id)s with %(uuid)s" % instance
session._ENGINE.execute("update interfaces set "
"device_id='%(uuid)s' "
"where device_id=%(id)s" % instance)
except RuntimeError as error:
sys.exit("ERROR: %s" % error)
|
{
"content_hash": "ba1a957fc2fdf39d9cc630288ae2d679",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 35.982142857142854,
"alnum_prop": 0.5885856079404467,
"repo_name": "openstack-attic/melange",
"id": "069318d01764e06615fc841822d92af918e00fe9",
"size": "4683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "melange/db/sqlalchemy/migrate_repo/versions/002_device_id_to_uuid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11031"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "606148"
},
{
"name": "Shell",
"bytes": "5182"
}
],
"symlink_target": ""
}
|
from django.urls import path
urlpatterns = [
path("posts/(<int:pk>/", lambda **kwargs: "", name="post-detail"),
path(
"hasprimaryslug/<int:pk>/",
lambda **kwargs: "",
name="hasprimaryslug-detail",
),
path(
"hasprimaryuuid/<int:pk>/",
lambda **kwargs: "",
name="hasprimaryuuid-detail",
),
]
|
{
"content_hash": "f9f3b7d1eaa11b7639beed1ab226c3e0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 22.5,
"alnum_prop": 0.5361111111111111,
"repo_name": "PetrDlouhy/django-admin-smoke-tests",
"id": "fe48d911c1627fb49fa9a5fd29dca9f55bf192b7",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/main/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23104"
}
],
"symlink_target": ""
}
|
"""Docker related utilities."""
import logging
import os
import sys
import tarfile
import tempfile
import time
import uuid
import warnings
from . import gcp
from . import machine_config
import docker
from google.cloud import storage
from google.cloud.exceptions import NotFound
from googleapiclient import discovery
from googleapiclient import errors
import requests
from ..utils import google_api_client
from ..utils import tf_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def generate_image_uri():
"""Returns unique name+tag for a Docker image."""
# Keeping this name format uniform with the job id.
unique_tag = str(uuid.uuid4()).replace("-", "_")
docker_registry = "gcr.io/{}".format(gcp.get_project_name())
return "{}/{}:{}".format(docker_registry, "tf_cloud_train", unique_tag)
class ContainerBuilder(object):
"""Container builder for building and pushing a Docker image."""
def __init__(
self,
entry_point,
preprocessed_entry_point,
chief_config,
worker_config,
requirements_txt=None,
destination_dir="/app/",
docker_config=None,
called_from_notebook=False,
):
"""Constructor.
Args:
entry_point: Optional string. File path to the python file or
iPython notebook that contains the TensorFlow code.
Note) This path must be in the current working directory tree.
Example) 'train.py', 'training/mnist.py', 'mnist.ipynb'
If `entry_point` is not provided, then
- If you are in an iPython notebook environment, then the
current notebook is taken as the `entry_point`.
- Otherwise, the current python script is taken as the
`entry_point`.
preprocessed_entry_point: Optional `preprocessed_entry_point`
file path.
chief_config: `MachineConfig` that represents the configuration for
the chief worker in a distribution cluster.
worker_config: `MachineConfig` that represents the configuration
for the workers in a distribution cluster.
requirements_txt: Optional string. File path to requirements.txt
file containing additional pip dependencies, if any.
destination_dir: Optional working directory in the Docker container
filesystem.
docker_config: Optional Docker configuration.
called_from_notebook: Optional boolean which indicates whether run
has been called in a notebook environment.
"""
self.entry_point = entry_point
self.preprocessed_entry_point = preprocessed_entry_point
self.chief_config = chief_config
self.worker_config = worker_config
self.requirements_txt = requirements_txt
self.destination_dir = destination_dir
self.docker_config = docker_config
self.called_from_notebook = called_from_notebook
self.project_id = gcp.get_project_name()
# Those will be populated lazily.
self.tar_file_path = None
self.docker_client = None
self.tar_file_descriptor = None
self.docker_file_descriptor = None
def get_docker_image(
self, max_status_check_attempts=None, delay_between_status_checks=None
):
"""Builds, publishes and returns a Docker image.
Args:
max_status_check_attempts: Maximum number of times allowed to check
build status. Applicable only to cloud container builder.
delay_between_status_checks: Time in seconds to wait between status
checks.
"""
raise NotImplementedError
def get_generated_files(self, return_descriptors=False):
"""Get generated file paths and/or descriptors for generated files.
Args:
return_descriptors: Whether to return descriptors as well.
Returns:
Docker and tar file paths. Depending on return_descriptors, possibly
their file descriptors as well.
"""
if return_descriptors:
return [
(self.docker_file_path, self.docker_file_descriptor),
(self.tar_file_path, self.tar_file_descriptor)
]
else:
return [self.docker_file_path, self.tar_file_path]
def _get_tar_file_path(self):
"""Packages files into a tarball."""
self._create_docker_file()
file_path_map = self._get_file_path_map()
self.tar_file_descriptor, self.tar_file_path = tempfile.mkstemp()
with tarfile.open(self.tar_file_path, "w:gz", dereference=True) as tar:
for source, destination in file_path_map.items():
tar.add(source, arcname=destination)
def _get_docker_base_image(self):
"""Returns the docker image to be used as the default base image."""
# If in a Kaggle environment, use the `KAGGLE_DOCKER_IMAGE` as the base
# image.
img = os.getenv("KAGGLE_DOCKER_IMAGE")
if img:
return img
tf_version = tf_utils.get_version()
if tf_version is not None:
# Updating the name for RC's to match with the TF generated
# RC Docker image names.
tf_version = tf_version.replace("-rc", "rc")
# Get the TF Docker parent image to use based on the current
# TF version.
img = "tensorflow/tensorflow:{}".format(tf_version)
if (self.chief_config.accelerator_type !=
machine_config.AcceleratorType.NO_ACCELERATOR):
img += "-gpu"
# Add python 3 tag for TF version <= 2.1.0
# https://hub.docker.com/r/tensorflow/tensorflow
v = tf_version.split(".")
if float(v[0] + "." + v[1]) <= 2.1:
img += "-py3"
# Use the latest TF docker image if a local installation is not
# available or if the docker image corresponding to the `tf_version`
# does not exist.
if not (img and self._image_exists(img)):
warnings.warn(
"TF Cloud `run` API uses docker, with a TF parent image "
"matching your local TF version, for containerizing your "
"code. A TF Docker image does not exist for the TF version "
"you are using: {}"
"We are replacing this with the latest stable TF Docker "
"image available: `tensorflow/tensorflow:latest`"
"Please see "
"https://hub.docker.com/r/tensorflow/tensorflow/ "
"for details on the available Docker images."
"If you are seeing any code compatibility issues because of"
" the TF version change, please try using a custom "
"`docker_config.parent_image` with the required "
"TF version.".format(tf_version))
new_img = "tensorflow/tensorflow:latest"
if img and img.endswith("-gpu"):
new_img += "-gpu"
img = new_img
return img
def _create_docker_file(self):
"""Creates a Dockerfile."""
if self.docker_config:
parent_image = self.docker_config.parent_image
else:
parent_image = None
if parent_image is None:
parent_image = self._get_docker_base_image()
lines = [
"FROM {}".format(parent_image),
"WORKDIR {}".format(self.destination_dir),
]
if self.requirements_txt is not None:
_, requirements_txt_name = os.path.split(self.requirements_txt)
dst_requirements_txt = os.path.join(requirements_txt_name)
requirements_txt_path = os.path.join(
self.destination_dir, requirements_txt_name
)
lines.append(
"COPY {requirements_txt} {requirements_txt}".format(
requirements_txt=requirements_txt_path)
)
# install pip requirements from requirements_txt if it exists.
lines.append(
"RUN if [ -e {requirements_txt} ]; "
"then pip install --no-cache -r {requirements_txt}; "
"fi".format(requirements_txt=dst_requirements_txt)
)
if self.entry_point is None:
lines.append("RUN pip install tensorflow-cloud")
if self.worker_config is not None and machine_config.is_tpu_config(
self.worker_config
):
lines.append("RUN pip install cloud-tpu-client")
# Copies the files from the `destination_dir` in Docker daemon location
# to the `destination_dir` in Docker container filesystem.
lines.append("COPY {} {}".format(self.destination_dir,
self.destination_dir))
docker_entry_point = self.preprocessed_entry_point or self.entry_point
_, docker_entry_point_file_name = os.path.split(docker_entry_point)
# Using `ENTRYPOINT` here instead of `CMD` specifically because
# we want to support passing user code flags.
lines.extend(
['ENTRYPOINT ["python", "{}"]'.format(docker_entry_point_file_name)]
)
content = "\n".join(lines)
self.docker_file_descriptor, self.docker_file_path = tempfile.mkstemp()
with open(self.docker_file_path, "w") as f:
f.write(content)
def _image_exists(self, image):
"""Checks whether the image exists on dockerhub using Docker v2 api.
Args:
image: image to check for.
Returns:
True if the image is found on dockerhub.
"""
repo_name, tag_name = image.split(":")
r = requests.get(
"http://hub.docker.com/v2/repositories/{}/tags/{}".format(
repo_name, tag_name
)
)
return r.ok
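# For instance, for the image "tensorflow/tensorflow:2.1.0" (an illustrative
# value), this issues
# GET http://hub.docker.com/v2/repositories/tensorflow/tensorflow/tags/2.1.0
# and treats any non-error response (`r.ok`) as "the image exists".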
def _get_file_path_map(self):
"""Maps local file paths to the Docker daemon process location.
Dictionary mapping file paths in the local file system to the paths
in the Docker daemon process location. The `key` or source is the path
of the file that will be used when creating the archive. The `value`
or destination is set as the `arcname` for the file at this time.
When extracting files from the archive, they are extracted to the
destination path.
Returns:
A file path map.
"""
location_map = {}
if self.entry_point is None and sys.argv[0].endswith("py"):
self.entry_point = sys.argv[0]
# Map entry_point directory to the dst directory.
if not self.called_from_notebook or self.entry_point is not None:
entry_point_dir, _ = os.path.split(self.entry_point)
if not entry_point_dir: # Current directory
entry_point_dir = "."
location_map[entry_point_dir] = self.destination_dir
# Place preprocessed_entry_point in the dst directory.
if self.preprocessed_entry_point is not None:
_, preprocessed_entry_point_file_name = os.path.split(
self.preprocessed_entry_point
)
location_map[self.preprocessed_entry_point] = os.path.join(
self.destination_dir, preprocessed_entry_point_file_name
)
# Place requirements_txt in the dst directory.
if self.requirements_txt is not None:
_, requirements_txt_name = os.path.split(self.requirements_txt)
location_map[self.requirements_txt] = os.path.join(
self.destination_dir, requirements_txt_name
)
# Place Docker file in the root directory.
location_map[self.docker_file_path] = "Dockerfile"
return location_map
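# A hypothetical illustration of the returned map (the temp file names are
# made up): with entry_point='train.py' in the current directory, a
# preprocessed entry point and requirements.txt present, and
# destination_dir='/app/', the result could look like:
#     {'.': '/app/',
#      '/tmp/tmpab12cd_preprocessed.py': '/app/tmpab12cd_preprocessed.py',
#      'requirements.txt': '/app/requirements.txt',
#      '/tmp/tmpef34gh': 'Dockerfile'}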
class LocalContainerBuilder(ContainerBuilder):
"""Container builder that uses local Docker daemon process."""
def get_docker_image(
self, max_status_check_attempts=None, delay_between_status_checks=None
):
"""Builds, publishes and returns a Docker image.
Args:
max_status_check_attempts: Maximum number of times allowed to check
build status. Not applicable to this builder.
delay_between_status_checks: Time in seconds to wait between status
checks. Not applicable to this builder.
Returns:
URI in a registry where the Docker image has been built and pushed.
"""
self.docker_client = docker.APIClient(version="auto")
self._get_tar_file_path()
# create Docker image from tarball
image_uri = self._build_docker_image()
# push to the registry
self._publish_docker_image(image_uri)
return image_uri
def _build_docker_image(self):
"""Builds Docker image.
https://docker-py.readthedocs.io/en/stable/api.html#module-docker.api.build
Returns:
Image URI.
"""
# Use the given Docker image, if available.
if self.docker_config:
image_uri = self.docker_config.image
else:
image_uri = None
image_uri = image_uri or generate_image_uri()
logger.info("Building Docker image: %s", image_uri)
# `fileobj` is generally set to the Dockerfile file path. If a tar file
# is used for Docker build context (one that includes a Dockerfile)
# then `custom_context` should be enabled.
with open(self.tar_file_path, "rb") as fileobj:
bld_logs_generator = self.docker_client.build(
path=".",
custom_context=True,
fileobj=fileobj,
tag=image_uri,
encoding="utf-8",
decode=True,
)
self._get_logs(bld_logs_generator, "build", image_uri)
return image_uri
def _publish_docker_image(self, image_uri):
"""Publishes Docker image.
Args:
image_uri: String, the registry name and tag.
"""
logger.info("Publishing Docker image: %s", image_uri)
pb_logs_generator = self.docker_client.push(
image_uri, stream=True, decode=True)
self._get_logs(pb_logs_generator, "publish", image_uri)
def _get_logs(self, logs_generator, name, image_uri):
"""Decodes logs from Docker and generates user friendly logs.
Args:
logs_generator: Generator returned from Docker build/push APIs.
name: String, 'build' or 'publish' used to identify where the
generator came from.
image_uri: String, the Docker image URI.
Raises:
RuntimeError: if there are any errors when building or publishing a
docker image.
"""
for chunk in logs_generator:
if "stream" in chunk:
for line in chunk["stream"].splitlines():
logger.info(line)
if "error" in chunk:
raise RuntimeError(
"Docker image {} failed: {}\nImage URI: {}".format(
name, str(chunk["error"]), image_uri
)
)
class CloudContainerBuilder(ContainerBuilder):
"""Container builder that uses Google cloud build."""
def get_docker_image(
self, max_status_check_attempts=40, delay_between_status_checks=30
):
"""Builds, publishes and returns a Docker image.
Args:
max_status_check_attempts: Maximum number of times allowed to check
build status. Applicable only to cloud container builder.
delay_between_status_checks: Time in seconds to wait between status
checks.
Returns:
URI in a registry where the Docker image has been built and pushed.
"""
self._get_tar_file_path()
storage_object_name = self._upload_tar_to_gcs()
# Use the given Docker image name, if available.
if self.docker_config:
image_uri = self.docker_config.image
else:
image_uri = None
image_uri = image_uri or generate_image_uri()
logger.info(
"Building and publishing Docker image using Google Cloud Build: %s",
image_uri)
build_service = discovery.build(
"cloudbuild",
"v1",
cache_discovery=False,
requestBuilder=google_api_client.TFCloudHttpRequest,
)
request_dict = self._create_cloud_build_request_dict(
image_uri,
storage_object_name,
max_status_check_attempts*delay_between_status_checks
)
try:
# Call to queue request to build and push Docker image.
print("Submitting Docker build and push request to Cloud Build.")
print("Please access your Cloud Build job information here:")
print("https://console.cloud.google.com/cloud-build/builds")
create_response = (
build_service.projects()
.builds()
.create(projectId=self.project_id, body=request_dict)
.execute()
)
# `create` returns a long-running `Operation`.
# https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/operations#Operation # pylint: disable=line-too-long
# This contains the build id, which we can use to get the status.
attempts = 1
while attempts <= max_status_check_attempts:
# Call to check on the status of the queued request.
get_response = (
build_service.projects()
.builds()
.get(
projectId=self.project_id,
id=create_response["metadata"]["build"]["id"],
)
.execute()
)
# `get` response is a `Build` object which contains `Status`.
# https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#Build.Status # pylint: disable=line-too-long
status = get_response["status"]
if status != "WORKING" and status != "QUEUED":
break
attempts += 1
# Wait before checking on the status again.
print("Waiting for Cloud Build, checking status in "
"{} seconds.".format(delay_between_status_checks))
time.sleep(delay_between_status_checks)
if status != "SUCCESS":
raise RuntimeError(
"There was an error executing the cloud build job. "
"Job status: " + status
)
except errors.HttpError as err:
raise RuntimeError(
"There was an error submitting the cloud build job. ", err)
return image_uri
def _upload_tar_to_gcs(self):
"""Uploads tarfile to GCS and returns the GCS object name."""
logger.info("Uploading files to GCS.")
storage_client = storage.Client()
try:
bucket = storage_client.get_bucket(
self.docker_config.image_build_bucket)
except NotFound:
bucket = storage_client.create_bucket(
self.docker_config.image_build_bucket)
unique_tag = str(uuid.uuid4()).replace("-", "_")
storage_object_name = "tf_cloud_train_tar_{}".format(unique_tag)
blob = bucket.blob(storage_object_name)
blob.upload_from_filename(self.tar_file_path)
return storage_object_name
def _create_cloud_build_request_dict(
self, image_uri, storage_object_name, timeout_sec
):
"""Creates request body for cloud build JSON API call.
`create` body should be a `Build` object
https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#Build
Args:
image_uri: GCR Docker image URI.
storage_object_name: Name of the tarfile object in GCS.
timeout_sec: timeout for the CloudBuild in seconds.
Returns:
Build request dictionary.
"""
request_dict = {}
request_dict["projectId"] = self.project_id
request_dict["images"] = [[image_uri]]
request_dict["steps"] = []
request_dict["timeout"] = "{}s".format(timeout_sec)
build_args = ["build", "-t", image_uri, "."]
if self.docker_config:
cache_from = (self.docker_config.cache_from or
self.docker_config.image)
if cache_from:
# Use the given Docker image as cache.
request_dict["steps"].append({
"name": "gcr.io/cloud-builders/docker",
"entrypoint": "bash",
"args": [
"-c",
"docker pull {} || exit 0".format(cache_from),
],
})
build_args[3:3] = ["--cache-from", cache_from]
request_dict["steps"].append({
"name": "gcr.io/cloud-builders/docker",
"args": build_args,
})
request_dict["source"] = {
"storageSource": {
"bucket": self.docker_config.image_build_bucket,
"object": storage_object_name,
}
}
request_dict["options"] = {
"machineType": "N1_HIGHCPU_8"
}
return request_dict
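# A sketch of the request body this produces, with illustrative values (the
# project, tag and bucket names below are made up):
#
# {
#     "projectId": "my-project",
#     "images": [["gcr.io/my-project/tf_cloud_train:<tag>"]],
#     "steps": [
#         {"name": "gcr.io/cloud-builders/docker",
#          "args": ["build", "-t",
#                   "gcr.io/my-project/tf_cloud_train:<tag>", "."]},
#     ],
#     "timeout": "1200s",
#     "source": {"storageSource": {"bucket": "<image_build_bucket>",
#                                  "object": "tf_cloud_train_tar_<tag>"}},
#     "options": {"machineType": "N1_HIGHCPU_8"}
# }
#
# With docker_config.cache_from set, an extra `docker pull` step and a
# `--cache-from` flag are inserted before the build step, as coded above.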
|
{
"content_hash": "0bbdc92779d51b528d45a68ee6bfb8dd",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 145,
"avg_line_length": 39.174774774774775,
"alnum_prop": 0.5800754300432343,
"repo_name": "tensorflow/cloud",
"id": "55252d53cf24eaee1537cb57f42ace17d5b49d85",
"size": "22338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/tensorflow_cloud/core/containerize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "27410"
},
{
"name": "Jupyter Notebook",
"bytes": "176445"
},
{
"name": "Python",
"bytes": "486580"
},
{
"name": "Starlark",
"bytes": "8948"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Scouting2016', '0032_auto_20160421_2234'),
]
operations = [
migrations.RenameModel(
old_name='Compitition',
new_name='Competition',
),
]
|
{
"content_hash": "e8f273391325d285126098e8e5c2e253",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 20.294117647058822,
"alnum_prop": 0.6057971014492753,
"repo_name": "ArcticWarriors/scouting-app-2016",
"id": "29f3f74226a1c3a0dc973f6ac018cdeaa6a586cf",
"size": "369",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ScoutingWebsite/Scouting2016/migrations/0033_auto_20170121_0010.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2636"
},
{
"name": "HTML",
"bytes": "75765"
},
{
"name": "JavaScript",
"bytes": "16877"
},
{
"name": "Python",
"bytes": "94669"
}
],
"symlink_target": ""
}
|
from pythonds.graphs import Graph, Vertex
from pythonds.basic import Queue
def bfs(g, start):
    start.setDistance(0)
    start.setPred(None)
    vertQueue = Queue()
    vertQueue.enqueue(start)
    while (vertQueue.size() > 0):
        currentVert = vertQueue.dequeue()
        for nbr in currentVert.getConnections():
            if (nbr.getColor() == 'white'):
                nbr.setColor('gray')
                nbr.setDistance(currentVert.getDistance() + 1)
                nbr.setPred(currentVert)
                vertQueue.enqueue(nbr)
        currentVert.setColor('black')
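# A minimal usage sketch (the vertices and edges below are made up): build a
# small graph with edges added in both directions, run bfs from 'A', then read
# back the distance that bfs stored on each vertex.
if __name__ == '__main__':
    g = Graph()
    for frm, to in [('A', 'B'), ('B', 'C'), ('A', 'D')]:
        g.addEdge(frm, to)
        g.addEdge(to, frm)
    bfs(g, g.getVertex('A'))
    for key in g.getVertices():
        v = g.getVertex(key)
        print("%s is at distance %d from A" % (v.getId(), v.getDistance()))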
|
{
"content_hash": "61bca45ee130a0a71bac65fdc0b24066",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.6742424242424242,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "7aa686dbe67e0da465e2e61a23b61258b8b587bf",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Listings-for-Second-Edition/listing_7_4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
}
|
import gevent
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin
from socketio import socketio_manage
from flask import request, Response
from backend import app
import random
import string
import time
import json
import bleach
roundlength = 10
# Load word file
# Not the best place to put this
WORDFILE = "./untitled/backend/resource/words"
words = []
num_clients = 0
artist = 0
drawings = []
players = {}
class PictNamespace(BaseNamespace, BroadcastMixin):
last_ping = 0
last_pong = 0
def initialize(self):
global num_clients
global artist
self.logger = app.logger
self.log("Socketio session commence")
num_clients += 1
self.client_id = num_clients
self.emit('set_client_id', {'client_id': self.client_id})
self.emit('initial_drawing', json.dumps(drawings))
self.log("Sent client id %d" % self.client_id)
def log(self, message):
self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
def on_request_control(self, posted):
global artist
global roundlength
if artist == 0:
artist = self.client_id
self.broadcast_event('grant_control', {'client_id': self.client_id})
gevent.spawn(self.revoke_control)
else:
self.emit('deny_control', {})
def revoke_control(self):
global artist
global roundlength
global drawings
drawings = []
self.log("Revoke control - sleeping...")
gevent.sleep(roundlength)
self.log("Control revoked.")
artist = 0
self.broadcast_event('revoke_control', {'client_id': self.client_id})
def on_post_drawing(self, posted):
global artist
global drawings
self.log(posted)
self.log("Drawing posted by %d" % self.client_id)
drawings.append(posted['drawing'])
self.log(drawings)
if self.client_id == artist:
self.broadcast_event_not_me('download_drawing', posted)
def on_get_new_word(self, posted):
global artist
if self.client_id == artist:
new_word = choose_word()
self.emit('new_word', {'word': new_word})
def on_register_user(self, posted):
global artist
global players
self.username = posted['username']
players[self.client_id] = self.username
self.log("registered username %s" % self.username)
def on_post_chat(self, posted):
global players
cleaned = bleach.clean(posted['msg'])
self.log("message: %s" % posted)
try:
nick = players[posted['sender']]
self.broadcast_event('chat_msg', {'sender': nick, 'msg': cleaned})
except KeyError:
self.broadcast_event('chat_msg', {'sender': posted['sender'], 'msg': cleaned})
@app.route('/socket.io/<path:remaining>')
def route(remaining):
socketio_manage(request.environ, {'/game': PictNamespace}, request)
return Response()
def load_words():
global words
fh = open(WORDFILE, 'r')
slurp = fh.read()
words = string.split(slurp)
def choose_word():
return random.choice(words)
load_words()
|
{
"content_hash": "5a7d20e1f78c69ec0a8bbc16539fe0a7",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 90,
"avg_line_length": 27.70940170940171,
"alnum_prop": 0.6172115977791487,
"repo_name": "thumphries/untitled",
"id": "b0ded8144c0bf79e2978dd75908b2d413653d7a2",
"size": "3242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "97"
},
{
"name": "Python",
"bytes": "887"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ[
'DJANGO_SETTINGS_MODULE'] = 'lino_book.projects.homeworkschool.settings.demo'
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "a608f5fbdad8e78d5f759f3ae0b6e496",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 85,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.7019607843137254,
"repo_name": "lino-framework/book",
"id": "fad7177bfbeab948b74354869b39def237457a76",
"size": "277",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lino_book/projects/homeworkschool/manage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
}
|
from helper import MockXPI
from validator.chromemanifest import ChromeManifest
import validator.testcases.content as content
from validator.errorbundler import ErrorBundle
def test_marking_overlays():
"""
Mark an overlay, then test that it marks the scripts within the overlay.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
assert marked_scripts == set(['chrome://ns1/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_marking_overlays_root_package():
"""
Tests that '/' resolves correctly as a chrome content package.
"""
err = ErrorBundle()
err.supported_versions = {}
manifest = ChromeManifest("""
content ns1 /
overlay chrome://foo chrome://ns1/content/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', manifest)
err.save_resource('chrome.manifest_nopush', manifest)
xpi = MockXPI({'main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
assert marked_scripts == set(['chrome://ns1/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_marking_overlays_no_overlay():
"""
Test that unmarked overlays don't mark scripts as being potentially
pollutable.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
#overlay chrome://foo chrome://ns1/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/main.xul': 'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
print marked_scripts
assert not marked_scripts
def test_marking_overlays_subdir():
"""
Mark an overlay in a subdirectory, then test that it marks the scripts
within the overlay. Make sure it properly figures out relative URLs.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/subdir/main.xul
""", 'chrome.manifest')
err.save_resource('chrome.manifest', c)
err.save_resource('chrome.manifest_nopush', c)
xpi = MockXPI({'foo/subdir/main.xul':
'tests/resources/content/script_list.xul'})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource('marked_scripts')
print marked_scripts
assert marked_scripts
assert marked_scripts == set(['chrome://ns1/subdir/foo.js',
'chrome://ns1/bar.js',
'chrome://asdf/foo.js'])
def test_script_scraping():
"""Test that scripts are gathered up during the validation process."""
err = ErrorBundle()
err.supported_versions = {}
xpi = MockXPI({'foo.js': 'tests/resources/junk.xpi',
'dir/bar.jsm': 'tests/resources/junk.xpi'})
content.test_packed_packages(err, xpi)
assert not err.failed()
scripts = err.get_resource('scripts')
print scripts
assert scripts
for bundle in scripts:
assert 'foo.js' in bundle['scripts']
assert 'dir/bar.jsm' in bundle['scripts']
assert bundle['package'] == xpi
assert bundle['state'] == []
|
{
"content_hash": "e4fa4855ddd1241c6a20b188bd96946b",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 30.21641791044776,
"alnum_prop": 0.6265744628303285,
"repo_name": "wagnerand/amo-validator",
"id": "d7d3f6d44b54c6d5d16891feea70741e9e8dd698",
"size": "4049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_content_overlays.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "502"
},
{
"name": "HTML",
"bytes": "2802"
},
{
"name": "JavaScript",
"bytes": "602"
},
{
"name": "Python",
"bytes": "835539"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
import os
import csv
from ooni.settings import config
def load_input(file_input, file_output):
fw = open(file_output, "w+")
with open(file_input) as f:
csvreader = csv.reader(f)
csvreader.next()
for row in csvreader:
fw.write("%s\n" % row[0])
fw.close()
def generate_country_input(country_code, dst):
"""
Write to dst/citizenlab-urls-{country_code}.txt
the list for the given country code.
Returns:
the path to the generated input
"""
country_code = country_code.lower()
filename = os.path.join(dst, "citizenlab-urls-%s.txt" % country_code)
input_list = config.get_data_file_path("resources/"
"citizenlab-test-lists/"
"test-lists-master/lists/"
+ country_code + ".csv")
if not os.path.exists(input_list):
raise Exception("Could not find list for country %s" % country_code)
load_input(input_list, filename)
return filename
def generate_global_input(dst):
filename = os.path.join(dst, "citizenlab-urls-global.txt")
input_list = config.get_data_file_path("resources/"
"citizenlab-test-lists/"
"test-lists-master/lists/"
"global.csv")
load_input(input_list, filename)
return filename
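# A minimal usage sketch (the destination directory below is illustrative):
#
# from ooni.deckgen.processors import citizenlab_test_lists
# global_file = citizenlab_test_lists.generate_global_input("/tmp/inputs")
# it_file = citizenlab_test_lists.generate_country_input("IT", "/tmp/inputs")
# # -> /tmp/inputs/citizenlab-urls-global.txt and
# # /tmp/inputs/citizenlab-urls-it.txt, each holding one URL per line.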
|
{
"content_hash": "20a9ebe7fa221c55540885e8f4efdeba",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 76,
"avg_line_length": 28.442307692307693,
"alnum_prop": 0.5422582826233941,
"repo_name": "Karthikeyan-kkk/ooni-probe",
"id": "0a56ec09b3c2631437ec7359a5444dc8a2105aba",
"size": "1479",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ooni/deckgen/processors/citizenlab_test_lists.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "392"
},
{
"name": "Groff",
"bytes": "38424"
},
{
"name": "HTML",
"bytes": "3963"
},
{
"name": "JavaScript",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "3786"
},
{
"name": "Python",
"bytes": "505218"
},
{
"name": "Shell",
"bytes": "64266"
}
],
"symlink_target": ""
}
|
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(v))
|
{
"content_hash": "e62c37b4e1bbbb7c9140bbfc1efa07c6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 41.888888888888886,
"alnum_prop": 0.7984084880636605,
"repo_name": "samcheck/PyMedia",
"id": "fb85fb6c9caf12d33f0e2dbbb36f7875463730bd",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_downgrade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1028"
},
{
"name": "HTML",
"bytes": "13163"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "49068"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
class SiteDetail(models.Model):
label = models.CharField(max_length=200)
key = models.CharField(max_length=200)
value = models.CharField(max_length=400)
pub_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.label
def __unicode__(self):
return unicode(self.value)
|
{
"content_hash": "04e4b8fa9166136e975a5a8c945c925d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 26,
"alnum_prop": 0.6990950226244343,
"repo_name": "501code/Fletcher-Street-Urban-Riding-Club",
"id": "043104272dfef221fb04296ada70530f3bc513cb",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site_details/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82"
},
{
"name": "HTML",
"bytes": "16847"
},
{
"name": "JavaScript",
"bytes": "40522"
},
{
"name": "Python",
"bytes": "19338"
}
],
"symlink_target": ""
}
|
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'regressiontests.syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, u'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
# check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
|
{
"content_hash": "8086db4ae9659f52a66e1dad0fc9d59e",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 151,
"avg_line_length": 40.94754098360656,
"alnum_prop": 0.6004483945872368,
"repo_name": "disqus/django-old",
"id": "66432981dd361f4908cb0b5cff928cd4e856f697",
"size": "12489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/syndication/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85749"
},
{
"name": "Python",
"bytes": "7413553"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
}
|
import logging
import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.ioloop
import tornado.locale
import tornado.web
import uimodules
ENTRIES = [
{
'name': 'one',
'per': 'm'
},
{
'name': 'two',
'per': 'm'
},
{
'name': 'three',
'per': 'o'
},
]
class PageHandler(tornado.web.RequestHandler):
def get(self):
logging.warning(self.request.path)
logging.warning(tornado.locale.get_supported_locales())
logging.warning(self.locale.name)
self.render('template.html', title='test', items=map(str, range(10)))
class WebUIHandler(tornado.web.RequestHandler):
def get(self):
entries = ENTRIES
self.render('home.html', entries=entries)
class WebHandler(tornado.web.RequestHandler):
def get(self):
entry = filter(lambda x: x['per'] == 'm', ENTRIES)[0]
self.render('entry.html', entry=entry)
if __name__ == '__main__':
settings = {
'debug': True,
'compiled_template_cache': False,
'ui_modules': uimodules
}
app = tornado.web.Application([
(r'/page', PageHandler),
(r'/ui', WebUIHandler),
(r'/web', WebHandler)
], **settings)
app.listen(9999)
logging.warning('start ..')
tornado.ioloop.IOLoop.current().start()
|
{
"content_hash": "a6cd12dc115923b275e386aff28916ce",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 20.073529411764707,
"alnum_prop": 0.5882783882783883,
"repo_name": "tao12345666333/Talk-Is-Cheap",
"id": "1a0b91a8b881a3a061e15a7a7f91cb3be1fe93a6",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tornado/simple/web.UI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103"
},
{
"name": "CSS",
"bytes": "21381"
},
{
"name": "Dockerfile",
"bytes": "1082"
},
{
"name": "Go",
"bytes": "8982"
},
{
"name": "HTML",
"bytes": "47807"
},
{
"name": "JavaScript",
"bytes": "88596"
},
{
"name": "Lua",
"bytes": "304"
},
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "PHP",
"bytes": "1858"
},
{
"name": "Perl",
"bytes": "1331"
},
{
"name": "Python",
"bytes": "253685"
},
{
"name": "Ruby",
"bytes": "1510"
},
{
"name": "Rust",
"bytes": "45"
},
{
"name": "Shell",
"bytes": "6975"
},
{
"name": "Smarty",
"bytes": "319"
},
{
"name": "Vue",
"bytes": "40435"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# $example on:init_session$
from pyspark.sql import SparkSession
# $example off:init_session$
# $example on:schema_inferring$
from pyspark.sql import Row
# $example off:schema_inferring$
# $example on:programmatic_schema$
# Import data types
from pyspark.sql.types import *
# $example off:programmatic_schema$
"""
A simple example demonstrating basic Spark SQL features.
Run with:
./bin/spark-submit examples/src/main/python/sql/basic.py
"""
def basic_df_example(spark):
# $example on:create_df$
# spark is an existing SparkSession
df = spark.read.json("examples/src/main/resources/people.json")
# Displays the content of the DataFrame to stdout
df.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:create_df$
# $example on:untyped_ops$
# spark, df are from the previous example
# Print the schema in a tree format
df.printSchema()
# root
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
# Select only the "name" column
df.select("name").show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# Select everybody, but increment the age by 1
df.select(df['name'], df['age'] + 1).show()
# +-------+---------+
# | name|(age + 1)|
# +-------+---------+
# |Michael| null|
# | Andy| 31|
# | Justin| 20|
# +-------+---------+
# Select people older than 21
df.filter(df['age'] > 21).show()
# +---+----+
# |age|name|
# +---+----+
# | 30|Andy|
# +---+----+
# Count people by age
df.groupBy("age").count().show()
# +----+-----+
# | age|count|
# +----+-----+
# | 19| 1|
# |null| 1|
# | 30| 1|
# +----+-----+
# $example off:untyped_ops$
# $example on:run_sql$
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:run_sql$
# $example on:global_temp_view$
# Register the DataFrame as a global temporary view
df.createGlobalTempView("people")
# Global temporary view is tied to a system preserved database `global_temp`
spark.sql("SELECT * FROM global_temp.people").show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# Global temporary view is cross-session
spark.newSession().sql("SELECT * FROM global_temp.people").show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:global_temp_view$
def schema_inference_example(spark):
# $example on:schema_inferring$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = spark.createDataFrame(people)
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
# The results of SQL queries are Dataframe objects.
# rdd returns the content as an :class:`pyspark.RDD` of :class:`Row`.
teenNames = teenagers.rdd.map(lambda p: "Name: " + p.name).collect()
for name in teenNames:
print(name)
# Name: Justin
# $example off:schema_inferring$
def programmatic_schema_example(spark):
# $example on:programmatic_schema$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
# Each line is converted to a tuple.
people = parts.map(lambda p: (p[0], p[1].strip()))
# The schema is encoded in a string.
schemaString = "name age"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
# Apply the schema to the RDD.
schemaPeople = spark.createDataFrame(people, schema)
# Creates a temporary view using the DataFrame
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
results = spark.sql("SELECT name FROM people")
results.show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# $example off:programmatic_schema$
if __name__ == "__main__":
# $example on:init_session$
spark = SparkSession \
.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# $example off:init_session$
basic_df_example(spark)
schema_inference_example(spark)
programmatic_schema_example(spark)
spark.stop()
|
{
"content_hash": "9248980181aad39f0b2b5e846d47f9f7",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 97,
"avg_line_length": 27.663366336633665,
"alnum_prop": 0.5706871868289191,
"repo_name": "alec-heif/MIT-Thesis",
"id": "ebcf66995b4777bd696eb471d6003c82cf663932",
"size": "6373",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "spark-bin/examples/src/main/python/sql/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22888"
},
{
"name": "CSS",
"bytes": "1179"
},
{
"name": "HTML",
"bytes": "2208713"
},
{
"name": "Java",
"bytes": "419587"
},
{
"name": "Makefile",
"bytes": "6843"
},
{
"name": "Python",
"bytes": "2253974"
},
{
"name": "R",
"bytes": "284730"
},
{
"name": "Scala",
"bytes": "619538"
},
{
"name": "Shell",
"bytes": "64356"
}
],
"symlink_target": ""
}
|
import mock
from rally.plugins.openstack.scenarios.quotas import quotas
from tests.unit import test
class QuotasTestCase(test.ScenarioTestCase):
def setUp(self):
super(QuotasTestCase, self).setUp()
self.context.update({
"user": {
"tenant_id": "fake",
"credential": mock.MagicMock()
},
"tenant": {"id": "fake"}
})
def test_nova_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario.nova_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("nova", "fake", 1024)
def test_nova_update_and_delete(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario._delete_quotas = mock.MagicMock()
scenario.nova_update_and_delete(max_quota=1024)
scenario._update_quotas.assert_called_once_with("nova", "fake", 1024)
scenario._delete_quotas.assert_called_once_with("nova", "fake")
def test_cinder_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario.cinder_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024)
def test_cinder_update_and_delete(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
scenario._delete_quotas = mock.MagicMock()
scenario.cinder_update_and_delete(max_quota=1024)
scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024)
scenario._delete_quotas.assert_called_once_with("cinder", "fake")
def test_neutron_update(self):
scenario = quotas.Quotas(self.context)
scenario._update_quotas = mock.MagicMock()
mock_quota_update_fn = self.admin_clients("neutron").update_quota
scenario.neutron_update(max_quota=1024)
scenario._update_quotas.assert_called_once_with("neutron", "fake",
1024,
mock_quota_update_fn)
|
{
"content_hash": "fc745b4ab75c5dc69ba839cefff40aeb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 40.09090909090909,
"alnum_prop": 0.6149659863945578,
"repo_name": "eayunstack/rally",
"id": "ac91619f63e93be1621dd0676f0cd66e176ac379",
"size": "2833",
"binary": false,
"copies": "4",
"ref": "refs/heads/product",
"path": "tests/unit/plugins/openstack/scenarios/quotas/test_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36716"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2988245"
},
{
"name": "Shell",
"bytes": "41128"
}
],
"symlink_target": ""
}
|
"""Common cgroups management routines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import logging
import os
from treadmill import fs
from treadmill.fs import linux as fs_linux
#: Base directory where we expect to find cgroups
CGROOT = '/sys/fs/cgroup'
#: Cgroups mount layout, modeled after Red Hat 7.
WANTED_CGROUPS = {
'cpu': 'cpu,cpuacct',
'cpuacct': 'cpu,cpuacct',
'cpuset': 'cpuset',
'memory': 'memory',
'blkio': 'blkio',
'net_cls': 'net_cls,net_prio',
'net_prio': 'net_cls,net_prio',
}
#: Where to read kernel supported cgroups
_PROC_CGROUPS = '/proc/cgroups'
_PROC_CGROUP = '/proc/{}/cgroup'
_SUBSYSTEMS2MOUNTS = None
_LOGGER = logging.getLogger(__name__)
def read_mounted_cgroups(filter_by=CGROOT):
"""Read all the currently mounted cgroups and their mount points.
:params ``str`` filter_by:
        Filter out cgroups mounted outside of this path. Set to None/'' to
obtain all mountpoints.
:returns:
``dict`` - Map of cgroup subsystems to their mountpoints list.
"""
availables = _available_subsystems()
mounts = fs_linux.list_mounts()
subsys2mnt = {}
for mount_entry in mounts:
if mount_entry.fs_type != 'cgroup':
continue
for opt in mount_entry.mnt_opts:
if opt in availables:
if not filter_by or mount_entry.target.startswith(filter_by):
subsys2mnt.setdefault(opt, []).append(mount_entry.target)
return subsys2mnt
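# Illustrative example (not part of the original module): on a Red Hat 7 style
# layout, read_mounted_cgroups() returns a mapping roughly like
#     {'cpu': ['/sys/fs/cgroup/cpu,cpuacct'],
#      'cpuacct': ['/sys/fs/cgroup/cpu,cpuacct'],
#      'memory': ['/sys/fs/cgroup/memory']}
# though the exact keys and mountpoints depend on the host configuration.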
def mounted_subsystems():
"""Return the cached cgroup subsystems to mount dict.
:returns:
``dict`` - CGroup subsystem to mountpoints list.
"""
# allow global variable to cache
global _SUBSYSTEMS2MOUNTS # pylint: disable=W0603
if _SUBSYSTEMS2MOUNTS is None:
_SUBSYSTEMS2MOUNTS = read_mounted_cgroups(filter_by=CGROOT)
return _SUBSYSTEMS2MOUNTS
def proc_cgroups(proc='self'):
"""Read a process' cgroups
:returns:
``dict`` - Dictionary of all the process' subsystem and cgroups.
"""
assert isinstance(proc, int) or '/' not in proc
cgroups = {}
with io.open(_PROC_CGROUP.format(proc), 'r') as f:
for cgroup_line in f:
(_id, subsys, path) = cgroup_line.strip().split(':', 2)
cgroups[subsys] = path
return cgroups
def makepath(subsystem, group, pseudofile=None):
"""Pieces together a full path of the cgroup.
"""
mountpoint = _get_mountpoint(subsystem)
group = group.strip('/')
if pseudofile:
return os.path.join(mountpoint, group, pseudofile)
return os.path.join(mountpoint, group)
def extractpath(path, subsystem, pseudofile=None):
"""Extract cgroup name from a cgroup path.
"""
mountpoint = _get_mountpoint(subsystem)
if not path.startswith(mountpoint):
raise ValueError('cgroup path does not start with %r' % mountpoint)
subpath = path[len(mountpoint):]
if pseudofile is None:
return subpath.strip('/')
elif not subpath.endswith(pseudofile):
        raise ValueError(
            'cgroup path does not end with pseudofile %r' % pseudofile)
return subpath[:-len(pseudofile)].strip('/')
def create(subsystem, group):
"""Create a cgroup.
"""
fullpath = makepath(subsystem, group)
return fs.mkdir_safe(fullpath)
def delete(subsystem, group):
"""Delete cgroup (and all sub-cgroups).
"""
fullpath = makepath(subsystem, group)
os.rmdir(fullpath)
def set_value(subsystem, group, pseudofile, value):
"""Set value in cgroup pseudofile"""
fullpath = makepath(subsystem, group, pseudofile)
# Make sure we have utf8 strings
if hasattr(value, 'decode'):
value = value.decode()
value = '{}'.format(value)
_LOGGER.debug('setting %s => %s', fullpath, value)
with io.open(fullpath, 'w') as f:
f.write(value)
def get_data(subsystem, group, pseudofile):
"""Reads the data of cgroup parameter."""
fullpath = makepath(subsystem, group, pseudofile)
with io.open(fullpath, 'r') as f:
return f.read().strip()
def get_value(subsystem, group, pseudofile):
"""Reads the data and convert to value of cgroup parameter.
returns: int
"""
data = get_data(subsystem, group, pseudofile)
try:
return _safe_int(data)
except ValueError:
_LOGGER.exception('Invalid data from %s:/%s[%s]: %r',
subsystem, group, pseudofile, data)
return 0
def join(subsystem, group, pid=None):
"""Move process into a specific cgroup"""
if pid is None:
pid = os.getpid()
return set_value(subsystem, group, 'tasks', pid)
def inherit_value(subsystem, group, pseudofile):
"""Inherit value from parent group.
"""
parent_group = os.path.dirname(group)
parent_value = get_data(subsystem, parent_group, pseudofile)
set_value(subsystem, group, pseudofile, parent_value)
def _get_mountpoint(subsystem):
"""Returns mountpoint of a particular subsystem.
"""
mounts = mounted_subsystems()
return mounts[subsystem][0]
def _available_subsystems():
"""Get set of available cgroup subsystems.
"""
subsystems = []
with io.open(_PROC_CGROUPS, 'r') as cgroups:
for cgroup in cgroups:
(
subsys_name,
_hierarchy,
_num_cgroups,
enabled
) = cgroup.split()
if subsys_name[0] != '#' and enabled == '1':
subsystems.append(subsys_name)
return subsystems
def _safe_int(num_str):
"""Safely parse a value from cgroup pseudofile into an int.
"""
    # Values read from cgroup pseudofiles can span multiple lines; only the
    # first line is parsed here.
    value = int(num_str.split('\n')[0].strip(), base=10)
    # Negative values are not meaningful here; clamp to 0.
    if value < 0:
        value = 0
return value
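# Usage sketch (illustrative only, not part of the original module; the cgroup
# name and limit below are hypothetical):
#     create('memory', 'treadmill/apps/example')
#     set_value('memory', 'treadmill/apps/example',
#               'memory.limit_in_bytes', 512 * 1024 * 1024)
#     join('memory', 'treadmill/apps/example', pid=os.getpid())
#     limit = get_value('memory', 'treadmill/apps/example',
#                       'memory.limit_in_bytes')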
|
{
"content_hash": "67ca4529a5fd22a1fa42a1d41d585865",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 26.700892857142858,
"alnum_prop": 0.6326701220531684,
"repo_name": "ceache/treadmill",
"id": "078501ad68ff4eeafc7889f044308e6c71e94f42",
"size": "5981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/cgroups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3362298"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
}
|
"""Tests for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
INPUT_MATRIX = np.array(
[[0.1, 0.0, 0.2, 0.0, 0.4, 0.5, 0.0],
[0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6],
[2.0, 0.0, 0.0, 2.3, 0.0, 2.5, 0.0],
[3.0, 0.0, 3.2, 3.3, 0.0, 3.5, 0.0],
[0.0, 4.1, 0.0, 0.0, 4.4, 0.0, 4.6]]).astype(np.float32)
def np_matrix_to_tf_sparse(np_matrix, row_slices=None,
col_slices=None, transpose=False,
shuffle=False):
"""Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
indices = np.nonzero(np_matrix)
# Only allow slices of whole rows or whole columns.
assert not (row_slices is not None and col_slices is not None)
if row_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[0] == r)[0] for r in row_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if col_slices is not None:
selected_ind = np.concatenate(
[np.where(indices[1] == c)[0] for c in col_slices], 0)
indices = (indices[0][selected_ind], indices[1][selected_ind])
if shuffle:
shuffled_ind = [x for x in range(len(indices[0]))]
random.shuffle(shuffled_ind)
indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])
ind = (np.concatenate(
(np.expand_dims(indices[1], 1),
np.expand_dims(indices[0], 1)), 1).astype(np.int64) if transpose else
np.concatenate((np.expand_dims(indices[0], 1),
np.expand_dims(indices[1], 1)), 1).astype(np.int64))
val = np_matrix[indices].astype(np.float32)
shape = (np.array(
[max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64) if transpose
else np.array(
[max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64))
return tf.SparseTensor(ind, val, shape)
def sparse_input():
return np_matrix_to_tf_sparse(INPUT_MATRIX)
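# Illustrative note (not part of the original test file): for example,
# np_matrix_to_tf_sparse(INPUT_MATRIX, row_slices=[0]) keeps only row 0's
# non-zero entries, i.e. indices (0, 0), (0, 2), (0, 4), (0, 5) with values
# 0.1, 0.2, 0.4, 0.5; col_slices works the same way on columns, and
# transpose=True swaps the index order and the dense shape.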
class WalsModelTest(tf.test.TestCase):
def setUp(self):
self.col_init = [
# shard 0
[[-0.36444709, -0.39077035, -0.32528427],
[1.19056475, 0.07231052, 2.11834812],
[0.93468881, -0.71099287, 1.91826844]],
# shard 1
[[1.18160152, 1.52490723, -0.50015002],
[1.82574749, -0.57515913, -1.32810032]],
# shard 2
[[-0.15515432, -0.84675711, 0.13097958],
[-0.9246484, 0.69117504, 1.2036494]]
]
self.row_wts = [[0.1, 0.2, 0.3], [0.4, 0.5]]
self.col_wts = [[0.1, 0.2, 0.3],
[0.4, 0.5],
[0.6, 0.7]]
self._wals_inputs = sparse_input()
# Values of factor shards after running one iteration of row and column
# updates.
self._row_factors_0 = [[0.097689, -0.219293, -0.020780],
[0.50842, 0.64626, 0.22364],
[0.401159, -0.046558, -0.192854]]
self._row_factors_1 = [[1.20597, -0.48025, 0.35582],
[1.5564, 1.2528, 1.0528]]
self._col_factors_0 = [[2.4725, -1.2950, -1.9980],
[0.44625, 1.50771, 1.27118],
[1.39801, -2.10134, 0.73572]]
self._col_factors_1 = [[3.36509, -0.66595, -3.51208],
[0.57191, 1.59407, 1.33020]]
self._col_factors_2 = [[3.3459, -1.3341, -3.3008],
[0.57366, 1.83729, 1.26798]]
def _run_test_process_input(self, use_factors_weights_cache):
with self.test_session():
sp_feeder = tf.sparse_placeholder(tf.float32)
wals_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple sparse tensors with scattered rows. Note that
# this split can be different than the factor sharding and the inputs can
# consist of non-consecutive rows. Each row needs to include all non-zero
# elements in that row.
sp_r0 = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 2]).eval()
sp_r1 = np_matrix_to_tf_sparse(INPUT_MATRIX, [1, 4], shuffle=True).eval()
sp_r2 = np_matrix_to_tf_sparse(INPUT_MATRIX, [3], shuffle=True).eval()
input_scattered_rows = [sp_r0, sp_r1, sp_r2]
# Test updating row factors.
# Here we feed in scattered rows of the input.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder,
transpose_input=False)[1]
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Split input into multiple sparse tensors with scattered columns. Note
# that here the elements in the sparse tensors are not ordered and also
# do not need to consist of consecutive columns. However, each column
# needs to include all non-zero elements in that column.
sp_c0 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0]).eval()
sp_c1 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[5, 3, 1],
shuffle=True).eval()
sp_c2 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 6]).eval()
sp_c3 = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[3, 6],
shuffle=True).eval()
input_scattered_cols = [sp_c0, sp_c1, sp_c2, sp_c3]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder,
transpose_input=False)[1]
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
def _run_test_process_input_transposed(self, use_factors_weights_cache):
with self.test_session():
sp_feeder = tf.sparse_placeholder(tf.float32)
wals_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
num_row_shards=2,
num_col_shards=3,
regularization=0.01,
unobserved_weight=0.1,
col_init=self.col_init,
row_weights=self.row_wts,
col_weights=self.col_wts,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
# Split input into multiple SparseTensors with scattered rows.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_r0_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [0, 3],
transpose=True).eval()
sp_r1_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [4, 1],
shuffle=True, transpose=True).eval()
sp_r2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [2], transpose=True).eval()
sp_r3_t = sp_r1_t
input_scattered_rows = [sp_r0_t, sp_r1_t, sp_r2_t, sp_r3_t]
# Test updating row factors.
# Here we feed in scattered rows of the input.
      # Note that placeholder suffixes are assigned in lexicographical order of
      # the test case names and then in the order the placeholders appear in
      # each test.
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder,
transpose_input=True)[1]
for inp in input_scattered_rows:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
row_factors = [x.eval() for x in wals_model.row_factors]
self.assertAllClose(row_factors[0], self._row_factors_0, atol=1e-3)
self.assertAllClose(row_factors[1], self._row_factors_1, atol=1e-3)
# Split input into multiple SparseTensors with scattered columns.
# Here the inputs are transposed. But the same constraints as described in
# the previous non-transposed test case apply to these inputs (before they
# are transposed).
sp_c0_t = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[0, 1],
transpose=True).eval()
sp_c1_t = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[4, 2],
transpose=True).eval()
sp_c2_t = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[5],
transpose=True, shuffle=True).eval()
sp_c3_t = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[3, 6],
transpose=True).eval()
sp_c4_t = sp_c2_t
input_scattered_cols = [sp_c0_t, sp_c1_t, sp_c2_t, sp_c3_t,
sp_c4_t]
# Test updating column factors.
# Here we feed in scattered columns of the input.
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder,
transpose_input=True)[1]
for inp in input_scattered_cols:
feed_dict = {sp_feeder: inp}
process_input_op.run(feed_dict=feed_dict)
col_factors = [x.eval() for x in wals_model.col_factors]
self.assertAllClose(col_factors[0], self._col_factors_0, atol=1e-3)
self.assertAllClose(col_factors[1], self._col_factors_1, atol=1e-3)
self.assertAllClose(col_factors[2], self._col_factors_2, atol=1e-3)
  # Note that when row_weights and col_weights are 0, WALS gives identical
  # results to ALS (Alternating Least Squares). However our implementation does
# not handle the case of zero weights differently. Instead, when row_weights
# and col_weights are set to None, we interpret that as the ALS case, and
# trigger the more efficient ALS updates.
# Here we test that those two give identical results.
def _run_test_als(self, use_factors_weights_cache):
with self.test_session():
col_init = np.random.rand(7, 3)
als_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors1 = [x.eval() for x in als_model.row_factors]
wals_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=0,
col_weights=0,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(self._wals_inputs)[1]
process_input_op.run()
row_factors2 = [x.eval() for x in wals_model.row_factors]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
# Here we test partial column updates.
sp_c = np_matrix_to_tf_sparse(INPUT_MATRIX, col_slices=[2, 0],
shuffle=True).eval()
sp_feeder = tf.sparse_placeholder(tf.float32)
feed_dict = {sp_feeder: sp_c}
als_model.col_update_prep_gramian_op.run()
als_model.initialize_col_update_op.run()
process_input_op = als_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors1 = [x.eval() for x in als_model.col_factors]
feed_dict = {sp_feeder: sp_c}
wals_model.col_update_prep_gramian_op.run()
wals_model.initialize_col_update_op.run()
process_input_op = wals_model.update_col_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
col_factors2 = [x.eval() for x in wals_model.col_factors]
for c1, c2 in zip(col_factors1, col_factors2):
self.assertAllClose(c1, c2, rtol=5e-3, atol=1e-2)
def _run_test_als_transposed(self, use_factors_weights_cache):
with self.test_session():
col_init = np.random.rand(7, 3)
als_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
als_model.initialize_op.run()
als_model.worker_init.run()
wals_model = tf.contrib.factorization.WALSModel(
5, 7, 3,
col_init=col_init,
row_weights=[0] * 5,
col_weights=[0] * 7,
use_factors_weights_cache=use_factors_weights_cache)
wals_model.initialize_op.run()
wals_model.worker_init.run()
sp_feeder = tf.sparse_placeholder(tf.float32)
# Here test partial row update with identical inputs but with transposed
# input for als.
sp_r_t = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1],
transpose=True).eval()
sp_r = np_matrix_to_tf_sparse(INPUT_MATRIX, [3, 1]).eval()
feed_dict = {sp_feeder: sp_r_t}
als_model.row_update_prep_gramian_op.run()
als_model.initialize_row_update_op.run()
process_input_op = als_model.update_row_factors(sp_input=sp_feeder,
transpose_input=True)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors1 = [als_model.row_factors[0].eval()[1],
als_model.row_factors[0].eval()[3]]
feed_dict = {sp_feeder: sp_r}
wals_model.row_update_prep_gramian_op.run()
wals_model.initialize_row_update_op.run()
process_input_op = wals_model.update_row_factors(sp_input=sp_feeder)[1]
process_input_op.run(feed_dict=feed_dict)
# Only updated row 1 and row 3, so only compare these rows since others
# have randomly initialized values.
row_factors2 = [wals_model.row_factors[0].eval()[1],
wals_model.row_factors[0].eval()[3]]
for r1, r2 in zip(row_factors1, row_factors2):
self.assertAllClose(r1, r2, atol=1e-3)
def simple_train(self,
model,
inp,
num_iterations):
"""Helper function to train model on inp for num_iterations."""
row_update_op = model.update_row_factors(sp_input=inp)[1]
col_update_op = model.update_col_factors(sp_input=inp)[1]
model.initialize_op.run()
model.worker_init.run()
for _ in xrange(num_iterations):
model.row_update_prep_gramian_op.run()
model.initialize_row_update_op.run()
row_update_op.run()
model.col_update_prep_gramian_op.run()
model.initialize_col_update_op.run()
col_update_op.run()
  # Trains an ALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_als(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with self.test_session():
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = tf.SparseTensor(indices, values, [rows, cols])
model = tf.contrib.factorization.WALSModel(
rows, cols, dims,
regularization=1e-5,
row_weights=None,
col_weights=None,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01, atol=0.01)
  # Trains a WALS model for a low-rank matrix and makes sure the product of
# factors is close to the original input.
def _run_test_train_full_low_rank_wals(self, use_factors_weights_cache):
rows = 15
cols = 11
dims = 3
with self.test_session():
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = [[i, j] for i in xrange(rows) for j in xrange(cols)]
values = data.reshape(-1)
inp = tf.SparseTensor(indices, values, [rows, cols])
model = tf.contrib.factorization.WALSModel(
rows, cols, dims,
regularization=1e-5,
row_weights=0,
col_weights=[0] * cols,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
self.assertAllClose(data,
np.dot(row_factor, np.transpose(col_factor)),
rtol=0.01, atol=0.01)
# Trains a WALS model for a partially observed low-rank matrix and makes
# sure the product of factors is reasonably close to the original input.
def _run_test_train_matrix_completion_wals(self, use_factors_weights_cache):
rows = 11
cols = 9
dims = 4
def keep_index(x):
return not (x[0] + x[1]) % 4
with self.test_session():
row_wts = 0.1 + np.random.rand(rows)
col_wts = 0.1 + np.random.rand(cols)
data = np.dot(np.random.rand(rows, 3),
np.random.rand(3, cols)).astype(np.float32) / 3.0
indices = np.array(
list(filter(keep_index,
[[i, j] for i in xrange(rows) for j in xrange(cols)])))
values = data[indices[:, 0], indices[:, 1]]
inp = tf.SparseTensor(indices, values, [rows, cols])
model = tf.contrib.factorization.WALSModel(
rows, cols, dims,
unobserved_weight=0.01,
regularization=0.001,
row_weights=row_wts,
col_weights=col_wts,
use_factors_weights_cache=use_factors_weights_cache)
self.simple_train(model, inp, 25)
row_factor = model.row_factors[0].eval()
col_factor = model.col_factors[0].eval()
out = np.dot(row_factor, np.transpose(col_factor))
for i in xrange(rows):
for j in xrange(cols):
if keep_index([i, j]):
self.assertNear(data[i][j], out[i][j],
err=0.4, msg="%d, %d" % (i, j))
else:
self.assertNear(0, out[i][j], err=0.5, msg="%d, %d" % (i, j))
def test_process_input_with_cache(self):
self._run_test_process_input(True)
def test_process_input_without_cache(self):
self._run_test_process_input(False)
def test_process_input_transposed_with_cache(self):
self._run_test_process_input_transposed(True)
def test_process_input_transposed_without_cache(self):
self._run_test_process_input_transposed(False)
def test_als_with_cache(self):
self._run_test_als(True)
def test_als_without_cache(self):
self._run_test_als(False)
def test_als_transposed_with_cache(self):
self._run_test_als_transposed(True)
  def test_als_transposed_without_cache(self):
self._run_test_als_transposed(False)
def test_train_full_low_rank_wals_with_cache(self):
self._run_test_train_full_low_rank_wals(True)
def test_train_full_low_rank_wals_without_cache(self):
self._run_test_train_full_low_rank_wals(False)
def test_train_matrix_completion_wals_with_cache(self):
self._run_test_train_matrix_completion_wals(True)
def test_train_matrix_completion_wals_without_cache(self):
self._run_test_train_matrix_completion_wals(False)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "d7b7074c3bef952b4299842caff19bab",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 80,
"avg_line_length": 41.90891089108911,
"alnum_prop": 0.6038083538083538,
"repo_name": "mrry/tensorflow",
"id": "6a8a52f96da9a1ac76a7ef0827e846983d006384",
"size": "21854",
"binary": false,
"copies": "4",
"ref": "refs/heads/windows",
"path": "tensorflow/contrib/factorization/python/ops/factorization_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88579"
},
{
"name": "C++",
"bytes": "12927212"
},
{
"name": "CMake",
"bytes": "66937"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "42531"
},
{
"name": "HTML",
"bytes": "1171692"
},
{
"name": "Java",
"bytes": "51034"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833435"
},
{
"name": "Makefile",
"bytes": "23439"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "136850"
},
{
"name": "Python",
"bytes": "11873711"
},
{
"name": "Shell",
"bytes": "267180"
},
{
"name": "TypeScript",
"bytes": "675176"
}
],
"symlink_target": ""
}
|
import copy
import fixtures
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import api as dbapi
from neutron.db import flavors_db
from neutron.db import servicetype_db
from neutron.extensions import flavors
from neutron.plugins.common import constants
from neutron.services.flavors import flavors_plugin
from neutron.services import provider_configuration as provconf
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import base as extension
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
_driver = ('neutron.tests.unit.extensions.test_flavors.'
'DummyServiceDriver')
_provider = 'dummy'
_long_name = 'x' * (attr.NAME_MAX_LEN + 1)
_long_description = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN + 1)
class FlavorExtensionTestCase(extension.ExtensionTestCase):
def setUp(self):
super(FlavorExtensionTestCase, self).setUp()
self._setUpExtension(
'neutron.services.flavors.flavors_plugin.FlavorsPlugin',
constants.FLAVORS, flavors.RESOURCE_ATTRIBUTE_MAP,
flavors.Flavors, '', supported_extension_aliases='flavors')
def test_create_flavor(self):
tenant_id = uuidutils.generate_uuid()
# Use service_type FLAVORS since plugin must be loaded to validate
data = {'flavor': {'name': 'GOLD',
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
expected = copy.deepcopy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.create_flavor.return_value = expected['flavor']
res = self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flavor.assert_called_with(mock.ANY,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_create_flavor_invalid_service_type(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': 'GOLD',
'service_type': 'BROKEN',
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_name(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
        data = {'flavor': {'name': 'GOLD',
'service_type': constants.FLAVORS,
'description': _long_description,
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
        data = {'flavor': {'name': 'GOLD',
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': 'BROKEN'}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': 'the best flavor',
'enabled': True}}
expected = copy.copy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.update_flavor.return_value = expected['flavor']
res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.update_flavor.assert_called_with(mock.ANY,
flavor_id,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_update_flavor_too_long_name(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': _long_name,
'description': 'the best flavor',
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_too_long_description(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': _long_description,
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_invalid_enabled(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
                           'description': 'the best flavor',
'enabled': 'BROKEN'}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_flavor(self):
flavor_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_flavor.assert_called_with(mock.ANY,
flavor_id)
def test_show_flavor(self):
flavor_id = 'fake_id'
expected = {'flavor': {'id': flavor_id,
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_flavors(self):
data = {'flavors': [{'id': 'id1',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']},
{'id': 'id2',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
res = self.api.get(_get_path('flavors', fmt=self.fmt))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(data, res)
def test_create_service_profile(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
instance = self.plugin.return_value
instance.create_service_profile.return_value = (
expected['service_profile'])
res = self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_service_profile.assert_called_with(
mock.ANY,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_create_service_profile_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': _long_description,
'driver': '',
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_too_long_driver(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': _long_description,
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'enabled': 'BROKEN',
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': False,
'metainfo': '{"data1": "value3"}'}}
instance = self.plugin.return_value
instance.update_service_profile.return_value = (
expected['service_profile'])
res = self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.update_service_profile.assert_called_with(
mock.ANY,
sp_id,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_update_service_profile_too_long_description(self):
sp_id = "fake_id"
        expected = {'service_profile': {'description': _long_description,
                                        'enabled': True,
                                        'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile_invalid_enabled(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': 'BROKEN',
'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_service_profile(self):
sp_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_service_profile.assert_called_with(mock.ANY,
sp_id)
def test_show_service_profile(self):
sp_id = 'fake_id'
expected = {'service_profile': {'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}}
instance = self.plugin.return_value
instance.get_service_profile.return_value = (
expected['service_profile'])
res = self.api.get(_get_path('service_profiles',
id=sp_id, fmt=self.fmt))
instance.get_service_profile.assert_called_with(mock.ANY,
sp_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_service_profiles(self):
expected = {'service_profiles': [{'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True},
{'id': 'id2',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}]}
instance = self.plugin.return_value
instance.get_service_profiles.return_value = (
expected['service_profiles'])
res = self.api.get(_get_path('service_profiles', fmt=self.fmt))
instance.get_service_profiles.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_associate_service_profile_with_flavor(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'id': _uuid(),
'tenant_id': tenant_id}}
instance = self.plugin.return_value
instance.create_flavor_service_profile.return_value = (
expected['service_profile'])
res = self.api.post('/flavors/fl_id/service_profiles',
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_flavor_service_profile.assert_called_with(
mock.ANY, service_profile=expected, flavor_id='fl_id')
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_disassociate_service_profile_with_flavor(self):
instance = self.plugin.return_value
instance.delete_flavor_service_profile.return_value = None
self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
content_type='application/%s' % self.fmt)
instance.delete_flavor_service_profile.assert_called_with(
mock.ANY,
'fake_spid',
flavor_id='fl_id')
def test_update_association_error(self):
"""Confirm that update is not permitted with user error."""
new_id = uuidutils.generate_uuid()
data = {'service_profile': {'id': new_id}}
self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
class DummyCorePlugin(object):
pass
class DummyServicePlugin(object):
def driver_loaded(self, driver, service_profile):
pass
def get_plugin_type(self):
return constants.DUMMY
def get_plugin_description(self):
return "Dummy service plugin, aware of flavors"
class DummyServiceDriver(object):
@staticmethod
def get_service_type():
return constants.DUMMY
def __init__(self, plugin):
pass
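# Note (added for clarity, not part of the original test module): the provider
# entries registered in FlavorPluginTestCase below use neutron's
# "<service_type>:<provider_name>:<driver_class_path>" format, built here as
# DummyServiceDriver.get_service_type() + ":" + _provider + ":" + _driver.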
class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
base.PluginFixture):
def setUp(self):
super(FlavorPluginTestCase, self).setUp()
self.config_parse()
cfg.CONF.set_override(
'core_plugin',
'neutron.tests.unit.extensions.test_flavors.DummyCorePlugin')
cfg.CONF.set_override(
'service_plugins',
['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin'])
self.useFixture(
fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
self.plugin = flavors_plugin.FlavorsPlugin()
self.ctx = context.get_admin_context()
providers = [DummyServiceDriver.get_service_type() +
":" + _provider + ":" + _driver]
self.service_manager = servicetype_db.ServiceTypeManager.get_instance()
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
self.service_providers.return_value = providers
for provider in providers:
self.service_manager.add_provider_configuration(
provider.split(':')[0], provconf.ProviderConfiguration())
dbapi.get_engine()
def _create_flavor(self, description=None):
flavor = {'flavor': {'name': 'GOLD',
'service_type': constants.DUMMY,
'description': description or 'the best flavor',
'enabled': True}}
return self.plugin.create_flavor(self.ctx, flavor), flavor
def test_create_flavor(self):
self._create_flavor()
res = self.ctx.session.query(flavors_db.Flavor).all()
self.assertEqual(1, len(res))
self.assertEqual('GOLD', res[0]['name'])
self.assertEqual(constants.DUMMY, res[0]['service_type'])
def test_update_flavor(self):
fl, flavor = self._create_flavor()
flavor = {'flavor': {'name': 'Silver',
'enabled': False}}
self.plugin.update_flavor(self.ctx, fl['id'], flavor)
res = (self.ctx.session.query(flavors_db.Flavor).
filter_by(id=fl['id']).one())
self.assertEqual('Silver', res['name'])
self.assertFalse(res['enabled'])
def test_delete_flavor(self):
fl, data = self._create_flavor()
self.plugin.delete_flavor(self.ctx, fl['id'])
res = (self.ctx.session.query(flavors_db.Flavor).all())
self.assertFalse(res)
def test_show_flavor(self):
fl, data = self._create_flavor()
show_fl = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(fl, show_fl)
def test_get_flavors(self):
fl, flavor = self._create_flavor()
flavor['flavor']['name'] = 'SILVER'
self.plugin.create_flavor(self.ctx, flavor)
show_fl = self.plugin.get_flavors(self.ctx)
self.assertEqual(2, len(show_fl))
def _create_service_profile(self, description=None):
data = {'service_profile':
{'description': description or 'the best sp',
'driver': _driver,
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
return sp, data
def test_create_service_profile(self):
sp, data = self._create_service_profile()
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['driver'], res['driver'])
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_create_service_profile_empty_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['driver'], res['driver'])
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_create_service_profile_invalid_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': "Broken",
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.assertRaises(flavors.ServiceProfileDriverNotFound,
self.plugin.create_service_profile,
self.ctx,
data)
def test_create_service_profile_invalid_empty(self):
data = {'service_profile':
{'description': '',
'driver': '',
'enabled': True,
'metainfo': ''}}
self.assertRaises(flavors.ServiceProfileEmpty,
self.plugin.create_service_profile,
self.ctx,
data)
def test_update_service_profile(self):
sp, data = self._create_service_profile()
data['service_profile']['metainfo'] = '{"data": "value1"}'
sp = self.plugin.update_service_profile(self.ctx, sp['id'],
data)
res = (self.ctx.session.query(flavors_db.ServiceProfile).
filter_by(id=sp['id']).one())
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_delete_service_profile(self):
sp, data = self._create_service_profile()
self.plugin.delete_service_profile(self.ctx, sp['id'])
res = self.ctx.session.query(flavors_db.ServiceProfile).all()
self.assertFalse(res)
def test_show_service_profile(self):
sp, data = self._create_service_profile()
sp_show = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(sp, sp_show)
def test_get_service_profiles(self):
self._create_service_profile()
self._create_service_profile(description='another sp')
self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx)))
def test_associate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertEqual(fl['id'], binding['flavor_id'])
self.assertEqual(sp['id'], binding['service_profile_id'])
res = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(1, len(res['service_profiles']))
self.assertEqual(sp['id'], res['service_profiles'][0])
res = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(1, len(res['flavors']))
self.assertEqual(fl['id'], res['flavors'][0])
def test_autodelete_flavor_associations(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor(self.ctx, fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertIsNone(binding)
def test_associate_service_profile_with_flavor_exists(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(flavors.FlavorServiceProfileBindingExists,
self.plugin.create_flavor_service_profile,
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
def test_disassociate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor_service_profile(
self.ctx, sp['id'], fl['id'])
binding = (
self.ctx.session.query(flavors_db.FlavorServiceProfileBinding).
first())
self.assertIsNone(binding)
self.assertRaises(
flavors.FlavorServiceProfileBindingNotFound,
self.plugin.delete_flavor_service_profile,
self.ctx, sp['id'], fl['id'])
def test_delete_service_profile_in_use(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileInUse,
self.plugin.delete_service_profile,
self.ctx,
sp['id'])
def test_get_flavor_next_provider_no_binding(self):
fl, data = self._create_flavor()
self.assertRaises(
flavors.FlavorServiceProfileBindingNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_disabled(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': _driver,
'enabled': False,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileDisabled,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_no_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flavors.ServiceProfileDriverNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
providers = self.plugin.get_flavor_next_provider(
self.ctx,
fl['id'])
self.assertEqual(_provider, providers[0].get('provider', None))
|
{
"content_hash": "59bded3da2c771a663b14c0172fd9c04",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 79,
"avg_line_length": 43.34943181818182,
"alnum_prop": 0.516383773510715,
"repo_name": "chitr/neutron",
"id": "bcc1eec8b604f7e776f6eb7c3a38f5509a86c9bc",
"size": "31096",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/extensions/test_flavors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7647002"
},
{
"name": "Shell",
"bytes": "13342"
}
],
"symlink_target": ""
}
|
import base64
import os
from Plugin import PluginManager
from Crypt import CryptBitcoin
from lib.pybitcointools import bitcoin as btctools
import CryptMessage
@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
def encrypt(self, text, publickey):
encrypted = CryptMessage.encrypt(text, CryptMessage.toOpensslPublickey(publickey))
return encrypted
def decrypt(self, encrypted, privatekey):
back = CryptMessage.getEcc(privatekey).decrypt(encrypted)
return back.decode("utf8")
# - Actions -
# Returns user's public key unique to site
# Return: Public key
def actionUserPublickey(self, to, index=0):
publickey = self.user.getEncryptPublickey(self.site.address, index)
self.response(to, publickey)
    # Encrypt a text using the given publickey or the user's site-unique publickey
# Return: Encrypted text using base64 encoding
def actionEciesEncrypt(self, to, text, publickey=0, return_aes_key=False):
if type(publickey) is int: # Encrypt using user's publickey
publickey = self.user.getEncryptPublickey(self.site.address, publickey)
aes_key, encrypted = self.encrypt(text.encode("utf8"), publickey.decode("base64"))
if return_aes_key:
self.response(to, [base64.b64encode(encrypted), base64.b64encode(aes_key)])
else:
self.response(to, base64.b64encode(encrypted))
    # Decrypt a text using the given privatekey or the user's site-unique privatekey
# Return: Decrypted text or list of decrypted texts
def actionEciesDecrypt(self, to, param, privatekey=0):
if type(privatekey) is int: # Decrypt using user's privatekey
privatekey = self.user.getEncryptPrivatekey(self.site.address, privatekey)
if type(param) == list:
encrypted_texts = param
else:
encrypted_texts = [param]
texts = [] # Decoded texts
for encrypted_text in encrypted_texts:
try:
text = self.decrypt(encrypted_text.decode("base64"), privatekey)
texts.append(text)
except Exception, err:
texts.append(None)
if type(param) == list:
self.response(to, texts)
else:
self.response(to, texts[0])
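    # Illustrative round trip (not part of the original plugin): a site script
    # would typically call actionUserPublickey to fetch its site key, encrypt
    # with actionEciesEncrypt using that key, and later recover the text with
    # actionEciesDecrypt; payloads travel base64 encoded in both directions.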
# Encrypt a text using AES
# Return: Iv, AES key, Encrypted text
def actionAesEncrypt(self, to, text, key=None, iv=None):
from lib import pyelliptic
        if key:  # Use the given AES key, otherwise generate a new one
            key = key.decode("base64")
        else:
            key = os.urandom(32)
        if iv:  # Use the given IV, otherwise generate a new one
            iv = iv.decode("base64")
        else:
            iv = pyelliptic.Cipher.gen_IV('aes-256-cbc')
if text:
encrypted = pyelliptic.Cipher(key, iv, 1, ciphername='aes-256-cbc').ciphering(text.encode("utf8"))
else:
encrypted = ""
self.response(to, [base64.b64encode(key), base64.b64encode(iv), base64.b64encode(encrypted)])
# Decrypt a text using AES
# Return: Decrypted text
def actionAesDecrypt(self, to, *args):
from lib import pyelliptic
if len(args) == 3: # Single decrypt
encrypted_texts = [(args[0], args[1])]
keys = [args[2]]
else: # Batch decrypt
encrypted_texts, keys = args
texts = [] # Decoded texts
for iv, encrypted_text in encrypted_texts:
encrypted_text = encrypted_text.decode("base64")
iv = iv.decode("base64")
text = None
for key in keys:
ctx = pyelliptic.Cipher(key.decode("base64"), iv, 0, ciphername='aes-256-cbc')
try:
decrypted = ctx.ciphering(encrypted_text)
if decrypted and decrypted.decode("utf8"): # Valid text decoded
text = decrypted
except Exception, err:
pass
texts.append(text)
if len(args) == 3:
self.response(to, texts[0])
else:
self.response(to, texts)
@PluginManager.registerTo("User")
class UserPlugin(object):
def getEncryptPrivatekey(self, address, param_index=0):
assert param_index >= 0 and param_index <= 1000
site_data = self.getSiteData(address)
if site_data.get("cert"): # Different privatekey for different cert provider
index = param_index + self.getAddressAuthIndex(site_data["cert"])
else:
index = param_index
if "encrypt_privatekey_%s" % index not in site_data:
address_index = self.getAddressAuthIndex(address)
crypt_index = address_index + 1000 + index
site_data["encrypt_privatekey_%s" % index] = CryptBitcoin.hdPrivatekey(self.master_seed, crypt_index)
self.log.debug("New encrypt privatekey generated for %s:%s" % (address, index))
return site_data["encrypt_privatekey_%s" % index]
def getEncryptPublickey(self, address, param_index=0):
assert param_index >= 0 and param_index <= 1000
site_data = self.getSiteData(address)
if site_data.get("cert"): # Different privatekey for different cert provider
index = param_index + self.getAddressAuthIndex(site_data["cert"])
else:
index = param_index
if "encrypt_publickey_%s" % index not in site_data:
privatekey = self.getEncryptPrivatekey(address, param_index)
publickey = btctools.encode_pubkey(btctools.privtopub(privatekey), "bin_compressed")
site_data["encrypt_publickey_%s" % index] = base64.b64encode(publickey)
return site_data["encrypt_publickey_%s" % index]
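# Note (added for clarity, not part of the original plugin): both helpers above
# derive site-specific keys deterministically from the user's master seed via
# CryptBitcoin.hdPrivatekey with crypt_index = address_auth_index + 1000 + index,
# so repeated calls for the same site (and cert) yield the same keypair; the
# site_data entries merely cache the derived values.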
|
{
"content_hash": "4c77d10a94201850714f9d26afac7f09",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 113,
"avg_line_length": 38.52348993288591,
"alnum_prop": 0.6179442508710802,
"repo_name": "kustomzone/Fuzium",
"id": "0302c83ac61e830de9b7979e9b21b5fc44abd1d6",
"size": "5740",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "core/plugins/CryptMessage/CryptMessagePlugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1204"
},
{
"name": "C",
"bytes": "34092"
},
{
"name": "CSS",
"bytes": "373182"
},
{
"name": "CoffeeScript",
"bytes": "88917"
},
{
"name": "HTML",
"bytes": "123191"
},
{
"name": "JavaScript",
"bytes": "2133526"
},
{
"name": "Python",
"bytes": "2843920"
},
{
"name": "Shell",
"bytes": "898"
}
],
"symlink_target": ""
}
|
"""Scheduling learning rate."""
import logging
from math import cos, pi
class LRScheduler(object):
"""Base class of a learning rate scheduler.
A scheduler returns a new learning rate based on the number of updates that have
been performed.
Parameters
----------
base_lr : float, optional
The initial learning rate.
warmup_steps: int
number of warmup steps used before this scheduler starts decay
warmup_begin_lr: float
if using warmup, the learning rate from which it starts warming up
warmup_mode: string
warmup can be done in two modes.
'linear' mode gradually increases lr with each step in equal increments
'constant' mode keeps lr at warmup_begin_lr for warmup_steps
"""
def __init__(self, base_lr=0.01,
warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'):
self.base_lr = base_lr
assert isinstance(warmup_steps, int)
self.warmup_steps = warmup_steps
self.warmup_final_lr = base_lr
self.warmup_begin_lr = warmup_begin_lr
if self.warmup_begin_lr > self.warmup_final_lr:
raise ValueError("Base lr has to be higher than warmup_begin_lr")
if self.warmup_steps < 0:
raise ValueError("Warmup steps has to be positive or 0")
if warmup_mode not in ['linear', 'constant']:
raise ValueError("Supports only linear and constant modes of warmup")
self.warmup_mode = warmup_mode
def get_warmup_lr(self, num_update):
assert num_update < self.warmup_steps
if self.warmup_mode == 'linear':
increase = (self.warmup_final_lr - self.warmup_begin_lr) \
* float(num_update) / float(self.warmup_steps)
return self.warmup_begin_lr + increase
elif self.warmup_mode == 'constant':
return self.warmup_begin_lr
else:
raise ValueError("Invalid warmup mode %s"%self.warmup_mode)
def __call__(self, num_update):
"""Return a new learning rate.
The ``num_update`` is the upper bound of the number of updates applied to
every weight.
Assume the optimizer has updated *i*-th weight by *k_i* times, namely
``optimizer.update(i, weight_i)`` is called by *k_i* times. Then::
num_update = max([k_i for all i])
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
raise NotImplementedError("must override this")
class FactorScheduler(LRScheduler):
"""Reduce the learning rate by a factor for every *n* steps.
It returns a new learning rate by::
base_lr * pow(factor, floor(num_update/step))
Parameters
----------
step : int
Changes the learning rate for every n updates.
factor : float, optional
The factor to change the learning rate.
stop_factor_lr : float, optional
Stop updating the learning rate if it is less than this value.
"""
def __init__(self, step, factor=1, stop_factor_lr=1e-8, base_lr=0.01,
warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'):
super(FactorScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode)
if step < 1:
raise ValueError("Schedule step must be greater or equal than 1 round")
if factor > 1.0:
raise ValueError("Factor must be no more than 1 to make lr reduce")
self.step = step
self.factor = factor
self.stop_factor_lr = stop_factor_lr
self.count = 0
def __call__(self, num_update):
if num_update < self.warmup_steps:
return self.get_warmup_lr(num_update)
# NOTE: use while rather than if (for continuing training via load_epoch)
while num_update > self.count + self.step:
self.count += self.step
self.base_lr *= self.factor
if self.base_lr < self.stop_factor_lr:
self.base_lr = self.stop_factor_lr
logging.info("Update[%d]: now learning rate arrived at %0.5e, will not "
"change in the future", num_update, self.base_lr)
else:
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
return self.base_lr
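# Worked example (illustrative, not part of the original module): with
# FactorScheduler(step=100, factor=0.5, base_lr=0.1) and no warmup,
#     num_update=50  -> 0.1
#     num_update=150 -> 0.05
#     num_update=250 -> 0.025
# i.e. the rate is halved each time num_update passes another multiple of
# `step`, and never drops below `stop_factor_lr`.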
class MultiFactorScheduler(LRScheduler):
"""Reduce the learning rate by given a list of steps.
Assume there exists *k* such that::
step[k] <= num_update and num_update < step[k+1]
Then calculate the new learning rate by::
base_lr * pow(factor, k+1)
Parameters
----------
step: list of int
The list of steps to schedule a change
factor: float
The factor to change the learning rate.
warmup_steps: int
number of warmup steps used before this scheduler starts decay
warmup_begin_lr: float
if using warmup, the learning rate from which it starts warming up
warmup_mode: string
warmup can be done in two modes.
'linear' mode gradually increases lr with each step in equal increments
'constant' mode keeps lr at warmup_begin_lr for warmup_steps
"""
def __init__(self, step, factor=1, base_lr=0.01, warmup_steps=0, warmup_begin_lr=0,
warmup_mode='linear'):
super(MultiFactorScheduler, self).__init__(base_lr, warmup_steps,
warmup_begin_lr, warmup_mode)
assert isinstance(step, list) and len(step) >= 1
for i, _step in enumerate(step):
if i != 0 and step[i] <= step[i-1]:
raise ValueError("Schedule step must be an increasing integer list")
if _step < 1:
raise ValueError("Schedule step must be greater or equal than 1 round")
if factor > 1.0:
raise ValueError("Factor must be no more than 1 to make lr reduce")
self.step = step
self.cur_step_ind = 0
self.factor = factor
self.count = 0
def __call__(self, num_update):
if num_update < self.warmup_steps:
return self.get_warmup_lr(num_update)
# NOTE: use while rather than if (for continuing training via load_epoch)
while self.cur_step_ind <= len(self.step)-1:
if num_update > self.step[self.cur_step_ind]:
self.count = self.step[self.cur_step_ind]
self.cur_step_ind += 1
self.base_lr *= self.factor
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
else:
return self.base_lr
return self.base_lr
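# Hedged usage sketch (not part of the original module): with step=[100, 250]
# and factor=0.1, the formula in the docstring gives
#     num_update <= 100        -> base_lr
#     100 < num_update <= 250  -> base_lr * 0.1
#     num_update > 250         -> base_lr * 0.01
# e.g. MultiFactorScheduler(step=[100, 250], factor=0.1, base_lr=1.0)(300)
# returns roughly 0.01 (exact value subject to float rounding).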
class PolyScheduler(LRScheduler):
""" Reduce the learning rate according to a polynomial of given power.
Calculate the new learning rate, after warmup if any, by::
final_lr + (start_lr - final_lr) * (1-nup/max_nup)^pwr
if nup < max_nup, 0 otherwise.
Parameters
----------
max_update: int
maximum number of updates before the decay reaches final learning rate.
base_lr: float
base learning rate to start from
pwr: int
power of the decay term as a function of the current number of updates.
final_lr: float
final learning rate after all steps
warmup_steps: int
number of warmup steps used before this scheduler starts decay
warmup_begin_lr: float
if using warmup, the learning rate from which it starts warming up
warmup_mode: string
warmup can be done in two modes.
'linear' mode gradually increases lr with each step in equal increments
'constant' mode keeps lr at warmup_begin_lr for warmup_steps
"""
def __init__(self, max_update, base_lr=0.01, pwr=2, final_lr=0,
warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'):
super(PolyScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode)
assert isinstance(max_update, int)
if max_update < 1:
raise ValueError("maximum number of updates must be strictly positive")
self.power = pwr
self.base_lr_orig = self.base_lr
self.max_update = max_update
self.final_lr = final_lr
self.max_steps = self.max_update - self.warmup_steps
def __call__(self, num_update):
if num_update < self.warmup_steps:
return self.get_warmup_lr(num_update)
if num_update <= self.max_update:
self.base_lr = self.final_lr + (self.base_lr_orig - self.final_lr) * \
pow(1 - float(num_update - self.warmup_steps) / float(self.max_steps), self.power)
return self.base_lr
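# Hedged worked example (not part of the original module), assuming no warmup:
#     sched = PolyScheduler(max_update=1000, base_lr=0.1, pwr=2, final_lr=0.001)
#     sched(500)  # -> 0.001 + (0.1 - 0.001) * (1 - 500/1000) ** 2 = 0.02575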
class CosineScheduler(LRScheduler):
""" Reduce the learning rate according to a cosine function
Calculate the new learning rate by::
final_lr + (start_lr - final_lr) * (1+cos(pi * nup/max_nup))/2
if nup < max_nup, 0 otherwise.
Parameters
----------
max_update: int
maximum number of updates before the decay reaches 0
base_lr: float
base learning rate
final_lr: float
final learning rate after all steps
warmup_steps: int
number of warmup steps used before this scheduler starts decay
warmup_begin_lr: float
if using warmup, the learning rate from which it starts warming up
warmup_mode: string
warmup can be done in two modes.
'linear' mode gradually increases lr with each step in equal increments
'constant' mode keeps lr at warmup_begin_lr for warmup_steps
"""
def __init__(self, max_update, base_lr=0.01, final_lr=0,
warmup_steps=0, warmup_begin_lr=0, warmup_mode='linear'):
super(CosineScheduler, self).__init__(base_lr, warmup_steps, warmup_begin_lr, warmup_mode)
assert isinstance(max_update, int)
if max_update < 1:
raise ValueError("maximum number of updates must be strictly positive")
self.base_lr_orig = base_lr
self.max_update = max_update
self.final_lr = final_lr
self.max_steps = self.max_update - self.warmup_steps
def __call__(self, num_update):
if num_update < self.warmup_steps:
return self.get_warmup_lr(num_update)
if num_update <= self.max_update:
self.base_lr = self.final_lr + (self.base_lr_orig - self.final_lr) * \
(1 + cos(pi * (num_update - self.warmup_steps) / self.max_steps)) / 2
return self.base_lr
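# Hedged worked example (not part of the original module), assuming no warmup:
#     sched = CosineScheduler(max_update=1000, base_lr=0.1, final_lr=0.001)
#     sched(500)  # -> 0.001 + (0.1 - 0.001) * (1 + cos(pi * 0.5)) / 2 = 0.0505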
|
{
"content_hash": "b835162b7348ed902ff8030462a6578c",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 98,
"avg_line_length": 40.74242424242424,
"alnum_prop": 0.6023614726664187,
"repo_name": "mlperf/training_results_v0.6",
"id": "436085620a2e941404da710f60b30f0f1313861e",
"size": "11542",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/lr_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
import traceback
import better_exceptions
import threading
import SpoonModes
import numpy as np
import random
MODES = SpoonModes.SpoonSliceModes()
LM = SpoonModes.SpoonLoopModes()
from spoon_logging import D, L, W, E
from pythonosc import dispatcher
from pythonosc import osc_server
def set_slice_mode(loop_number, slice_number, new_mode, the_status_obj):
loop_id_s = "/loop/{}".format(loop_number)
the_status_obj.loops[loop_id_s].set_slice_mode(slice_number,new_mode)
D("set the slice mode {} : {} : {} ".format(loop_id_s, slice_number, new_mode))
def set_loop_mode(loop_number, new_mode, the_status_obj):
loop_id_s = "/loop/{}".format(loop_number)
the_status_obj.loops[loop_id_s].set_loop_mode(new_mode)
D("set the loop mode {} : {} ".format(loop_id_s, new_mode))
def set_loop_jump(loop_number, new_position, the_status_obj):
loop_id_s = "/loop/{}".format(loop_number)
the_status_obj.loops[loop_id_s].jump_slice(new_position)
D("set {} new focus to {}".format(loop_id_s,new_position))
return
def set_loop_volume(loop_number, new_vol, the_status_obj):
loop_id_s = "/loop/{}".format(loop_number)
the_status_obj.loops[loop_id_s].set_volume(new_vol)
D("set {} volume to {}".format(loop_id_s, new_vol ))
return
def try_load_new_file_by_int(loop_number, new_file_number,the_status_obj):
# pass an file index number to the system and get the loop to load it.
loop_id_s = "/loop/{}".format(loop_number)
the_status_obj.load_new_audio_file_by_index(loop_id_s, new_file_number)
D("attempted setting loop {} to file index {}".format(loop_number, new_file_number))
return
def loop_mode_callback(unused_addr, args, the_OSC_message_argument):
D("LoopCallback")
#print("{} {} {} ".format(unused_addr, args, the_obj))
message_ok = True
if(len(unused_addr) != 12):
message_ok = False
loop_i = -1
new_mode = 'NOT-SET'
the_status_obj = args[0]
try:
loop_s = unused_addr[6]
loop_i = int(loop_s)
except Exception as jeje:
L("exception in handling OSC message {}".format(unused_addr))
L("Exception: {}".format(jeje))
L("{}".format(repr(jeje)))
message_ok = False
if(loop_i >4 or loop_i < 1):
D("Loop value out of range {} ".format(loop_i))
#do nothing
message_ok = False
new_slice_mode = the_OSC_message_argument
if not LM.is_valid_mode(new_slice_mode):
message_ok = False
if(message_ok):
set_loop_mode(loop_i, new_slice_mode, the_status_obj)
the_status_obj.set_osc_message( "{} : {} DONE".format(unused_addr, the_OSC_message_argument))
else:
# log error message
#TODO fix this error message
L("unable to parse message {} {} ".format(unused_addr, the_OSC_message_argument))
the_status_obj.set_osc_message( "{} : {} FAIL".format(unused_addr, the_OSC_message_argument))
return
#print("Loop:{} slice:{}".format(loop_i,slice_i))
return
def loop_jump_callback(unused_addr, args, the_OSC_message_argument):
D("loop jump callback")
message_ok = True
if(len(unused_addr) != 12):
message_ok = False
the_status_obj = args[0]
try:
loop_s = unused_addr[6]
loop_i = int(loop_s)
jump_i = int(the_OSC_message_argument)
except Exception as jj:
L("Exception parsing {} {}".format(unused_addr), the_OSC_message_argument)
L("Exception {} ".format(jj))
message_ok = False
loop_i = -1
jump_i = -1
if(loop_i >4 or loop_i<1):
D("Loop value out of range {} ".format(loop_i))
message_ok = False
if(jump_i >7 or jump_i <0):
D("jump value out of range {}".format(jump_i))
message_ok = False
if(message_ok):
set_loop_jump(loop_i, jump_i, the_status_obj)
the_status_obj.set_osc_message( "{} : {} DONE".format(unused_addr, the_OSC_message_argument))
else:
# log error message
#TODO fix this error message
L("unable to parse message {} {} ".format(unused_addr, the_OSC_message_argument))
the_status_obj.set_osc_message( "{} : {} FAIL".format(unused_addr, the_OSC_message_argument))
return
def slice_callback(unused_addr, args, the_OSC_message_argument):
D("SliceCallback")
#print("{} {} {} ".format(unused_addr, args, the_obj))
message_ok = True
if(len(unused_addr) != 15):
message_ok = False
loop_i = -1
slice_i = -1
new_mode = 'NOT-SET'
the_status_obj = args[0]
#probably should do a type() check here on the_status_obj
try:
loop_s = unused_addr[6]
slice_s = unused_addr[14]
loop_i = int(loop_s)
slice_i = int(slice_s)
except Exception as jeje:
L("exception in handling OSC message {}".format(unused_addr))
L("Exception: {}".format(jeje))
L("{}".format(repr(jeje)))
message_ok = False
if(loop_i >4 or loop_i < 1):
D("Loop value out of range {} ".format(loop_i))
#do nothing
message_ok = False
if(slice_i<0 or slice_i>7):
D("slice value out of range {} ".format(slice_i))
#do nothing
message_ok = False
#check is the_OSC_message_argument is a correct slice mode
new_slice_mode = the_OSC_message_argument
if not MODES.is_valid_mode(new_slice_mode):
message_ok = False
if(message_ok):
set_slice_mode(loop_i,slice_i, new_slice_mode, the_status_obj)
the_status_obj.set_osc_message( "{} : {} DONE".format(unused_addr, the_OSC_message_argument))
else:
# log error message
#TODO fix this error message
L("unable to parse message {} {} ".format(unused_addr, the_OSC_message_argument))
the_status_obj.set_osc_message( "{} : {} FAIL".format(unused_addr, the_OSC_message_argument))
return
#print("Loop:{} slice:{}".format(loop_i,slice_i))
return
def loop_volume_callback(unused_addr, args, the_OSC_message_argument ):
D("loop_volume_callback")
#set the volume level of a loop.
#.set_volume(float)
#range 0.0 - 1.0
# if(len(unused_addr) != 12):
# message_ok = False
loop_i = -1
new_volume_level = -1.0
message_ok=True
the_status_obj = args[0]
#get the loop id. /loop/*
try:
loop_s = unused_addr[6]
loop_i = int(loop_s)
except Exception as jeje:
L("exception in handling OSC message {}".format(unused_addr))
L("Exception: {}".format(jeje))
L("{}".format(repr(jeje)))
message_ok = False
if(loop_i >4 or loop_i < 1):
D("Loop value out of range {} ".format(loop_i))
#do nothing
message_ok = False
#convert the argument to a float.
try:
new_volume_level = float(the_OSC_message_argument)
if(new_volume_level < 0.0):
message_ok = False
#float parsing error handling.
except Exception as flt_error:
L("exception handing OSC message {}".format(unused_addr))
L("exception parsing {}".format( the_OSC_message_argument))
L("Exception: {}".format(flt_error))
L("{}".format(rep(flt_error)))
message_ok = False
if(message_ok):
set_loop_volume(loop_i, new_volume_level, the_status_obj)
the_status_obj.set_osc_message( "{} : {} DONE".format(unused_addr, the_OSC_message_argument))
else:
# log error message
#TODO fix this error message
L("unable to parse message {} {} ".format(unused_addr, the_OSC_message_argument))
the_status_obj.set_osc_message( "{} : {} FAIL".format(unused_addr, the_OSC_message_argument))
return
def loop_file_callback(unused_addr, args, the_OSC_message_argument):
D("loop file callback")
# has an argument of an int.
message_ok=True
the_status_obj = args[0]
#get the loop id. /loop/*
try:
loop_s = unused_addr[6]
loop_i = int(loop_s)
# get the track index
new_track_index=int(the_OSC_message_argument)
except Exception as errerr:
L("exception with loop_file_callback ")
L("Error: {}".format(errerr))
message_ok = False
return
if(loop_i not in range(1,5)):
message_ok = False
if ( message_ok):
try_load_new_file_by_int(loop_i,new_track_index, the_status_obj)
the_status_obj.set_osc_message( "{} : {} DONE".format(unused_addr, the_OSC_message_argument))
D("loop_file_callback {} {}".format(loop_i,new_track_index))
else:
L("message not processed. loop_file_callback")
L("Message received {}".format(the_OSC_message_argument))
L("Message Received {} ".format(unused_addr))
L("Message Received {}".format(args))
the_status_obj.set_osc_message( "{} : {} FAIL".format(unused_addr, the_OSC_message_argument))
return
def default_callback(unused_addr, args):
L("unknown message {} {}".format(unused_addr, args))
return
def start_OSC_server(spoon_status_object):
disp = dispatcher.Dispatcher()
disp.map("/loop/*/slice/*", slice_callback, spoon_status_object)
disp.map("/loop/*/mode", loop_mode_callback, spoon_status_object)
disp.map("/loop/*/jump", loop_jump_callback, spoon_status_object)
disp.map("/loop/*/volume", loop_volume_callback, spoon_status_object)
disp.map("/loop/*/file", loop_file_callback, spoon_status_object)
disp.set_default_handler(default_callback)
new_osc_port = spoon_status_object.osc_port
server = osc_server.ThreadingOSCUDPServer( ("127.0.0.1", new_osc_port), disp)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
L("Serving on {}".format(server.server_address))
#return the server and thread so we can shut it down when we quit
# server.shutdown()
return server , server_thread
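# Hedged client-side sketch (not part of this module): the addresses below match
# the dispatcher mappings registered in start_OSC_server(); the port, loop/slice
# numbers and mode names are made up for illustration and must be valid per
# SpoonLoopModes / SpoonSliceModes.
#
#     from pythonosc import udp_client
#     client = udp_client.SimpleUDPClient("127.0.0.1", 9000)
#     client.send_message("/loop/1/mode", "some_loop_mode")      # hypothetical mode name
#     client.send_message("/loop/1/slice/3", "some_slice_mode")  # hypothetical mode name
#     client.send_message("/loop/2/volume", 0.8)
#     client.send_message("/loop/2/jump", 4)
#     client.send_message("/loop/2/file", 0)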
|
{
"content_hash": "f90a0870362559614d4fa13350cfda61",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 102,
"avg_line_length": 34.824742268041234,
"alnum_prop": 0.6035129267811328,
"repo_name": "zarquin/SpoonFight",
"id": "9021d5635269f57f4660eadd673f505ffef9cff6",
"size": "10134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SpnOSCReceiver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60580"
}
],
"symlink_target": ""
}
|
import encoder
import unittest
import visual_metrics
def LinearVector(slope=0.0, offset=0.0):
"""A point pair set that has points in a straight line."""
return [[float(i), float(i*slope)+offset] for i in (10, 20, 30, 40)]
class FakeCodec(object):
def __init__(self):
self.name = 'mock'
self.option_set = encoder.OptionSet()
class FakeContext(object):
def __init__(self):
self.codec = FakeCodec()
class FakeOptimizer(object):
def __init__(self):
self.context = FakeContext()
def BestEncoding(self, rate, videofile):
# pylint: disable=W0613,R0201
return FakeEncoding()
def Score(self, encoding):
# pylint: disable=R0201, W0613
return 1.0
class FakeEncoding(object):
def __init__(self):
self.result = {'bitrate': 1000, 'psnr': 1.0}
def Execute(self):
pass
def Store(self):
pass
def Result(self):
return self.result
class TestVisualMetricsFunctions(unittest.TestCase):
def test_GraphBetter(self):
metric_set_1 = LinearVector(slope=1)
# A set compared to itself should have zero difference.
self.assertEquals(0.0,
visual_metrics.GraphBetter(metric_set_1, metric_set_1,
use_set2_as_base=False))
# A test set at exactly double the bitrate. Still linear.
metric_set_2 = LinearVector(slope=2)
self.assertAlmostEqual(
50.0,
100*visual_metrics.GraphBetter(metric_set_1,
metric_set_2,
use_set2_as_base=False))
self.assertAlmostEqual(
-100.0, 100*visual_metrics.GraphBetter(metric_set_2, metric_set_1,
use_set2_as_base=False))
self.assertAlmostEqual(
100.0, 100*visual_metrics.GraphBetter(metric_set_1, metric_set_2,
use_set2_as_base=True))
self.assertAlmostEqual(
-50.0, 100*visual_metrics.GraphBetter(metric_set_2, metric_set_1,
use_set2_as_base=True))
def test_bdsnr(self):
metric_set_1 = LinearVector(slope=1)
self.assertEquals(0.0, visual_metrics.bdsnr(metric_set_1, metric_set_1))
# A test set at exactly double the bitrate. Still linear.
# This test depends on the length of the vector, so not a good fit for
# bdsnr.
metric_set_2 = LinearVector(slope=2)
self.assertAlmostEqual(
21.6, visual_metrics.bdsnr(metric_set_1, metric_set_2), delta=0.5)
self.assertAlmostEqual(
-21.6, visual_metrics.bdsnr(metric_set_2, metric_set_1), delta=0.5)
# A test with a constant improvement in metric.
metric_set_3 = LinearVector(slope=1, offset=2)
self.assertAlmostEqual(
2.0, visual_metrics.bdsnr(metric_set_1, metric_set_3))
self.assertAlmostEqual(
-2.0, visual_metrics.bdsnr(metric_set_3, metric_set_1))
def test_bdrate(self):
metric_set_1 = LinearVector(slope=1)
self.assertEquals(0.0, visual_metrics.bdrate(metric_set_1, metric_set_1))
# A test set at exactly double the bitrate. Still linear.
metric_set_2 = LinearVector(slope=2)
self.assertAlmostEqual(
-50.0, visual_metrics.bdrate(metric_set_1, metric_set_2), delta=0.5)
self.assertAlmostEqual(
100.0, visual_metrics.bdrate(metric_set_2, metric_set_1), delta=2.0)
def test_DataSetBetter(self):
metric_set_1 = LinearVector(slope=1)
metric_set_2 = LinearVector(slope=2)
metric_set_3 = LinearVector(slope=1, offset=2)
self.assertAlmostEqual(
100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'avg'))
self.assertAlmostEqual(
100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'bdrate'),
delta=2.0)
self.assertAlmostEqual(
2.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_3, 'dsnr'))
def test_HtmlPage(self):
page_template = 'Test: //%%filestable_dpsnr%%//'
expected_result = 'Test: result'
filestable = {'dsnr': 'result', 'avg': 'notused', 'drate': 'notused'}
result = visual_metrics.HtmlPage(page_template, filestable, None, None)
self.assertEquals(expected_result, result)
def test_ListOneTarget(self):
datatable = {}
filename = 'file_10x10_10'
videofile = encoder.Videofile(filename)
visual_metrics.ListOneTarget([FakeOptimizer()], 1000, videofile,
False, datatable)
self.assertEquals(1, len(datatable['mock'][filename]))
def test_CrossPerformanceGvizTable(self):
datatable = {'dummy1':{}}
metric = 'meaningless'
codecs = ['dummy1', 'dummy2']
# This should result in an empty table, with correct headers and col1.
data_table = visual_metrics.CrossPerformanceGvizTable(
datatable, metric, codecs, 'psnr')
self.assertEquals(2, data_table.NumberOfRows())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "366a3a51b3f75c692df47f25f20ef4c8",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 35.28776978417266,
"alnum_prop": 0.6389398572884811,
"repo_name": "alvestrand/old-compare-codecs",
"id": "65af54bdbb17dd5c8d27eee9862933c18d3a3fd1",
"size": "5542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/visual_metrics_unittest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3991"
},
{
"name": "CSS",
"bytes": "8583"
},
{
"name": "JavaScript",
"bytes": "1531"
},
{
"name": "Python",
"bytes": "213473"
},
{
"name": "Shell",
"bytes": "10981"
}
],
"symlink_target": ""
}
|
import json
import socket
import datetime
import argparse
import struct
import random
import time
import weakref
from binascii import hexlify, unhexlify
from cryptokit import target_from_diff, uint256_from_str
from gevent import sleep, with_timeout
from gevent.queue import Queue
from gevent.pool import Pool
from gevent.server import StreamServer
from pprint import pformat
from .agent_server import AgentServer, AgentClient
from .exceptions import LoopExit
from .server import GenericClient
from .utils import time_format
from .exceptions import ConfigurationError
from .lib import Component, loop, REQUIRED
class ArgumentParserError(Exception):
pass
class ThrowingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ArgumentParserError(message)
password_arg_parser = ThrowingArgumentParser()
password_arg_parser.add_argument('-d', '--diff', type=float)
class StratumServer(Component, StreamServer):
""" A single port binding of our stratum server. """
one_min_stats = ['stratum_connects', 'stratum_disconnects',
'agent_connects', 'agent_disconnects',
'reject_low_share_n1', 'reject_dup_share_n1',
'reject_stale_share_n1', 'acc_share_n1',
'reject_low_share_count', 'reject_dup_share_count',
'reject_stale_share_count', 'acc_share_count',
'unk_err', 'not_authed_err', 'not_subbed_err']
# enhance readability by reducing magic number use...
defaults = dict(address="0.0.0.0",
port=3333,
start_difficulty=128,
reporter=None,
jobmanager=None,
algo=REQUIRED,
idle_worker_threshold=300,
aliases={},
valid_address_versions=[],
donate_key="donate",
vardiff=dict(enabled=False,
spm_target=20,
interval=30,
tiers=[8, 16, 32, 64, 96, 128, 192, 256, 512]),
minimum_manual_diff=64,
push_job_interval=30,
idle_worker_disconnect_threshold=3600,
agent=dict(enabled=False,
port_diff=1111,
timeout=120,
accepted_types=['temp', 'status', 'hashrate',
'thresholds']))
# Don't spawn a greenlet to handle creation of clients, we start one for
# reading and one for writing in their own class...
_spawn = None
def __init__(self, config):
self._configure(config)
self.agent_servers = []
# Start a corresponding agent server
if self.config['agent']['enabled']:
serv = AgentServer(self)
self.agent_servers.append(serv)
# A dictionary of all connected clients indexed by id
self.clients = {}
self.agent_clients = {}
# A dictionary of lists of connected clients indexed by address
self.address_lut = {}
# A dictionary of lists of connected clients indexed by address and
# worker tuple
self.address_worker_lut = {}
# counters that allow quick display of these numbers. stratum only
self.authed_clients = 0
self.idle_clients = 0
# Unique client ID counters for stratum and agents
self.stratum_id_count = 0
self.agent_id_count = 0
# Track the last job we pushed and when we pushed it
self.last_flush_job = None
self.last_flush_time = None
self.listener = None
def start(self, *args, **kwargs):
self.listener = (self.config['address'],
self.config['port'] + self.manager.config['server_number'])
StreamServer.__init__(self, self.listener, spawn=Pool())
self.algo = self.manager.algos[self.config['algo']]
if not self.config['reporter'] and len(self.manager.component_types['Reporter']) == 1:
self.reporter = self.manager.component_types['Reporter'][0]
elif not self.config['reporter']:
raise ConfigurationError(
"There are more than one Reporter components, target reporter"
"must be specified explicitly!")
else:
self.reporter = self._lookup(self.config['reporter'])
if not self.config['jobmanager'] and len(self.manager.component_types['Jobmanager']) == 1:
self.jobmanager = self.manager.component_types['Jobmanager'][0]
elif not self.config['jobmanager']:
raise ConfigurationError(
"There are more than one Jobmanager components, target jobmanager "
"must be specified explicitly!")
else:
self.jobmanager = self._lookup(self.config['jobmanager'])
self.jobmanager.new_job.rawlink(self.new_job)
self.logger.info("Stratum server starting up on {}".format(self.listener))
for serv in self.agent_servers:
serv.start()
StreamServer.start(self, *args, **kwargs)
Component.start(self)
def stop(self, *args, **kwargs):
self.logger.info("Stratum server {} stopping".format(self.listener))
StreamServer.close(self)
for serv in self.agent_servers:
serv.stop()
for client in self.clients.values():
client.stop()
StreamServer.stop(self)
Component.stop(self)
self.logger.info("Exit")
def handle(self, sock, address):
""" A new connection appears on the server, so setup a new StratumClient
object to manage it. """
self.logger.info("Recieving stratum connection from addr {} on sock {}"
.format(address, sock))
self.stratum_id_count += 1
client = StratumClient(
sock,
address,
config=self.config,
logger=self.logger,
jobmanager=self.jobmanager,
manager=self.manager,
algo=self.algo,
server=self,
reporter=self.reporter)
client.start()
def new_job(self, event):
job = event.job
t = time.time()
job.stratum_string()
flush = job.flush
for client in self.clients.itervalues():
if client.authenticated:
client._push(job, flush=flush, block=False)
self.logger.info("New job enqueued for transmission to {} users in {}"
.format(len(self.clients), time_format(time.time() - t)))
self.last_flush_job = job
self.last_flush_time = time.time()
@property
def status(self):
""" For display in the http monitor """
hps = (self.algo['hashes_per_share'] *
self.counters['acc_share_n1'].minute /
60.0)
dct = dict(mhps=hps / 1000000.0,
hps=hps,
last_flush_job=None,
agent_client_count=len(self.agent_clients),
client_count=len(self.clients),
address_count=len(self.address_lut),
address_worker_count=len(self.address_worker_lut),
client_count_authed=self.authed_clients,
client_count_active=len(self.clients) - self.idle_clients,
client_count_idle=self.idle_clients)
if self.last_flush_job:
j = self.last_flush_job
dct['last_flush_job'] = dict(
algo=j.algo,
pow_block_hash=j.pow_block_hash,
currency=j.currency,
job_id=j.job_id,
merged_networks=j.merged_data.keys(),
pushed_at=self.last_flush_time
)
return dct
def set_user(self, client):
""" Add the client (or create) appropriate worker and address trackers
"""
user_worker = (client.address, client.worker)
self.address_worker_lut.setdefault(user_worker, [])
self.address_worker_lut[user_worker].append(client)
self.authed_clients += 1
self.address_lut.setdefault(user_worker[0], [])
self.address_lut[user_worker[0]].append(client)
def add_client(self, client):
if isinstance(client, StratumClient):
self._incr('stratum_connects')
self.clients[client._id] = client
elif isinstance(client, AgentClient):
self._incr('agent_connects')
self.agent_clients[client._id] = client
else:
self.logger.warn("Add client got unknown client of type {}"
.format(type(client)))
def remove_client(self, client):
""" Manages removing the StratumClient from the luts """
if isinstance(client, StratumClient):
del self.clients[client._id]
address, worker = client.address, client.worker
self._incr('stratum_disconnects')
if client.authenticated:
self.authed_clients -= 1
if client.idle:
self.idle_clients -= 1
# it won't appear in the luts if these values were never set
if address is None and worker is None:
return
# wipe the client from the address tracker
if address in self.address_lut:
# remove from lut for address
self.address_lut[address].remove(client)
# if it's the last client in the object, delete the entry
if not len(self.address_lut[address]):
del self.address_lut[address]
# wipe the client from the address/worker lut
key = (address, worker)
if key in self.address_worker_lut:
self.address_worker_lut[key].remove(client)
# if it's the last client in the object, delete the entry
if not len(self.address_worker_lut[key]):
del self.address_worker_lut[key]
elif isinstance(client, AgentClient):
self._incr('agent_disconnects')
del self.agent_clients[client._id]
else:
self.logger.warn("Remove client got unknown client of type {}"
.format(type(client)))
class StratumClient(GenericClient):
""" Object representation of a single stratum connection to the server. """
# Stratum error codes
errors = {20: 'Other/Unknown',
21: 'Job not found (=stale)',
22: 'Duplicate share',
23: 'Low difficulty share',
24: 'Unauthorized worker',
25: 'Not subscribed'}
error_counter = {20: 'unk_err',
24: 'not_authed_err',
25: 'not_subbed_err'}
# enhance readability by reducing magic number use...
STALE_SHARE_ERR = 21
LOW_DIFF_ERR = 23
DUP_SHARE_ERR = 22
# constants for share submission outcomes. returned by the share checker
VALID_SHARE = 0
DUP_SHARE = 1
LOW_DIFF_SHARE = 2
STALE_SHARE = 3
share_type_strings = {0: "acc", 1: "dup", 2: "low", 3: "stale"}
def __init__(self, sock, address, logger, manager, jobmanager, server,
reporter, algo, config):
self.config = config
self.jobmanager = jobmanager
self.manager = manager
self.algo = algo
self.server = server
self.reporter = reporter
self.logger = logger
self.sock = sock
self.address = address
# Seconds before sending keepalive probes
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 120)
# Interval in seconds between keepalive probes
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1)
# Failed keepalive probes before declaring the other end dead
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)
self.authenticated = False
self.subscribed = False
# flags for current connection state
self.idle = False
self.address = None
self.worker = None
self.client_type = None
# the worker id. this is also extranonce 1
id = self.server.stratum_id_count
if self.manager.config['extranonce_serv_size'] == 8:
self._id = hexlify(struct.pack('Q', id))
elif self.manager.config['extranonce_serv_size'] == 4:
self._id = hexlify(struct.pack('I', id))
else:
raise Exception("Unsupported extranonce size!")
t = time.time()
# running total for vardiff
self.accepted_shares = 0
# an index of jobs and their difficulty
self.job_mapper = {}
self.old_job_mapper = {}
self.job_counter = random.randint(0, 100000)
# Allows us to avoid a bunch of clients getting scheduled at the same
# time by offsetting most timing values by this
self.time_seed = random.uniform(0, 10)
# Used to determine if they're idle
self.last_share_submit = t
# Used to determine if we should send another job on read loop timeout
self.last_job_push = t
# Avoids repeat pushing jobs that the client already knows about
self.last_job = None
# Last time vardiff happened
self.last_diff_adj = t - self.time_seed
# Current difficulty setting
self.difficulty = self.config['start_difficulty']
# the next diff to be used by push job
self.next_diff = self.config['start_difficulty']
# What time the user connected...
self.connection_time = int(t)
# where we put all the messages that need to go out
self.write_queue = Queue()
self.fp = None
self._stopped = False
def _incr(self, *args):
self.server._incr(*args)
def send_error(self, num=20, id_val=1):
""" Utility for transmitting an error to the client """
err = {'id': id_val,
'result': None,
'error': (num, self.errors[num], None)}
self.logger.debug("Error number {}".format(num, self.peer_name[0]))
self.write_queue.put(json.dumps(err, separators=(',', ':')) + "\n")
def send_success(self, id_val=1):
""" Utility for transmitting success to the client """
succ = {'id': id_val, 'result': True, 'error': None}
self.logger.debug("success response: {}".format(pformat(succ)))
self.write_queue.put(json.dumps(succ, separators=(',', ':')) + "\n")
def push_difficulty(self):
""" Pushes the current difficulty to the client. Currently this
only happens upon initial connect, but would be used for vardiff
"""
send = {'params': [self.difficulty],
'id': None,
'method': 'mining.set_difficulty'}
self.write_queue.put(json.dumps(send, separators=(',', ':')) + "\n")
def push_job(self, flush=False, timeout=False):
""" Pushes the latest job down to the client. Flush is whether
or not he should dump his previous jobs or not. Dump will occur
when a new block is found since work on the old block is
invalid."""
job = None
while job is None:
job = self.jobmanager.latest_job
if job is None:
self.logger.warn("No jobs available for worker!")
sleep(0.1)
if self.last_job == job and not timeout:
self.logger.info("Ignoring non timeout resend of job id {} to worker {}.{}"
.format(job.job_id, self.address, self.worker))
return
# we push the next difficulty here instead of in the vardiff block to
# prevent a potential mismatch between client and server
if self.next_diff != self.difficulty:
self.logger.info(
"Pushing diff update {} -> {} before job for {}.{}"
.format(self.difficulty, self.next_diff, self.address, self.worker))
self.difficulty = self.next_diff
self.push_difficulty()
self.logger.debug("Sending job id {} to worker {}.{}{}"
.format(job.job_id, self.address, self.worker,
" after timeout" if timeout else ''))
self._push(job)
def _push(self, job, flush=False, block=True):
""" Abbreviated push update that will occur when pushing new block
notifications. Micro-optimized to try and cut stale share rates as much
as possible. """
self.last_job = job
self.last_job_push = time.time()
# get client local job id to map current difficulty
self.job_counter += 1
if self.job_counter % 10 == 0:
# Swap the two mappers so the previous batch stays resolvable via
# old_job_mapper, and reuse the stale dict to avoid extra GC churn
tmp = self.old_job_mapper
self.old_job_mapper = self.job_mapper
self.job_mapper = tmp
self.job_mapper.clear()
job_id = str(self.job_counter)
self.job_mapper[job_id] = (self.difficulty, weakref.ref(job))
self.write_queue.put(job.stratum_string() % (job_id, "true" if flush else "false"), block=block)
def submit_job(self, data, t):
""" Handles recieving work submission and checking that it is valid
, if it meets network diff, etc. Sends reply to stratum client. """
params = data['params']
# [worker_name, job_id, extranonce2, ntime, nonce]
# ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
if __debug__:
self.logger.debug(
"Recieved work submit:\n\tworker_name: {0}\n\t"
"job_id: {1}\n\textranonce2: {2}\n\t"
"ntime: {3}\n\tnonce: {4} ({int_nonce})"
.format(
*params,
int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))
if self.idle:
self.idle = False
self.server.idle_clients -= 1
self.last_share_submit = time.time()
try:
difficulty, job = self.job_mapper[data['params'][1]]
job = job() # weakref will be none if it's been GCed
except KeyError:
try:
difficulty, job = self.old_job_mapper[data['params'][1]]
job = job() # weakref will be none if it's been GCed
except KeyError:
job = None # Job not in jobmapper at all, we got a bogus submit
# since we can't identify the diff we just have to assume it's
# current diff
difficulty = self.difficulty
if job is None:
self.send_error(self.STALE_SHARE_ERR, id_val=data['id'])
self.reporter.log_share(client=self,
diff=self.difficulty,
typ=self.STALE_SHARE,
params=params,
start=t)
return difficulty, self.STALE_SHARE
# assemble a complete block header bytestring
header = job.block_header(
nonce=params[4],
extra1=self._id,
extra2=params[2],
ntime=params[3])
# Check a submitted share against previous shares to eliminate
# duplicates
share = (self._id, params[2], params[4], params[3])
if share in job.acc_shares:
self.logger.info("Duplicate share rejected from worker {}.{}!"
.format(self.address, self.worker))
self.send_error(self.DUP_SHARE_ERR, id_val=data['id'])
self.reporter.log_share(client=self,
diff=difficulty,
typ=self.DUP_SHARE,
params=params,
job=job,
start=t)
return difficulty, self.DUP_SHARE
job_target = target_from_diff(difficulty, job.diff1)
hash_int = uint256_from_str(self.algo['module'](header))
if hash_int >= job_target:
self.logger.info("Low diff share rejected from worker {}.{}!"
.format(self.address, self.worker))
self.send_error(self.LOW_DIFF_ERR, id_val=data['id'])
self.reporter.log_share(client=self,
diff=difficulty,
typ=self.LOW_DIFF_SHARE,
params=params,
job=job,
start=t)
return difficulty, self.LOW_DIFF_SHARE
# we want to send an ack ASAP, so do it here
self.send_success(id_val=data['id'])
# Add the share to the accepted set to check for dups
job.acc_shares.add(share)
self.accepted_shares += difficulty
self.reporter.log_share(client=self,
diff=difficulty,
typ=self.VALID_SHARE,
params=params,
job=job,
header_hash=hash_int,
header=header,
start=t)
return difficulty, self.VALID_SHARE
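# Note on the acceptance test above (informal summary, not new behaviour):
# target_from_diff() derives a per-client target from the job's diff1 and the
# client's current difficulty, and a share is accepted only when the uint256 of
# the hashed header is strictly below that target, so a higher difficulty means
# a smaller target and therefore rarer acceptable hashes.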
def recalc_vardiff(self):
# ideal difficulty is the reported shares-per-minute for this address
# divided by the target shares per minute
spm_tar = self.config['vardiff']['spm_target']
ideal_diff = self.reporter.spm(self.address) / spm_tar
self.logger.debug("VARDIFF: Calculated client {} ideal diff {}"
.format(self._id, ideal_diff))
# find the closest tier for them
new_diff = min(self.config['vardiff']['tiers'], key=lambda x: abs(x - ideal_diff))
if new_diff != self.difficulty:
self.logger.info(
"VARDIFF: Moving to D{} from D{} on {}.{}"
.format(new_diff, self.difficulty, self.address, self.worker))
self.next_diff = new_diff
else:
self.logger.debug("VARDIFF: Not adjusting difficulty, already "
"close enough")
self.last_diff_adj = time.time()
self.push_job(timeout=True)
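# Hedged worked example for the tier selection above: with spm_target=20 and a
# measured rate of 80 shares per minute, ideal_diff = 80 / 20 = 4, and
# min(tiers, key=lambda x: abs(x - 4)) over the default tiers [8, 16, 32, ...]
# picks 8, so next_diff would move to 8 and be pushed with the next job.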
@loop(fin='stop', exit_exceptions=(socket.error, ))
def read(self):
# designed to time out approximately "push_job_interval" after the user
# last received a job. Some miners will consider the mining server dead
# if they don't receive something at least once a minute, regardless of
# whether a new job is _needed_. This aims to send a job _only_ as
# often as needed
line = with_timeout(time.time() - self.last_job_push + self.config['push_job_interval'] - self.time_seed,
self.fp.readline,
timeout_value='timeout')
if line == 'timeout':
t = time.time()
if not self.idle and (t - self.last_share_submit) > self.config['idle_worker_threshold']:
self.idle = True
self.server.idle_clients += 1
# disconnect workers that have been idle past the disconnect threshold
if (t - self.last_share_submit) > self.config['idle_worker_disconnect_threshold']:
self.logger.info("Disconnecting worker {}.{} at ip {} for inactivity"
.format(self.address, self.worker, self.peer_name[0]))
self.stop()
if (self.authenticated is True and # don't send to non-authed
# force send if we need to push a new difficulty
(self.next_diff != self.difficulty or
# send if we're past the push interval
t > (self.last_job_push +
self.config['push_job_interval'] -
self.time_seed))):
if self.config['vardiff']['enabled'] is True:
self.recalc_vardiff()
self.push_job(timeout=True)
return
line = line.strip()
# Reading from a defunct connection yields an EOF character which gets
# stripped off
if not line:
raise LoopExit("Closed file descriptor encountered")
try:
data = json.loads(line)
except ValueError:
self.logger.warn("Data {}.. not JSON".format(line[:15]))
self.send_error()
self._incr('unk_err')
return
# handle malformed data
data.setdefault('id', 1)
data.setdefault('params', [])
if __debug__:
self.logger.debug("Data {} recieved on client {}".format(data, self._id))
# run a different function depending on the action requested from
# user
if 'method' not in data:
self.logger.warn("Empty action in JSON {}".format(self.peer_name[0]))
self._incr('unk_err')
self.send_error(id_val=data['id'])
return
meth = data['method'].lower()
if meth == 'mining.subscribe':
if self.subscribed is True:
self.send_error(id_val=data['id'])
return
try:
self.client_type = data['params'][0]
except IndexError:
pass
ret = {
'result': (
(
# These values aren't used for anything, although
# perhaps they should be
("mining.set_difficulty", self._id),
("mining.notify", self._id)
),
self._id,
self.manager.config['extranonce_size']
),
'error': None,
'id': data['id']
}
self.subscribed = True
self.logger.debug("Sending subscribe response: {}".format(pformat(ret)))
self.write_queue.put(json.dumps(ret) + "\n")
elif meth == "mining.authorize":
if self.subscribed is False:
self._incr('not_subbed_err')
self.send_error(25, id_val=data['id'])
return
if self.authenticated is True:
self._incr('not_authed_err')
self.send_error(24, id_val=data['id'])
return
try:
password = data['params'][1]
username = data['params'][0]
# allow the user to use the password field as an argument field
try:
args = password_arg_parser.parse_args(password.split())
except ArgumentParserError:
# Ignore malformed parser data
pass
else:
if args.diff:
diff = max(self.config['minimum_manual_diff'], args.diff)
self.difficulty = diff
self.next_diff = diff
except IndexError:
password = ""
username = ""
self.manager.log_event(
"{name}.auth:1|c".format(name=self.manager.config['procname']))
self.logger.info("Authentication request from {} for username {}"
.format(self.peer_name[0], username))
user_worker = self.convert_username(username)
# unpack into state dictionary
self.address, self.worker = user_worker
self.authenticated = True
self.server.set_user(self)
# notify of success authing and send him current diff and latest
# job
self.send_success(data['id'])
self.push_difficulty()
self.push_job()
elif meth == "mining.submit":
if self.authenticated is False:
self._incr('not_authed_err')
self.send_error(24, id_val=data['id'])
return
t = time.time()
diff, typ = self.submit_job(data, t)
# Log the share to our stat counters
key = ""
if typ > 0:
key += "reject_"
key += StratumClient.share_type_strings[typ] + "_share"
if typ == 0:
# Increment valid shares to calculate hashrate
self._incr(key + "_n1", diff)
self.manager.log_event(
"{name}.{type}:1|c\n"
"{name}.{type}_n1:{diff}|c\n"
"{name}.submit_time:{t}|ms"
.format(name=self.manager.config['procname'], type=key,
diff=diff, t=(time.time() - t) * 1000))
# don't recalc their diff more often than interval
if (self.config['vardiff']['enabled'] is True and
(t - self.last_diff_adj) > self.config['vardiff']['interval']):
self.recalc_vardiff()
elif meth == "mining.get_transactions":
self.send_error(id_val=data['id'])
elif meth == "mining.extranonce.subscribe":
self.send_success(id_val=data['id'])
else:
self.logger.info("Unkown action {} for command {}"
.format(data['method'][:20], self.peer_name[0]))
self._incr('unk_err')
self.send_error(id_val=data['id'])
@property
def summary(self):
""" Displayed on the all client view in the http status monitor """
return dict(worker=self.worker, idle=self.idle)
@property
def last_share_submit_delta(self):
return datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(self.last_share_submit)
@property
def details(self):
""" Displayed on the single client view in the http status monitor """
return dict(alltime_accepted_shares=self.accepted_shares,
difficulty=self.difficulty,
type=self.client_type,
worker=self.worker,
id=self._id,
jobmapper_size=len(self.old_job_mapper) + len(self.job_mapper),
last_share_submit=str(self.last_share_submit_delta),
idle=self.idle,
address=self.address,
ip_address=self.peer_name[0],
connection_time=str(self.connection_duration))
|
{
"content_hash": "c958855bdb49e83a6d1bd2bacb21ef33",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 113,
"avg_line_length": 40.861333333333334,
"alnum_prop": 0.5414409710892123,
"repo_name": "sigwo/powerpool",
"id": "22c06943fe89526c3cd23f00b41b5e9ac70e993a",
"size": "30646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerpool/stratum_server.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "144070"
},
{
"name": "Shell",
"bytes": "581"
}
],
"symlink_target": ""
}
|
from hy.models.expression import HyExpression
from hy.models.integer import HyInteger
from hy.models.float import HyFloat
from hy.models.complex import HyComplex
from hy.models.symbol import HySymbol
from hy.models.string import HyString
from hy.models.dict import HyDict
from hy.models.list import HyList
from hy.models.set import HySet
from hy.models.cons import HyCons
from hy.lex import LexException, PrematureEndOfInput, tokenize
def test_lex_exception():
""" Ensure tokenize throws a fit on a partial input """
try:
tokenize("(foo")
assert True is False
except PrematureEndOfInput:
pass
try:
tokenize("{foo bar")
assert True is False
except PrematureEndOfInput:
pass
try:
tokenize("(defn foo [bar]")
assert True is False
except PrematureEndOfInput:
pass
try:
tokenize("(foo \"bar")
assert True is False
except PrematureEndOfInput:
pass
def test_unbalanced_exception():
"""Ensure the tokenization fails on unbalanced expressions"""
try:
tokenize("(bar))")
assert True is False
except LexException:
pass
try:
tokenize("(baz [quux]])")
assert True is False
except LexException:
pass
def test_lex_expression_symbols():
""" Make sure that expressions produce symbols """
objs = tokenize("(foo bar)")
assert objs == [HyExpression([HySymbol("foo"), HySymbol("bar")])]
def test_lex_expression_strings():
""" Test that expressions can produce strings """
objs = tokenize("(foo \"bar\")")
assert objs == [HyExpression([HySymbol("foo"), HyString("bar")])]
def test_lex_expression_integer():
""" Make sure expressions can produce integers """
objs = tokenize("(foo 2)")
assert objs == [HyExpression([HySymbol("foo"), HyInteger(2)])]
def test_lex_symbols():
""" Make sure that symbols are valid expressions"""
objs = tokenize("foo ")
assert objs == [HySymbol("foo")]
def test_lex_strings():
""" Make sure that strings are valid expressions"""
objs = tokenize("\"foo\" ")
assert objs == [HyString("foo")]
def test_lex_integers():
""" Make sure that integers are valid expressions"""
objs = tokenize("42 ")
assert objs == [HyInteger(42)]
def test_lex_expression_float():
""" Make sure expressions can produce floats """
objs = tokenize("(foo 2.)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(2.)])]
objs = tokenize("(foo -0.5)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(-0.5)])]
objs = tokenize("(foo 1.e7)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(1.e7)])]
def test_lex_expression_complex():
""" Make sure expressions can produce complex """
objs = tokenize("(foo 2.j)")
assert objs == [HyExpression([HySymbol("foo"), HyComplex(2.j)])]
objs = tokenize("(foo -0.5j)")
assert objs == [HyExpression([HySymbol("foo"), HyComplex(-0.5j)])]
objs = tokenize("(foo 1.e7j)")
assert objs == [HyExpression([HySymbol("foo"), HyComplex(1.e7j)])]
objs = tokenize("(foo j)")
assert objs == [HyExpression([HySymbol("foo"), HySymbol("j")])]
def test_lex_line_counting():
""" Make sure we can count lines / columns """
entry = tokenize("(foo (one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 15
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 6
assert entry.end_line == 1
assert entry.end_column == 14
def test_lex_line_counting_multi():
""" Make sure we can do multi-line tokenization """
entries = tokenize("""
(foo (one two))
(foo bar)
""")
entry = entries[0]
assert entry.start_line == 2
assert entry.start_column == 1
assert entry.end_line == 2
assert entry.end_column == 15
entry = entries[1]
assert entry.start_line == 3
assert entry.start_column == 1
assert entry.end_line == 3
assert entry.end_column == 9
def test_lex_line_counting_multi_inner():
""" Make sure we can do multi-line tokenization (inner) """
entry = tokenize("""(foo
bar)""")[0]
inner = entry[0]
assert inner.start_line == 1
assert inner.start_column == 2
inner = entry[1]
assert inner.start_line == 2
assert inner.start_column == 5
def test_dicts():
""" Ensure that we can tokenize a dict. """
objs = tokenize("{foo bar bar baz}")
assert objs == [HyDict(["foo", "bar", "bar", "baz"])]
objs = tokenize("(bar {foo bar bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HyDict(["foo", "bar",
"bar", "baz"])])]
objs = tokenize("{(foo bar) (baz quux)}")
assert objs == [HyDict([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
def test_sets():
""" Ensure that we can tokenize a set. """
objs = tokenize("#{1 2}")
assert objs == [HySet([HyInteger(1), HyInteger(2)])]
objs = tokenize("(bar #{foo bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HySet(["foo", "bar", "baz"])])]
objs = tokenize("#{(foo bar) (baz quux)}")
assert objs == [HySet([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
def test_nospace():
""" Ensure we can tokenize without spaces if we have to """
entry = tokenize("(foo(one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 14
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 5
assert entry.end_line == 1
assert entry.end_column == 13
def test_escapes():
""" Ensure we can escape things """
entry = tokenize("(foo \"foo\\n\")")[0]
assert entry[1] == "foo\n"
entry = tokenize("(foo \"foo\s\")")[0]
assert entry[1] == "foo\\s"
def test_unicode_escapes():
"""Ensure unicode escapes are handled correctly"""
s = r'"a\xac\u1234\u20ac\U00008000"'
assert len(s) == 29
entry = tokenize(s)[0]
assert len(entry) == 5
assert [ord(x) for x in entry] == [97, 172, 4660, 8364, 32768]
def test_hashbang():
""" Ensure we can escape things """
entry = tokenize("#!this is a comment\n")
assert entry == []
def test_complex():
"""Ensure we tokenize complex numbers properly"""
# This is a regression test for #143
entry = tokenize("(1j)")[0][0]
assert entry == HyComplex("1.0j")
entry = tokenize("(j)")[0][0]
assert entry == HySymbol("j")
def test_reader_macro():
"""Ensure reader macros are handles properly"""
entry = tokenize("#^()")
assert entry[0][0] == HySymbol("dispatch_reader_macro")
assert entry[0][1] == HyString("^")
assert len(entry[0]) == 3
def test_lex_comment_382():
"""Ensure that we can tokenize sources with a comment at the end"""
entry = tokenize("foo ;bar\n;baz")
assert entry == [HySymbol("foo")]
def test_lex_mangling_star():
"""Ensure that mangling starred identifiers works according to plan"""
entry = tokenize("*foo*")
assert entry == [HySymbol("FOO")]
entry = tokenize("*")
assert entry == [HySymbol("*")]
entry = tokenize("*foo")
assert entry == [HySymbol("*foo")]
def test_lex_mangling_hyphen():
"""Ensure that hyphens get translated to underscores during mangling"""
entry = tokenize("foo-bar")
assert entry == [HySymbol("foo_bar")]
entry = tokenize("-")
assert entry == [HySymbol("-")]
def test_lex_mangling_qmark():
"""Ensure that identifiers ending with a question mark get mangled ok"""
entry = tokenize("foo?")
assert entry == [HySymbol("is_foo")]
entry = tokenize("?")
assert entry == [HySymbol("?")]
entry = tokenize("im?foo")
assert entry == [HySymbol("im?foo")]
entry = tokenize(".foo?")
assert entry == [HySymbol(".is_foo")]
entry = tokenize("foo.bar?")
assert entry == [HySymbol("foo.is_bar")]
entry = tokenize("foo?.bar")
assert entry == [HySymbol("is_foo.bar")]
entry = tokenize(".foo?.bar.baz?")
assert entry == [HySymbol(".is_foo.bar.is_baz")]
def test_simple_cons():
"""Check that cons gets tokenized correctly"""
entry = tokenize("(a . b)")[0]
assert entry == HyCons(HySymbol("a"), HySymbol("b"))
def test_dotted_list():
"""Check that dotted lists get tokenized correctly"""
entry = tokenize("(a b c . (d . e))")[0]
assert entry == HyCons(HySymbol("a"),
HyCons(HySymbol("b"),
HyCons(HySymbol("c"),
HyCons(HySymbol("d"),
HySymbol("e")))))
def test_cons_list():
"""Check that cons of something and a list gets tokenized as a list"""
entry = tokenize("(a . [])")[0]
assert entry == HyList([HySymbol("a")])
assert type(entry) == HyList
entry = tokenize("(a . ())")[0]
assert entry == HyExpression([HySymbol("a")])
assert type(entry) == HyExpression
entry = tokenize("(a b . {})")[0]
assert entry == HyDict([HySymbol("a"), HySymbol("b")])
assert type(entry) == HyDict
|
{
"content_hash": "c1e6a91bc2628301e40f5eba95b4b049",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 76,
"avg_line_length": 28.941896024464832,
"alnum_prop": 0.595519864750634,
"repo_name": "zackmdavis/hy",
"id": "07429d40f5b235cb051a266d6458be3d85186f37",
"size": "10653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lex/test_lex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1999"
},
{
"name": "Hy",
"bytes": "141673"
},
{
"name": "Makefile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "214319"
}
],
"symlink_target": ""
}
|
import sys
from robot.errors import DataError
from robot import utils
from .model import LibraryDoc, KeywordDoc
class JavaDocBuilder(object):
def build(self, path):
doc = ClassDoc(path)
libdoc = LibraryDoc(name=doc.qualifiedName(),
doc=self._get_doc(doc),
version=self._get_version(doc),
scope=self._get_scope(doc),
named_args=False,
doc_format=self._get_doc_format(doc))
libdoc.inits = self._initializers(doc)
libdoc.keywords = self._keywords(doc)
return libdoc
def _get_doc(self, doc):
text = doc.getRawCommentText()
return '\n'.join(line.strip() for line in text.splitlines())
def _get_version(self, doc):
return self._get_attr(doc, 'VERSION')
def _get_scope(self, doc):
scope = self._get_attr(doc, 'SCOPE', upper=True)
return {'TESTSUITE': 'test suite',
'GLOBAL': 'global'}.get(scope, 'test suite')
def _get_doc_format(self, doc):
return self._get_attr(doc, 'DOC_FORMAT', upper=True)
def _get_attr(self, doc, name, upper=False):
name = 'ROBOT_LIBRARY_' + name
for field in doc.fields():
if field.name() == name and field.isPublic():
value = field.constantValue()
if upper:
value = utils.normalize(value, ignore='_').upper()
return value
return ''
def _initializers(self, doc):
inits = [self._keyword_doc(init) for init in doc.constructors()]
if len(inits) == 1 and not inits[0].args:
return []
return inits
def _keywords(self, doc):
return [self._keyword_doc(m) for m in doc.methods()]
def _keyword_doc(self, method):
doc, tags = utils.split_tags_from_doc(self._get_doc(method))
return KeywordDoc(
name=utils.printable_name(method.name(), code_style=True),
args=self._get_keyword_arguments(method),
doc=doc,
tags=tags
)
def _get_keyword_arguments(self, method):
params = method.parameters()
if not params:
return []
names = [p.name() for p in params]
if self._is_varargs(params[-1]):
names[-1] = '*' + names[-1]
elif self._is_kwargs(params[-1]):
names[-1] = '**' + names[-1]
if len(params) > 1 and self._is_varargs(params[-2]):
names[-2] = '*' + names[-2]
return names
def _is_varargs(self, param):
return (param.typeName().startswith('java.util.List')
or param.type().dimension() == '[]')
def _is_kwargs(self, param):
return param.typeName().startswith('java.util.Map')
def ClassDoc(path):
"""Process the given Java source file and return ClassDoc instance.
Processing is done using com.sun.tools.javadoc APIs. Returned object
implements com.sun.javadoc.ClassDoc interface:
http://docs.oracle.com/javase/7/docs/jdk/api/javadoc/doclet/
"""
try:
from com.sun.tools.javadoc import JavadocTool, Messager, ModifierFilter
from com.sun.tools.javac.util import List, Context
from com.sun.tools.javac.code.Flags import PUBLIC
except ImportError:
raise DataError("Creating documentation from Java source files "
"requires 'tools.jar' to be in CLASSPATH.")
context = Context()
Messager.preRegister(context, 'libdoc')
jdoctool = JavadocTool.make0(context)
filter = ModifierFilter(PUBLIC)
java_names = List.of(path)
if sys.platform[4:7] < '1.8': # API changed in Java 8
root = jdoctool.getRootDocImpl('en', 'utf-8', filter, java_names,
List.nil(), False, List.nil(),
List.nil(), False, False, True)
else:
root = jdoctool.getRootDocImpl('en', 'utf-8', filter, java_names,
List.nil(), List.nil(), False, List.nil(),
List.nil(), False, False, True)
return root.classes()[0]
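# Hedged usage sketch (not part of this module): requires running under Jython
# with tools.jar on the CLASSPATH, as noted above; the source path is made up.
#
#     builder = JavaDocBuilder()
#     libdoc = builder.build('src/MyLibrary.java')
#     print(libdoc.name, len(libdoc.keywords))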
|
{
"content_hash": "d1861335adfddae440409f2e32c94c05",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 81,
"avg_line_length": 37.078947368421055,
"alnum_prop": 0.5585521646557843,
"repo_name": "jaloren/robotframework",
"id": "39e119bec243dbb66778795f17e9c05d504c7f04",
"size": "4871",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/robot/libdocpkg/javabuilder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "58264"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2241544"
},
{
"name": "RobotFramework",
"bytes": "2074646"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
from .compiler import DjangoScssCompiler # NOQA
|
{
"content_hash": "5070ba825d1bfe5814fcae06b08093d4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.8163265306122449,
"repo_name": "fusionbox/django-pyscss",
"id": "a31ff8548954203fdb3844364108bba02030d907",
"size": "49",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_pyscss/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "381"
},
{
"name": "Python",
"bytes": "21797"
}
],
"symlink_target": ""
}
|
"""
Django settings for quokka project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import MEDIA_ROOT
import sys
import socket
try:
HOSTNAME = socket.gethostname()
except Exception:
HOSTNAME = 'localhost'
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!j!rcmgm!@*zhjkq)3tl*r&&zug3&4hklo*s)#b*5_-=u0s1iw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
{
"content_hash": "6f7bbaef25aade5307aeba8cf37738dd",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 71,
"avg_line_length": 22.7625,
"alnum_prop": 0.7221306974190006,
"repo_name": "OmegaDroid/django_cookbook",
"id": "2ae39bd63a164c938d056f22d9ce861c32e74abd",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cookbook/test/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11661"
}
],
"symlink_target": ""
}
|
import dataclasses
import typing
@dataclasses.dataclass
class A:
x: int
y: str
z: float = 0.0
A(<warning descr="Parameter 'x' unfilled"><warning descr="Parameter 'y' unfilled">)</warning></warning>
A(1<warning descr="Parameter 'y' unfilled">)</warning>
A(1, "a")
A(1, "a", 1.0)
A(1, "a", 1.0, <warning descr="Unexpected argument">"b"</warning>)
@dataclasses.dataclass(init=True)
class A2:
x: int
y: str
z: float = 0.0
A2(<warning descr="Parameter 'x' unfilled"><warning descr="Parameter 'y' unfilled">)</warning></warning>
A2(1<warning descr="Parameter 'y' unfilled">)</warning>
A2(1, "a")
A2(1, "a", 1.0)
A2(1, "a", 1.0, <warning descr="Unexpected argument">"b"</warning>)
@dataclasses.dataclass(init=False)
class B1:
x: int = 1
y: str = "2"
z: float = 0.0
B1()
B1<warning descr="Unexpected argument(s)Possible callees:object(self: object)object.__new__(cls: object)">(1)</warning>
B1<warning descr="Unexpected argument(s)Possible callees:object(self: object)object.__new__(cls: object)">(1, "a")</warning>
B1<warning descr="Unexpected argument(s)Possible callees:object(self: object)object.__new__(cls: object)">(1, "a", 1.0)</warning>
B1<warning descr="Unexpected argument(s)Possible callees:object(self: object)object.__new__(cls: object)">(1, "a", 1.0, "b")</warning>
@dataclasses.dataclass(init=False)
class B2:
x: int
y: str
z: float = 0.0
def __init__(self, x: int):
self.x = x
self.y = str(x)
self.z = 0.0
B2(<warning descr="Parameter 'x' unfilled">)</warning>
B2(1)
B2(1, <warning descr="Unexpected argument">2</warning>)
@dataclasses.dataclass
class C1:
a: typing.ClassVar[int]
b: int
C1(<warning descr="Parameter 'b' unfilled">)</warning>
C1(1)
C1(1, <warning descr="Unexpected argument">2</warning>)
@dataclasses.dataclass
class C2:
a: typing.ClassVar
b: int
C2(<warning descr="Parameter 'b' unfilled">)</warning>
C2(1)
C2(1, <warning descr="Unexpected argument">2</warning>)
@dataclasses.dataclass
class D1:
a: dataclasses.InitVar[int]
b: int
D1(<warning descr="Parameter 'a' unfilled"><warning descr="Parameter 'b' unfilled">)</warning></warning>
D1(1<warning descr="Parameter 'b' unfilled">)</warning>
D1(1, 2)
D1(1, 2, <warning descr="Unexpected argument">3</warning>)
@dataclasses.dataclass
class E1:
a: int = dataclasses.field()
b: int = dataclasses.field(init=True)
c: int = dataclasses.field(init=False)
d: int = dataclasses.field(default=1)
e: int = dataclasses.field(default_factory=int)
E1(1<warning descr="Parameter 'b' unfilled">)</warning>
E1(1, 2)
E1(1, 2, 3)
E1(1, 2, 3, 4)
E1(1, 2, 3, 4, <warning descr="Unexpected argument">5</warning>)
@dataclasses.dataclass
class F1:
foo = "bar" # <- has no type annotation, so doesn't count.
baz: str
F1(<warning descr="Parameter 'baz' unfilled">)</warning>
F1("1")
F1("1", <warning descr="Unexpected argument">"2"</warning>)
|
{
"content_hash": "04f12de07a39f4a68a9a834d2666aa6a",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 134,
"avg_line_length": 26.339285714285715,
"alnum_prop": 0.6620338983050847,
"repo_name": "msebire/intellij-community",
"id": "1d7caa8c426eb33f4d15f8c83798b0c7876d6b65",
"size": "2950",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/testData/inspections/PyArgumentListInspection/InitializingDataclass/a.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
PRIMARY_OS = 'Ubuntu-16.04'
PRIMARY = '''#!/bin/sh
FQDN="{{fqdn}}"
export DEBIAN_FRONTEND=noninteractive
# locale
locale-gen en_US.UTF-8
# hostname
hostnamectl set-hostname $FQDN
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
# packages
curl -s 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | apt-key add --import
echo "deb https://packages.docker.com/1.13/apt/repo ubuntu-xenial main" | tee /etc/apt/sources.list.d/docker.list
apt-get update && apt-get install -y \
apt-transport-https \
docker-engine \
git \
htop \
jq \
    linux-image-extra-$(uname -r) \
linux-image-extra-virtual \
seccomp \
strace \
tree
systemctl start docker
sleep 15
usermod -aG docker ubuntu
# compose
curl -L https://github.com/docker/compose/releases/download/1.11.1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
{{dinfo}}
reboot
'''.format()
# Script to use if launching from a custom lab AMI image
AMIBUILD = '''#!/bin/sh
FQDN="{{fqdn}}"
# hostname
hostnamectl set-hostname $FQDN
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
{{dinfo}}
reboot
'''.format()
def pre_process():
"""Anything added to this function is executed before launching the instances"""
pass
def post_process():
"""Anything added to this function is executed after launching the instances"""
pass
|
{
"content_hash": "840b7393d30ed984fb609df3dc7b379a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 135,
"avg_line_length": 23.14516129032258,
"alnum_prop": 0.6947735191637631,
"repo_name": "kizbitz/train",
"id": "4d5756a705985fee7a82a0520cb61a63e847e292",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/labs/security/scripts/ubuntu-16.04.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "80124"
},
{
"name": "Shell",
"bytes": "249"
}
],
"symlink_target": ""
}
|
from Module import Command
import string
module_name = "xkcd"
def command_xkcdrandomnumber(cmd, bot, args, msg, event):
return "[4 // Chosen by fair dice roll. Guaranteed to be random.](http://xkcd.com/221/)"
def command_xkcd(cmd, bot, args, msg, event):
if len(args) < 1:
return "Not enough arguments."
try:
id_ = int(args[0])
    except (TypeError, ValueError):
return "Invalid arguments."
return "http://xkcd.com/%i/" % id_
commands = [
Command('xkcdrandomnumber', command_xkcdrandomnumber, "Returns a random number, based on an xkcd comic. Syntax: `$PREFIXxkcdrandomnumber`", False, False),
Command('xkcd', command_xkcd, "Shows the specified xkcd comic. Syntax: `$PREFIXxkcd comic_id`", False, False, None, None, string.digits, None)
]
|
{
"content_hash": "6fa4c83d73baff8a1b2ba6c394ba6b22",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 158,
"avg_line_length": 34.95454545454545,
"alnum_prop": 0.6710013003901171,
"repo_name": "Mego/DataBot",
"id": "f3ef18f52b01b12f4449ce6a15b67d6d4cff4321",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SE-Chatbot/botbuiltins/xkcd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "322"
},
{
"name": "JavaScript",
"bytes": "19383"
},
{
"name": "Makefile",
"bytes": "891"
},
{
"name": "Python",
"bytes": "176137"
},
{
"name": "Shell",
"bytes": "1447"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from links.views import LinkListView, LinkDetailView
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'linkify.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^links/(?P<pk>\d+)/$', LinkDetailView.as_view(), name='detail-link'),
url(r'^links/$', LinkListView.as_view(), name='list-link'),
)
|
{
"content_hash": "f3cccf5240eb7f3ba6e65d000944b70f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 29.235294117647058,
"alnum_prop": 0.6559356136820925,
"repo_name": "promptworks/schematics",
"id": "ad8e6e574c502b4d5547f1252fabd9b5c3cc1b72",
"size": "497",
"binary": false,
"copies": "12",
"ref": "refs/heads/development",
"path": "examples/django/linkify/linkify/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "201359"
},
{
"name": "Shell",
"bytes": "89"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import shutil
import sys
import time
import urllib2
import warnings
import re
# Dropping a table inexplicably produces a warning despite
# the 'IF EXISTS' clause. Squelch these warnings.
warnings.simplefilter('ignore')
import MySQLdb
import environment
import utils
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
tablet_cell_map = {
62344: 'nj',
62044: 'nj',
41983: 'nj',
31981: 'ny',
}
def get_backup_storage_flags():
return ['-backup_storage_implementation', 'file',
'-file_backup_storage_root',
os.path.join(environment.tmproot, 'backupstorage')]
def get_all_extra_my_cnf(extra_my_cnf):
all_extra_my_cnf = [environment.vttop + '/config/mycnf/default-fast.cnf']
flavor_my_cnf = mysql_flavor().extra_my_cnf()
if flavor_my_cnf:
all_extra_my_cnf.append(flavor_my_cnf)
if extra_my_cnf:
all_extra_my_cnf.append(extra_my_cnf)
return all_extra_my_cnf
class Tablet(object):
"""This class helps manage a vttablet instance.
To use it for vttablet, you need to use init_tablet and/or
start_vttablet.
"""
default_uid = 62344
seq = 0
tablets_running = 0
default_db_config = {
'app': {
'uname': 'vt_app',
'charset': 'utf8'
},
'dba': {
'uname': 'vt_dba',
'charset': 'utf8'
},
'filtered': {
'uname': 'vt_filtered',
'charset': 'utf8'
},
'repl': {
'uname': 'vt_repl',
'charset': 'utf8'
}
}
# this will eventually be coming from the proto3
tablet_type_value = {
'UNKNOWN': 0,
'MASTER': 1,
'REPLICA': 2,
'RDONLY': 3,
'BATCH': 3,
'SPARE': 4,
'EXPERIMENTAL': 5,
'BACKUP': 6,
'RESTORE': 7,
'WORKER': 8,
}
def __init__(self, tablet_uid=None, port=None, mysql_port=None, cell=None,
use_mysqlctld=False):
self.tablet_uid = tablet_uid or (Tablet.default_uid + Tablet.seq)
self.port = port or (environment.reserve_ports(1))
self.mysql_port = mysql_port or (environment.reserve_ports(1))
self.grpc_port = environment.reserve_ports(1)
self.use_mysqlctld = use_mysqlctld
Tablet.seq += 1
if cell:
self.cell = cell
else:
self.cell = tablet_cell_map.get(tablet_uid, 'nj')
self.proc = None
# filled in during init_tablet
self.keyspace = None
self.shard = None
# utility variables
self.tablet_alias = 'test_%s-%010d' % (self.cell, self.tablet_uid)
self.zk_tablet_path = (
'/zk/test_%s/vt/tablets/%010d' % (self.cell, self.tablet_uid))
def update_stream_python_endpoint(self):
protocol = protocols_flavor().binlog_player_python_protocol()
port = self.port
if protocol == 'gorpc':
from vtdb import gorpc_update_stream
elif protocol == 'grpc':
# import the grpc update stream client implementation, change the port
from vtdb import grpc_update_stream
port = self.grpc_port
return (protocol, 'localhost:%d' % port)
def mysqlctl(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctl') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid)]
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if with_ports:
args.extend(['-port', str(self.port),
'-mysql_port', str(self.mysql_port)])
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def mysqlctld(self, cmd, extra_my_cnf=None, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctld') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid),
'-mysql_port', str(self.mysql_port),
'-socket_file', os.path.join(self.tablet_dir, 'mysqlctl.sock')]
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def init_mysql(self, extra_my_cnf=None):
if self.use_mysqlctld:
return self.mysqlctld(
['-init_db_sql_file', environment.vttop + '/config/init_db.sql'],
extra_my_cnf=extra_my_cnf)
else:
return self.mysqlctl(
['init', '-init_db_sql_file',
environment.vttop + '/config/init_db.sql'],
extra_my_cnf=extra_my_cnf, with_ports=True)
def start_mysql(self):
return self.mysqlctl(['start'], with_ports=True)
def shutdown_mysql(self):
return self.mysqlctl(['shutdown'], with_ports=True)
def teardown_mysql(self):
if utils.options.keep_logs:
return self.shutdown_mysql()
return self.mysqlctl(['teardown', '-force'])
def remove_tree(self):
if utils.options.keep_logs:
return
try:
shutil.rmtree(self.tablet_dir)
except OSError as e:
if utils.options.verbose == 2:
print >> sys.stderr, e, self.tablet_dir
def mysql_connection_parameters(self, dbname, user='vt_dba'):
return dict(user=user,
unix_socket=self.tablet_dir + '/mysql.sock',
db=dbname)
def connect(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, conn.cursor()
def connect_dict(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, MySQLdb.cursors.DictCursor(conn)
# Query the MySQL instance directly
def mquery(
self, dbname, query, write=False, user='vt_dba', conn_params=None):
if conn_params is None:
conn_params = {}
conn, cursor = self.connect(dbname, user=user, **conn_params)
if write:
conn.begin()
if isinstance(query, basestring):
query = [query]
for q in query:
# logging.debug('mysql(%s,%s): %s', self.tablet_uid, dbname, q)
cursor.execute(q)
if write:
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
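  # Illustrative calls (not from the original file); database and queries are
  # placeholders:
  #   rows = tablet.mquery('vt_test_keyspace', 'select count(*) from foo')
  #   tablet.mquery('vt_test_keyspace', 'insert into foo values (1)', write=True)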
def assert_table_count(self, dbname, table, n, where=''):
result = self.mquery(dbname, 'select count(*) from ' + table + ' ' + where)
if result[0][0] != n:
raise utils.TestError('expected %d rows in %s' % (n, table), result)
def reset_replication(self):
self.mquery('', mysql_flavor().reset_replication_commands())
def populate(self, dbname, create_sql, insert_sqls=[]):
self.create_db(dbname)
if isinstance(create_sql, basestring):
create_sql = [create_sql]
for q in create_sql:
self.mquery(dbname, q)
for q in insert_sqls:
self.mquery(dbname, q, write=True)
def has_db(self, name):
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname == name:
return True
return False
def drop_db(self, name):
self.mquery('', 'drop database if exists %s' % name)
while self.has_db(name):
logging.debug('%s sleeping while waiting for database drop: %s',
self.tablet_alias, name)
time.sleep(0.3)
self.mquery('', 'drop database if exists %s' % name)
def create_db(self, name):
self.drop_db(name)
self.mquery('', 'create database %s' % name)
def clean_dbs(self):
logging.debug('mysql(%s): removing all databases', self.tablet_uid)
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname in ['information_schema', 'mysql']:
continue
self.drop_db(dbname)
def wait_check_db_var(self, name, value):
for _ in range(3):
try:
return self.check_db_var(name, value)
except utils.TestError as e:
print >> sys.stderr, 'WARNING: ', e
time.sleep(1.0)
raise e
def check_db_var(self, name, value):
row = self.get_db_var(name)
if row != (name, value):
raise utils.TestError('variable not set correctly', name, row)
def get_db_var(self, name):
conn, cursor = self.connect()
try:
cursor.execute("show variables like '%s'" % name)
return cursor.fetchone()
finally:
conn.close()
def update_addrs(self):
args = [
'UpdateTabletAddrs',
'-hostname', 'localhost',
'-ip-addr', '127.0.0.1',
'-mysql-port', '%d' % self.mysql_port,
'-vt-port', '%d' % self.port,
self.tablet_alias
]
return utils.run_vtctl(args)
def init_tablet(self, tablet_type, keyspace, shard,
start=False, dbname=None, parent=True, wait_for_start=True,
include_mysql_port=True, **kwargs):
self.tablet_type = tablet_type
self.keyspace = keyspace
self.shard = shard
self.dbname = dbname or ('vt_' + self.keyspace)
args = ['InitTablet',
'-hostname', 'localhost',
'-port', str(self.port)]
if include_mysql_port:
args.extend(['-mysql_port', str(self.mysql_port)])
if parent:
args.append('-parent')
if dbname:
args.extend(['-db_name_override', dbname])
if keyspace:
args.extend(['-keyspace', keyspace])
if shard:
args.extend(['-shard', shard])
args.extend([self.tablet_alias, tablet_type])
utils.run_vtctl(args)
if start:
if not wait_for_start:
expected_state = None
elif (tablet_type == 'master' or tablet_type == 'replica' or
tablet_type == 'rdonly' or tablet_type == 'batch'):
expected_state = 'SERVING'
else:
expected_state = 'NOT_SERVING'
self.start_vttablet(wait_for_state=expected_state, **kwargs)
@property
def tablet_dir(self):
return '%s/vt_%010d' % (environment.vtdataroot, self.tablet_uid)
def grpc_enabled(self):
return (
protocols_flavor().tabletconn_protocol() == 'grpc' or
protocols_flavor().tablet_manager_protocol() == 'grpc' or
protocols_flavor().binlog_player_protocol() == 'grpc')
def flush(self):
utils.curl('http://localhost:%s%s' %
(self.port, environment.flush_logs_url),
stderr=utils.devnull, stdout=utils.devnull)
def start_vttablet(
self, port=None, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
lameduck_period=None, security_policy=None,
target_tablet_type=None, full_mycnf_args=False,
extra_args=None, extra_env=None, include_mysql_port=True,
init_tablet_type=None, init_keyspace=None,
init_shard=None, init_db_name_override=None,
supports_backups=False):
"""Starts a vttablet process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
args = environment.binary_args('vttablet')
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
args.extend(['-tablet_hostname', 'localhost'])
args.extend(['-tablet-path', self.tablet_alias])
args.extend(environment.topo_server().flags())
args.extend(['-binlog_player_protocol',
protocols_flavor().binlog_player_protocol()])
args.extend(['-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol()])
args.extend(['-tablet_protocol', protocols_flavor().tabletconn_protocol()])
args.extend(['-binlog_player_healthcheck_topology_refresh', '1s'])
args.extend(['-binlog_player_retry_delay', '1s'])
args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if full_mycnf_args:
# this flag is used to specify all the mycnf_ flags, to make
# sure that code works.
relay_log_path = os.path.join(self.tablet_dir, 'relay-logs',
'vt-%010d-relay-bin' % self.tablet_uid)
args.extend([
'-mycnf_server_id', str(self.tablet_uid),
'-mycnf_data_dir', os.path.join(self.tablet_dir, 'data'),
'-mycnf_innodb_data_home_dir', os.path.join(self.tablet_dir,
'innodb', 'data'),
'-mycnf_innodb_log_group_home_dir', os.path.join(self.tablet_dir,
'innodb', 'logs'),
'-mycnf_socket_file', os.path.join(self.tablet_dir, 'mysql.sock'),
'-mycnf_error_log_path', os.path.join(self.tablet_dir, 'error.log'),
'-mycnf_slow_log_path', os.path.join(self.tablet_dir,
'slow-query.log'),
'-mycnf_relay_log_path', relay_log_path,
'-mycnf_relay_log_index_path', relay_log_path + '.index',
'-mycnf_relay_log_info_path', os.path.join(self.tablet_dir,
'relay-logs',
'relay-log.info'),
'-mycnf_bin_log_path', os.path.join(
self.tablet_dir, 'bin-logs', 'vt-%010d-bin' % self.tablet_uid),
'-mycnf_master_info_file', os.path.join(self.tablet_dir,
'master.info'),
'-mycnf_pid_file', os.path.join(self.tablet_dir, 'mysql.pid'),
'-mycnf_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
'-mycnf_slave_load_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
])
if include_mysql_port:
args.extend(['-mycnf_mysql_port', str(self.mysql_port)])
if target_tablet_type:
self.tablet_type = target_tablet_type
args.extend(['-target_tablet_type', target_tablet_type,
'-health_check_interval', '2s',
'-enable_replication_lag_check',
'-degraded_threshold', '5s'])
# this is used to run InitTablet as part of the vttablet startup
if init_tablet_type:
self.tablet_type = init_tablet_type
args.extend(['-init_tablet_type', init_tablet_type])
if init_keyspace:
self.keyspace = init_keyspace
self.shard = init_shard
args.extend(['-init_keyspace', init_keyspace,
'-init_shard', init_shard])
if init_db_name_override:
self.dbname = init_db_name_override
args.extend(['-init_db_name_override', init_db_name_override])
else:
self.dbname = 'vt_' + init_keyspace
if supports_backups:
args.extend(['-restore_from_backup'] + get_backup_storage_flags())
if extra_args:
args.extend(extra_args)
args.extend(['-port', '%s' % (port or self.port),
'-log_dir', environment.vtlogroot])
self._add_dbconfigs(args, repl_extra_flags)
if memcache:
args.extend(['-rowcache-bin', environment.memcached_bin()])
memcache_socket = os.path.join(self.tablet_dir, 'memcache.sock')
args.extend(['-rowcache-socket', memcache_socket])
args.extend(['-enable-rowcache'])
if filecustomrules:
args.extend(['-filecustomrules', filecustomrules])
if zkcustomrules:
args.extend(['-zkcustomrules', zkcustomrules])
if schema_override:
args.extend(['-schema-override', schema_override])
if table_acl_config:
args.extend(['-table-acl-config', table_acl_config])
args.extend(['-queryserver-config-strict-table-acl'])
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if self.grpc_enabled():
args.extend(['-grpc_port', str(self.grpc_port)])
if lameduck_period:
args.extend(['-lameduck-period', lameduck_period])
if security_policy:
args.extend(['-security_policy', security_policy])
if extra_args:
args.extend(extra_args)
args.extend(['-enable-autocommit'])
stderr_fd = open(
os.path.join(environment.vtlogroot, 'vttablet-%d.stderr' %
self.tablet_uid), 'w')
# increment count only the first time
if not self.proc:
Tablet.tablets_running += 1
self.proc = utils.run_bg(args, stderr=stderr_fd, extra_env=extra_env)
log_message = (
'Started vttablet: %s (%s) with pid: %s - Log files: '
'%s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s' %
(self.tablet_uid, self.tablet_alias, self.proc.pid,
environment.vtlogroot, self.proc.pid))
# This may race with the stderr output from the process (though
# that's usually empty).
stderr_fd.write(log_message + '\n')
stderr_fd.close()
logging.debug(log_message)
# wait for query service to be in the right state
if wait_for_state:
self.wait_for_vttablet_state(wait_for_state, port=port)
return self.proc
def wait_for_vttablet_state(self, expected, timeout=60.0, port=None):
expr = re.compile('^' + expected + '$')
while True:
v = utils.get_vars(port or self.port)
last_seen_state = '?'
if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for state %s' % expected)
logging.debug(
' vttablet %s not answering at /debug/vars, waiting...',
self.tablet_alias)
else:
if 'TabletStateName' not in v:
logging.debug(
' vttablet %s not exporting TabletStateName, waiting...',
self.tablet_alias)
else:
s = v['TabletStateName']
last_seen_state = s
if expr.match(s):
break
else:
logging.debug(
' vttablet %s in state %s != %s', self.tablet_alias, s,
expected)
timeout = utils.wait_step(
'waiting for state %s (last seen state: %s)' %
(expected, last_seen_state),
timeout, sleep_time=0.1)
def wait_for_mysqlctl_socket(self, timeout=30.0):
mysql_sock = os.path.join(self.tablet_dir, 'mysql.sock')
mysqlctl_sock = os.path.join(self.tablet_dir, 'mysqlctl.sock')
while True:
if os.path.exists(mysql_sock) and os.path.exists(mysqlctl_sock):
return
timeout = utils.wait_step(
'waiting for mysql and mysqlctl socket files: %s %s' %
(mysql_sock, mysqlctl_sock), timeout)
def _add_dbconfigs(self, args, repl_extra_flags=None):
if repl_extra_flags is None:
repl_extra_flags = {}
config = dict(self.default_db_config)
if self.keyspace:
config['app']['dbname'] = self.dbname
config['repl']['dbname'] = self.dbname
config['repl'].update(repl_extra_flags)
for key1 in config:
for key2 in config[key1]:
args.extend(['-db-config-' + key1 + '-' + key2, config[key1][key2]])
def get_status(self):
return utils.get_status(self.port)
def get_healthz(self):
return urllib2.urlopen('http://localhost:%d/healthz' % self.port).read()
def kill_vttablet(self, wait=True):
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias,
str(wait))
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.terminate()
if wait:
self.proc.wait()
self.proc = None
def hard_kill_vttablet(self):
logging.debug('hard killing vttablet: %s', self.tablet_alias)
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.kill()
self.proc.wait()
self.proc = None
def wait_for_binlog_server_state(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog state %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'UpdateStreamState' not in v:
logging.debug(
' vttablet not exporting BinlogServerState, waiting...')
else:
s = v['UpdateStreamState']
if s != expected:
logging.debug(" vttablet's binlog server in state %s != %s", s,
expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog server state %s' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog service is in state %s',
self.tablet_alias, expected)
def wait_for_binlog_player_count(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
      if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog count %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'BinlogPlayerMapSize' not in v:
logging.debug(
' vttablet not exporting BinlogPlayerMapSize, waiting...')
else:
s = v['BinlogPlayerMapSize']
if s != expected:
logging.debug(" vttablet's binlog player map has count %d != %d",
s, expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog player count %d' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog player has %d players',
self.tablet_alias, expected)
@classmethod
def check_vttablet_count(klass):
if Tablet.tablets_running > 0:
raise utils.TestError('This test is not killing all its vttablets')
def execute(self, sql, bindvars=None, transaction_id=None, auto_log=True):
"""execute uses 'vtctl VtTabletExecute' to execute a command.
"""
args = [
'VtTabletExecute',
'-keyspace', self.keyspace,
'-shard', self.shard,
]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if transaction_id:
args.extend(['-transaction_id', str(transaction_id)])
args.extend([self.tablet_alias, sql])
return utils.run_vtctl_json(args, auto_log=auto_log)
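  # Illustrative sketch (not from the original file): a bound query, optionally
  # wrapped in a transaction via begin()/commit(); SQL and values are placeholders.
  #
  #   txid = tablet.begin()
  #   tablet.execute('insert into t (id) values (:id)',
  #                  bindvars={'id': 1}, transaction_id=txid)
  #   tablet.commit(txid)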
def begin(self, auto_log=True):
"""begin uses 'vtctl VtTabletBegin' to start a transaction.
"""
args = [
'VtTabletBegin',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
]
result = utils.run_vtctl_json(args, auto_log=auto_log)
return result['transaction_id']
def commit(self, transaction_id, auto_log=True):
"""commit uses 'vtctl VtTabletCommit' to commit a transaction.
"""
args = [
'VtTabletCommit',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def rollback(self, transaction_id, auto_log=True):
"""rollback uses 'vtctl VtTabletRollback' to rollback a transaction.
"""
args = [
'VtTabletRollback',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def kill_tablets(tablets):
for t in tablets:
logging.debug('killing vttablet: %s', t.tablet_alias)
if t.proc is not None:
Tablet.tablets_running -= 1
t.proc.terminate()
for t in tablets:
if t.proc is not None:
t.proc.wait()
t.proc = None
|
{
"content_hash": "d5445cadb6506baab4608af37f10fe31",
"timestamp": "",
"source": "github",
"line_count": 706,
"max_line_length": 79,
"avg_line_length": 34.09065155807365,
"alnum_prop": 0.5991773308957953,
"repo_name": "tjyang/vitess",
"id": "555abc8b1880880aacd9d887f3c8c0f43e6e75c4",
"size": "24068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tablet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9588"
},
{
"name": "CSS",
"bytes": "202224"
},
{
"name": "Go",
"bytes": "4771381"
},
{
"name": "HTML",
"bytes": "46202"
},
{
"name": "Java",
"bytes": "165580"
},
{
"name": "JavaScript",
"bytes": "48518"
},
{
"name": "Liquid",
"bytes": "8009"
},
{
"name": "Makefile",
"bytes": "5071"
},
{
"name": "PHP",
"bytes": "726238"
},
{
"name": "Protocol Buffer",
"bytes": "75481"
},
{
"name": "Python",
"bytes": "657888"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "30648"
},
{
"name": "Yacc",
"bytes": "19565"
}
],
"symlink_target": ""
}
|
import shutil
import csv
import json
import logging
import os
import sys
import time
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import foctor_core.foctor_core as fc
class HeadlessBrowser:
def __init__(self):
self.cur_path = os.path.dirname(os.path.abspath(__file__))
self.display = Display(visible=False)
self.binary = None
self.profile = None
self.driver = None
self.parsed = 0
@fc.timing
def setup_profile(self, firebug=True, netexport=True):
"""
        Set up the firefox profile
        :param firebug: whether to add the firebug extension
        :param netexport: whether to add the netexport extension
:return: a firefox profile object
"""
profile = webdriver.FirefoxProfile()
profile.set_preference("app.update.enabled", False)
if firebug:
profile.add_extension(os.path.join(self.cur_path, 'extensions/firebug-2.0.8.xpi'))
profile.set_preference("extensions.firebug.currentVersion", "2.0.8")
profile.set_preference("extensions.firebug.allPagesActivation", "on")
profile.set_preference("extensions.firebug.defaultPanelName", "net")
profile.set_preference("extensions.firebug.net.enableSites", True)
profile.set_preference("extensions.firebug.delayLoad", False)
profile.set_preference("extensions.firebug.onByDefault", True)
profile.set_preference("extensions.firebug.showFirstRunPage", False)
profile.set_preference("extensions.firebug.net.defaultPersist", True) # persist all redirection responses
if netexport:
har_path = os.path.join(self.cur_path, "har")
if not os.path.exists(har_path):
os.mkdir(har_path)
profile.add_extension(os.path.join(self.cur_path, 'extensions/netExport-0.9b7.xpi'))
profile.set_preference("extensions.firebug.DBG_NETEXPORT", True)
profile.set_preference("extensions.firebug.netexport.alwaysEnableAutoExport", True)
profile.set_preference("extensions.firebug.netexport.defaultLogDir", har_path)
profile.set_preference("extensions.firebug.netexport.includeResponseBodies", True)
return profile
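    # Illustrative sketch (not from the original file): the profile built above
    # is normally handed straight to Selenium, as run() does below.
    #
    #   profile = browser.setup_profile(firebug=True, netexport=True)
    #   driver = webdriver.Firefox(firefox_profile=profile)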
def open_virtual_display(self):
self.display.start()
def close_virtual_display(self):
self.display.stop()
def wrap_results(self, **kwargs):
"""
        Wrap the returned http responses into a well-formatted dict
        :param kwargs: this dict param should contain the following keys:
            fd: the directory the exported har files are written to
            url: the test url of the result
            files_count: the number of files under the har/ directory
        :return (dict): the parsed request/response entries for the url
"""
if 'fd' not in kwargs \
or 'url' not in kwargs \
or 'files_count' not in kwargs:
logging.error("Missing arguments in wrap_results function")
return {}
external = kwargs['external'] if 'external' in kwargs else None
fd = kwargs['fd']
url = kwargs['url']
length = kwargs['files_count']
results = {}
files = []
wait_time = 15
host = self.divide_url(url)[0]
time.sleep(0.5)
# wait until the har file is generated
while len(os.listdir(fd)) <= length + self.parsed:
time.sleep(1)
wait_time -= 1
if wait_time == 0:
logging.warning("%s waiting har file result timed out" % url)
results['error'] = "wrap har file timeout"
if external is not None:
external[url] = results
return results
time.sleep(1)
# find all har files under har/ directory
for fn in os.listdir(fd):
if fn.endswith(".har") and host in fn:
path = os.path.join(fd, fn)
files.append((fn, os.stat(path).st_mtime))
# sort all har files and parse the latest one
files.sort(key=lambda x: x[1])
if len(files) > 0:
with open(fd + '/' + files[-1][0]) as f:
raw_data = json.load(f)['log']['entries']
results = [{} for i in range(0, len(raw_data))]
for i in range(0, len(results)):
results[i]['request'] = {}
results[i]['request']['method'] = raw_data[i]['request']['method']
headers = {}
for header in raw_data[i]['request']['headers']:
headers[header['name']] = header['value']
results[i]['request']['headers'] = headers
results[i]['response'] = {}
results[i]['response']['status'] = raw_data[i]['response']['status']
results[i]['response']['reason'] = raw_data[i]['response']['statusText']
headers = {}
for header in raw_data[i]['response']['headers']:
headers[header['name']] = header['value']
results[i]['response']['headers'] = headers
results[i]['response']['redirect'] = raw_data[i]['response']['redirectURL']
results[i]['response']['body'] = raw_data[i]['response']['content']
self.parsed += 1 # increment the number of parsed har files
else:
logging.warning("Cannot find har file for %s" % url)
# save test result of this url to the external result object or
# return the result
if external is not None:
external[url] = results
else:
return results
def divide_url(self, url):
"""
        divide a url into its host and path parts
"""
if 'https://' in url:
host = url[8:].split('/')[0]
path = url[8 + len(host):]
elif 'http://' in url:
host = url[7:].split('/')[0]
path = url[7 + len(host):]
else:
host = url.split('/')[0]
path = url[len(host):]
return host, path
def get(self, host, files_count, path="/", ssl=False, external=None):
"""
        Send a get request to a url and wrap the results
        :param host (str): the host name of the url
        :param files_count (int): the number of files already under the har/ directory
        :param path (str): the path of the url (starts with "/")
        :param ssl (bool): whether to use https for the request
        :param external (dict): optional dict in which to store the result, keyed by url
        :return (dict): the result of the test url
"""
theme = "https" if ssl else "http"
url = host + path
http_url = theme + "://" + url
result = {}
try:
capture_path = os.getcwd() + '/'
har_file_path = capture_path + "har/"
# fc.load_page(self.driver, http_url)
fc.switch_tab(self.driver)
self.load_page(http_url)
print "driver get: " + http_url
time.sleep(2)
# if url[-1] == "/":
# f_name = url.split('/')[-2]
# else:
# f_name = url.split('/')[-1]
# fc.save_html(self.driver, f_name, os.path.join(capture_path, "htmls/"))
# fc.save_screenshot(self.driver, f_name, os.path.join(capture_path, "screenshots/"))
result = self.wrap_results(url=http_url, files_count=files_count, fd=har_file_path)
if external is not None:
external[http_url] = result
except Exception as e:
result['error'] = e.message
print e
return result
def run_file(self, input_file, results):
"""
        use the foctor_core library to do get requests
        :param input_file: a (file_name, file_contents) tuple for the list of test urls
            format:
            1, www.facebook.com
            2, www.google.com
            ...
        :param results: the dict in which to save the responses from the server
"""
capture_path = self.cur_path
        display_mode = 0  # 0 is virtual display (Xvfb mode)
site_list = []
file_name, file_contents = input_file
result = {"file_name": file_name}
file_metadata = {}
file_comments = []
run_start_time = time.time()
index = 1
csvreader = csv.reader(file_contents, delimiter=',', quotechar='"')
for row in csvreader:
"""
First few lines are expected to be comments in key: value
format. The first line after that could be our column header
row, starting with "url", and the rest are data rows.
This is a sample input file we're trying to parse:
# comment: Global List,,,,,
# date: 03-17-2015,,,,,
# version: 1,,,,,
# description: This is the global list. Last updated in 2012.,,,,
url,country,category,description,rationale,provider
http://8thstreetlatinas.com,glo,PORN,,,PRIV
http://abpr2.railfan.net,glo,MISC,Pictures of trains,,PRIV
"""
# parse file comments, if it looks like "key : value",
# parse it as a key-value pair. otherwise, just
# store it as a raw comment.
if row[0][0] == '#':
row = row[0][1:].strip()
if len(row.split(':')) > 1:
key, value = row.split(':', 1)
key = key.strip()
value = value.strip()
file_metadata[key] = value
else:
file_comments.append(row)
continue
# detect the header row and store it
# it is usually the first row and starts with "url,"
if row[0].strip().lower() == "url":
index_row = row
continue
url = row[0].strip()
if url is None:
continue
meta = row[1:]
site_list.append([index, url])
index += 1
driver, display = fc.do_crawl(sites=site_list, driver=self.driver, display=self.display,
capture_path=capture_path, callback=self.wrap_results,
external=results, fd=os.path.join(capture_path, "har/"),
files_count=len(os.listdir(os.path.join(capture_path, "har/"))))
fc.teardown_driver(driver, display, display_mode)
driver.quit() # quit driver will also clean up the tmp file under /tmp directory
def run(self, input_files, url=None, verbose=0):
"""
        run the headless browser with the given input
        if url is given, only that url is tested and input_files is ignored.
        :param url: a single url to test (optional)
        :param input_files: a dict mapping file names to file contents, each in
            "index, url" CSV format, i.e.
            1, www.facebook.com
            2, www.google.com
            ...
        :param verbose: if > 0, firefox output is logged to stdout
        :return: a dict mapping each tested url to its results
"""
if not url and not input_files:
logging.warning("No input file")
return {"error": "no inputs"}
results = {}
self.open_virtual_display()
if verbose > 0:
log_file = sys.stdout
else:
log_file = None
# set up firefox driver
self.binary = FirefoxBinary(os.path.join(self.cur_path, 'firefox/firefox'), log_file=log_file)
self.profile = self.setup_profile()
self.driver = webdriver.Firefox(firefox_profile=self.profile, firefox_binary=self.binary, timeout=60)
self.driver.set_page_load_timeout(60)
isfile = False
if url:
            host, path = self.divide_url(url)
            har_path = os.path.join(self.cur_path, 'har')
            files_count = len(os.listdir(har_path)) if os.path.exists(har_path) else 0
            results[url] = self.get(host, files_count, path)
else:
isfile = True
for input_file in input_files.items():
logging.info("Testing input file %s..." % (input_file[0]))
self.run_file(input_file, results)
# foctor_core will quit the driver by itself so we only quit the driver when we don't use foctor core
if not isfile:
logging.info("Quit driver")
self.driver.quit()
self.close_virtual_display()
logging.debug("Deleting har folder")
shutil.rmtree(os.path.join(self.cur_path, 'har'))
return results
|
{
"content_hash": "d8efe547f8d7611f66b7bde873344347",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 118,
"avg_line_length": 38.07598784194529,
"alnum_prop": 0.5364412868204678,
"repo_name": "lianke123321/centinel",
"id": "695b8e1a7b844ec4a0bdadc5f05a9e4686ddca44",
"size": "12527",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "centinel/primitives/headless_browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "245"
},
{
"name": "Python",
"bytes": "240200"
},
{
"name": "Shell",
"bytes": "8915"
}
],
"symlink_target": ""
}
|
r"""
===================
Overview of Sampler
===================
Sampler class calculates probabilities or quasi-probabilities of bitstrings from quantum circuits.
A sampler is initialized with an empty parameter set. The sampler is used to
create a :class:`~qiskit.providers.JobV1`, via the :meth:`qiskit.primitives.Sampler.run()`
method. This method is called with the following parameters
* quantum circuits (:math:`\psi_i(\theta)`): list of (parameterized) quantum circuits.
(a list of :class:`~qiskit.circuit.QuantumCircuit` objects)
* parameter values (:math:`\theta_k`): list of sets of parameter values
to be bound to the parameters of the quantum circuits.
(list of list of float)
The method returns a :class:`~qiskit.providers.JobV1` object; calling
:meth:`qiskit.providers.JobV1.result()` yields a :class:`~qiskit.primitives.SamplerResult`
object, which contains probabilities or quasi-probabilities of bitstrings,
plus optional metadata like error bars in the samples.
Here is an example of how the sampler is used.
.. code-block:: python
from qiskit.primitives import Sampler
from qiskit import QuantumCircuit
from qiskit.circuit.library import RealAmplitudes
# a Bell circuit
bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)
bell.measure_all()
# two parameterized circuits
pqc = RealAmplitudes(num_qubits=2, reps=2)
pqc.measure_all()
pqc2 = RealAmplitudes(num_qubits=2, reps=3)
pqc2.measure_all()
theta1 = [0, 1, 1, 2, 3, 5]
theta2 = [0, 1, 2, 3, 4, 5, 6, 7]
# initialization of the sampler
sampler = Sampler()
# Sampler runs a job on the Bell circuit
job = sampler.run(circuits=[bell], parameter_values=[[]], parameters=[[]])
job_result = job.result()
print([q.binary_probabilities() for q in job_result.quasi_dists])
# Sampler runs a job on the parameterized circuits
job2 = sampler.run(
circuits=[pqc, pqc2],
parameter_values=[theta1, theta2],
parameters=[pqc.parameters, pqc2.parameters])
job_result = job2.result()
print([q.binary_probabilities() for q in job_result.quasi_dists])
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sequence
from copy import copy
from typing import cast
from warnings import warn
import numpy as np
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.circuit.parametertable import ParameterView
from qiskit.providers import JobV1 as Job
from qiskit.providers import Options
from qiskit.utils.deprecation import deprecate_arguments, deprecate_function
from .sampler_result import SamplerResult
from .utils import _circuit_key
class BaseSampler(ABC):
"""Sampler base class
Base class of Sampler that calculates quasi-probabilities of bitstrings from quantum circuits.
"""
__hash__ = None
def __init__(
self,
circuits: Iterable[QuantumCircuit] | QuantumCircuit | None = None,
parameters: Iterable[Iterable[Parameter]] | None = None,
options: dict | None = None,
):
"""
Args:
circuits: Quantum circuits to be executed.
parameters: Parameters of each of the quantum circuits.
Defaults to ``[circ.parameters for circ in circuits]``.
options: Default options.
Raises:
ValueError: For mismatch of circuits and parameters list.
"""
if circuits is not None or parameters is not None:
warn(
"The BaseSampler 'circuits', and `parameters` kwarg are deprecated "
"as of 0.22.0 and will be removed no earlier than 3 months after the "
"release date. You can use 'run' method to append objects.",
DeprecationWarning,
2,
)
if isinstance(circuits, QuantumCircuit):
circuits = (circuits,)
self._circuits = [] if circuits is None else list(circuits)
        # To guarantee that they exist as instance variables.
        # With only a dynamic set, Python will not know whether the attribute exists or not.
self._circuit_ids: dict[tuple, int] = self._circuit_ids
if parameters is None:
self._parameters = [circ.parameters for circ in self._circuits]
else:
self._parameters = [ParameterView(par) for par in parameters]
if len(self._parameters) != len(self._circuits):
raise ValueError(
f"Different number of parameters ({len(self._parameters)}) "
f"and circuits ({len(self._circuits)})"
)
self._run_options = Options()
if options is not None:
self._run_options.update_options(**options)
def __new__(
cls,
circuits: Iterable[QuantumCircuit] | QuantumCircuit | None = None,
parameters: Iterable[Iterable[Parameter]] | None = None, # pylint: disable=unused-argument
**kwargs, # pylint: disable=unused-argument
):
self = super().__new__(cls)
if circuits is None:
self._circuit_ids = {}
elif isinstance(circuits, Iterable):
circuits = copy(circuits)
self._circuit_ids = {_circuit_key(circuit): i for i, circuit in enumerate(circuits)}
else:
self._circuit_ids = {_circuit_key(circuits): 0}
return self
@deprecate_function(
"The BaseSampler.__enter__ method is deprecated as of Qiskit Terra 0.22.0 "
"and will be removed no sooner than 3 months after the releasedate. "
"BaseSampler should be initialized directly.",
)
def __enter__(self):
return self
@deprecate_function(
"The BaseSampler.__exit__ method is deprecated as of Qiskit Terra 0.22.0 "
"and will be removed no sooner than 3 months after the releasedate. "
"BaseSampler should be initialized directly.",
)
def __exit__(self, *exc_info):
self.close()
def close(self):
"""Close the session and free resources"""
...
@property
def circuits(self) -> tuple[QuantumCircuit, ...]:
"""Quantum circuits to be sampled.
Returns:
The quantum circuits to be sampled.
"""
return tuple(self._circuits)
@property
def parameters(self) -> tuple[ParameterView, ...]:
"""Parameters of quantum circuits.
Returns:
List of the parameters in each quantum circuit.
"""
return tuple(self._parameters)
@property
def options(self) -> Options:
"""Return options values for the estimator.
Returns:
options
"""
return self._run_options
def set_options(self, **fields):
"""Set options values for the estimator.
Args:
**fields: The fields to update the options
"""
self._run_options.update_options(**fields)
@deprecate_function(
"The BaseSampler.__call__ method is deprecated as of Qiskit Terra 0.22.0 "
"and will be removed no sooner than 3 months after the releasedate. "
"Use run method instead.",
)
@deprecate_arguments({"circuit_indices": "circuits"})
def __call__(
self,
circuits: Sequence[int | QuantumCircuit],
parameter_values: Sequence[Sequence[float]] | None = None,
**run_options,
) -> SamplerResult:
"""Run the sampling of bitstrings.
Args:
circuits: the list of circuit indices or circuit objects.
parameter_values: Parameters to be bound to the circuit.
run_options: Backend runtime options used for circuit execution.
Returns:
The result of the sampler. The i-th result corresponds to
``self.circuits[circuits[i]]`` evaluated with parameters bound as
``parameter_values[i]``.
Raises:
ValueError: For mismatch of object id.
ValueError: For mismatch of length of Sequence.
"""
# Support ndarray
if isinstance(parameter_values, np.ndarray):
parameter_values = parameter_values.tolist()
# Allow objects
circuits = [
self._circuit_ids.get(_circuit_key(circuit))
if not isinstance(circuit, (int, np.integer))
else circuit
for circuit in circuits
]
if any(circuit is None for circuit in circuits):
raise ValueError(
"The circuits passed when calling sampler is not one of the circuits used to "
"initialize the session."
)
circuits = cast("list[int]", circuits)
# Allow optional
if parameter_values is None:
for i in circuits:
if len(self._circuits[i].parameters) != 0:
raise ValueError(
f"The {i}-th circuit ({len(circuits)}) is parameterised,"
"but parameter values are not given."
)
parameter_values = [[]] * len(circuits)
# Validation
if len(circuits) != len(parameter_values):
raise ValueError(
f"The number of circuits ({len(circuits)}) does not match "
f"the number of parameter value sets ({len(parameter_values)})."
)
for i, value in zip(circuits, parameter_values):
if len(value) != len(self._parameters[i]):
raise ValueError(
f"The number of values ({len(value)}) does not match "
f"the number of parameters ({len(self._parameters[i])}) for the {i}-th circuit."
)
if max(circuits) >= len(self.circuits):
raise ValueError(
f"The number of circuits is {len(self.circuits)}, "
f"but the index {max(circuits)} is given."
)
run_opts = copy(self.options)
run_opts.update_options(**run_options)
return self._call(
circuits=circuits,
parameter_values=parameter_values,
**run_opts.__dict__,
)
def run(
self,
circuits: QuantumCircuit | Sequence[QuantumCircuit],
parameter_values: Sequence[float] | Sequence[Sequence[float]] | None = None,
**run_options,
) -> Job:
"""Run the job of the sampling of bitstrings.
Args:
            circuits: One or more circuit objects.
parameter_values: Parameters to be bound to the circuit.
run_options: Backend runtime options used for circuit execution.
Returns:
The job object of the result of the sampler. The i-th result corresponds to
``circuits[i]`` evaluated with parameters bound as ``parameter_values[i]``.
Raises:
ValueError: Invalid arguments are given.
"""
# Support ndarray
if isinstance(parameter_values, np.ndarray):
parameter_values = parameter_values.tolist()
if not isinstance(circuits, Sequence):
circuits = [circuits]
if parameter_values is not None and (
len(parameter_values) == 0 or not isinstance(parameter_values[0], (Sequence, Iterable))
):
parameter_values = [parameter_values] # type: ignore[assignment]
# Allow optional
if parameter_values is None:
for i, circuit in enumerate(circuits):
if circuit.num_parameters != 0:
raise ValueError(
f"The {i}-th circuit ({len(circuits)}) is parameterised,"
"but parameter values are not given."
)
parameter_values = [[]] * len(circuits)
# Validation
if len(circuits) != len(parameter_values):
raise ValueError(
f"The number of circuits ({len(circuits)}) does not match "
f"the number of parameter value sets ({len(parameter_values)})."
)
for i, (circuit, parameter_value) in enumerate(zip(circuits, parameter_values)):
if len(parameter_value) != circuit.num_parameters:
raise ValueError(
f"The number of values ({len(parameter_value)}) does not match "
f"the number of parameters ({circuit.num_parameters}) for the {i}-th circuit."
)
for i, circuit in enumerate(circuits):
if circuit.num_clbits == 0:
raise ValueError(
f"The {i}-th circuit does not have any classical bit. "
"Sampler requires classical bits, plus measurements "
"on the desired qubits."
)
run_opts = copy(self.options)
run_opts.update_options(**run_options)
return self._run(
circuits,
parameter_values,
**run_opts.__dict__,
)
@abstractmethod
def _call(
self,
circuits: Sequence[int],
parameter_values: Sequence[Sequence[float]],
**run_options,
) -> SamplerResult:
...
    # This will be commented out after 0.22. (This is necessary for compatibility.)
# @abstractmethod
def _run(
self,
circuits: Sequence[QuantumCircuit],
parameter_values: Sequence[Sequence[float]],
**run_options,
) -> Job:
raise NotImplementedError(
"_run method is not implemented. This method will be @abstractmethod after 0.22."
)
|
{
"content_hash": "4e128092cdc65dc627ae8a55f675f4a2",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 100,
"avg_line_length": 35.557291666666664,
"alnum_prop": 0.5963087739856452,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "64854a328077670c50803adc23e0c10329c3f88a",
"size": "14131",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/primitives/base_sampler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from pprint import pformat
import distro
def pprint(obj):
for line in pformat(obj).split('\n'):
print(4 * ' ' + line)
print('os_release_info:')
pprint(distro.os_release_info())
print('lsb_release_info:')
pprint(distro.lsb_release_info())
print('distro_release_info:')
pprint(distro.distro_release_info())
print('id: {0}'.format(distro.id()))
print('name: {0}'.format(distro.name()))
print('name_pretty: {0}'.format(distro.name(True)))
print('version: {0}'.format(distro.version()))
print('version_pretty: {0}'.format(distro.version(True)))
print('like: {0}'.format(distro.like()))
print('codename: {0}'.format(distro.codename()))
print('linux_distribution_full: {0}'.format(distro.linux_distribution()))
print('linux_distribution: {0}'.format(distro.linux_distribution(False)))
print('major_version: {0}'.format(distro.major_version()))
print('minor_version: {0}'.format(distro.minor_version()))
print('build_number: {0}'.format(distro.build_number()))
|
{
"content_hash": "07becea0ace663e5fa22042eaa8f42fa",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 33.63333333333333,
"alnum_prop": 0.6987115956392468,
"repo_name": "nir0s/distro",
"id": "5c5ed9ef6f77e7c1bcb535c8f978fe1295e185e7",
"size": "1606",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "query_local_distro.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4425"
},
{
"name": "Python",
"bytes": "119024"
},
{
"name": "Shell",
"bytes": "7680"
}
],
"symlink_target": ""
}
|
from typing import Iterable, List, Optional, Sequence, Text
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import JsonableError
from zerver.models import (
Realm,
UserProfile,
get_user_including_cross_realm,
)
def user_profiles_from_unvalidated_emails(emails: Iterable[Text], realm: Realm) -> List[UserProfile]:
user_profiles = [] # type: List[UserProfile]
for email in emails:
try:
user_profile = get_user_including_cross_realm(email, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_("Invalid email '%s'") % (email,))
user_profiles.append(user_profile)
return user_profiles
def get_user_profiles(emails: Iterable[Text], realm: Realm) -> List[UserProfile]:
try:
return user_profiles_from_unvalidated_emails(emails, realm)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
class Addressee:
# This is really just a holder for vars that tended to be passed
# around in a non-type-safe way before this class was introduced.
#
# It also avoids some nonsense where you have to think about whether
# topic should be None or '' for a PM, or you have to make an array
# of one stream.
#
# Eventually we can use this to cache Stream and UserProfile objects
# in memory.
#
# This should be treated as an immutable class.
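    # Illustrative sketch (not from the original file); the stream, topic and
    # email are placeholders:
    #
    #   addressee = Addressee.for_stream('general', 'hello world')
    #   addressee.is_stream()    # True
    #   addressee.stream_name()  # 'general'
    #
    #   pm = Addressee.for_private(['iago@zulip.com'], realm)
    #   pm.user_profiles()       # list with a single UserProfile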
def __init__(self, msg_type: str,
user_profiles: Optional[Sequence[UserProfile]]=None,
stream_name: Optional[Text]=None,
topic: Text=None) -> None:
assert(msg_type in ['stream', 'private'])
self._msg_type = msg_type
self._user_profiles = user_profiles
self._stream_name = stream_name
self._topic = topic
def msg_type(self) -> str:
return self._msg_type
def is_stream(self) -> bool:
return self._msg_type == 'stream'
def is_private(self) -> bool:
return self._msg_type == 'private'
def user_profiles(self) -> List[UserProfile]:
assert(self.is_private())
return self._user_profiles # type: ignore # assertion protects us
def stream_name(self) -> Text:
assert(self.is_stream())
return self._stream_name
def topic(self) -> Text:
assert(self.is_stream())
return self._topic
@staticmethod
def legacy_build(sender: UserProfile,
message_type_name: Text,
message_to: Sequence[Text],
topic_name: Text,
realm: Optional[Realm]=None) -> 'Addressee':
        # For legacy reasons message_to used to be either a list of
# emails or a list of streams. We haven't fixed all of our
# callers yet.
if realm is None:
realm = sender.realm
if message_type_name == 'stream':
if len(message_to) > 1:
raise JsonableError(_("Cannot send to multiple streams"))
if message_to:
stream_name = message_to[0]
else:
# This is a hack to deal with the fact that we still support
# default streams (and the None will be converted later in the
# callpath).
if sender.default_sending_stream:
# Use the users default stream
stream_name = sender.default_sending_stream.name
else:
raise JsonableError(_('Missing stream'))
return Addressee.for_stream(stream_name, topic_name)
elif message_type_name == 'private':
emails = message_to
return Addressee.for_private(emails, realm)
else:
raise JsonableError(_("Invalid message type"))
@staticmethod
def for_stream(stream_name: Text, topic: Text) -> 'Addressee':
return Addressee(
msg_type='stream',
stream_name=stream_name,
topic=topic,
)
@staticmethod
def for_private(emails: Sequence[Text], realm: Realm) -> 'Addressee':
user_profiles = get_user_profiles(emails, realm)
return Addressee(
msg_type='private',
user_profiles=user_profiles,
)
@staticmethod
def for_user_profile(user_profile: UserProfile) -> 'Addressee':
user_profiles = [user_profile]
return Addressee(
msg_type='private',
user_profiles=user_profiles,
)
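# Illustrative sketch, not part of the original module: how a caller is
# expected to build and query an Addressee. The stream and topic names below
# are hypothetical; nothing here runs at import time.
def _addressee_usage_example() -> 'Addressee':
    stream_addressee = Addressee.for_stream('general', 'daily standup')
    assert stream_addressee.is_stream()
    assert stream_addressee.stream_name() == 'general'
    assert stream_addressee.topic() == 'daily standup'
    return stream_addressee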
|
{
"content_hash": "9cd4943839b9d3b6292c17d03985395a",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 101,
"avg_line_length": 35.61068702290076,
"alnum_prop": 0.602357984994641,
"repo_name": "mahim97/zulip",
"id": "58761baeee011075e1c0e608b24a7a80b7f8104d",
"size": "4666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/lib/addressee.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
}
|
import pytest
from pybind11_tests import ExamplePythonTypes, ConstructorStats, has_optional, has_exp_optional
def test_repr():
# In Python 3.3+, repr() accesses __qualname__
assert "ExamplePythonTypes__Meta" in repr(type(ExamplePythonTypes))
assert "ExamplePythonTypes" in repr(ExamplePythonTypes)
def test_static():
ExamplePythonTypes.value = 15
assert ExamplePythonTypes.value == 15
assert ExamplePythonTypes.value2 == 5
with pytest.raises(AttributeError) as excinfo:
ExamplePythonTypes.value2 = 15
assert str(excinfo.value) == "can't set attribute"
def test_instance(capture):
with pytest.raises(TypeError) as excinfo:
ExamplePythonTypes()
assert str(excinfo.value) == "pybind11_tests.ExamplePythonTypes: No constructor defined!"
instance = ExamplePythonTypes.new_instance()
with capture:
dict_result = instance.get_dict()
dict_result['key2'] = 'value2'
instance.print_dict(dict_result)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
with capture:
dict_result = instance.get_dict_2()
dict_result['key2'] = 'value2'
instance.print_dict_2(dict_result)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
with capture:
set_result = instance.get_set()
set_result.add('key4')
instance.print_set(set_result)
assert capture.unordered == """
key: key1
key: key2
key: key3
key: key4
"""
with capture:
set_result = instance.get_set2()
set_result.add('key3')
instance.print_set_2(set_result)
assert capture.unordered == """
key: key1
key: key2
key: key3
"""
with capture:
list_result = instance.get_list()
list_result.append('value2')
instance.print_list(list_result)
assert capture.unordered == """
Entry at position 0: value
list item 0: overwritten
list item 1: value2
"""
with capture:
list_result = instance.get_list_2()
list_result.append('value2')
instance.print_list_2(list_result)
assert capture.unordered == """
list item 0: value
list item 1: value2
"""
with capture:
list_result = instance.get_list_2()
list_result.append('value2')
instance.print_list_2(tuple(list_result))
assert capture.unordered == """
list item 0: value
list item 1: value2
"""
array_result = instance.get_array()
assert array_result == ['array entry 1', 'array entry 2']
with capture:
instance.print_array(array_result)
assert capture.unordered == """
array item 0: array entry 1
array item 1: array entry 2
"""
varray_result = instance.get_valarray()
assert varray_result == [1, 4, 9]
with capture:
instance.print_valarray(varray_result)
assert capture.unordered == """
valarray item 0: 1
valarray item 1: 4
valarray item 2: 9
"""
with pytest.raises(RuntimeError) as excinfo:
instance.throw_exception()
assert str(excinfo.value) == "This exception was intentionally thrown."
assert instance.pair_passthrough((True, "test")) == ("test", True)
assert instance.tuple_passthrough((True, "test", 5)) == (5, "test", True)
# Any sequence can be cast to a std::pair or std::tuple
assert instance.pair_passthrough([True, "test"]) == ("test", True)
assert instance.tuple_passthrough([True, "test", 5]) == (5, "test", True)
assert instance.get_bytes_from_string().decode() == "foo"
assert instance.get_bytes_from_str().decode() == "bar"
assert instance.get_str_from_string().encode().decode() == "baz"
assert instance.get_str_from_bytes().encode().decode() == "boo"
class A(object):
def __str__(self):
return "this is a str"
def __repr__(self):
return "this is a repr"
with capture:
instance.test_print(A())
assert capture == """
this is a str
this is a repr
"""
cstats = ConstructorStats.get(ExamplePythonTypes)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# PyPy does not seem to propagate the tp_doc field at the moment
def test_class_docs(doc):
assert doc(ExamplePythonTypes) == "Example 2 documentation"
def test_method_docs(doc):
assert doc(ExamplePythonTypes.get_dict) == """
get_dict(self: m.ExamplePythonTypes) -> dict
Return a Python dictionary
"""
assert doc(ExamplePythonTypes.get_dict_2) == """
get_dict_2(self: m.ExamplePythonTypes) -> Dict[str, str]
Return a C++ dictionary
"""
assert doc(ExamplePythonTypes.get_list) == """
get_list(self: m.ExamplePythonTypes) -> list
Return a Python list
"""
assert doc(ExamplePythonTypes.get_list_2) == """
get_list_2(self: m.ExamplePythonTypes) -> List[str]
Return a C++ list
"""
assert doc(ExamplePythonTypes.get_dict) == """
get_dict(self: m.ExamplePythonTypes) -> dict
Return a Python dictionary
"""
assert doc(ExamplePythonTypes.get_set) == """
get_set(self: m.ExamplePythonTypes) -> set
Return a Python set
"""
assert doc(ExamplePythonTypes.get_set2) == """
get_set2(self: m.ExamplePythonTypes) -> Set[str]
Return a C++ set
"""
assert doc(ExamplePythonTypes.get_array) == """
get_array(self: m.ExamplePythonTypes) -> List[str[2]]
Return a C++ array
"""
assert doc(ExamplePythonTypes.get_valarray) == """
get_valarray(self: m.ExamplePythonTypes) -> List[int]
Return a C++ valarray
"""
assert doc(ExamplePythonTypes.print_dict) == """
print_dict(self: m.ExamplePythonTypes, arg0: dict) -> None
Print entries of a Python dictionary
"""
assert doc(ExamplePythonTypes.print_dict_2) == """
print_dict_2(self: m.ExamplePythonTypes, arg0: Dict[str, str]) -> None
Print entries of a C++ dictionary
"""
assert doc(ExamplePythonTypes.print_set) == """
print_set(self: m.ExamplePythonTypes, arg0: set) -> None
Print entries of a Python set
"""
assert doc(ExamplePythonTypes.print_set_2) == """
print_set_2(self: m.ExamplePythonTypes, arg0: Set[str]) -> None
Print entries of a C++ set
"""
assert doc(ExamplePythonTypes.print_list) == """
print_list(self: m.ExamplePythonTypes, arg0: list) -> None
Print entries of a Python list
"""
assert doc(ExamplePythonTypes.print_list_2) == """
print_list_2(self: m.ExamplePythonTypes, arg0: List[str]) -> None
Print entries of a C++ list
"""
assert doc(ExamplePythonTypes.print_array) == """
print_array(self: m.ExamplePythonTypes, arg0: List[str[2]]) -> None
Print entries of a C++ array
"""
assert doc(ExamplePythonTypes.pair_passthrough) == """
pair_passthrough(self: m.ExamplePythonTypes, arg0: Tuple[bool, str]) -> Tuple[str, bool]
Return a pair in reversed order
"""
assert doc(ExamplePythonTypes.tuple_passthrough) == """
tuple_passthrough(self: m.ExamplePythonTypes, arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
Return a triple in reversed order
""" # noqa: E501 line too long
assert doc(ExamplePythonTypes.throw_exception) == """
throw_exception(self: m.ExamplePythonTypes) -> None
Throw an exception
"""
assert doc(ExamplePythonTypes.new_instance) == """
new_instance() -> m.ExamplePythonTypes
Return an instance
"""
def test_module():
import pybind11_tests
assert pybind11_tests.__name__ == "pybind11_tests"
assert ExamplePythonTypes.__name__ == "ExamplePythonTypes"
assert ExamplePythonTypes.__module__ == "pybind11_tests"
assert ExamplePythonTypes.get_set.__name__ == "get_set"
assert ExamplePythonTypes.get_set.__module__ == "pybind11_tests"
def test_print(capture):
from pybind11_tests import test_print_function
with capture:
test_print_function()
assert capture == """
Hello, World!
1 2.0 three True -- multiple args
*args-and-a-custom-separator
no new line here -- next print
flush
py::print + str.format = this
"""
assert capture.stderr == "this goes to stderr"
def test_str_api():
from pybind11_tests import test_str_format
s1, s2 = test_str_format()
assert s1 == "1 + 2 = 3"
assert s1 == s2
def test_dict_api():
from pybind11_tests import test_dict_keyword_constructor
assert test_dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
def test_accessors():
from pybind11_tests import test_accessor_api, test_tuple_accessor, test_accessor_assignment
class SubTestObject:
attr_obj = 1
attr_char = 2
class TestObject:
basic_attr = 1
begin_end = [1, 2, 3]
d = {"operator[object]": 1, "operator[char *]": 2}
sub = SubTestObject()
def func(self, x, *args):
return self.basic_attr + x + sum(args)
d = test_accessor_api(TestObject())
assert d["basic_attr"] == 1
assert d["begin_end"] == [1, 2, 3]
assert d["operator[object]"] == 1
assert d["operator[char *]"] == 2
assert d["attr(object)"] == 1
assert d["attr(char *)"] == 2
assert d["missing_attr_ptr"] == "raised"
assert d["missing_attr_chain"] == "raised"
assert d["is_none"] is False
assert d["operator()"] == 2
assert d["operator*"] == 7
assert test_tuple_accessor(tuple()) == (0, 1, 2)
d = test_accessor_assignment()
assert d["get"] == 0
assert d["deferred_get"] == 0
assert d["set"] == 1
assert d["deferred_set"] == 1
assert d["var"] == 99
@pytest.mark.skipif(not has_optional, reason='no <optional>')
def test_optional():
from pybind11_tests import double_or_zero, half_or_none, test_nullopt
assert double_or_zero(None) == 0
assert double_or_zero(42) == 84
pytest.raises(TypeError, double_or_zero, 'foo')
assert half_or_none(0) is None
assert half_or_none(42) == 21
pytest.raises(TypeError, half_or_none, 'foo')
assert test_nullopt() == 42
assert test_nullopt(None) == 42
assert test_nullopt(42) == 42
assert test_nullopt(43) == 43
@pytest.mark.skipif(not has_exp_optional, reason='no <experimental/optional>')
def test_exp_optional():
from pybind11_tests import double_or_zero_exp, half_or_none_exp, test_nullopt_exp
assert double_or_zero_exp(None) == 0
assert double_or_zero_exp(42) == 84
pytest.raises(TypeError, double_or_zero_exp, 'foo')
assert half_or_none_exp(0) is None
assert half_or_none_exp(42) == 21
pytest.raises(TypeError, half_or_none_exp, 'foo')
assert test_nullopt_exp() == 42
assert test_nullopt_exp(None) == 42
assert test_nullopt_exp(42) == 42
assert test_nullopt_exp(43) == 43
def test_constructors():
"""C++ default and converting constructors are equivalent to type calls in Python"""
from pybind11_tests import (test_default_constructors, test_converting_constructors,
test_cast_functions)
types = [str, bool, int, float, tuple, list, dict, set]
expected = {t.__name__: t() for t in types}
assert test_default_constructors() == expected
data = {
str: 42,
bool: "Not empty",
int: "42",
float: "+1e3",
tuple: range(3),
list: range(3),
dict: [("two", 2), ("one", 1), ("three", 3)],
set: [4, 4, 5, 6, 6, 6],
memoryview: b'abc'
}
inputs = {k.__name__: v for k, v in data.items()}
expected = {k.__name__: k(v) for k, v in data.items()}
assert test_converting_constructors(inputs) == expected
assert test_cast_functions(inputs) == expected
def test_move_out_container():
"""Properties use the `reference_internal` policy by default. If the underlying function
returns an rvalue, the policy is automatically changed to `move` to avoid referencing
a temporary. In case the return value is a container of user-defined types, the policy
also needs to be applied to the elements, not just the container."""
from pybind11_tests import MoveOutContainer
c = MoveOutContainer()
moved_out_list = c.move_list
assert [x.value for x in moved_out_list] == [0, 1, 2]
def test_implicit_casting():
"""Tests implicit casting when assigning or appending to dicts and lists."""
from pybind11_tests import get_implicit_casting
z = get_implicit_casting()
assert z['d'] == {
'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
}
assert z['l'] == [3, 6, 9, 12, 15]
def test_unicode_conversion():
"""Tests unicode conversion and error reporting."""
import pybind11_tests
from pybind11_tests import (good_utf8_string, bad_utf8_string,
good_utf16_string, bad_utf16_string,
good_utf32_string, # bad_utf32_string,
good_wchar_string, # bad_wchar_string,
u8_Z, u8_eacute, u16_ibang, u32_mathbfA, wchar_heart)
assert good_utf8_string() == u"Say utf8‽ 🎂 𝐀"
assert good_utf16_string() == u"b‽🎂𝐀z"
assert good_utf32_string() == u"a𝐀🎂‽z"
assert good_wchar_string() == u"a⸘𝐀z"
with pytest.raises(UnicodeDecodeError):
bad_utf8_string()
with pytest.raises(UnicodeDecodeError):
bad_utf16_string()
# These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)
if hasattr(pybind11_tests, "bad_utf32_string"):
with pytest.raises(UnicodeDecodeError):
pybind11_tests.bad_utf32_string()
if hasattr(pybind11_tests, "bad_wchar_string"):
with pytest.raises(UnicodeDecodeError):
pybind11_tests.bad_wchar_string()
assert u8_Z() == 'Z'
assert u8_eacute() == u'é'
assert u16_ibang() == u'‽'
assert u32_mathbfA() == u'𝐀'
assert wchar_heart() == u'♥'
def test_single_char_arguments():
"""Tests failures for passing invalid inputs to char-accepting functions"""
from pybind11_tests import ord_char, ord_char16, ord_char32, ord_wchar, wchar_size
def toobig_message(r):
return "Character code point not in range({0:#x})".format(r)
toolong_message = "Expected a character, but multi-character string found"
assert ord_char(u'a') == 0x61 # simple ASCII
assert ord_char(u'é') == 0xE9 # requires 2 bytes in utf-8, but can be stuffed in a char
with pytest.raises(ValueError) as excinfo:
assert ord_char(u'Ā') == 0x100 # requires 2 bytes, doesn't fit in a char
assert str(excinfo.value) == toobig_message(0x100)
with pytest.raises(ValueError) as excinfo:
assert ord_char(u'ab')
assert str(excinfo.value) == toolong_message
assert ord_char16(u'a') == 0x61
assert ord_char16(u'é') == 0xE9
assert ord_char16(u'Ā') == 0x100
assert ord_char16(u'‽') == 0x203d
assert ord_char16(u'♥') == 0x2665
with pytest.raises(ValueError) as excinfo:
assert ord_char16(u'🎂') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
with pytest.raises(ValueError) as excinfo:
assert ord_char16(u'aa')
assert str(excinfo.value) == toolong_message
assert ord_char32(u'a') == 0x61
assert ord_char32(u'é') == 0xE9
assert ord_char32(u'Ā') == 0x100
assert ord_char32(u'‽') == 0x203d
assert ord_char32(u'♥') == 0x2665
assert ord_char32(u'🎂') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert ord_char32(u'aa')
assert str(excinfo.value) == toolong_message
assert ord_wchar(u'a') == 0x61
assert ord_wchar(u'é') == 0xE9
assert ord_wchar(u'Ā') == 0x100
assert ord_wchar(u'‽') == 0x203d
assert ord_wchar(u'♥') == 0x2665
if wchar_size == 2:
with pytest.raises(ValueError) as excinfo:
assert ord_wchar(u'🎂') == 0x1F382 # requires surrogate pair
assert str(excinfo.value) == toobig_message(0x10000)
else:
assert ord_wchar(u'🎂') == 0x1F382
with pytest.raises(ValueError) as excinfo:
assert ord_wchar(u'aa')
assert str(excinfo.value) == toolong_message
|
{
"content_hash": "b4958b09747957272dfa8b636a443c59",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 107,
"avg_line_length": 33.223107569721115,
"alnum_prop": 0.616740616380861,
"repo_name": "apple/coremltools",
"id": "c5ade90b1349f883824e2064a842756dad79ee1e",
"size": "16785",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "deps/pybind11/tests/test_python_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79917"
},
{
"name": "C++",
"bytes": "1420033"
},
{
"name": "CMake",
"bytes": "20418"
},
{
"name": "Makefile",
"bytes": "4258"
},
{
"name": "Mustache",
"bytes": "2676"
},
{
"name": "Objective-C",
"bytes": "4061"
},
{
"name": "Objective-C++",
"bytes": "28933"
},
{
"name": "Python",
"bytes": "5004520"
},
{
"name": "Shell",
"bytes": "19662"
}
],
"symlink_target": ""
}
|
"""Hiitrack models."""
from .funnel import FunnelModel
from .property import PropertyModel, PropertyValueModel
from .visitor import VisitorModel
from .bucket import BucketModel, bucket_check, bucket_create
from .user import UserModel, user_authorize
from .event import EventModel
|
{
"content_hash": "cb07f44a3d5067d2bb2bd778f1ed7f08",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 35.125,
"alnum_prop": 0.8185053380782918,
"repo_name": "hiidef/hiitrack-api",
"id": "e3e6b1f465b975ff965250d5a2d72c3cfc6ac9f3",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiitrack/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172974"
}
],
"symlink_target": ""
}
|
from django.shortcuts import _get_queryset
from django.conf import settings
def get_object_or_None(klass, *args, **kwargs):
"""
Uses get() to return an object or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
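# Illustrative sketch (hypothetical app and model names), not part of the
# original module: the docstring above describes the calling convention;
# a typical call looks like this.
def _example_get_object_or_None():
    from myapp.models import Article  # hypothetical model
    # Returns an Article instance, or None when no matching row exists.
    return get_object_or_None(Article, pk=42)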
def get_config(key, default=None):
"""
Get settings from django.conf if exists,
return default value otherwise
example:
ADMIN_EMAIL = get_config('ADMIN_EMAIL', 'default@email.com')
"""
return getattr(settings, key, default)
|
{
"content_hash": "10562a2f71de86cf4a0fe725a6fdfe35",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 28.09375,
"alnum_prop": 0.6562847608453838,
"repo_name": "hfercc/mese2014",
"id": "e056092caff454e0ea22e27d71ec1c089c05528c",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "annoying/functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103122"
},
{
"name": "JavaScript",
"bytes": "1054910"
},
{
"name": "Python",
"bytes": "1121791"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
}
|
"""Tests for yapf.line_joiner."""
import textwrap
import unittest
from yapf.yapflib import comment_splicer
from yapf.yapflib import line_joiner
from yapf.yapflib import pytree_unwrapper
from yapf.yapflib import pytree_utils
from yapf.yapflib import style
class LineJoinerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
style.SetGlobalStyle(style.CreatePEP8Style())
def _ParseAndUnwrap(self, code):
"""Produces unwrapped lines from the given code.
Parses the code into a tree, performs comment splicing and runs the
unwrapper.
Arguments:
code: code to parse as a string
Returns:
List of unwrapped lines.
"""
tree = pytree_utils.ParseCodeToTree(code)
comment_splicer.SpliceComments(tree)
uwlines = pytree_unwrapper.UnwrapPyTree(tree)
for uwl in uwlines:
uwl.CalculateFormattingInformation()
return uwlines
def _CheckLineJoining(self, code, join_lines):
"""Check that the given UnwrappedLines are joined as expected.
Arguments:
code: The code to check to see if we can join it.
join_lines: True if we expect the lines to be joined.
"""
uwlines = self._ParseAndUnwrap(code)
self.assertEqual(line_joiner.CanMergeMultipleLines(uwlines), join_lines)
def testSimpleSingleLineStatement(self):
code = textwrap.dedent(u"""\
if isinstance(a, int): continue
""")
self._CheckLineJoining(code, join_lines=True)
def testSimpleMultipleLineStatement(self):
code = textwrap.dedent(u"""\
if isinstance(b, int):
continue
""")
self._CheckLineJoining(code, join_lines=False)
def testSimpleMultipleLineComplexStatement(self):
code = textwrap.dedent(u"""\
if isinstance(c, int):
while True:
continue
""")
self._CheckLineJoining(code, join_lines=False)
def testSimpleMultipleLineStatementWithComment(self):
code = textwrap.dedent(u"""\
if isinstance(d, int): continue # We're pleased that d's an int.
""")
self._CheckLineJoining(code, join_lines=True)
def testSimpleMultipleLineStatementWithLargeIndent(self):
code = textwrap.dedent(u"""\
if isinstance(e, int): continue
""")
self._CheckLineJoining(code, join_lines=True)
def testOverColumnLimit(self):
code = textwrap.dedent(u"""\
if instance(bbbbbbbbbbbbbbbbbbbbbbbbb, int): cccccccccccccccccccccccccc = ddddddddddddddddddddd
""")
self._CheckLineJoining(code, join_lines=False)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "39f79a4816fcaffad0fdc272d6ee834e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 103,
"avg_line_length": 29.04494382022472,
"alnum_prop": 0.6847195357833655,
"repo_name": "jamesblunt/yapf",
"id": "7cd4f2ccc8eee6140ca78db98fcf01d2d59d8933",
"size": "3181",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "yapftests/line_joiner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "370143"
},
{
"name": "VimL",
"bytes": "1555"
}
],
"symlink_target": ""
}
|
import json
import errno
import os.path
from flask_restful import abort
from .util import is_valid_mac_address
class MachineNotFound(Exception):
pass
class MissingMachineField(Exception):
pass
class Machine(object):
def __init__(self, **kwargs):
self.mac = kwargs.pop('mac')
self.name = kwargs.pop('name', None)
self.profile = kwargs.pop('profile', None)
self.nics = kwargs.pop('nics', [])
self.default_gw = kwargs.pop('default_gw', None)
self.default_gw_idx = kwargs.pop('default_gw_idx', 0)
self.dns = kwargs.pop('dns', None)
self.ntp = kwargs.pop('ntp', None)
self.sshkeys = kwargs.pop('sshkeys', [])
self.coreos_channel = kwargs.pop('coreos_channel', None)
self.coreos_version = kwargs.pop('coreos_version', None)
self.coreos_etcd_enabled = kwargs.pop('coreos_etcd_enabled', None)
self.coreos_etcd_token = kwargs.pop('coreos_etcd_token', None)
self.coreos_etcd_role = kwargs.pop('coreos_etcd_role', None)
self.state = kwargs.pop('state', None)
self.repo_url = kwargs.pop('repo_url', None)
self.meta = kwargs.pop('meta', {})
def load(self):
try:
with open("machines/{}.json".format(self.mac), 'r') as infile:
data = json.load(infile)
self.name = data.get('name')
self.profile = data.get('profile')
self.nics = data.get('nics', [])
self.default_gw = data.get('default_gw', None)
self.default_gw_idx = data.get('default_gw_idx', None)
self.dns = data.get('dns', None)
self.ntp = data.get('ntp', None)
self.sshkeys = data.get('sshkeys', [])
self.coreos_channel = data.get('coreos_channel', None)
self.coreos_version = data.get('coreos_version', None)
self.coreos_etcd_enabled = data.get('coreos_etcd_enabled', None)
self.coreos_etcd_token = data.get('coreos_etcd_token', None)
self.coreos_etcd_role = data.get('coreos_etcd_role', None)
self.state = data.get('state', None)
self.repo_url = data.get('repo_url', None)
self.meta = data.get('meta', {})
except IOError as e:
if e.errno == errno.ENOENT:
raise MachineNotFound
raise
def validate(self):
for f in ['name', 'profile']:
if getattr(self, f) is None:
raise MissingMachineField("{} must not be null".format(f))
def save(self):
self.validate()
try:
with open("machines/{}.json".format(self.mac), 'w') as outfile:
data = {
'name': self.name,
'profile': self.profile,
'nics': self.nics,
'default_gw': self.default_gw,
'default_gw_idx': self.default_gw_idx,
'dns': self.dns,
'ntp': self.ntp,
'sshkeys': self.sshkeys,
'coreos_channel': self.coreos_channel,
'coreos_version': self.coreos_version,
'coreos_etcd_enabled': self.coreos_etcd_enabled,
'coreos_etcd_token': self.coreos_etcd_token,
'coreos_etcd_role': self.coreos_etcd_role,
'state': self.state,
'repo_url': self.repo_url,
'meta': self.meta
}
                # Don't set the default gw index if no default gw is set
if self.default_gw is None:
data['default_gw_idx'] = None
json.dump(data, outfile)
except IOError as e:
if e.errno == errno.ENOENT:
raise MachineNotFound
raise
def destroy(self):
os.remove("machines/{}.json".format(self.mac))
def exists(self):
return os.path.isfile("machines/{}.json".format(self.mac))
def get_machine(mac):
m = Machine(mac=mac)
try:
m.load()
except MachineNotFound:
abort(404)
return m
class Machines(object):
@staticmethod
def list():
result = []
for f in os.listdir("machines"):
fname = os.path.splitext(f)[0]
if is_valid_mac_address(fname):
result.append({'mac': fname})
return result
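# Illustrative sketch (hypothetical MAC, name and profile values), not part of
# the original module: Machine persists one JSON file per MAC address under
# machines/, so a create/save/reload round trip looks roughly like this.
def _example_machine_round_trip():
    m = Machine(mac='00:11:22:33:44:55', name='node1', profile='coreos-stable')
    m.save()  # writes machines/00:11:22:33:44:55.json
    reloaded = get_machine('00:11:22:33:44:55')  # reads the same file back
    return reloaded.name == 'node1' and reloaded.profile == 'coreos-stable'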
|
{
"content_hash": "e8f93abde6e40013fe29bcf1373d348b",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 36.47154471544715,
"alnum_prop": 0.52652697280428,
"repo_name": "neticdk/bodil",
"id": "59802c79fcb295d604026db3cc0fdf835c4676e9",
"size": "4486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bodil/machine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19179"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
"""
A Kik bot that just logs every event that it gets (new message, message read, etc.),
and echoes back whatever chat messages it receives.
"""
import logging
import sys
import kik_unofficial.datatypes.xmpp.chatting as chatting
from kik_unofficial.client import KikClient
from kik_unofficial.callbacks import KikClientCallback
from kik_unofficial.datatypes.xmpp.errors import SignUpError, LoginError
from kik_unofficial.datatypes.xmpp.roster import FetchRosterResponse, PeersInfoResponse
from kik_unofficial.datatypes.xmpp.sign_up import RegisterResponse, UsernameUniquenessResponse
from kik_unofficial.datatypes.xmpp.login import LoginResponse, ConnectionFailedResponse
username = sys.argv[1] if len(sys.argv) > 1 else input("Username: ")
password = sys.argv[2] if len(sys.argv) > 2 else input('Password: ')
def main():
# set up logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logging.Formatter(KikClient.log_format()))
logger.addHandler(stream_handler)
# create the bot
bot = EchoBot()
class EchoBot(KikClientCallback):
def __init__(self):
self.client = KikClient(self, username, password)
def on_authenticated(self):
print("Now I'm Authenticated, let's request roster")
self.client.request_roster()
def on_login_ended(self, response: LoginResponse):
print("Full name: {} {}".format(response.first_name, response.last_name))
def on_chat_message_received(self, chat_message: chatting.IncomingChatMessage):
print("[+] '{}' says: {}".format(chat_message.from_jid, chat_message.body))
print("[+] Replaying.")
self.client.send_chat_message(chat_message.from_jid, "You said \"" + chat_message.body + "\"!")
def on_message_delivered(self, response: chatting.IncomingMessageDeliveredEvent):
print("[+] Chat message with ID {} is delivered.".format(response.message_id))
def on_message_read(self, response: chatting.IncomingMessageReadEvent):
print("[+] Human has read the message with ID {}.".format(response.message_id))
def on_group_message_received(self, chat_message: chatting.IncomingGroupChatMessage):
print("[+] '{}' from group ID {} says: {}".format(chat_message.from_jid, chat_message.group_jid,
chat_message.body))
def on_is_typing_event_received(self, response: chatting.IncomingIsTypingEvent):
print("[+] {} is now {}typing.".format(response.from_jid, "not " if not response.is_typing else ""))
def on_group_is_typing_event_received(self, response: chatting.IncomingGroupIsTypingEvent):
print("[+] {} is now {}typing in group {}".format(response.from_jid, "not " if not response.is_typing else "",
response.group_jid))
def on_roster_received(self, response: FetchRosterResponse):
print("[+] Chat partners:\n" + '\n'.join([str(member) for member in response.peers]))
def on_friend_attribution(self, response: chatting.IncomingFriendAttribution):
print("[+] Friend attribution request from " + response.referrer_jid)
def on_image_received(self, image_message: chatting.IncomingImageMessage):
print("[+] Image message was received from {}".format(image_message.from_jid))
def on_peer_info_received(self, response: PeersInfoResponse):
print("[+] Peer info: " + str(response.users))
def on_group_status_received(self, response: chatting.IncomingGroupStatus):
print("[+] Status message in {}: {}".format(response.group_jid, response.status))
def on_group_receipts_received(self, response: chatting.IncomingGroupReceiptsEvent):
print("[+] Received receipts in group {}: {}".format(response.group_jid, ",".join(response.receipt_ids)))
def on_status_message_received(self, response: chatting.IncomingStatusResponse):
print("[+] Status message from {}: {}".format(response.from_jid, response.status))
def on_username_uniqueness_received(self, response: UsernameUniquenessResponse):
print("Is {} a unique username? {}".format(response.username, response.unique))
def on_sign_up_ended(self, response: RegisterResponse):
print("[+] Registered as " + response.kik_node)
# Error handling
def on_connection_failed(self, response: ConnectionFailedResponse):
print("[-] Connection failed: " + response.message)
def on_login_error(self, login_error: LoginError):
if login_error.is_captcha():
login_error.solve_captcha_wizard(self.client)
def on_register_error(self, response: SignUpError):
print("[-] Register error: {}".format(response.message))
if __name__ == '__main__':
main()
|
{
"content_hash": "1f6fc37833486429cba623e58dc93f1d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 118,
"avg_line_length": 45.2803738317757,
"alnum_prop": 0.686687306501548,
"repo_name": "tomer8007/kik-bot-api-unofficial",
"id": "9099b8dcc93e2ca240eaab20281c493e3f3d70e9",
"size": "4868",
"binary": false,
"copies": "1",
"ref": "refs/heads/new",
"path": "examples/echo_bot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "191098"
},
{
"name": "Shell",
"bytes": "715"
}
],
"symlink_target": ""
}
|
"""Reproduce a Select entity state."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
import logging
from typing import Any
from homeassistant.components.select.const import ATTR_OPTIONS
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, HomeAssistant, State
from . import ATTR_OPTION, DOMAIN, SERVICE_SELECT_OPTION
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_state(
hass: HomeAssistant,
state: State,
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce a single state."""
if (cur_state := hass.states.get(state.entity_id)) is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in cur_state.attributes.get(ATTR_OPTIONS, []):
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: state.entity_id, ATTR_OPTION: state.state},
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistant,
states: Iterable[State],
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce multiple select states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
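# Illustrative sketch (hypothetical entity id and option), not part of the
# original integration: a scene/snapshot caller hands saved State objects to
# async_reproduce_states, which skips entities already on the requested option
# and warns when the option is not offered by the entity.
async def _example_reproduce_select(hass: HomeAssistant) -> None:
    await async_reproduce_states(hass, [State("select.output_mode", "stereo")])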
|
{
"content_hash": "43c05fd15d0df10245f3644f3cda8087",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 81,
"avg_line_length": 27.734375,
"alnum_prop": 0.643943661971831,
"repo_name": "aronsky/home-assistant",
"id": "d41fd5dae46674abe053dfde96b4e4cada6de42d",
"size": "1775",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/select/reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
__author__ = 'treww'
import unittest
import json
import http
class CountersTests(unittest.TestCase):
def test_get_root_group(self):
server = http.client.HTTPConnection("localhost", 8888)
server.connect()
server.request("GET", "/api/v1/groups")
response = server.getresponse()
self.assertEqual(response.code, http.client.OK)
self.assertTrue(response.getheader('Content-Type'), 'application/json')
content = response.read()
        groups = json.loads(content.decode("utf-8"))
        self.assertTrue(isinstance(groups, list))
        self.assertEqual(len(groups), 1)
        self.assertEqual(groups[0]['name'], 'root')
        self.assertEqual(groups[0]['id'], groups[0]['parent_id'])
def test_add_groups(self):
server = http.client.HTTPConnection("localhost", 8888)
server.connect()
        # get the root group
server.request("GET", "/api/v1/groups")
response = server.getresponse()
self.assertEqual(response.code, 200)
content = response.read()
root_group = json.loads(content.decode("utf-8"))[0]
        # create a new group as a child of root
new_group = { "name" : "new_group1" },
server.request('POST', "/api/v1/groups/{}".format(root_group["id"]), json.dumps(new_group))
        response = server.getresponse()
self.assertEqual(response.code, http.client.CREATED)
# server must return location of new group
location = response.getheader("Location")
self.assertNotEqual(len(location), 0)
response.read()
# try to request created group
server.request("GET", location)
response = server.getresponse()
self.assertEqual(response.code, 200)
content = response.read()
new_group = json.loads(content.decode("utf-8"))
self.assertTrue(isinstance(new_group, list))
self.assertEqual(location, '/api/v1/groups/{}'.format(new_group[0]['id']))
self.assertEqual(new_group[0]['name'], 'new_group1')
new_group_id = new_group[0]['id']
        # try to find the created group among the children of root
server.request("GET", '/api/v1/groups/')
response = server.getresponse()
self.assertEqual(response.code, 200)
content = response.read()
groups = json.loads(content.decode("utf-8"))
ids = [group['id'] for group in groups]
self.assertEqual(ids.count(new_group_id), 1)
# delete created group
server.request("DELETE", "/api/v1/groups/{}".format(new_group_id))
response = server.getresponse()
self.assertEqual(response.code, 200)
response.read()
        # try to request the deleted group
server.request("GET", location)
response = server.getresponse()
self.assertEqual(response.code, 404)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "e7c9c2a7fc3dc9d5a17d4287b7bb6ad5",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 99,
"avg_line_length": 37.34615384615385,
"alnum_prop": 0.6165465156196361,
"repo_name": "treww/counters",
"id": "4c0ed892ac3dd36a2481242f9475e288cddf9886",
"size": "2913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9947"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
import re
import time
# 3rd party
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
STATS_URL = "/;csv;norefresh"
EVENT_TYPE = SOURCE_TYPE_NAME = 'haproxy'
class Services(object):
BACKEND = 'BACKEND'
FRONTEND = 'FRONTEND'
ALL = (BACKEND, FRONTEND)
ALL_STATUSES = (
'up', 'open', 'down', 'maint', 'nolb'
)
STATUSES_TO_SERVICE_CHECK = {
'UP': AgentCheck.OK,
'DOWN': AgentCheck.CRITICAL,
'no check': AgentCheck.UNKNOWN,
'MAINT': AgentCheck.OK,
}
class HAProxy(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.host_status = defaultdict(lambda: defaultdict(lambda: None))
METRICS = {
"qcur": ("gauge", "queue.current"),
"scur": ("gauge", "session.current"),
"slim": ("gauge", "session.limit"),
"spct": ("gauge", "session.pct"), # Calculated as: (scur/slim)*100
"stot": ("rate", "session.rate"),
"bin": ("rate", "bytes.in_rate"),
"bout": ("rate", "bytes.out_rate"),
"dreq": ("rate", "denied.req_rate"),
"dresp": ("rate", "denied.resp_rate"),
"ereq": ("rate", "errors.req_rate"),
"econ": ("rate", "errors.con_rate"),
"eresp": ("rate", "errors.resp_rate"),
"wretr": ("rate", "warnings.retr_rate"),
"wredis": ("rate", "warnings.redis_rate"),
"req_rate": ("gauge", "requests.rate"), # HA Proxy 1.4 and higher
"hrsp_1xx": ("rate", "response.1xx"), # HA Proxy 1.4 and higher
"hrsp_2xx": ("rate", "response.2xx"), # HA Proxy 1.4 and higher
"hrsp_3xx": ("rate", "response.3xx"), # HA Proxy 1.4 and higher
"hrsp_4xx": ("rate", "response.4xx"), # HA Proxy 1.4 and higher
"hrsp_5xx": ("rate", "response.5xx"), # HA Proxy 1.4 and higher
"hrsp_other": ("rate", "response.other"), # HA Proxy 1.4 and higher
"qtime": ("gauge", "queue.time"), # HA Proxy 1.5 and higher
"ctime": ("gauge", "connect.time"), # HA Proxy 1.5 and higher
"rtime": ("gauge", "response.time"), # HA Proxy 1.5 and higher
"ttime": ("gauge", "session.time"), # HA Proxy 1.5 and higher
}
SERVICE_CHECK_NAME = 'haproxy.backend_up'
def check(self, instance):
url = instance.get('url')
username = instance.get('username')
password = instance.get('password')
collect_aggregates_only = _is_affirmative(
instance.get('collect_aggregates_only', True)
)
collect_status_metrics = _is_affirmative(
instance.get('collect_status_metrics', False)
)
collect_status_metrics_by_host = _is_affirmative(
instance.get('collect_status_metrics_by_host', False)
)
count_status_by_service = _is_affirmative(
instance.get('count_status_by_service', True)
)
tag_service_check_by_host = _is_affirmative(
instance.get('tag_service_check_by_host', False)
)
services_incl_filter = instance.get('services_include', [])
services_excl_filter = instance.get('services_exclude', [])
verify = not _is_affirmative(instance.get('disable_ssl_validation', False))
self.log.debug('Processing HAProxy data for %s' % url)
data = self._fetch_data(url, username, password, verify)
process_events = instance.get('status_check', self.init_config.get('status_check', False))
self._process_data(
data, collect_aggregates_only, process_events,
url=url, collect_status_metrics=collect_status_metrics,
collect_status_metrics_by_host=collect_status_metrics_by_host,
tag_service_check_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service,
)
def _fetch_data(self, url, username, password, verify):
        ''' Hit the given stats URL and return the response split into CSV lines '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
self.log.debug("HAProxy Fetching haproxy search data from: %s" % url)
r = requests.get(url, auth=auth, headers=headers(self.agentConfig), verify=verify)
r.raise_for_status()
return r.content.splitlines()
def _process_data(self, data, collect_aggregates_only, process_events, url=None,
collect_status_metrics=False, collect_status_metrics_by_host=False,
tag_service_check_by_host=False, services_incl_filter=None,
services_excl_filter=None, count_status_by_service=True):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''
# Split the first line into an index of fields
# The line looks like:
# "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,"
fields = [f.strip() for f in data[0][2:].split(',') if f]
self.hosts_statuses = defaultdict(int)
back_or_front = None
# Skip the first line, go backwards to set back_or_front
for line in data[:0:-1]:
if not line.strip():
continue
# Store each line's values in a dictionary
data_dict = self._line_to_dict(fields, line)
if self._is_aggregate(data_dict):
back_or_front = data_dict['svname']
self._update_data_dict(data_dict, back_or_front)
self._update_hosts_statuses_if_needed(
collect_status_metrics, collect_status_metrics_by_host,
data_dict, self.hosts_statuses
)
if self._should_process(data_dict, collect_aggregates_only):
# update status
# Send the list of data to the metric and event callbacks
self._process_metrics(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if process_events:
self._process_event(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
self._process_service_check(
data_dict, url,
tag_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if collect_status_metrics:
self._process_status_metric(
self.hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service
)
self._process_backend_hosts_metric(
self.hosts_statuses,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
return data
def _line_to_dict(self, fields, line):
data_dict = {}
for i, val in enumerate(line.split(',')[:]):
if val:
try:
                    # Try converting to a float; on failure, leave it as a string
val = float(val)
except Exception:
pass
data_dict[fields[i]] = val
return data_dict
def _update_data_dict(self, data_dict, back_or_front):
"""
Adds spct if relevant, adds service
"""
data_dict['back_or_front'] = back_or_front
# The percentage of used sessions based on 'scur' and 'slim'
if 'slim' in data_dict and 'scur' in data_dict:
try:
data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100
except (TypeError, ZeroDivisionError):
pass
def _is_aggregate(self, data_dict):
return data_dict['svname'] in Services.ALL
def _update_hosts_statuses_if_needed(self, collect_status_metrics,
collect_status_metrics_by_host,
data_dict, hosts_statuses):
if data_dict['svname'] == Services.BACKEND:
return
if collect_status_metrics and 'status' in data_dict and 'pxname' in data_dict:
if collect_status_metrics_by_host and 'svname' in data_dict:
key = (data_dict['pxname'], data_dict['svname'], data_dict['status'])
else:
key = (data_dict['pxname'], data_dict['status'])
hosts_statuses[key] += 1
def _should_process(self, data_dict, collect_aggregates_only):
"""
if collect_aggregates_only, we process only the aggregates
else we process all except Services.BACKEND
"""
if collect_aggregates_only:
if self._is_aggregate(data_dict):
return True
return False
elif data_dict['svname'] == Services.BACKEND:
return False
return True
def _is_service_excl_filtered(self, service_name, services_incl_filter,
services_excl_filter):
if self._tag_match_patterns(service_name, services_excl_filter):
if self._tag_match_patterns(service_name, services_incl_filter):
return False
return True
return False
def _tag_match_patterns(self, tag, filters):
if not filters:
return False
for rule in filters:
if re.search(rule, tag):
return True
return False
def _process_backend_hosts_metric(self, hosts_statuses, services_incl_filter=None,
services_excl_filter=None):
agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})
for host_status, count in hosts_statuses.iteritems():
try:
service, hostname, status = host_status
except Exception:
service, status = host_status
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
status = status.lower()
if 'up' in status:
agg_statuses[service]['available'] += count
elif 'down' in status or 'maint' in status or 'nolb' in status:
agg_statuses[service]['unavailable'] += count
else:
# create the entries for this service anyway
agg_statuses[service]
for service in agg_statuses:
tags = ['service:%s' % service]
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service]['available'],
tags=tags + ['available:true'])
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service]['unavailable'],
tags=tags + ['available:false'])
return agg_statuses
def _process_status_metric(self, hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=None, services_excl_filter=None,
count_status_by_service=True):
agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})
# use a counter unless we have a unique tag set to gauge
counter = defaultdict(int)
if count_status_by_service and collect_status_metrics_by_host:
# `service` and `backend` tags will exist
counter = None
for host_status, count in hosts_statuses.iteritems():
try:
service, hostname, status = host_status
except Exception:
service, status = host_status
status = status.lower()
tags = []
if count_status_by_service:
tags.append('service:%s' % service)
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
if collect_status_metrics_by_host:
tags.append('backend:%s' % hostname)
self._gauge_all_statuses(
"haproxy.count_per_status",
count, status, tags, counter
)
if 'up' in status or 'open' in status:
agg_statuses[service]['available'] += count
if 'down' in status or 'maint' in status or 'nolb' in status:
agg_statuses[service]['unavailable'] += count
if counter is not None:
# send aggregated counts as gauges
for key, count in counter.iteritems():
metric_name, tags = key[0], key[1]
self.gauge(metric_name, count, tags=tags)
for service in agg_statuses:
for status, count in agg_statuses[service].iteritems():
tags = ['status:%s' % status]
if count_status_by_service:
tags.append('service:%s' % service)
self.gauge("haproxy.count_per_status", count, tags=tags)
def _gauge_all_statuses(self, metric_name, count, status, tags, counter):
if counter is not None:
counter_key = tuple([metric_name, tuple(tags + ['status:%s' % status])])
counter[counter_key] += count
else:
# assume we have enough context, just send a gauge
self.gauge(metric_name, count, tags + ['status:%s' % status])
for state in Services.ALL_STATUSES:
if state != status:
self.gauge(metric_name, 0, tags + ['status:%s' % state.replace(" ", "_")])
def _process_metrics(self, data, url, services_incl_filter=None,
services_excl_filter=None):
"""
Data is a dictionary related to one host
(one line) extracted from the csv.
It should look like:
{'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...}
"""
hostname = data['svname']
service_name = data['pxname']
back_or_front = data['back_or_front']
tags = ["type:%s" % back_or_front, "instance_url:%s" % url]
tags.append("service:%s" % service_name)
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
for key, value in data.items():
if HAProxy.METRICS.get(key):
suffix = HAProxy.METRICS[key][1]
name = "haproxy.%s.%s" % (back_or_front.lower(), suffix)
if HAProxy.METRICS[key][0] == 'rate':
self.rate(name, value, tags=tags)
else:
self.gauge(name, value, tags=tags)
def _process_event(self, data, url, services_incl_filter=None,
services_excl_filter=None):
'''
Main event processing loop. An event will be created for a service
status change.
Service checks on the server side can be used to provide the same functionality
'''
hostname = data['svname']
service_name = data['pxname']
key = "%s:%s" % (hostname, service_name)
status = self.host_status[url][key]
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status is None:
self.host_status[url][key] = data['status']
return
if status != data['status'] and data['status'] in ('UP', 'DOWN'):
# If the status of a host has changed, we trigger an event
try:
lastchg = int(data['lastchg'])
except Exception:
lastchg = 0
# Create the event object
ev = self._create_event(
data['status'], hostname, lastchg, service_name,
data['back_or_front']
)
self.event(ev)
# Store this host status so we can check against it later
self.host_status[url][key] = data['status']
def _create_event(self, status, hostname, lastchg, service_name, back_or_front):
HAProxy_agent = self.hostname.decode('utf-8')
if status == "DOWN":
alert_type = "error"
title = "%s reported %s:%s %s" % (HAProxy_agent, service_name, hostname, status)
else:
if status == "UP":
alert_type = "success"
else:
alert_type = "info"
title = "%s reported %s:%s back and %s" % (HAProxy_agent, service_name, hostname, status)
tags = ["service:%s" % service_name]
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
return {
'timestamp': int(time.time() - lastchg),
'event_type': EVENT_TYPE,
'host': HAProxy_agent,
'msg_title': title,
'alert_type': alert_type,
"source_type_name": SOURCE_TYPE_NAME,
"event_object": hostname,
"tags": tags
}
def _process_service_check(self, data, url, tag_by_host=False,
services_incl_filter=None, services_excl_filter=None):
''' Report a service check, tagged by the service and the backend.
Statuses are defined in `STATUSES_TO_SERVICE_CHECK` mapping.
'''
service_name = data['pxname']
status = data['status']
haproxy_hostname = self.hostname.decode('utf-8')
check_hostname = haproxy_hostname if tag_by_host else ''
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status in Services.STATUSES_TO_SERVICE_CHECK:
service_check_tags = ["service:%s" % service_name]
hostname = data['svname']
if data['back_or_front'] == Services.BACKEND:
service_check_tags.append('backend:%s' % hostname)
status = Services.STATUSES_TO_SERVICE_CHECK[status]
message = "%s reported %s:%s %s" % (haproxy_hostname, service_name,
hostname, status)
self.service_check(self.SERVICE_CHECK_NAME, status, message=message,
hostname=check_hostname, tags=service_check_tags)
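# Illustrative sketch (hypothetical field values), not part of the original
# check: the stats endpoint returns CSV whose header names become dict keys in
# _line_to_dict. For example, fields ['pxname', 'svname', 'scur', 'status']
# and the line 'dogweb,BACKEND,12,UP' parse to
# {'pxname': 'dogweb', 'svname': 'BACKEND', 'scur': 12.0, 'status': 'UP'}:
# numeric columns are coerced to float, other values stay strings, and empty
# columns are skipped entirely.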
|
{
"content_hash": "f475390aaae567eb1d4f48ac0dd47081",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 232,
"avg_line_length": 40.25833333333333,
"alnum_prop": 0.5543883253984683,
"repo_name": "pmav99/praktoras",
"id": "3ef3a4e1ed07aabd6cbe7e543bf204450b2fb1a3",
"size": "19476",
"binary": false,
"copies": "1",
"ref": "refs/heads/conmon-13",
"path": "checks.d/haproxy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "9060"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "2179610"
},
{
"name": "Ruby",
"bytes": "103726"
},
{
"name": "Shell",
"bytes": "58242"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
"""Draw Random Circles. Start and stop with mouse.
"""
from graphics import *
import random, time
def main():
win = GraphWin("Random Circles", 300, 300)
text = Text(Point(win.getWidth()/2, 30), "Click to start; click to end")
text.draw(win)
win.getMouse()
text.undraw()
    while win.checkMouse() is None: #NEW
r = random.randrange(256)
b = random.randrange(256)
g = random.randrange(256)
color = color_rgb(r, g, b)
radius = random.randrange(3, 40)
x = random.randrange(5, 295)
y = random.randrange(5, 295)
circle = Circle(Point(x,y), radius)
circle.setFill(color)
circle.draw(win)
time.sleep(.05)
win.close()
main()
|
{
"content_hash": "9937ad83a4860993118f736aaa41791a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 76,
"avg_line_length": 24.741935483870968,
"alnum_prop": 0.5619295958279009,
"repo_name": "hwheeler01/comp150",
"id": "eff9090af1389f467f0dd14a20fafa8f4f264997",
"size": "767",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "_site/examples/randomCirclesWhile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11466"
},
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "121532"
},
{
"name": "HTML",
"bytes": "5858311"
},
{
"name": "JavaScript",
"bytes": "524"
},
{
"name": "Jupyter Notebook",
"bytes": "6422478"
},
{
"name": "Python",
"bytes": "365319"
}
],
"symlink_target": ""
}
|
import pandas as pd
import sys
reload(sys)
sys.setdefaultencoding("utf_8")
import lev
from nltk import word_tokenize
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
import utils
import datetime
from collections import Counter
# Load train
train = pd.read_csv('train.csv', encoding='utf-8')
train_qs = pd.Series(train['question1'].tolist() + train['question2'].tolist()).astype(str)
words = (" ".join(train_qs)).lower().split()
count_words = len(words)
print "all words %d" % (count_words)
counts = Counter(words)
weights = {}
for word, count in counts.items():
weights[word] = float(count) / count_words
# define new train data
data_train = []
t_init = datetime.datetime.now()
batch = 10000
size_train = len(train)
t_mean = t_init - t_init
for item in train.iterrows():
tmp_dict = {}
    tmp = item[1].to_dict()
    question1 = str(tmp['question1']).lower()
    question2 = str(tmp['question2']).lower()
    target = tmp['is_duplicate']
    tmp_dict = utils.vectorizer(question1, question2, tmp_dict, weights)
    tmp_dict['target'] = target
    data_train.append(tmp_dict)
if len(data_train) % batch == 0:
print "iteration data %d" % (len(data_train),)
t_mean += (datetime.datetime.now() - t_init)
tmp = {'time': str(datetime.datetime.now() - t_init), 'batch': batch, 'left': size_train - len(data_train)}
iteration = len(data_train) // batch
t_mean_tmp = t_mean / iteration
tmp['tpred'] = (tmp['left'] // batch) * t_mean_tmp
tmp['tpred'] = str(tmp['tpred'])
print "second per {batch} record(s) {time} prediction for {left} left records : {tpred}".format(**tmp)
t_init = datetime.datetime.now()
d = pd.DataFrame(data_train)
# save for future prediction
d.to_csv('train_distance.csv', index=False)
import update
|
{
"content_hash": "d1c0d68d971bc2d36d79859a8c3f43e7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 115,
"avg_line_length": 29.4375,
"alnum_prop": 0.6454352441613588,
"repo_name": "hulkwork/kaggle-quora",
"id": "5b48c3a170c22c2e4f1d8a705bbc5d30c3c78b76",
"size": "1884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21105"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pub_445_long_request_uri():
mnistTrain = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
mnistTest = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
mnistTrain.set_name(col=784, name="label")
mnistTest.set_name(col=784, name="label")
mnistModel = H2OGradientBoostingEstimator(ntrees=2, max_depth=2)
mnistModel.train(x=list(range(784)),y="label",training_frame=mnistTrain,validation_frame=mnistTest)
if __name__ == "__main__":
pyunit_utils.standalone_test(pub_445_long_request_uri)
else:
pub_445_long_request_uri()
|
{
"content_hash": "d27a710b3acbd6c5252028c1b6eaec93",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 103,
"avg_line_length": 34.5,
"alnum_prop": 0.7312252964426877,
"repo_name": "YzPaul3/h2o-3",
"id": "23bbd87e8fbd2b62e110546aeb1b3e03bc2e0312",
"size": "759",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_jira/pyunit_pub_445_big_request_uri.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5474525"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "31873"
},
{
"name": "Python",
"bytes": "2048415"
},
{
"name": "R",
"bytes": "1851561"
},
{
"name": "Rebol",
"bytes": "6863"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "45447"
},
{
"name": "TeX",
"bytes": "499364"
}
],
"symlink_target": ""
}
|
"""Rest alarm notifier with trusted authentication."""
from keystoneclient.v3 import client as keystone_client
from oslo_config import cfg
from six.moves.urllib import parse
from aodh.alarm.notifier import rest
cfg.CONF.import_opt('http_timeout', 'aodh.service')
cfg.CONF.import_group('service_credentials', 'aodh.service')
class TrustRestAlarmNotifier(rest.RestAlarmNotifier):
"""Notifier supporting keystone trust authentication.
This alarm notifier is intended to be used to call an endpoint using
keystone authentication. It uses the aodh service user to
authenticate using the trust ID provided.
The URL must be in the form trust+http://trust-id@host/action.
"""
@staticmethod
def notify(action, alarm_id, alarm_name, severity, previous, current,
reason, reason_data):
trust_id = action.username
auth_url = cfg.CONF.service_credentials.os_auth_url.replace(
"v2.0", "v3")
client = keystone_client.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure,
timeout=cfg.CONF.http_timeout,
trust_id=trust_id)
# Remove the fake user
netloc = action.netloc.split("@")[1]
# Remove the trust prefix
scheme = action.scheme[6:]
action = parse.SplitResult(scheme, netloc, action.path, action.query,
action.fragment)
headers = {'X-Auth-Token': client.auth_token}
rest.RestAlarmNotifier.notify(
action, alarm_id, alarm_name, severity, previous, current, reason,
reason_data, headers)
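# --- Added illustration (not part of the original aodh module) ---
# Minimal sketch of how a "trust+http" action URL from the class docstring is
# decomposed before being handed to the parent RestAlarmNotifier; the URL and
# trust ID below are made-up examples.
if __name__ == '__main__':
    example = parse.urlsplit("trust+http://my-trust-id@example.com:8080/alarm")
    print("trust_id: %s" % example.username)              # my-trust-id
    print("netloc:   %s" % example.netloc.split("@")[1])  # example.com:8080
    print("scheme:   %s" % example.scheme[6:])            # http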
|
{
"content_hash": "b3acd828d40060b25ee14919c0cac363",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 36.63461538461539,
"alnum_prop": 0.6619422572178477,
"repo_name": "chungg/aodh",
"id": "303fa74fd49fe92dd27338ec0756ca1ef9a12f57",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aodh/alarm/notifier/trust.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "697346"
},
{
"name": "Shell",
"bytes": "5196"
}
],
"symlink_target": ""
}
|
import numpy as np
import re
from neon.util.persist import load_obj
class ModelDescription(dict):
"""
Container class for the model serialization dictionary. Provides
    helper methods for searching and manipulating the dictionary.
Arguments:
pdict (dict or str): the configuration dictionary generated
by Model.serialize() or the name of a
pickle file containing that dictionary
"""
def __init__(self, pdict):
if type(pdict) is str:
pdict = load_obj(pdict)
super(ModelDescription, self).__init__(pdict)
@property
def version(self):
'''
        Return the neon version
Returns:
str: version string
'''
return self['neon_version']
def layers(self, field='name', regex=None):
'''
        Return the layer names in the model, with some
        options for filtering the results
Arguments:
            field (str, optional): the configuration field to filter against
(e.g. layer 'name')
regex (str, optional): regular expression to apply to field
                                   to filter the results (e.g. "conv")
Example:
layers(field='name', regex='conv') will return all layers
with the name containing "conv"
'''
if regex is not None:
regex = re.compile(regex)
return self.find_layers(self['model']['config'], field, regex=regex)
@staticmethod
def find_layers(layers, field, regex=None):
matches = []
for l in layers['layers']:
if field in l['config']:
value = l['config'][field]
if regex is None or regex.match(value):
matches.append(value)
if type(l) is dict and 'layers' in l['config']:
matches.extend(ModelDescription.find_layers(l['config'], field, regex=regex))
return matches
def getlayer(self, layer_name):
"""
Find a layer by its name.
Arguments:
name (str): name of the layer
Returns:
dict: Layer config dictionary
"""
return self.find_by_name(self['model']['config'], layer_name)
@staticmethod
def find_by_name(layers, layer_name):
for l in layers['layers']:
if 'name' in l['config'] and l['config']['name'] == layer_name:
return l
if type(l) is dict and 'config' in l and 'layers' in l['config']:
val = ModelDescription.find_by_name(l['config'], layer_name)
if val is not None:
return val
@staticmethod
def match(o1, o2):
"""
Compare two ModelDescription object instances
Arguments:
o1 (ModelDescription, dict): object to compare
o2 (ModelDescription, dict): object to compare
Returns:
bool: true if objects match
"""
type_o1 = type(o1)
if type_o1 is not type(o2):
return False
if type_o1 is dict:
if set(o1.keys()) != set(o2.keys()):
print 'Missing keys'
return False
for key in o1:
                if key == 'name':
                    # layer names are allowed to differ; skip comparing them
                    continue
if not ModelDescription.match(o1[key], o2[key]):
return False
elif any([type_o1 is x for x in [list, tuple]]):
if len(o1) != len(o2):
return False
for val1, val2 in zip(o1, o2):
if not ModelDescription.match(val1, val2):
return False
elif type_o1 is np.ndarray:
match = np.array_equal(o1, o2)
return match
else:
return o1 == o2
return True
def __eq__(self, other):
# check the model params for a match
if 'model' in self and 'model' in other:
return self.match(self['model'], other['model'])
else:
return False
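# --- Added illustration (not part of the original neon module) ---
# Minimal usage sketch for ModelDescription; 'my_model.prm' is a made-up
# placeholder for a pickle file produced by Model.serialize().
if __name__ == '__main__':
    desc = ModelDescription('my_model.prm')
    print desc.version                             # neon version string
    print desc.layers(field='name', regex='conv')  # layer names matching the regex
    print desc.getlayer('conv1')                   # config dict of a named layer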
|
{
"content_hash": "97882ee850374ed88cf22d63bd906949",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 93,
"avg_line_length": 31.717557251908396,
"alnum_prop": 0.524187725631769,
"repo_name": "DougFirErickson/neon",
"id": "ee3455afb669e7845223679d370c008fee87a7ad",
"size": "4896",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neon/util/modeldesc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "67530"
},
{
"name": "CSS",
"bytes": "696700"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Makefile",
"bytes": "10977"
},
{
"name": "Python",
"bytes": "1436442"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v11.services.types import (
keyword_theme_constant_service,
)
from .base import KeywordThemeConstantServiceTransport, DEFAULT_CLIENT_INFO
class KeywordThemeConstantServiceGrpcTransport(
KeywordThemeConstantServiceTransport
):
"""gRPC backend transport for KeywordThemeConstantService.
Service to fetch Smart Campaign keyword themes.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def suggest_keyword_theme_constants(
self,
) -> Callable[
[keyword_theme_constant_service.SuggestKeywordThemeConstantsRequest],
keyword_theme_constant_service.SuggestKeywordThemeConstantsResponse,
]:
r"""Return a callable for the suggest keyword theme
constants method over gRPC.
Returns KeywordThemeConstant suggestions by keyword themes.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.SuggestKeywordThemeConstantsRequest],
~.SuggestKeywordThemeConstantsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "suggest_keyword_theme_constants" not in self._stubs:
self._stubs[
"suggest_keyword_theme_constants"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v11.services.KeywordThemeConstantService/SuggestKeywordThemeConstants",
request_serializer=keyword_theme_constant_service.SuggestKeywordThemeConstantsRequest.serialize,
response_deserializer=keyword_theme_constant_service.SuggestKeywordThemeConstantsResponse.deserialize,
)
return self._stubs["suggest_keyword_theme_constants"]
def close(self):
self.grpc_channel.close()
__all__ = ("KeywordThemeConstantServiceGrpcTransport",)
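# --- Added illustration (not part of the generated transport module) ---
# Sketch of wiring this transport up by hand; real applications normally let
# GoogleAdsClient construct it, and the request field names below are assumed
# from the v11 API rather than defined in this file.
#
#   import google.auth
#
#   credentials, _ = google.auth.default()
#   transport = KeywordThemeConstantServiceGrpcTransport(credentials=credentials)
#   request = keyword_theme_constant_service.SuggestKeywordThemeConstantsRequest(
#       query_text="cycling", country_code="US", language_code="en")
#   response = transport.suggest_keyword_theme_constants(request)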
|
{
"content_hash": "f4d1ff6ae4a0de7f46a247db96e31335",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 118,
"avg_line_length": 44.13636363636363,
"alnum_prop": 0.6099382080329557,
"repo_name": "googleads/google-ads-python",
"id": "dc30f471edaf723b3e2c60ed3edbf74d45e40f68",
"size": "12252",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/services/keyword_theme_constant_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
"""WSGI middleware initialization for the sample-ecommerce application."""
from sample_ecommerce.config.app_cfg import base_config
from sample_ecommerce.config.environment import load_environment
__all__ = ['make_app']
# Use base_config to setup the necessary PasteDeploy application factory.
# make_base_app will wrap the TG2 app with all the middleware it needs.
make_base_app = base_config.setup_tg_wsgi_app(load_environment)
def make_app(global_conf, full_stack=True, **app_conf):
"""
Set sample-ecommerce up with the settings found in the PasteDeploy configuration
file used.
:param global_conf: The global settings for sample-ecommerce (those
defined under the ``[DEFAULT]`` section).
:type global_conf: dict
:param full_stack: Should the whole TG2 stack be set up?
:type full_stack: str or bool
:return: The sample-ecommerce application with all the relevant middleware
loaded.
This is the PasteDeploy factory for the sample-ecommerce application.
``app_conf`` contains all the application-specific settings (those defined
    under ``[app:main]``).
"""
app = make_base_app(global_conf, full_stack=True, **app_conf)
# Wrap your base TurboGears 2 application with custom middleware here
return app
|
{
"content_hash": "2e0185e76b67520071f54811ea76ef53",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 84,
"avg_line_length": 34.6578947368421,
"alnum_prop": 0.7099468488990129,
"repo_name": "gasbasd/tgapp-stroller2",
"id": "bf7a38fb60b0e588235051d758cb7109a4d7efc6",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample-ecommerce/sample_ecommerce/config/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "74545"
}
],
"symlink_target": ""
}
|
import getopt
import sys
from genologics.entities import Process
from genologics.lims import Lims
HOSTNAME = ""
VERSION = ""
BASE_URI = ""
api = None
args = None
def get_workflow_stage(lims, workflow_name, stage_name=None):
workflows = [w for w in lims.get_workflows() if w.name == workflow_name]
if len(workflows) != 1:
return
if not stage_name:
return workflows[0].stages[0]
stages = [s for s in workflows[0].stages if s.name == stage_name]
if len(stages) != 1:
return
return stages[0]
def get_parent_process_id(art):
return art.parent_process.id
def assignWorkflow():
LIMSID = args["limsid"]
usernameargs = args["username"]
passwordargs = args["password"]
stepURI = args["stepURI"]
apiLocation = stepURI.find('/api')
BASE_URI = stepURI[0:apiLocation]
l = Lims(baseuri=BASE_URI, username=usernameargs, password=passwordargs)
p = Process(l, id=LIMSID)
artifacts = p.all_inputs()
for art in artifacts:
sample = art.samples[0]
submitted_art = sample.artifact
if art.samples[0].udf.get("Proceed To SeqLab") and not art.samples[0].udf.get("2D Barcode"): #checks to see if sample is in plate or fluidX tube
stage = get_workflow_stage(l, "PreSeqLab EG 6.0", "Sequencing Plate Preparation EG 2.0")
l.route_artifacts([submitted_art], stage_uri=stage.uri)
elif art.samples[0].udf.get("Proceed To SeqLab") and art.samples[0].udf.get("2D Barcode"): #if is a fluidX tube will need to find the derived artifact created by the FluidX Transfer step
fluidX_artifacts = l.get_artifacts(process_type="FluidX Transfer From Rack Into Plate EG 1.0 ST", sample_name=art.samples[0].name, type='Analyte')
            if len(fluidX_artifacts) > 1:  # it's possible that the FluidX Transfer has occurred more than once, so we must find the most recent occurrence of that step
fluidX_artifacts.sort(key=get_parent_process_id, reverse=True) #sorts the artifacts returned to place the most recent artifact at position 0 in list
fluidX_artifact=fluidX_artifacts[0]
else:
fluidX_artifact=fluidX_artifacts[0]
stage = get_workflow_stage(l, "PreSeqLab EG 6.0", "Sequencing Plate Preparation EG 2.0")
l.route_artifacts([fluidX_artifact], stage_uri=stage.uri)
def main():
global api
global args
args = {}
opts, extraparams = getopt.getopt(sys.argv[1:], "l:s:u:p:")
for o, p in opts:
if o == '-l':
args["limsid"] = p
elif o == '-s':
args["stepURI"] = p
elif o == '-u':
args["username"] = p
elif o == '-p':
args["password"] = p
## at this point, we have the parameters the EPP plugin passed, and we have network plumbing
## so let's get this show on the road!
assignWorkflow()
if __name__ == "__main__":
main()
|
{
"content_hash": "27b034e92d4c0c92cefe83d60bd2aafd",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 194,
"avg_line_length": 34.26744186046512,
"alnum_prop": 0.6331862911435358,
"repo_name": "EdinburghGenomics/clarity_scripts",
"id": "14a6375e341126230fef25256fd1ca1ec2cb0f0e",
"size": "2969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prodscripts/AssignWorkflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2317"
},
{
"name": "Python",
"bytes": "602935"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from bcc import BPF
from time import strftime
import argparse
examples = """examples:
./gethostlatency # time getaddrinfo/gethostbyname[2] calls
./gethostlatency -p 181 # only trace PID 181
"""
parser = argparse.ArgumentParser(
description="Show latency for getaddrinfo/gethostbyname[2] calls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", help="trace this PID only", type=int,
default=-1)
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
# load BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
struct val_t {
u32 pid;
char comm[TASK_COMM_LEN];
char host[80];
u64 ts;
};
struct data_t {
u32 pid;
u64 delta;
char comm[TASK_COMM_LEN];
char host[80];
};
BPF_HASH(start, u32, struct val_t);
BPF_PERF_OUTPUT(events);
int do_entry(struct pt_regs *ctx) {
if (!PT_REGS_PARM1(ctx))
return 0;
struct val_t val = {};
u32 pid = bpf_get_current_pid_tgid();
if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0) {
bpf_probe_read_user(&val.host, sizeof(val.host),
(void *)PT_REGS_PARM1(ctx));
val.pid = bpf_get_current_pid_tgid();
val.ts = bpf_ktime_get_ns();
start.update(&pid, &val);
}
return 0;
}
int do_return(struct pt_regs *ctx) {
struct val_t *valp;
struct data_t data = {};
u64 delta;
u32 pid = bpf_get_current_pid_tgid();
u64 tsp = bpf_ktime_get_ns();
valp = start.lookup(&pid);
if (valp == 0)
return 0; // missed start
bpf_probe_read_kernel(&data.comm, sizeof(data.comm), valp->comm);
bpf_probe_read_kernel(&data.host, sizeof(data.host), (void *)valp->host);
data.pid = valp->pid;
data.delta = tsp - valp->ts;
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&pid);
return 0;
}
"""
if args.ebpf:
print(bpf_text)
exit()
b = BPF(text=bpf_text)
b.attach_uprobe(name="c", sym="getaddrinfo", fn_name="do_entry", pid=args.pid)
b.attach_uprobe(name="c", sym="gethostbyname", fn_name="do_entry",
pid=args.pid)
b.attach_uprobe(name="c", sym="gethostbyname2", fn_name="do_entry",
pid=args.pid)
b.attach_uretprobe(name="c", sym="getaddrinfo", fn_name="do_return",
pid=args.pid)
b.attach_uretprobe(name="c", sym="gethostbyname", fn_name="do_return",
pid=args.pid)
b.attach_uretprobe(name="c", sym="gethostbyname2", fn_name="do_return",
pid=args.pid)
# header
print("%-9s %-6s %-16s %10s %s" % ("TIME", "PID", "COMM", "LATms", "HOST"))
def print_event(cpu, data, size):
event = b["events"].event(data)
print("%-9s %-6d %-16s %10.2f %s" % (strftime("%H:%M:%S"), event.pid,
event.comm.decode('utf-8', 'replace'), (float(event.delta) / 1000000),
event.host.decode('utf-8', 'replace')))
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
{
"content_hash": "d3add2eb8c492ccd7b1eecb708340705",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 78,
"avg_line_length": 28.36283185840708,
"alnum_prop": 0.6087363494539781,
"repo_name": "tuxology/bcc",
"id": "0ba5a1eb2c57016039b7e12523612c662c047c4c",
"size": "3837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/gethostlatency.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "119767"
},
{
"name": "C++",
"bytes": "744166"
},
{
"name": "CMake",
"bytes": "27864"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "230597"
},
{
"name": "Makefile",
"bytes": "1480"
},
{
"name": "Objective-C",
"bytes": "20501"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "325821"
},
{
"name": "Shell",
"bytes": "9047"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.ORACLE)
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp:
value += "%s\n" % dbmsOsFp
value += "back-end DBMS: "
if not conf.extensiveFp:
value += DBMS.ORACLE
return value
actVer = Format.getDbms()
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
banVer = Format.getDbms([banVer])
value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
if not conf.extensiveFp and (Backend.isDbmsWithin(ORACLE_ALIASES) or (conf.dbms or "").lower() in ORACLE_ALIASES):
setDbms(DBMS.ORACLE)
self.getBanner()
return True
infoMsg = "testing %s" % DBMS.ORACLE
logger.info(infoMsg)
# NOTE: SELECT ROWNUM=ROWNUM FROM DUAL does not work connecting
# directly to the Oracle database
if conf.direct:
result = True
else:
result = inject.checkBooleanExpression("ROWNUM=ROWNUM")
if result:
infoMsg = "confirming %s" % DBMS.ORACLE
logger.info(infoMsg)
# NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
# not work connecting directly to the Oracle database
if conf.direct:
result = True
else:
result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")
if not result:
warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
logger.warn(warnMsg)
return False
setDbms(DBMS.ORACLE)
self.getBanner()
if not conf.extensiveFp:
return True
infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
logger.info(infoMsg)
for version in ("11i", "10g", "9i", "8i"):
number = int(re.search("([\d]+)", version).group(1))
output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))
if output:
Backend.setVersion(version)
break
return True
else:
warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
logger.warn(warnMsg)
return False
def forceDbmsEnum(self):
if conf.db:
conf.db = conf.db.upper()
if conf.tbl:
conf.tbl = conf.tbl.upper()
|
{
"content_hash": "9fa82d11e8e0f6f1cd2fe13cc01456d3",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 178,
"avg_line_length": 30.15447154471545,
"alnum_prop": 0.5796710703693718,
"repo_name": "V11/volcano",
"id": "4b56b3122700fe38d3ed89eb9196ef5b2338eb4b",
"size": "3732",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "server/sqlmap/plugins/dbms/oracle/fingerprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "118"
},
{
"name": "JavaScript",
"bytes": "41"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
print("This is deep brother module talking.", __name__)
def someBrotherFunction():
pass
print("The __module__ of function here is", someBrotherFunction.__module__)
|
{
"content_hash": "24598c1c9a86b1a0d2a6d7fda5b6a274",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 21.1,
"alnum_prop": 0.7061611374407583,
"repo_name": "kayhayen/Nuitka",
"id": "3667ab8fff4d6997bc229e92e22aad7dc2341d7e",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/programs/deep/some_package/DeepBrother.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1868"
},
{
"name": "C",
"bytes": "617681"
},
{
"name": "C++",
"bytes": "149777"
},
{
"name": "Python",
"bytes": "6603718"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
"""Errors used by the Google Ads Client Library."""
class GoogleAdsError(Exception):
"""Parent class of all errors raised by this library."""
pass
class GoogleAdsValueError(GoogleAdsError):
"""Error indicating that the user input for a function was invalid."""
pass
class AdWordsReportError(GoogleAdsError):
"""Error indicating that an AdWords report download request failed.
Attributes:
code: The HTTP status code with which the report failed.
error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
(Python 3) describing the failure.
content: The actual HTTP response content. This could be something like a
404 page or an XML error message from the AdWords report service.
"""
def __init__(self, code, error, content, message=None):
"""Initializes an AdWordsReportError.
Args:
code: The HTTP status code number that was returned.
error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
(Python 3) describing the failure.
content: The HTTP response body as a string.
[optional]
message: A user-friendly error message string. If one is not provided, a
default message will be used.
"""
super(AdWordsReportError, self).__init__(
message if message else ('AdWords report download failed with HTTP '
'status code: %s' % code))
self.code = code
self.error = error
self.content = content
class AdWordsReportBadRequestError(AdWordsReportError):
"""Error indicating a bad request was made to the AdWords report service.
Attributes:
type: A string identifying what type of error this is.
trigger: A string containing the value from your request that caused the
problem.
field_path: A string showing where, in the report's fields, the trigger can
be found.
"""
def __init__(self, type_, trigger, field_path, code, error, content):
"""Initializes an AdWordsReportError.
Args:
type_: A string identifying what type of error this is.
trigger: A string containing the value from your request that caused the
problem.
field_path: A string showing where, in the report's fields, the trigger
can be found.
code: The HTTP status code number that was returned.
error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError
(Python 3) describing the failure.
content: The HTTP response body as a string.
"""
super(AdWordsReportBadRequestError, self).__init__(
code, error, content, 'Type: %s\nTrigger: %s\nField Path: %s' %
(type_, trigger, field_path))
self.type = type_
self.trigger = trigger
self.field_path = field_path
class DfpReportError(GoogleAdsError):
"""Error indicating that a DFP report download request failed.
Attributes:
report_job_id: The ID of the report job which failed.
"""
def __init__(self, report_job_id):
"""Initializes a DfpReportError.
Args:
report_job_id: The ID of the report job which failed.
"""
super(DfpReportError, self).__init__(
'DFP report job failed. The ID of the failed report is: %s'
% report_job_id)
self.report_job_id = report_job_id
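# --- Added illustration (not part of the original googleads module) ---
# Sketch of how calling code typically consumes these exceptions; the
# report_downloader object and its DownloadReport method are assumed to come
# from the AdWords client library and are not defined in this module.
#
#   try:
#       report_downloader.DownloadReport(report_definition, output_file)
#   except AdWordsReportBadRequestError as e:
#       print('Bad request: %s (trigger: %s)' % (e.type, e.trigger))
#   except AdWordsReportError as e:
#       print('Report download failed with HTTP status %s' % e.code)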
|
{
"content_hash": "466d82301039949380cfa79143a83e66",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 34.755319148936174,
"alnum_prop": 0.6755433119069483,
"repo_name": "losnikitos/googleads-python-lib",
"id": "618bc64e38bd1007ce124757e5b6fe565512f1f4",
"size": "3865",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "googleads/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Remove ReMo bot from voting notifications."""
remobot = orm['profiles.UserProfile'].objects.get(user__username='remobot')
remobot.receive_email_on_add_voting_comment = False
remobot.save()
def backwards(self, orm):
"""Do nothing when going backwards."""
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
u'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 7, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': u"orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
},
u'profiles.userstatus': {
'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': u"orm['auth.User']"}),
'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['profiles']
symmetrical = True
|
{
"content_hash": "f8efd774762b0119fb352268ca084c2f",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 199,
"avg_line_length": 85.33858267716535,
"alnum_prop": 0.554714892046503,
"repo_name": "chirilo/remo",
"id": "e3ac9548b43fc5c236f5e66aaf967c34c0a009dc",
"size": "10862",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/profiles/migrations/0049_remove_remobot_from_voting_notifications.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606447"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7483058"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
}
|
import pytest
from helpers import for_each_database, for_one_database, get_credentials, open_cursor
from query_fixture import query_fixture
from turbodbc import DatabaseError, Error, InterfaceError, connect
@for_one_database
def test_new_cursor_properties(dsn, configuration):
connection = connect(dsn, **get_credentials(configuration))
cursor = connection.cursor()
# https://www.python.org/dev/peps/pep-0249/#rowcount
assert cursor.rowcount == -1
assert cursor.description is None
assert cursor.arraysize == 1
@for_one_database
def test_closed_cursor_raises_when_used(dsn, configuration):
connection = connect(dsn, **get_credentials(configuration))
cursor = connection.cursor()
cursor.close()
with pytest.raises(InterfaceError):
cursor.execute("SELECT 42")
with pytest.raises(InterfaceError):
cursor.executemany("SELECT 42")
with pytest.raises(InterfaceError):
cursor.executemanycolumns("SELECT 42", [])
with pytest.raises(InterfaceError):
cursor.fetchone()
with pytest.raises(InterfaceError):
cursor.fetchmany()
with pytest.raises(InterfaceError):
cursor.fetchall()
with pytest.raises(InterfaceError):
next(cursor)
@for_one_database
def test_closing_twice_does_not_raise(dsn, configuration):
connection = connect(dsn, **get_credentials(configuration))
cursor = connection.cursor()
cursor.close()
cursor.close()
@for_one_database
def test_open_cursor_without_result_set_raises(dsn, configuration):
connection = connect(dsn, **get_credentials(configuration))
cursor = connection.cursor()
with pytest.raises(InterfaceError):
cursor.fetchone()
@for_one_database
def test_setinputsizes_does_not_raise(dsn, configuration):
"""
It is legal for setinputsizes() to do nothing, so anything except
raising an exception is ok
"""
cursor = connect(dsn, **get_credentials(configuration)).cursor()
cursor.setinputsizes([10, 20])
@for_one_database
def test_setoutputsize_does_not_raise(dsn, configuration):
"""
    It is legal for setoutputsize() to do nothing, so anything except
raising an exception is ok
"""
cursor = connect(dsn, **get_credentials(configuration)).cursor()
cursor.setoutputsize(1000, 42) # with column
cursor.setoutputsize(1000, column=42) # with column
cursor.setoutputsize(1000) # without column
@for_one_database
def test_rowcount_is_reset_after_execute_raises(dsn, configuration):
with open_cursor(configuration) as cursor:
with query_fixture(cursor, configuration, "INSERT INTEGER") as table_name:
cursor.execute(f"INSERT INTO {table_name} VALUES (?)", [42])
assert cursor.rowcount == 1
with pytest.raises(Error):
cursor.execute("this is not even a valid SQL statement")
assert cursor.rowcount == -1
@for_one_database
def test_rowcount_is_reset_after_executemany_raises(dsn, configuration):
with open_cursor(configuration) as cursor:
with query_fixture(cursor, configuration, "INSERT INTEGER") as table_name:
cursor.execute(f"INSERT INTO {table_name} VALUES (?)", [42])
assert cursor.rowcount == 1
with pytest.raises(Error):
cursor.executemany("this is not even a valid SQL statement")
assert cursor.rowcount == -1
@for_one_database
def test_rowcount_is_reset_after_executemanycolumns_raises(dsn, configuration):
with open_cursor(configuration) as cursor:
with query_fixture(cursor, configuration, "INSERT INTEGER") as table_name:
cursor.execute(f"INSERT INTO {table_name} VALUES (?)", [42])
assert cursor.rowcount == 1
with pytest.raises(Error):
cursor.executemanycolumns("this is not even a valid SQL statement", [])
assert cursor.rowcount == -1
@for_one_database
def test_connection_does_not_strongly_reference_cursors(dsn, configuration):
connection = connect(dsn, **get_credentials(configuration))
cursor = connection.cursor()
import sys
assert sys.getrefcount(cursor) == 2
@for_one_database
def test_pep343_with_statement(dsn, configuration):
with connect(dsn, **get_credentials(configuration)) as connection:
with connection.cursor() as cursor:
cursor.execute("SELECT 42")
# cursor should be closed
with pytest.raises(InterfaceError):
cursor.execute("SELECT 42")
@for_each_database
def test_insert_duplicate_uniquecol_raises(dsn, configuration):
with open_cursor(configuration) as cursor:
with query_fixture(
cursor, configuration, "INSERT DUPLICATE UNIQUECOL"
) as table_name:
with pytest.raises(DatabaseError):
cursor.execute(f"INSERT INTO {table_name} VALUES (1)")
# some databases (e.g. exasol) report failure not in the execute statement above, but only
# when closing the odbc handle, i.e. at cursor.close:
cursor.close()
|
{
"content_hash": "6162540e1bd81792c3d5a2e429534b46",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 106,
"avg_line_length": 33.7682119205298,
"alnum_prop": 0.6809178270249069,
"repo_name": "blue-yonder/turbodbc",
"id": "c9e94ba94753da688d203c339f4e05966d68aa18",
"size": "5099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/turbodbc_test/test_cursor_basics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "285"
},
{
"name": "C",
"bytes": "1227"
},
{
"name": "C++",
"bytes": "599284"
},
{
"name": "CMake",
"bytes": "32908"
},
{
"name": "Earthly",
"bytes": "15678"
},
{
"name": "Makefile",
"bytes": "8082"
},
{
"name": "Python",
"bytes": "134301"
},
{
"name": "Shell",
"bytes": "357"
}
],
"symlink_target": ""
}
|
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
def spiderDefaultSave(item):
print("saving item: " + str(item))
for key, value in item.getDict().items():
print("default save for item with field: " + key)
print("and value: " + str(value))
def customSave(item):
print("Custom save for item: " + str(item))
|
{
"content_hash": "f0fef9c3122695a28b0fb587a9382244",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.6395348837209303,
"repo_name": "Zincr0/pyscrap",
"id": "85dc215a64de175fe245e5536947a5c0bc8fd6e1",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscrap/pipeline.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24251"
}
],
"symlink_target": ""
}
|
from salt.modules.inspectlib.fsdb import CsvDBEntity
class IgnoredDir(CsvDBEntity):
"""
Ignored directories
"""
_TABLE = "inspector_ignored"
def __init__(self):
self.path = ""
class AllowedDir(CsvDBEntity):
"""
Allowed directories
"""
_TABLE = "inspector_allowed"
def __init__(self):
self.path = ""
class Package(CsvDBEntity):
"""
Package.
"""
_TABLE = "inspector_pkg"
def __init__(self):
self.id = 0
self.name = ""
class PackageCfgFile(CsvDBEntity):
"""
Config file, belongs to the package
"""
_TABLE = "inspector_pkg_cfg_files"
def __init__(self):
self.id = 0
self.pkgid = 0
self.path = ""
class PayloadFile(CsvDBEntity):
"""
Payload file.
"""
_TABLE = "inspector_payload"
def __init__(self):
self.id = 0
self.path = ""
self.p_type = ""
self.mode = 0
self.uid = 0
self.gid = 0
self.p_size = 0
self.atime = 0.0
self.mtime = 0.0
self.ctime = 0.0
|
{
"content_hash": "8698ce264288800bc25e915fa104a9cc",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 52,
"avg_line_length": 16.205882352941178,
"alnum_prop": 0.5163339382940109,
"repo_name": "saltstack/salt",
"id": "319204bbf3ae621345889d8e3994369fc564d900",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/modules/inspectlib/entities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
"""
@package mi.dataset.driver.auv_eng.auv
@file mi/dataset/driver/auv_eng/auv/auv_eng_auv_telemetered_driver.py
@author Jeff Roy
@brief Driver for the auv_eng_auv instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.auv_eng_auv import AuvEngAuvParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rU') as stream_handle:
# create and instance of the concrete driver class defined below
driver = AuvEngAuvTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class AuvEngAuvTelemeteredDriver(SimpleDatasetDriver):
"""
Derived auv_eng_auv driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser = AuvEngAuvParser(stream_handle,
self._exception_callback,
is_telemetered=True)
return parser
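# --- Added illustration (not part of the original driver module) ---
# Sketch of exercising the driver outside of uFrame; ParticleDataHandler is
# assumed to be the plain-Python stand-in from mi.dataset.dataset_driver, and
# the .DAT path below is a made-up placeholder.
#
#   from mi.dataset.dataset_driver import ParticleDataHandler
#
#   handler = parse(None, '/path/to/auv_eng_record.DAT', ParticleDataHandler())
#   print(handler._samples)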
|
{
"content_hash": "235f9ba66eee6615b2cafcfa8fed07ab",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 28.5,
"alnum_prop": 0.6982456140350877,
"repo_name": "petercable/mi-dataset",
"id": "0e6088dd37390ada78a7d2eb0602c39ff06b0bb0",
"size": "1448",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/auv_eng/auv/auv_eng_auv_telemetered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3604713"
}
],
"symlink_target": ""
}
|
c = get_config()
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
# The port the notebook server will listen on.
c.NotebookApp.port = 8081
# The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = u'/path/to/.ipython/profile_pyspark/nbcert.pem'
# The string should be of the form type:salt:hashed-password.
PWDFILE='/path/to/.ipython/profile_pyspark/nbpasswd.txt'
c.NotebookApp.password = open(PWDFILE).read().strip()
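# --- Added illustration (not part of the original profile config) ---
# One way the two files referenced above are commonly generated; IPython.lib.passwd
# is assumed to be available in the IPython version this profile targets.
#
#   from IPython.lib import passwd
#   with open('nbpasswd.txt', 'w') as f:
#       f.write(passwd())            # writes e.g. "sha1:salt:hashed-password"
#
# and the self-signed certificate, from a shell:
#
#   openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
#       -keyout nbcert.pem -out nbcert.pem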
|
{
"content_hash": "945d00e0bb307380a6ef3e08126cfd05",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 29.470588235294116,
"alnum_prop": 0.7485029940119761,
"repo_name": "lowcloudnine/singularity-spark",
"id": "91d4870732485b175322359d236538911cb09dc6",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipy_server_setup/ipython_notebook_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10152"
},
{
"name": "Python",
"bytes": "40367"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import User
from sentry.api.endpoints.organization_member_index import MemberPermission
class OrganizationUserDetailsEndpoint(OrganizationEndpoint):
permission_classes = (MemberPermission,)
def get(self, request, organization, user_id):
try:
user = User.objects.get(
id=user_id, sentry_orgmember_set__organization_id=organization.id
)
except User.DoesNotExist:
return Response(status=404)
return Response(serialize(user, request.user))
|
{
"content_hash": "9f4e70f2a25bb289c5e254c299d49776",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 33.5,
"alnum_prop": 0.7313432835820896,
"repo_name": "mvaled/sentry",
"id": "642ccf47792e0cddd10b14c036854d0f478a9927",
"size": "737",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_user_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .. import QROOT
from ..decorators import snake_case_methods
from .base import Plottable
__all__ = [
'Line',
'Ellipse',
'Arrow',
]
@snake_case_methods
class Line(Plottable, QROOT.TLine):
_ROOT = QROOT.TLine
def __init__(self, *args, **kwargs):
super(Line, self).__init__(*args)
self._post_init(**kwargs)
@snake_case_methods
class Ellipse(Plottable, QROOT.TEllipse):
_ROOT = QROOT.TEllipse
def __init__(self, *args, **kwargs):
super(Ellipse, self).__init__(*args)
self._post_init(**kwargs)
@snake_case_methods
class Arrow(QROOT.TArrow):
_ROOT = QROOT.TArrow
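# --- Added illustration (not part of the original rootpy module) ---
# Minimal usage sketch; constructor arguments follow the underlying ROOT
# classes (TLine(x1, y1, x2, y2), TEllipse(x, y, r1, r2)) and the styling
# keywords are assumed to be those understood by rootpy's Plottable.
if __name__ == '__main__':
    line = Line(0., 0., 1., 1., linecolor='red', linewidth=2)
    ellipse = Ellipse(0.5, 0.5, 0.25, 0.1, linecolor='blue')
    line.Draw()
    ellipse.Draw()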
|
{
"content_hash": "1a9bb2755f2c828712fdfcba37a83643",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 44,
"avg_line_length": 19.823529411764707,
"alnum_prop": 0.6275964391691394,
"repo_name": "kreczko/rootpy",
"id": "92a033666ef7e575c81ad25b2c7d5d1581ff3436",
"size": "674",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rootpy/plotting/shapes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "109"
},
{
"name": "Makefile",
"bytes": "2778"
},
{
"name": "Python",
"bytes": "861080"
},
{
"name": "Shell",
"bytes": "3089"
}
],
"symlink_target": ""
}
|
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from os.path import join, dirname
execfile(join(dirname(__file__), 'src', 'Selenium2Library', 'version.py'))
DESCRIPTION = """
Selenium2Library is a web testing library for Robot Framework
that leverages the Selenium 2 (WebDriver) libraries.
"""[1:-1]
setup(name = 'robotframework-selenium2library',
version = VERSION,
description = 'Web testing library for Robot Framework',
long_description = DESCRIPTION,
author = 'Ryan Tomac , Ed Manlove , Jeremy Johnson',
author_email = '<ryan@tomacfamily.com> , <devPyPlTw@verizon.net> , <jeremy@softworks.com.my>',
url = 'https://github.com/rtomac/robotframework-selenium2library',
license = 'Apache License 2.0',
keywords = 'robotframework testing testautomation selenium selenium2 webdriver web',
platforms = 'any',
classifiers = [
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Testing"
],
install_requires = [
'decorator >= 3.3.2',
'selenium >= 2.8.1',
'robotframework >= 2.6.0',
'docutils >= 0.8.1'
],
py_modules=['ez_setup'],
package_dir = {'' : 'src'},
packages = ['Selenium2Library','Selenium2Library.keywords','Selenium2Library.locators',
'Selenium2Library.utils'],
include_package_data = True,
)
|
{
"content_hash": "53ada8dcf2f293072ce5dc5b5f5e09db",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 102,
"avg_line_length": 38.416666666666664,
"alnum_prop": 0.5927331887201736,
"repo_name": "jussimalinen/robotframework-selenium2library",
"id": "bb06cf6c207063651668481631e484263674cf8d",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9719"
},
{
"name": "Python",
"bytes": "255098"
}
],
"symlink_target": ""
}
|
import logging
from optparse import OptionParser
import os
import requests
import json
import fiona
from shapely.geometry import Point, shape
from shapely.prepared import prep
import sys
SUCCESS_COLOR = '#8FD933'
POSSIBLE_ERROR_COLOR = '#FF9300'
ERROR_COLOR = '#FF0000'
def run_tests(bbox, service_url, status_path=None):
with fiona.open('ne_10m_land.geojson', 'r') as source:
n = source.next()
land_polygon = prep(shape(n['geometry']))
features = []
session = requests.Session()
for lon in range(bbox[0], bbox[2]):
for lat in range(bbox[1], bbox[3]):
test_coords = []
for x_offset in range(1,4):
for y_offset in range(1,4):
test_coords.append((lon + x_offset/4.0, lat + y_offset/4.0))
point_on_land = False
for coord in test_coords:
point = Point(coord[0], coord[1])
if land_polygon.contains(point):
point_on_land = True
break
if not point_on_land:
logging.debug("No points on land, %f,%f" % (lon, lat))
continue
hgt_filename = '%s%02i%s%03i.hgt' % ( 'N' if lat > 0 else 'S', abs(lat), \
'W' if lon < 0 else 'E', abs(lon))
elevation = test(test_coords, service_url, session=session)
logging.debug("%i, %i response:%i" % (lon, lat, elevation))
color = SUCCESS_COLOR
if elevation == -9999:
logging.info("fail %i,%i" % (lon, lat))
color = ERROR_COLOR
elif elevation == 0:
logging.info("maybe fail %i,%i" % (lon, lat))
color = POSSIBLE_ERROR_COLOR
status_feature = {
'type': 'Feature',
'properties' : {
'result': elevation,
'hgt': hgt_filename,
'points': ";".join([",".join([str(f) for f in c]) for c in test_coords]),
'fill-opacity': 0.5,
'fill': color,
'stroke': '#000000',
'stroke-width': 1
},
'geometry': {
'type': 'Polygon',
'coordinates' : [[
[lon, lat],
[lon, lat + 1],
[lon + 1, lat + 1],
[lon + 1, lat],
[lon, lat]
]]
}
}
features.append(status_feature)
if status_path is not None and len(features) % 100 == 0:
write_feature_collection(features, path=status_path)
if status_path is not None:
write_feature_collection(features, path=status_path)
def write_feature_collection(features, path):
feature_collection = {
'type': 'FeatureCollection',
'features' : features
}
with open(path, 'wb') as f:
json.dump(feature_collection, f, separators=(',', ': '), indent=4)
def test(coordinates, service_url, session=None):
feature = {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates' : coordinates
}
}
json_feature = json.dumps(feature)
logging.debug("requesting " + json_feature)
if session:
r = session.post(service_url,
data=json_feature,
headers={
'content-type': 'application/json'
})
else:
        r = requests.post(service_url,
data=json_feature,
headers={
'content-type': 'application/json'
})
logging.debug("response " + r.text)
if r.status_code != 200:
logging.error("%i,%i status code:%i" % \
(coordinates[0][0], coordinates[0][1], r.status_code))
return -9999
response_data = r.json()
if not response_data['geometry'] or \
not response_data['geometry']['coordinates'] or \
len(response_data['geometry']['coordinates']) != len(coordinates) or \
len(response_data['geometry']['coordinates'][0]) != 3:
logging.error("Unexpected response format %s" % (r.text))
return -9999
elevations = [x[2] for x in response_data['geometry']['coordinates']]
return max(elevations)
def _main():
usage = "usage: %prog http://example.com/geojson/"
parser = OptionParser(usage=usage,
description="")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
help="Turn on debug logging")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="turn off all logging")
parser.add_option("-b", "--bounds", action="store", dest="bounds",
help="BBOX to test, in W,S,E,N format",
default="-180,-80,180,80")
parser.add_option("-o", "--output", action="store", dest="output",
help="output file", default="status.geojson")
(options, args) = parser.parse_args()
if len(args) != 1:
logging.error("Server url missing")
sys.exit(-1)
logging.basicConfig(level=logging.DEBUG if options.debug else
(logging.ERROR if options.quiet else logging.INFO))
bounds_components = options.bounds.split(",")
if len(bounds_components) != 4:
logging.error("Bounds must have 4 components")
sys.exit(-1)
bounds = [int(f) for f in bounds_components]
for i in [0, 2]:
if bounds[i] < -180 or bounds[i] > 180:
logging.error("bounds component %i out of range -180 to 180" % (i + 1))
sys.exit(-1)
for i in [1, 3]:
if bounds[i] < -90 or bounds[i] > 90:
logging.error("bounds component %i out of range -90 to 90" % (i + 1))
sys.exit(-1)
run_tests(bounds, args[0], status_path=options.output)
if __name__ == "__main__":
_main()
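# Example invocation (host, port and bounds below are hypothetical; any
# endpoint accepting the GeoJSON POST built in test() will do):
#   python test.py -b -10,35,5,60 -o status.geojson http://localhost:8080/geojson/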
|
{
"content_hash": "812ac8c4dc23e5a5b3df2b5b3f058402",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 93,
"avg_line_length": 34.65714285714286,
"alnum_prop": 0.5119538334707338,
"repo_name": "trailbehind/ElevationServiceTester",
"id": "82de83446fe83f273f974ffb0299bbaaf72742f4",
"size": "6088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6088"
}
],
"symlink_target": ""
}
|
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['SEASIDE_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['SEASIDE_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
|
{
"content_hash": "639a4aedfa408f38d073b3b1b9456468",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 32.15094339622642,
"alnum_prop": 0.619131455399061,
"repo_name": "theLastTrain/seaside",
"id": "febc359fa3584910294535352f2826d0a1df0252",
"size": "1720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api_1_0/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10221"
},
{
"name": "HTML",
"bytes": "49874"
},
{
"name": "JavaScript",
"bytes": "20250"
},
{
"name": "Python",
"bytes": "78538"
}
],
"symlink_target": ""
}
|
"""
lets.slavery
~~~~~~~~~~~~
Links 2 greenlets by the slavery.
:copyright: (c) 2013-2018 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
import sys
from gevent import GreenletExit, killall
from gevent.event import Event
import six
__all__ = ['MasterGreenletExit', 'join_slaves', 'link_slave', 'spawn_slave',
'spawn_slave_later', 'link_partner', 'spawn_partner',
'spawn_partner_later']
try:
killall(set([None]))
except AttributeError:
# The expected error.
pass
except TypeError:
# killall() of gevent<=1.1a1 couldn't accept an arbitrary iterable.
# https://github.com/gevent/gevent/issues/404
_killall = killall
def killall(greenlets, *args, **kwargs):
return _killall(list(greenlets), *args, **kwargs)
class MasterGreenletExit(GreenletExit):
"""Slave greenlet should exit when the master greenlet finishes execution.
"""
pass
def join_slaves(greenlets, timeout=None, exception=MasterGreenletExit):
"""Waits for the greenlets to finish just like :func:`gevent.joinall`. But
the greenlets are treated as slave greenlets.
When it gets an exception during waiting, it kills the greenlets. If
timeout is not given, it waits for them to finish again before raising the
exception. So after calling it without timeout, always all the greenlets
are ready.
With timeout, it raises the exception immediately without waiting for the
killed greenlets.
:returns: a list of the ready greenlets.
"""
if not greenlets:
return []
active, done, empty_event = set(), [], Event()
def callback(g):
active.discard(g)
done.append(g)
if not active:
empty_event.set()
try:
for greenlet in greenlets:
active.add(greenlet)
greenlet.link(callback)
try:
empty_event.wait(timeout)
except:
exc_info = sys.exc_info()
killall(active, exception, block=False)
if timeout is None:
empty_event.wait()
six.reraise(*exc_info)
finally:
for greenlet in greenlets:
greenlet.unlink(callback)
return done
def link_slave(greenlet, slave, exception=MasterGreenletExit):
"""Links a greenlet greenlet and a slave greenlet. Slave greenlet will be
killed when the greenlet finishes execution.
"""
def punish(greenlet):
slave.unlink(liberate)
slave.kill(exception, block=False)
def liberate(slave):
greenlet.unlink(punish)
greenlet.link(punish)
slave.link(liberate)
def spawn_slave(greenlet, func, *args, **kwargs):
"""Spawns a slave greenlet. Slave greenlet will be killed when the
greenlet finishes execution.
"""
slave = greenlet.spawn(func, *args, **kwargs)
link_slave(greenlet, slave)
return slave
def spawn_slave_later(greenlet, seconds, func, *args, **kwargs):
"""Spawns a slave greenlet the given seconds later. Slave greenlet will be
killed when the greenlet finishes execution.
"""
slave = greenlet.spawn_later(seconds, func, *args, **kwargs)
link_slave(greenlet, slave)
return slave
def link_partner(greenlet, partner, exception=MasterGreenletExit):
"""The greenlets will be killed when another greenlet finishes execution.
"""
link_slave(greenlet, partner, exception=exception)
link_slave(partner, greenlet, exception=exception)
def spawn_partner(greenlet, func, *args, **kwargs):
"""Spawns a partner greenlet. The greenlet and partner greenlets will die
when another greenlet finishes execution.
"""
partner = greenlet.spawn(func, *args, **kwargs)
link_partner(greenlet, partner)
return partner
def spawn_partner_later(greenlet, seconds, func, *args, **kwargs):
"""Spawns a partner greenlet the given seconds later. The greenlet and
partner greenlets will die when another greenlet finishes execution.
"""
partner = greenlet.spawn_later(seconds, func, *args, **kwargs)
link_partner(greenlet, partner)
return partner
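# A minimal usage sketch (requires gevent; the two job functions are
# hypothetical): link_slave() ensures the slave receives MasterGreenletExit as
# soon as the master finishes, and join_slaves() returns once it is gone.
def _example_master_and_slave():
    import gevent
    def master_job():
        gevent.sleep(0.1)  # the master finishes quickly
    def slave_job():
        gevent.sleep(10)  # the slave would otherwise run much longer
    master = gevent.spawn(master_job)
    slave = gevent.spawn(slave_job)
    link_slave(master, slave)  # kill the slave when the master is done
    master.join()
    return join_slaves([slave])  # the (killed) slave, now ready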
|
{
"content_hash": "ed7417114892e18f328a07b6f8e73d61",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 30.175182481751825,
"alnum_prop": 0.6678761490082245,
"repo_name": "sublee/lets",
"id": "2cf22cfec93c092f81f3402e87b742ecb7581002",
"size": "4158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lets/slavery.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "87941"
}
],
"symlink_target": ""
}
|
from __future__ import division
import dill as pickle
from collections import defaultdict
import datetime
import time
import xxhash
from bokeh.embed import components
from src.common.database import Database
from src.engine import EngineConstants
from src.engine.EngineConstants import EngineUtils
from src.engine.corpusprocessing.CorpusHandler import CorpusHandler
from src.engine.naturallanguageprocessing.LexiconBasedSA import LexiconBasedSentimentAnalyser
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import collections
import numpy as np
from nltk.stem.wordnet import WordNetLemmatizer
from bokeh.layouts import column
from bokeh.plotting import figure
DATABASE = Database()
EU = EngineUtils()
__author__ = 'abilgin'
class ModelVisualiser:
def __init__(self, experiment, keyword, num_neighbours):
self.experiment = experiment
self.keyword = keyword
self.number_of_neighbours = int(num_neighbours)
self.bins = sorted(self.experiment.existing_models.keys())
self.neighbours = set()
self.neighbours_per_bin = {}
self.neighbours_per_bin_with_score = {}
self.keyword_vector_per_bin = {}
timestr = time.strftime("%Y%m%d-%H%M%S")
self.logger = EU.setup_logger(__name__, EngineConstants.EXP_LOG_PATH + self.experiment._id + timestr + '.log')
self.keyword_error = EngineConstants.NO_KEYERR
self._analyseVSModelSimilarity()
def _analyseVSModelSimilarity(self):
count_error = 0
for bin in self.bins:
# initialise the data structures
self.neighbours_per_bin[bin] = set()
self.neighbours_per_bin_with_score[bin] = []
# Load a model from db
pickled_model = DATABASE.getGridFS().get(self.experiment.existing_models[bin]).read()
model = pickle.loads(pickled_model)
# get the word vector of the keyword for bin
keyword_bin_id = self.keyword + "(" + str(bin) + ")"
if keyword_bin_id not in self.keyword_vector_per_bin.keys():
try:
self.keyword_vector_per_bin[keyword_bin_id] = model.wv[self.keyword]
except:
count_error += 1
self.logger.error(str(bin) + ": Vector could not be retrieved for " + self.keyword)
# get the nearest neighours for keyword
if self.number_of_neighbours > 0:
similar_neighbours = self._queryModelForKeyword(model)
if len(similar_neighbours) > 0:
for similar_word in similar_neighbours:
try:
self.neighbours_per_bin[bin].add(similar_word[0])
self.neighbours_per_bin_with_score[bin].append(similar_word)
except:
self.logger.error("Similarity analysis error for " + self.keyword + " in " + bin)
self.neighbours.add(similar_word[0])
if count_error == len(self.bins):
self.keyword_error = EngineConstants.ALL_KEYERR
elif count_error > 0:
self.keyword_error = EngineConstants.PARTIAL_KEYERR
def _queryModelForKeyword(self, model):
word_list= []
lem = WordNetLemmatizer()
try:
word_list_raw = model.wv.most_similar(positive = [self.keyword], negative = [], topn = self.number_of_neighbours)
for word in word_list_raw:
if not set('[~!@#$%^&*()_+{}":;\'`]+$').intersection(str(word[0])):
word_lem = lem.lemmatize(str(word[0]))
if word_lem not in word_list:
word_list.append([word_lem, word[1]])
words = [str(v[0]) for v in word_list]
if not words:
self.logger.info('Keyword ' + self.keyword + ' not found with cosine!')
else:
self.logger.info("Most similar words: " + ",".join(words))
except Exception as e:
self.logger.exception(e)
# TODO: Return the agreement set of the different similarity measures
return word_list
def distanceBasedAspectVisualisation(self, aspect_words):
modern_model = self._retrieveMostRecentModel()
return self._drawSimilarityDistancePlot(modern_model, aspect_words)
def _retrieveMostRecentModel(self):
# the most recent model should be the last one
final_existing_bin = self.bins[-1]
pickled_model = DATABASE.getGridFS().get(self.experiment.existing_models[final_existing_bin]).read()
recent_model = pickle.loads(pickled_model)
return recent_model
def _drawSimilarityDistancePlot(self, modern_model, aspect_words):
s = []
words_not_found = []
# sort the bins
self.neighbours_per_bin = collections.OrderedDict(sorted(self.neighbours_per_bin.items()))
# for each aspect word
for aspect in aspect_words:
mean_scores_of_neighbours_per_bin = {}
similarity_scores_of_keyword_per_bin = {}
for bin in self.bins:
similarity_scores_of_keyword_per_bin[bin], display_key_label = self._calculateBinSimilarityWithAspect(modern_model, bin, aspect)
bin_total = 0
for neighbour in self.neighbours_per_bin[bin]:
# retrieve similarity with sentiment and take average
try:
bin_total += float(modern_model.wv.similarity(neighbour, aspect))
except:
bin_total += 0
if len(self.neighbours_per_bin[bin]) > 0:
mean_scores_of_neighbours_per_bin[bin] = bin_total / float(len(self.neighbours_per_bin[bin]))
else:
mean_scores_of_neighbours_per_bin[bin] = 0
            if np.any(np.array(similarity_scores_of_keyword_per_bin.values())):
similarity_scores_of_keyword_per_bin = collections.OrderedDict(sorted(similarity_scores_of_keyword_per_bin.items()))
key_sims = np.array(similarity_scores_of_keyword_per_bin.values())
mean_scores_of_neighbours_per_bin = collections.OrderedDict(sorted(mean_scores_of_neighbours_per_bin.items()))
means = np.array(mean_scores_of_neighbours_per_bin.values())
fig = figure(x_range=self.bins, width=800, plot_height=300, title="'" + aspect + "'")
fig.xaxis.axis_label = "Time Intervals"
fig.yaxis.axis_label = "Similarity"
fig.yaxis.major_label_orientation = "vertical"
fig.yaxis.bounds = [0,1]
fig.axis.minor_tick_in = -3
fig.axis.axis_line_width = 3
fig.line(self.bins, key_sims.tolist(), legend=self.keyword, line_color="firebrick", line_width=4)
fig.line(self.bins, means.tolist(), legend="Mean of neighbours", line_color="navy", line_width=4, line_dash=[4, 4])
fig.legend.background_fill_alpha = 0.5
s.append(fig)
else:
words_not_found.append(aspect)
# put all the plots in a column
        p = column(s)
script, div = components(p)
return script, div, words_not_found
def _calculateBinSimilarityWithAspect(self, model, bin, aspect):
for key_bin_id in self.keyword_vector_per_bin.keys():
if bin in key_bin_id:
vec_key = self.keyword_vector_per_bin[key_bin_id]
try:
return np.dot(vec_key, model.wv[aspect])/(np.linalg.norm(vec_key)* np.linalg.norm(model.wv[aspect])), key_bin_id
except:
return 0, ""
return 0, ""
def timeTrackingVisualisation(self, aspects, algorithm, tsne_perp, tsne_iter):
modern_model = self._retrieveMostRecentModel()
return self._drawTrackingPlot(modern_model, aspects, algorithm, tsne_perp, tsne_iter)
def _drawTrackingPlot(self, modern_model, aspects, algorithm, tsne_perp, tsne_iter):
# Prepare figure
fig = figure(width=1200, plot_height=600, title="Semantic time travel of '" + self.keyword + "' using " + algorithm)
if self.neighbours:
# find the union of keyword's k nearest neighbours over all time points
surrounding_words = filter(bool, self.neighbours)
if aspects:
surrounding_words.extend(aspects)
embeddings = self._getEmbeddingsFromModelForWords(modern_model, surrounding_words)
for key, value in self.keyword_vector_per_bin.items():
embeddings[key] = value
vectors = embeddings.values()
words = embeddings.keys()
if algorithm == "pca":
pca = PCA(n_components=2, whiten=True)
vectors2d = pca.fit(vectors).transform(vectors)
else:
# perplexity ranges from 20 to 50
tsne = TSNE(perplexity=tsne_perp, n_components=2, init='pca', n_iter=tsne_iter, method='exact')
vectors2d = tsne.fit_transform(vectors)
bin_keyword_vectors_x = []
bin_keyword_vectors_y = []
bin_words = []
default_neighbour_vectors_x = []
default_neighbour_vectors_y = []
default_neighbour_words = []
aspect_vectors_x = []
aspect_vectors_y = []
aspect_words = []
for point, word in zip(vectors2d, words):
# categorise points
if "(" in word:
bin_keyword_vectors_x.append(point[0])
bin_keyword_vectors_y.append(point[1])
bin_words.append(word)
elif word in aspects:
aspect_vectors_x.append(point[0])
aspect_vectors_y.append(point[1])
aspect_words.append(word)
else:
default_neighbour_vectors_x.append(point[0])
default_neighbour_vectors_y.append(point[1])
default_neighbour_words.append(word)
fig.circle(default_neighbour_vectors_x, default_neighbour_vectors_y,
line_color="black", fill_color="blue", fill_alpha=0.5, size=10)
fig.text(default_neighbour_vectors_x, default_neighbour_vectors_y, default_neighbour_words, text_font_size="10pt")
fig.square(aspect_vectors_x, aspect_vectors_y,
line_color="black", fill_color="black", fill_alpha=0.5, size=15)
fig.text(aspect_vectors_x, aspect_vectors_y, aspect_words, text_font_size="15pt")
fig.triangle(bin_keyword_vectors_x, bin_keyword_vectors_y,
line_color="black", fill_color="red", fill_alpha=0.5, size=12)
fig.text(bin_keyword_vectors_x, bin_keyword_vectors_y, bin_words, text_font_size="12pt")
script, div = components(fig)
return script, div
def _getEmbeddingsFromModelForWords(self, model, word_list):
similar_embeddings = {}
for word in word_list:
try:
similar_embeddings[word] = model.wv[word]
except:
self.logger.info(word + " not found in model.")
return similar_embeddings
def sentimentVisualisation(self, lexicon, requested_corpus_list):
if not requested_corpus_list:
requested_corpus_list = self.experiment.corpus_list
# check for existing keyword and corpus
missing_corpora = []
for corpus in requested_corpus_list:
res = DATABASE.find_one(EngineConstants.SELECTED_SENTENCES_COLLECTION, {'exp_id': self.experiment._id, 'keyword': self.keyword,
'num_neighbours': self.number_of_neighbours, 'source':corpus})
if not res:
missing_corpora.append(corpus)
if len(missing_corpora) > 0:
ch = CorpusHandler(self.experiment, self.keyword, self.neighbours_per_bin)
selected_sentences = ch.selectSentencesForSentimentAnalysis(missing_corpora)
self._uploadToDB(selected_sentences)
sentiment_analyser = LexiconBasedSentimentAnalyser(self.experiment._id, self.keyword, self.number_of_neighbours)
sentiment_analyser.fromDBRunAnalysis()
return self._drawSentimentEvolutionPlot(lexicon, requested_corpus_list)
def _uploadToDB(self, selected_sentences):
for corpus in selected_sentences.keys():
for time in selected_sentences[corpus].keys():
for genre in selected_sentences[corpus][time].keys():
text = ". ".join(selected_sentences[corpus][time][genre]).encode('utf-8')
texthash = xxhash.xxh64(text).hexdigest()
DATABASE.update(EngineConstants.SELECTED_SENTENCES_COLLECTION, {'texthash': texthash},
{'date': time, 'source': corpus, 'genre': genre, 'exp_id': self.experiment._id,
'keyword' : self.keyword, 'num_neighbours' : self.number_of_neighbours,
'original_text': text, 'texthash': texthash, 'sentiment': {}, 'sentence_polarities': {}})
def _drawSentimentEvolutionPlot(self, lexicon, corpus_list):
data = self._get_plotdata(lexicon, corpus_list)
if not len(data):
self.keyword_error = EngineConstants.NO_DATA
fig = figure(width=1200, plot_height=600, title="Sentiment analysis of '" + self.keyword + "' using " + lexicon + " within " + ', '.join(corpus_list))
fig.square(data.keys(), data.values(), line_color="black", fill_color="blue", fill_alpha=0.5, size=10)
fig.xaxis.axis_label = "Time"
fig.yaxis.axis_label = "Sentiment Orientation"
fig.axis.minor_tick_in = -3
fig.axis.axis_line_width = 3
script, div = components(fig)
return script, div
def _get_plotdata(self, lexicon, corpus_list):
sentiment_per_timestamp = defaultdict(list)
no_date_counter = 0
use_dates = False
for corpus in corpus_list:
for doc in DATABASE.iter_collection(EngineConstants.SELECTED_SENTENCES_COLLECTION, {'exp_id': self.experiment._id,
'num_neighbours': self.number_of_neighbours,
'keyword': self.keyword, 'source': corpus,
'sentiment.' + lexicon: {'$exists': True}}):
if "date" in doc and doc['date']:
use_dates = True
if isinstance(doc['date'], int):
date = doc['date']
elif not isinstance(doc['date'], datetime.datetime):
date = int(doc['date'][:4])
else:
date = doc['date'].year
elif doc['sentiment'] and not use_dates:
date = no_date_counter
no_date_counter += 1
sentiment_per_timestamp[date].append(doc['sentiment'][lexicon])
for time, sent_group in sentiment_per_timestamp.items():
if len(sent_group) > 1:
# take the average of genres
genre_sum = 0
for sent_genre in sent_group:
genre_sum += sent_genre
average_sent = genre_sum / len(sent_group)
sentiment_per_timestamp[time] = [average_sent]
if use_dates:
for date in range(min(sentiment_per_timestamp.keys()), max(sentiment_per_timestamp.keys())):
if not date in sentiment_per_timestamp:
sentiment_per_timestamp[date] = []
data = collections.OrderedDict(sorted(sentiment_per_timestamp.items(), key=lambda t: t[0]))
return data
def getKeywordErrorStatus(self):
return self.keyword_error
def getSimilarKeywordsPerBin(self):
return self.neighbours_per_bin
|
{
"content_hash": "253e7cd9101d139ef122134a53f02307",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 158,
"avg_line_length": 42.859375,
"alnum_prop": 0.5788066593753798,
"repo_name": "aysenurbilgin/tilt_api",
"id": "d40636c0a8e2b80cc01ce49ef655f701dcd10e3e",
"size": "16458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine/visualisation/ModelVisualiser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "100898"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
import six.moves.urllib.request as request
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import jobset
# must be synchronized with test/core/util/port_server_client.h
_PORT_SERVER_PORT = 32766
def start_port_server():
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
# if not running ==> start a new one
# otherwise, leave it up
try:
version = int(
request.urlopen('http://localhost:%d/version_number' %
_PORT_SERVER_PORT).read())
logging.info('detected port server running version %d', version)
running = True
except Exception as e:
logging.exception('failed to detect port server')
running = False
if running:
current_version = int(
subprocess.check_output([
sys.executable, # use the same python binary as this process
os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'dump_version'
]).decode())
logging.info('my port server is version %d', current_version)
running = (version >= current_version)
if not running:
logging.info('port_server version mismatch: killing the old one')
request.urlopen('http://localhost:%d/quitquitquit' %
_PORT_SERVER_PORT).read()
time.sleep(1)
if not running:
fd, logfile = tempfile.mkstemp()
os.close(fd)
logging.info('starting port_server, with log file %s', logfile)
args = [
sys.executable,
os.path.abspath('tools/run_tests/python_utils/port_server.py'),
'-p',
'%d' % _PORT_SERVER_PORT, '-l', logfile
]
env = dict(os.environ)
env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
if jobset.platform_string() == 'windows':
# Working directory of port server needs to be outside of Jenkins
# workspace to prevent file lock issues.
tempdir = tempfile.mkdtemp()
if sys.version_info.major == 2:
creationflags = 0x00000008 # detached process
else:
creationflags = 0 # DETACHED_PROCESS doesn't seem to work with python3
port_server = subprocess.Popen(args,
env=env,
cwd=tempdir,
creationflags=creationflags,
close_fds=True)
else:
port_server = subprocess.Popen(args,
env=env,
preexec_fn=os.setsid,
close_fds=True)
time.sleep(1)
# ensure port server is up
waits = 0
while True:
if waits > 10:
logging.warning(
'killing port server due to excessive start up waits')
port_server.kill()
if port_server.poll() is not None:
logging.error('port_server failed to start')
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
request.urlopen('http://localhost:%d/get' %
_PORT_SERVER_PORT).read()
logging.info(
'last ditch attempt to contact port server succeeded')
break
except:
logging.exception(
'final attempt to contact port server failed')
port_log = open(logfile, 'r').read()
print(port_log)
sys.exit(1)
try:
port_server_url = 'http://localhost:%d/get' % _PORT_SERVER_PORT
request.urlopen(port_server_url).read()
logging.info('port server is up and ready')
break
except socket.timeout:
logging.exception('while waiting for port_server')
time.sleep(1)
waits += 1
except IOError:
logging.exception('while waiting for port_server')
time.sleep(1)
waits += 1
except:
logging.exception(
'error while contacting port server at "%s".'
'Will try killing it.', port_server_url)
port_server.kill()
raise
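# A minimal sketch of the client-side checks performed above, using only the
# two endpoints already referenced in this file; responses are left unparsed.
def _example_probe_port_server():
    version_raw = request.urlopen('http://localhost:%d/version_number' %
                                  _PORT_SERVER_PORT).read()
    port_raw = request.urlopen('http://localhost:%d/get' %
                               _PORT_SERVER_PORT).read()
    return version_raw, port_raw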
|
{
"content_hash": "3d9909d15ddabaf9f73c4b15296f6aeb",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 87,
"avg_line_length": 39.9,
"alnum_prop": 0.5104427736006684,
"repo_name": "grpc/grpc-ios",
"id": "15eada447733f59e730b223f508bf8acf6e80b3b",
"size": "5366",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "native/tools/run_tests/python_utils/start_port_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "38831"
},
{
"name": "C",
"bytes": "1342403"
},
{
"name": "C#",
"bytes": "111357"
},
{
"name": "C++",
"bytes": "11936431"
},
{
"name": "CMake",
"bytes": "34261"
},
{
"name": "CSS",
"bytes": "1579"
},
{
"name": "Cython",
"bytes": "258768"
},
{
"name": "Dockerfile",
"bytes": "185143"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "22550"
},
{
"name": "JavaScript",
"bytes": "89695"
},
{
"name": "Objective-C",
"bytes": "770017"
},
{
"name": "Objective-C++",
"bytes": "83300"
},
{
"name": "PHP",
"bytes": "517157"
},
{
"name": "PowerShell",
"bytes": "5008"
},
{
"name": "Python",
"bytes": "4064457"
},
{
"name": "Ruby",
"bytes": "715896"
},
{
"name": "Shell",
"bytes": "781923"
},
{
"name": "Starlark",
"bytes": "849400"
},
{
"name": "Swift",
"bytes": "13168"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
}
|
"""
This file defines the main physical constants of the system:
* Speed of sound
* Absorption of materials
* Scattering coefficients
* Air absorption
"""
import io
import json
import os
import numpy as np
# tolerance for computations
eps = 1e-10
# We implement the constants as a dictionary so that they can
# be modified at runtime.
# The class Constants gives an interface to update the value of
# constants or add new ones.
_constants = {}
_constants_default = {
"c": 343.0, # speed of sound at 20 C in dry air
"ffdist": 10.0, # distance to the far field
"fc_hp": 300.0, # cut-off frequency of standard high-pass filter
"frac_delay_length": 81, # Length of the fractional delay filters used for RIR gen
"room_isinside_max_iter": 20, # Max iterations for checking if point is inside room
}
class Constants:
"""
A class to provide easy access package wide to user settable constants.
"""
def set(self, name, val):
        # add constant to dictionary
_constants[name] = val
def get(self, name):
try:
v = _constants[name]
except KeyError:
try:
v = _constants_default[name]
except KeyError:
raise NameError(name + ": no such constant")
return v
# the instantiation of the class
constants = Constants()
# Compute the speed of sound as a function
# of temperature, humidity, and pressure
def calculate_speed_of_sound(t, h, p):
"""
Compute the speed of sound as a function of
temperature, humidity and pressure
Parameters
----------
t: float
temperature [Celsius]
h: float
relative humidity [%]
p: float
atmospheric pressure [kpa]
Returns
-------
Speed of sound in [m/s]
"""
# using crude approximation for now
return 331.4 + 0.6 * t + 0.0124 * h
def _calculate_temperature(c, h):
"""Compute the temperature give a speed of sound ``c`` and humidity ``h``"""
return (c - 331.4 - 0.0124 * h) / 0.6
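# A quick numeric sketch of the approximation above: at 20 C and 0 % relative
# humidity it gives 331.4 + 0.6 * 20 = 343.4 m/s, close to the default ``c``.
def _example_speed_of_sound():
    return calculate_speed_of_sound(t=20.0, h=0.0, p=100.0)  # -> 343.4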
r"""
Air Absorption Coefficients
---------------------------
Air absorbs sound as `exp(-distance * a)` where `distance` is the distance
travelled by sound and `a` is the absorption coefficient.
The values are measured for octave-bands at 125, 250, 500, 1k, 2k, 4k, and 8k.
The values given here are taken from the annex of the book
Michael Vorlaender, Auralization: Fundamentals of Acoustics, Modelling,
Simulation, Algorithms, and Acoustic Virtual Reality, Springer, 1st Edition,
2008.
"""
# Table of air absorption coefficients
air_absorption_table = {
"10C_30-50%": [x * 1e-3 for x in [0.1, 0.2, 0.5, 1.1, 2.7, 9.4, 29.0]],
"10C_50-70%": [x * 1e-3 for x in [0.1, 0.2, 0.5, 0.8, 1.8, 5.9, 21.1]],
"10C_70-90%": [x * 1e-3 for x in [0.1, 0.2, 0.5, 0.7, 1.4, 4.4, 15.8]],
"20C_30-50%": [x * 1e-3 for x in [0.1, 0.3, 0.6, 1.0, 1.9, 5.8, 20.3]],
"20C_50-70%": [x * 1e-3 for x in [0.1, 0.3, 0.6, 1.0, 1.7, 4.1, 13.5]],
"20C_70-90%": [x * 1e-3 for x in [0.1, 0.3, 0.6, 1.1, 1.7, 3.5, 10.6]],
"center_freqs": [125, 250, 500, 1000, 2000, 4000, 8000],
}
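# A short sketch of how the table above is meant to be used: sound travelling
# a given distance is attenuated by exp(-distance * a) in each octave band
# (coefficients here are for 20 C and 30-50 % relative humidity).
def _example_air_attenuation(distance=20.0):
    coeffs = air_absorption_table["20C_30-50%"]
    return [np.exp(-distance * a) for a in coeffs]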
class Physics(object):
"""
A Physics object allows to compute the room physical properties depending
on temperature and humidity.
Parameters
----------
temperature: float, optional
The room temperature
humidity: float in range (0, 100), optional
The room relative humidity in %. Default is 0.
"""
def __init__(self, temperature=None, humidity=None):
self.p = 100.0 # pressure in kilo-Pascal (kPa), not used
if humidity is None:
self.H = 0.0
else:
self.H = humidity
if self.H < 0.0 or self.H > 100:
raise ValueError("Relative humidity is a value between 0 and 100.")
if temperature is None:
self.T = _calculate_temperature(constants.get("c"), self.H)
else:
self.T = temperature
def get_sound_speed(self):
"""
Returns
-------
the speed of sound
"""
return calculate_speed_of_sound(self.T, self.H, self.p)
def get_air_absorption(self):
"""
Returns
-------
``(air_absorption, center_freqs)`` where ``air_absorption`` is a list
corresponding to the center frequencies in ``center_freqs``
"""
key = ""
if self.T < 15:
key += "10C_"
else:
key = "20C_"
if self.H < 50:
key += "30-50%"
elif 50 <= self.H and self.H < 70:
key += "50-70%"
else:
key += "70-90%"
return {
"coeffs": air_absorption_table[key],
"center_freqs": air_absorption_table["center_freqs"],
}
@classmethod
def from_speed(cls, c):
"""Choose a temperature and humidity matching a desired speed of sound"""
H = 0.3
T = _calculate_temperature(c, H)
return cls(temperature=T, humidity=H)
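# A brief usage sketch: derive the sound speed and the per-band air absorption
# for a warm, humid room (the temperature and humidity values are illustrative).
def _example_physics():
    phys = Physics(temperature=24.0, humidity=60.0)
    absorption = phys.get_air_absorption()
    return phys.get_sound_speed(), absorption["coeffs"], absorption["center_freqs"]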
r"""
Material Properties
-------------------
Different materials have different absorbant and scattering coefficients.
We define a class to hold these values. The values are typically measured for
octave-bands at 125, 250, 500, 1k, 2k, 4k, and sometimes 8k.
The values given here are taken from the annex of the book
Michael Vorlaender, Auralization: Fundamentals of Acoustics, Modelling,
Simulation, Algorithms, and Acoustic Virtual Reality, Springer, 1st Edition,
2008.
"""
# the file containing the database of materials
_materials_database_fn = os.path.join(os.path.dirname(__file__), "data/materials.json")
materials_absorption_table = {
"anechoic": {"description": "Anechoic material", "coeffs": [1.0]},
}
materials_scattering_table = {
"no_scattering": {"description": "No scattering", "coeffs": [0.0]},
}
with io.open(_materials_database_fn, "r", encoding="utf8") as f:
materials_data = json.load(f)
center_freqs = materials_data["center_freqs"]
tables = {
"absorption": materials_absorption_table,
"scattering": materials_scattering_table,
}
for key, table in tables.items():
for subtitle, contents in materials_data[key].items():
for keyword, p in contents.items():
table[keyword] = {
"description": p["description"],
"coeffs": p["coeffs"],
"center_freqs": center_freqs[: len(p["coeffs"])],
}
class Material(object):
"""
A class that describes the energy absorption and scattering
properties of walls.
Attributes
----------
energy_absorption: dict
A dictionary containing keys ``description``, ``coeffs``, and
``center_freqs``.
scattering: dict
A dictionary containing keys ``description``, ``coeffs``, and
``center_freqs``.
Parameters
----------
energy_absorption: float, str, or dict
* float: The material created will be equally absorbing at all frequencies
(i.e. flat).
* str: The absorption values will be obtained from the database.
* dict: A dictionary containing keys ``description``, ``coeffs``, and
``center_freqs``.
scattering: float, str, or dict
* float: The material created will be equally scattering at all frequencies
(i.e. flat).
* str: The scattering values will be obtained from the database.
* dict: A dictionary containing keys ``description``, ``coeffs``, and
``center_freqs``.
"""
def __init__(self, energy_absorption, scattering=None):
# Handle the energy absorption input based on its type
if isinstance(energy_absorption, (float, np.float32, np.float64)):
# This material is flat over frequencies
energy_absorption = {"coeffs": [energy_absorption]}
elif isinstance(energy_absorption, str):
# Get the coefficients from the database
energy_absorption = dict(materials_absorption_table[energy_absorption])
elif not isinstance(energy_absorption, dict):
raise TypeError(
"The energy absorption of a material can be defined by a scalar value "
"for a flat absorber, a name refering to a material in the database, "
"or a list with one absoption coefficients per frequency band"
)
if scattering is None:
# By default there is no scattering
scattering = 0.0
if isinstance(scattering, (float, np.float32, np.float64)):
# This material is flat over frequencies
# We match the number of coefficients for the absorption
if len(energy_absorption["coeffs"]) > 1:
scattering = {
"coeffs": [scattering] * len(energy_absorption["coeffs"]),
"center_freqs": energy_absorption["center_freqs"],
}
else:
scattering = {"coeffs": [scattering]}
elif isinstance(scattering, str):
# Get the coefficients from the database
scattering = dict(materials_scattering_table[scattering])
elif not isinstance(scattering, dict):
# In all other cases, the material should be a dictionary
raise TypeError(
"The scattering of a material can be defined by a scalar value "
"for a flat absorber, a name refering to a material in the database, "
"or a list with one absoption coefficients per frequency band"
)
# Now handle the case where energy absorption is flat, but scattering is not
if len(scattering["coeffs"]) > 1 and len(energy_absorption["coeffs"]) == 1:
n_coeffs = len(scattering["coeffs"])
energy_absorption["coeffs"] = energy_absorption["coeffs"] * n_coeffs
energy_absorption["center_freqs"] = list(scattering["center_freqs"])
# checks for `energy_absorption` dict
assert isinstance(energy_absorption, dict), (
"`energy_absorption` must be a "
"dictionary with the keys "
"`coeffs` and `center_freqs`."
)
assert "coeffs" in energy_absorption.keys(), (
"Missing `coeffs` keys in " "`energy_absorption` dict."
)
if len(energy_absorption["coeffs"]) > 1:
assert len(energy_absorption["coeffs"]) == len(
energy_absorption["center_freqs"]
), (
"Length of `energy_absorption['coeffs']` and "
"energy_absorption['center_freqs'] must match."
)
# checks for `scattering` dict
assert isinstance(scattering, dict), (
"`scattering` must be a "
"dictionary with the keys "
"`coeffs` and `center_freqs`."
)
assert "coeffs" in scattering.keys(), (
"Missing `coeffs` keys in " "`scattering` dict."
)
if len(scattering["coeffs"]) > 1:
assert len(scattering["coeffs"]) == len(scattering["center_freqs"]), (
"Length of `scattering['coeffs']` and "
"scattering['center_freqs'] must match."
)
self.energy_absorption = energy_absorption
self.scattering = scattering
def is_freq_flat(self):
"""
Returns ``True`` if the material has flat characteristics over
frequency, ``False`` otherwise.
"""
return (
len(self.energy_absorption["coeffs"]) == 1
and len(self.scattering["coeffs"]) == 1
)
@property
def absorption_coeffs(self):
"""shorthand to the energy absorption coefficients"""
return self.energy_absorption["coeffs"]
@property
def scattering_coeffs(self):
"""shorthand to the scattering coefficients"""
return self.scattering["coeffs"]
def resample(self, octave_bands):
"""resample at given octave bands"""
self.energy_absorption = {
"coeffs": octave_bands(**self.energy_absorption),
"center_freqs": octave_bands.centers,
}
self.scattering = {
"coeffs": octave_bands(**self.scattering),
"center_freqs": octave_bands.centers,
}
@classmethod
def all_flat(cls, materials):
"""
Checks if all materials in a list are frequency flat
Parameters
----------
materials: list or dict of Material objects
The list of materials to check
Returns
-------
``True`` if all materials have a single parameter, else ``False``
"""
if isinstance(materials, dict):
return all([m.is_freq_flat() for m in materials.values()])
else:
return all([m.is_freq_flat() for m in materials])
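# A minimal sketch of the three accepted input forms described above: a flat
# scalar, a keyword from the database tables, and an explicit coefficient dict.
def _example_material_inputs():
    flat = Material(energy_absorption=0.25)
    named = Material(energy_absorption="anechoic", scattering="no_scattering")
    custom = Material(
        energy_absorption={
            "coeffs": [0.1, 0.2, 0.3],
            "center_freqs": [125, 250, 500],
        },
        scattering=0.1,
    )
    return flat, named, custom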
def make_materials(*args, **kwargs):
"""
Helper method to conveniently create multiple materials.
Each positional and keyword argument should be a valid input
for the Material class. Then, for each of the argument, a
Material will be created by calling the constructor.
If at least one positional argument is provided, a list of
Material objects constructed using the provided positional
arguments is returned.
If at least one keyword argument is provided, a dict with keys
corresponding to the keywords and containing Material objects
constructed with the keyword values is returned.
If only positional arguments are provided, only the list is returned.
If only keyword arguments are provided, only the dict is returned.
If both are provided, both are returned.
If no argument is provided, an empty list is returned.
.. code-block:: python
:linenos:
# energy absorption parameters
floor_eabs = {
"description": "Example floor material",
"coeffs": [0.1, 0.2, 0.1, 0.1, 0.1, 0.05],
"center_freqs": [125, 250, 500, 1000, 2000, 4000],
}
# scattering parameters
audience_scat = {
"description": "Theatre Audience",
"coeffs": [0.3, 0.5, 0.6, 0.6, 0.7, 0.7, 0.7]
"center_freqs": [125, 250, 500, 1000, 2000, 4000],
}
# create a list of materials
my_mat_list = pra.make_materials((floor_eabs, audience_scat))
# create a dict of materials
    my_mat_dict = pra.make_materials(floor=(floor_eabs, audience_scat))
"""
ret_args = []
for parameters in args:
if isinstance(parameters, (list, tuple)):
ret_args.append(Material(*parameters))
else:
ret_args.append(Material(parameters))
ret_kwargs = {}
for name, parameters in kwargs.items():
if isinstance(parameters, (list, tuple)):
ret_kwargs[name] = Material(*parameters)
else:
ret_kwargs[name] = Material(parameters)
if len(ret_kwargs) == 0:
return ret_args
elif len(ret_args) == 0:
return ret_kwargs
else:
return ret_args, ret_kwargs
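# A short sketch mirroring the docstring of make_materials(): positional
# arguments come back as a list, keyword arguments as a dict keyed by name
# (the wall names used here are hypothetical).
def _example_make_materials():
    wall_list = make_materials("anechoic", 0.3)
    wall_dict = make_materials(east=0.1, west=("anechoic", "no_scattering"))
    return wall_list, wall_dict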
|
{
"content_hash": "8c036b18a862726800e3e2ac381013b3",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 88,
"avg_line_length": 32.43283582089552,
"alnum_prop": 0.5911511406219183,
"repo_name": "LCAV/pyroomacoustics",
"id": "4a363d8ca80b0d0a3f80ea779cd0ae46b2c1cb02",
"size": "16514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyroomacoustics/parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "96552"
},
{
"name": "Cython",
"bytes": "2700"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Python",
"bytes": "941773"
}
],
"symlink_target": ""
}
|
"""
==========
Grid Data!
==========
If you don't have Monte Carlo chains, and have grid evaluations instead, that's fine too!
Just flatten your grid, set the weights to the grid evaluation, and set the grid flag. Here is
a nice diamond that you get from modifying a simple multivariate normal distribution.
Note that by default, grid data is not smoothed, though you can explicitly set the smooth
parameter in ``configure_general`` if you do want smoothing.
Note that if you use dictionary input with the grid method, you *must* supply the full
flattened array for each parameter. This is because we cannot construct the meshgrid from
a dictionary, as the order of the parameters is not preserved in the dictionary.
"""
import numpy as np
from chainconsumer import ChainConsumer
from scipy.stats import multivariate_normal
x, y = np.linspace(-3, 3, 50), np.linspace(-7, 7, 100)
xx, yy = np.meshgrid(x, y, indexing='ij')
pdf = np.exp(-0.5 * (xx * xx + yy * yy / 4 + np.abs(xx * yy)))
c = ChainConsumer()
c.add_chain([x, y], parameters=["$x$", "$y$"], weights=pdf, grid=True)
fig = c.plotter.plot()
fig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
###############################################################################
# If you have the flattened array already, you can also pass this
# Turning 2D data to flat data.
xs, ys = xx.flatten(), yy.flatten()
coords = np.vstack((xs, ys)).T
pdf_flat = multivariate_normal.pdf(coords, mean=[0.0, 0.0], cov=[[1.0, 0.7], [0.7, 3.5]])
c = ChainConsumer()
c.add_chain([xs, ys], parameters=["$x$", "$y$"], weights=pdf_flat, grid=True)
c.configure(smooth=1) # Notice how smoothing changes the results!
fig = c.plotter.plot()
fig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
|
{
"content_hash": "10df8aad4f6167b918155f73920db3d7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 94,
"avg_line_length": 40,
"alnum_prop": 0.6705555555555556,
"repo_name": "Samreay/ChainConsumer",
"id": "6316d44093d9f5b958597e937feafe42e75f9b21",
"size": "1824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Basics/plot_grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "247101"
},
{
"name": "Shell",
"bytes": "1806"
},
{
"name": "TeX",
"bytes": "1819"
}
],
"symlink_target": ""
}
|
import os
import sys
import tempfile
import time
import unittest
import webbrowser
import test_env
import mock
from buildbot.status import results
from buildbot.status.builder import BuildStatus, BuildStepStatus, BuilderStatus
from buildbot.status.logfile import LogFile
from buildbot.status.master import Status as MasterStatus
from master.build_utils import FakeBuild
from master.try_mail_notifier import TryMailNotifier
class TestMailNotifier(unittest.TestCase):
TEST_MODE = 0
TRAIN_MODE = 1
mode = TEST_MODE
def __init__(self, *args, **kwargs):
super(TestMailNotifier, self).__init__(*args, **kwargs)
self.maxDiff = None
os.environ['TZ'] = 'PST+08'
time.tzset()
@mock.patch('time.time') # Needed to fix time while generating the email
def check_mail(self, bs_cfg, builder_cfg, step_cfgs, ms_cfg, expected,
test_name, _):
'''
    bs_cfg: BuildStatus config dict
    builder_cfg: BuilderStatus config dict
    step_cfgs: [BuildStepStatus config dict]
    ms_cfg: MasterStatus config dict
'''
mn = TryMailNotifier(
fromaddr='from@example.org',
subject="try %(result)s for %(reason)s on %(builder)s @ r%(revision)s",
mode="all")
builder = mock.Mock(BuilderStatus)
builder.configure_mock(**builder_cfg)
bs = mock.Mock(BuildStatus)
steps = []
for step_cfg in step_cfgs:
# pylint: disable=cell-var-from-loop
step = mock.Mock(BuildStepStatus)
step.urls = {}
step_cfg.update({
'addURL.side_effect': lambda name, url: step.urls.update({name: url}),
'getURLs.side_effect': step.urls.copy,
'getBuild.return_value': bs,
})
step.configure_mock(**step_cfg)
steps.append(step)
bs_cfg.update({'getSteps.return_value': steps})
bs_cfg.update({'getBuilder.return_value': builder})
bs.configure_mock(**bs_cfg)
ms = mock.Mock(MasterStatus)
def getBuildStatusURL(obj):
if isinstance(obj, BuilderStatus):
return "%sbuilders/%s" % (
ms.getBuildbotURL(),
obj.getName())
elif isinstance(obj, BuildStatus):
return "%sbuilders/%s/builds/%s" % (
ms.getBuildbotURL(),
obj.getBuilder().getName(),
obj.getNumber())
else:
assert False, "Don't know how to getURLForThing(%s)" % obj
ms_cfg.update({'getURLForThing.side_effect': getBuildStatusURL})
ms.configure_mock(**ms_cfg)
mn.master_status = ms
mail = mn.buildMessage_internal(
bs.getBuilder().getName(), [bs], bs.getResults())
if mail:
# Set the boundary. Otherwise it's randomly generated and breaks the
# test cases.
mail.set_boundary('===============7454617213454723890==')
# Replace tabs with a space for compat with python 2.6, 2.7, since
# the mime header wrap whitespace changed between those versions.
mail_str = str(mail).replace('\t', ' ').rstrip('\n')
if self.mode == self.TEST_MODE:
with open(expected, 'rb') as expected_file:
self.assertEqual(mail_str, expected_file.read().rstrip('\n'))
elif self.mode == self.TRAIN_MODE:
with tempfile.NamedTemporaryFile(suffix='.html') as f:
f.write(mail.get_payload(0).get_payload(decode=True))
f.flush()
webbrowser.open('file://%s' % f.name)
answer = raw_input('Accept as new test data for %s [y/N]? '
% test_name).strip().lower()
if answer == 'y':
with open(expected, 'wb') as expected_file:
expected_file.write(mail_str)
else:
if self.mode == self.TEST_MODE:
self.assertFalse(os.path.exists(expected))
elif self.mode == self.TRAIN_MODE:
if os.path.exists(expected):
os.remove(expected)
def recursive_key_replace(obj, find, replace):
"""Recursively transforms the keys of a json-like object.
In particular, it will leave non-key values alone and will traverse any
number of dictionaries/lists to completely transform obj.
Example:
INPUT:
{ 'test_': [['not_transformed', {'tweak_this': 100}]] }
OUTPUT (find='_', replace='-'):
{ 'test-': [['not_transformed', {'tweak-this': 100}]] }
"""
if isinstance(obj, dict):
ret = {}
for k, v in obj.iteritems():
k = k.replace(find, replace)
if isinstance(v, (list, dict)):
v = recursive_key_replace(v, find, replace)
ret[k] = v
elif isinstance(obj, list):
ret = []
for v in obj:
if isinstance(v, (list, dict)):
v = recursive_key_replace(v, find, replace)
ret.append(v)
else:
assert False, 'obj must be a list or dict'
return ret
def step_helper(name, extras=None, result=results.SUCCESS, exLogNames=None,
started=True, skip_name=False, hidden=False):
logs = []
for log_name in (exLogNames or []) + ['stdio']:
log = mock.Mock(LogFile)
log.getName.return_value = log_name
logs.append(log)
return {
'getName()': name,
'getText()': ([name] if not skip_name else [])+(extras or []),
'getResults()': (result, []),
'isStarted()': started,
'isHidden()': hidden,
'getLogs()': logs}
def test_from_files(infile, expected, name):
env = {'results': results, 'step': step_helper}
def inner(self):
with open(infile) as f:
# pylint: disable=eval-used
data = eval(f.read(), {}, env)
data['build_step']['getProperties()'] = FakeBuild(
data['build_step_props']).getProperties()
data = recursive_key_replace(data, '()', '.return_value')
self.check_mail(
data['build_step'], data['builder'], data['steps'], data['master'],
expected, name
)
inner.__name__ = "test_%s" % name
return inner
def addTests():
base_path = os.path.join(test_env.DATA_PATH, 'trymail_tests')
for fname in os.listdir(base_path):
if fname.endswith('.in'):
path = os.path.join(base_path, fname)
name = os.path.splitext(fname)[0]
expected = os.path.join(base_path, name+'.expected')
setattr(
TestMailNotifier, 'test_%s' % name,
test_from_files(path, expected, name)
)
addTests()
def main(argv):
if '--help' in argv or '-h' in argv:
print 'Pass --train to enter training mode.'
print
elif '--train' in argv:
argv.remove('--train')
TestMailNotifier.mode = TestMailNotifier.TRAIN_MODE
unittest.main()
if __name__ == '__main__':
sys.exit(main(sys.argv))
# vim: set ts=2 sts=2 sw=2:
|
{
"content_hash": "04aec66068bcb96d0c2071cd6f14e001",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 31.019138755980862,
"alnum_prop": 0.6156100570723431,
"repo_name": "eunchong/build",
"id": "e5956dc12f920eaf1cd042a1f4962e0450c4ce0c",
"size": "6668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tryserver_mail_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
}
|
"""Preprocessing stage."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine import functional
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util import nest
# Sequential methods should take precedence.
class PreprocessingStage(sequential.Sequential,
base_preprocessing_layer.PreprocessingLayer):
"""A sequential preprocessing stage.
This preprocessing stage wraps a list of preprocessing layers into a
Sequential-like object that enables you to `adapt()` the whole list via
a single `adapt()` call on the preprocessing stage.
Args:
layers: List of layers. Can include layers that aren't preprocessing layers.
name: String. Optional name for the preprocessing stage object.
"""
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
Args:
data: A batched Dataset object, or a NumPy array, or an EagerTensor.
Data to be iterated over to adapt the state of the layers in this
preprocessing stage.
reset_state: Whether this call to `adapt` should reset the state of
the layers in this preprocessing stage.
"""
if not isinstance(data,
(dataset_ops.DatasetV2, np.ndarray, ops.EagerTensor)):
raise ValueError(
'`adapt()` requires a batched Dataset, an EagerTensor, '
'or a Numpy array as input, '
'got {}'.format(type(data)))
if isinstance(data, dataset_ops.DatasetV2):
# Validate the datasets to try and ensure we haven't been passed one with
# infinite size. That would cause an infinite loop here.
if tf_utils.dataset_is_infinite(data):
raise ValueError(
'The dataset passed to `adapt()` has an infinite number of '
'elements. Please use dataset.take(...) to make the number '
'of elements finite.')
for current_layer_index in range(0, len(self.layers)):
if not hasattr(self.layers[current_layer_index], 'adapt'):
# Skip any layer that does not need adapting.
continue
def map_fn(x):
"""Maps `PreprocessingStage` inputs to inputs at `current_layer_index`.
Args:
x: Batch of inputs seen in entry of the `PreprocessingStage` instance.
Returns:
Batch of inputs to be processed by layer
`self.layers[current_layer_index]`
"""
if current_layer_index == 0: # pylint: disable=cell-var-from-loop
return x
for i in range(current_layer_index): # pylint: disable=cell-var-from-loop
x = self.layers[i](x)
return x
if isinstance(data, dataset_ops.DatasetV2):
current_layer_data = data.map(map_fn)
else:
current_layer_data = map_fn(data)
self.layers[current_layer_index].adapt(current_layer_data,
reset_state=reset_state)
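# A brief usage sketch (assumes the public `tf.keras` API is importable as
# `tf`, as in the docstrings below): wrap a single Normalization layer in a
# PreprocessingStage and adapt it on a small NumPy batch.
def _example_sequential_stage():
  import tensorflow as tf
  stage = PreprocessingStage(
      [tf.keras.layers.experimental.preprocessing.Normalization()])
  data = np.random.random((8, 3)).astype('float32')
  stage.adapt(data)  # sets the mean/variance state of the Normalization layer
  return stage(data)  # the normalized batch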
# Functional methods should take precedence.
class FunctionalPreprocessingStage(functional.Functional,
base_preprocessing_layer.PreprocessingLayer):
"""A functional preprocessing stage.
This preprocessing stage wraps a graph of preprocessing layers into a
Functional-like object that enables you to `adapt()` the whole graph via
a single `adapt()` call on the preprocessing stage.
Preprocessing stage is not a complete model, so it cannot be called with
`fit()`. However, it is possible to add regular layers that may be trainable
to a preprocessing stage.
A functional preprocessing stage is created in the same way as `Functional`
models. A stage can be instantiated by passing two arguments to
`__init__`. The first argument is the `keras.Input` Tensors that represent
the inputs to the stage. The second argument specifies the output
tensors that represent the outputs of this stage. Both arguments can be a
nested structure of tensors.
Example:
>>> inputs = {'x2': tf.keras.Input(shape=(5,)),
... 'x1': tf.keras.Input(shape=(1,))}
>>> norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
>>> y = norm_layer(inputs['x2'])
>>> y, z = tf.keras.layers.Lambda(lambda x: (x, x))(inputs['x1'])
>>> outputs = [inputs['x1'], [y, z]]
>>> stage = FunctionalPreprocessingStage(inputs, outputs)
Args:
inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
      a dict, or a nested structure of input tensors.
outputs: An output tensor, or a list, a dict or a nested structure of output
tensors.
name: String, optional. Name of the preprocessing stage.
"""
def fit(self, *args, **kwargs):
raise ValueError(
'Preprocessing stage is not a complete model, and hence should not be '
'`fit`. Instead, you may feed data to `adapt` the stage to set '
'appropriate states of the layers in the stage.')
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
Args:
      data: A batched Dataset object, a NumPy array, an EagerTensor, or a list,
        dict or nested structure of NumPy arrays or EagerTensors. The elements
        of a Dataset object need to conform with the inputs of the stage. The
        first dimension of NumPy arrays or EagerTensors is understood to be the
        batch dimension. Data to be iterated over to adapt the state of the
        layers in this preprocessing stage.
reset_state: Whether this call to `adapt` should reset the state of the
layers in this preprocessing stage.
Examples:
>>> # For a stage with dict input
>>> inputs = {'x2': tf.keras.Input(shape=(5,)),
... 'x1': tf.keras.Input(shape=(1,))}
>>> outputs = [inputs['x1'], inputs['x2']]
>>> stage = FunctionalPreprocessingStage(inputs, outputs)
>>> ds = tf.data.Dataset.from_tensor_slices({'x1': tf.ones((4,5)),
... 'x2': tf.ones((4,1))})
>>> sorted(ds.element_spec.items()) # Check element_spec
[('x1', TensorSpec(shape=(5,), dtype=tf.float32, name=None)),
('x2', TensorSpec(shape=(1,), dtype=tf.float32, name=None))]
>>> stage.adapt(ds)
>>> data_np = {'x1': np.ones((4, 5)), 'x2': np.ones((4, 1))}
>>> stage.adapt(data_np)
"""
if not isinstance(data, dataset_ops.Dataset):
data = self._flatten_to_reference_inputs(data)
if any(not isinstance(datum, (np.ndarray, ops.EagerTensor))
for datum in data):
raise ValueError(
'`adapt()` requires a batched Dataset, a list of EagerTensors '
'or Numpy arrays as input, got {}'.format(type(data)))
ds_input = [
dataset_ops.Dataset.from_tensor_slices(x).batch(1) for x in data
]
if isinstance(data, dataset_ops.Dataset):
# Validate the datasets to try and ensure we haven't been passed one with
# infinite size. That would cause an infinite loop here.
if tf_utils.dataset_is_infinite(data):
raise ValueError(
'The dataset passed to `adapt()` has an infinite number of '
'elements. Please use dataset.take(...) to make the number '
'of elements finite.')
# Unzip dataset object to a list of single input dataset.
ds_input = _unzip_dataset(data)
# Dictionary mapping reference tensors to datasets
ds_dict = {}
tensor_usage_count = self._tensor_usage_count
for x, y in zip(self.inputs, ds_input):
x_id = str(id(x))
ds_dict[x_id] = [y] * tensor_usage_count[x_id]
nodes_by_depth = self._nodes_by_depth
depth_keys = sorted(nodes_by_depth.keys(), reverse=True)
def build_map_fn(node, args, kwargs):
if not isinstance(args.element_spec, tuple):
def map_fn(*x):
return nest.flatten(node.layer(*x, **kwargs))
else:
def map_fn(*x):
return nest.flatten(node.layer(x, **kwargs))
return map_fn
for depth in depth_keys:
for node in nodes_by_depth[depth]:
# Input node
if node.is_input:
continue
# Node with input not computed yet
if any(t_id not in ds_dict for t_id in node.flat_input_ids):
continue
args, kwargs = node.map_arguments(ds_dict)
args = dataset_ops.Dataset.zip(nest.list_to_tuple(*args))
if hasattr(node.layer, 'adapt'):
node.layer.adapt(args, reset_state=reset_state)
map_fn = build_map_fn(node, args, kwargs)
outputs = args.map(map_fn)
outputs = _unzip_dataset(outputs)
# Update ds_dict.
for x_id, y in zip(node.flat_output_ids, outputs):
ds_dict[x_id] = [y] * tensor_usage_count[x_id]
def _unzip_dataset(ds):
"""Unzip dataset into a list of single element datasets.
Args:
ds: A Dataset object.
Returns:
    A list of Dataset objects, each corresponding to one of the `element_spec`
    entries of the input Dataset object.
Example:
>>> ds1 = tf.data.Dataset.from_tensor_slices([1, 2, 3])
>>> ds2 = tf.data.Dataset.from_tensor_slices([4, 5, 6])
>>> ds_zipped_tuple = tf.data.Dataset.zip((ds1, ds2))
>>> ds_unzipped_tuple = _unzip_dataset(ds_zipped_tuple)
>>> ds_zipped_dict = tf.data.Dataset.zip({'ds1': ds1, 'ds2': ds2})
>>> ds_unzipped_dict = _unzip_dataset(ds_zipped_dict)
Then the two elements of `ds_unzipped_tuple` and `ds_unzipped_dict` are both
the same as `ds1` and `ds2`.
"""
element_count = len(nest.flatten(ds.element_spec))
ds_unzipped = []
for i in range(element_count):
def map_fn(*x, j=i):
return nest.flatten(x)[j]
ds_unzipped.append(ds.map(map_fn))
return ds_unzipped
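# --- Illustrative usage sketch (not part of the original module). ---
# Assumes TensorFlow is installed and that the sequential stage defined above is
# exported as `PreprocessingStage`; the layer choice and shapes are made up.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf

  stage = PreprocessingStage(
      [tf.keras.layers.experimental.preprocessing.Normalization()])
  # adapt() walks the layers in order and fits the Normalization statistics.
  stage.adapt(np.random.rand(16, 4).astype('float32'))
  print(stage(np.ones((2, 4), dtype='float32')))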
|
{
"content_hash": "9045679073f8f856bd82fe22a8a060a5",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 82,
"avg_line_length": 38.87644787644788,
"alnum_prop": 0.6484258615552686,
"repo_name": "petewarden/tensorflow",
"id": "cf6028018a3125842fe12cc07bd2c6617c4d6028",
"size": "10758",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import sys
class ProductKey:
def __init__(self, fileName = None):
self.keyFile = None
if (fileName is not None):
self.Open(fileName)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.Close()
def Open(self, fileName):
if (self.keyFile is not None):
raise Exception("File already opened")
self.keyFile = open(fileName, mode='rb')
def Close(self):
if (self.keyFile is not None):
self.keyFile.close()
def Decode(self, bytes):
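        # Decodes a Windows product key from a raw DigitalProductId blob: the 15
        # bytes starting at offset 52 are interpreted as one large integer and
        # repeatedly divided by 24; each remainder selects a character from the
        # 24-character alphabet below, with dashes inserted every 5 characters.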
rpk = list(bytes)
rpkOffset = 52
i = 28
szPossibleChars = "BCDFGHJKMPQRTVWXY2346789"
szProductKey = ""
while i >= 0:
dwAccumulator = 0
j = 14
while j >= 0:
dwAccumulator = dwAccumulator * 256
d = rpk[j+rpkOffset]
if isinstance(d, str):
d = ord(d)
dwAccumulator = d + dwAccumulator
if sys.version_info[0] >= 3:
rpk[j+rpkOffset] = (dwAccumulator // 24) if (dwAccumulator // 24) <= 255 else 255
else:
rpk[j+rpkOffset] = (dwAccumulator / 24) if (dwAccumulator / 24) <= 255 else 255
dwAccumulator = dwAccumulator % 24
j = j - 1
i = i - 1
szProductKey = szPossibleChars[dwAccumulator] + szProductKey
if ((29 - i) % 6) == 0 and i != -1:
i = i - 1
szProductKey = "-" + szProductKey
return szProductKey
def DecodeFromFile(self):
return self.Decode(self.keyFile.read())
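# Illustrative usage (not part of the original script); the file name below is
# hypothetical and is expected to contain the raw DigitalProductId bytes.
if __name__ == '__main__':
    with ProductKey('DigitalProductId.bin') as key:
        print(key.DecodeFromFile())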
|
{
"content_hash": "7a32033d0fb4514b92ee07d6db16554a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 97,
"avg_line_length": 29.892857142857142,
"alnum_prop": 0.5125448028673835,
"repo_name": "andry81/contools",
"id": "151ea6228ac1be23d7b70062a25e20225596c662",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "Scripts/Tools/admin/ProductKey/ProductKey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "959036"
},
{
"name": "C",
"bytes": "7239"
},
{
"name": "C++",
"bytes": "1047092"
},
{
"name": "HTML",
"bytes": "2134"
},
{
"name": "JavaScript",
"bytes": "11776"
},
{
"name": "Perl",
"bytes": "20305"
},
{
"name": "Python",
"bytes": "18433"
},
{
"name": "Shell",
"bytes": "445345"
},
{
"name": "Smarty",
"bytes": "80952"
},
{
"name": "Tcl",
"bytes": "1519"
},
{
"name": "VBA",
"bytes": "635"
},
{
"name": "VBScript",
"bytes": "93894"
},
{
"name": "XSLT",
"bytes": "4303"
},
{
"name": "sed",
"bytes": "6718"
}
],
"symlink_target": ""
}
|
import sys
# [START storage_get_requester_pays_status]
from google.cloud import storage
def get_requester_pays_status(bucket_name):
"""Get a bucket's requester pays metadata"""
# bucket_name = "my-bucket"
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
requester_pays_status = bucket.requester_pays
if requester_pays_status:
print(f"Requester Pays is enabled for {bucket_name}")
else:
print(f"Requester Pays is disabled for {bucket_name}")
# [END storage_get_requester_pays_status]
if __name__ == "__main__":
get_requester_pays_status(bucket_name=sys.argv[1])
|
{
"content_hash": "802e284a941e5674d51c54c8cc94543f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 27.083333333333332,
"alnum_prop": 0.6892307692307692,
"repo_name": "googleapis/python-storage",
"id": "a2eeb34d70f837c5d8cd4bacdd9d723d612f484d",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/storage_get_requester_pays_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1392987"
},
{
"name": "Shell",
"bytes": "32171"
}
],
"symlink_target": ""
}
|
"""Remote control support for Bravia TV."""
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
from homeassistant.components.remote import ATTR_NUM_REPEATS, RemoteEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import BraviaTVCoordinator
from .const import ATTR_MANUFACTURER, DEFAULT_NAME, DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Bravia TV Remote from a config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
unique_id = config_entry.unique_id
assert unique_id is not None
device_info: DeviceInfo = {
"identifiers": {(DOMAIN, unique_id)},
"name": DEFAULT_NAME,
"manufacturer": ATTR_MANUFACTURER,
"model": config_entry.title,
}
async_add_entities(
[BraviaTVRemote(coordinator, DEFAULT_NAME, unique_id, device_info)]
)
class BraviaTVRemote(CoordinatorEntity, RemoteEntity):
"""Representation of a Bravia TV Remote."""
coordinator: BraviaTVCoordinator
def __init__(
self,
coordinator: BraviaTVCoordinator,
name: str,
unique_id: str,
device_info: DeviceInfo,
) -> None:
"""Initialize the entity."""
self._attr_device_info = device_info
self._attr_name = name
self._attr_unique_id = unique_id
super().__init__(coordinator)
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self.coordinator.is_on
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
await self.coordinator.async_turn_on()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
await self.coordinator.async_turn_off()
async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send a command to device."""
repeats = kwargs[ATTR_NUM_REPEATS]
await self.coordinator.async_send_command(command, repeats)
|
{
"content_hash": "6b0e6b8f7dfe9cb72ea803bc6c86da19",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 86,
"avg_line_length": 31.342105263157894,
"alnum_prop": 0.6763224181360201,
"repo_name": "Danielhiversen/home-assistant",
"id": "81761240320de655d4356c2925c7f666eba271c7",
"size": "2382",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/braviatv/remote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import pytest
import libqtile.config
from libqtile import layout
from test.conftest import no_xinerama
from test.layouts.layout_utils import (
assert_dimensions,
assert_focus_path,
assert_focused,
)
class ZoomyConfig:
auto_fullscreen = True
main = None
groups = [
libqtile.config.Group("a"),
]
layouts = [
layout.Zoomy(columnwidth=200),
]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
screens = []
def zoomy_config(x):
return no_xinerama(pytest.mark.parametrize("qtile", [ZoomyConfig], indirect=True)(x))
@zoomy_config
def test_zoomy_one(qtile):
qtile.test_window('one')
assert_dimensions(qtile, 0, 0, 600, 600)
qtile.test_window('two')
assert_dimensions(qtile, 0, 0, 600, 600)
qtile.test_window('three')
assert_dimensions(qtile, 0, 0, 600, 600)
assert_focus_path(qtile, 'two', 'one', 'three')
# TODO(pc) find a way to check size of inactive windows
@zoomy_config
def test_zoomy_window_focus_cycle(qtile):
# setup 3 tiled and two floating clients
qtile.test_window("one")
qtile.test_window("two")
qtile.test_window("float1")
qtile.c.window.toggle_floating()
qtile.test_window("float2")
qtile.c.window.toggle_floating()
qtile.test_window("three")
# test preconditions, Zoomy adds clients at head
assert qtile.c.layout.info()['clients'] == ['three', 'two', 'one']
# last added window has focus
assert_focused(qtile, "three")
# assert window focus cycle, according to order in layout
assert_focus_path(qtile, 'two', 'one', 'float1', 'float2', 'three')
|
{
"content_hash": "90443b3519bc4d2957d1658bb7cf3576",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 89,
"avg_line_length": 27.0327868852459,
"alnum_prop": 0.6622195269860521,
"repo_name": "zordsdavini/qtile",
"id": "849fa53bf6b0c86ed1d817011e3638962e4a897e",
"size": "2987",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/layouts/test_zoomy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "899"
},
{
"name": "Python",
"bytes": "1275265"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "8160"
}
],
"symlink_target": ""
}
|
import sys, os, threading, traceback
import types
from collections.abc import Mapping
from . import envs
from .thread_state import thread_local
from .values import McInvalidValue
from .config_errors import InvalidUsageException, ConfigExcludedAttributeError, ConfigApiException
from .bases import get_bases
from .attribute import Where, _McAttributeAccessor
from .repeatable import RepeatableDict
_calculated_value = ' #calculated'
_static_value = ' #static'
_dynamic_value = ' #dynamic'
_property_method_value_hidden = '@property method value - call disabled'
_mc_filter_out_keys = ('env', 'env_factory', 'contained_in', 'root_conf', 'attributes', 'mc_config_result', 'num_invalid_property_usage', 'named_as')
_mc_hidden_if_not_true = ('mc_is_default_value_item',)
_mc_show_if_names_only = ('mc_is_default_value_item',)
def _class_tuple(obj, obj_info=""):
return {'__class__': obj.__class__.__name__ + obj_info}
def _attr_ref_msg(obj, attr_name):
try:
return attr_name + ": " + repr(getattr(obj, attr_name))
except ConfigExcludedAttributeError as ex:
return attr_name + ": " + ex.value if ex.value else ''
except AttributeError:
return ''
except ConfigApiException:
return attr_name + ": 'NO-CURRENT-VALUE"
def ref_id(obj):
try:
return obj.ref_id_for_json()
except (AttributeError, TypeError):
return id(obj)
def _mc_identification_msg_str(objval):
"""Generate a string which may help to identify an item which is not itself being dumped"""
name_msg = _attr_ref_msg(objval, 'name')
id_msg = _attr_ref_msg(objval, 'id')
    additional_ref_info_msg = ''.join([', ' + msg for msg in (id_msg, name_msg) if msg])
    cls_msg = repr(type(objval)) if objval else repr(objval)
    return cls_msg + additional_ref_info_msg
class ConfigItemEncoder():
recursion_check = threading.local()
recursion_check.in_default = None
def __init__(self, filter_callable, fallback_callable, compact, sort_attributes, property_methods, with_item_types, warn_nesting,
multiconf_base_type, multiconf_property_wrapper_type, show_all_envs, depth, persistent_ids):
"""Encoder for json.
Check the :meth:`~multiconf.ConfigItem.json` and :meth:`~multiconf.ConfigItem.mc_build` methods for public arguments passed on to this.
Arguments:
multiconf_base_type, multiconf_property_wrapper_type (type): Passed as arguments as a workaround for cyclic imports.
"""
self.user_filter_callable = filter_callable
self.user_fallback_callable = fallback_callable
self.compact = compact
self.sort_attributes = sort_attributes
self.property_methods = property_methods
self.with_item_types = with_item_types
self.multiconf_base_type = multiconf_base_type
self.multiconf_property_wrapper_type = multiconf_property_wrapper_type
self.seen = {}
self.start_obj = None
self.num_errors = 0
self.num_invalid_usages = 0
self.show_all_envs = show_all_envs
self.depth = depth
self.start_depth = None
self.current_depth = None
self.persistent_ids = persistent_ids
if warn_nesting != None:
ConfigItemEncoder.recursion_check.warn_nesting = warn_nesting
else:
ConfigItemEncoder.recursion_check.warn_nesting = str(os.environ.get('MULTICONF_WARN_JSON_NESTING')).lower() == 'true'
def ref_repr(self, obj):
if self.persistent_ids:
# This will not identify the object, but it gives an indication
return _mc_identification_msg_str(obj)
return ref_id(obj)
def safe_repr(self, obj):
"""This catches exceptions from calling repr(obj) and embeds the message in the returned str."""
try:
return repr(obj)
except Exception as ex:
traceback.print_exception(*sys.exc_info())
self.num_errors += 1
msg = "Error gettting repr of obj, type: {ot}, exception: {extyp}: {exmsg}".format(ot=type(obj), extyp=type(ex).__name__, exmsg=str(ex))
print(msg, file=sys.stderr)
return msg
def _mc_class_dict(self, obj):
not_frozen_msg = "" if obj._mc_where == Where.FROZEN else ", not-frozen"
if self.compact:
msg = " #as: '" + obj.named_as() + "', id: " + str(self.ref_repr(obj)) + not_frozen_msg
return _class_tuple(obj, msg)
return {**_class_tuple(obj, not_frozen_msg), '__id__': self.ref_repr(obj)}
def _ref_item_str(self, objval):
excl = ' excluded' if not objval else ''
if isinstance(objval, self.with_item_types):
return excl + objval.ref_type_info_for_json() + ", id: " + str(self.ref_repr(objval))
try:
ref_type_info = objval.ref_type_info_for_json()
except AttributeError:
ref_type_info = ''
return excl + ref_type_info + " " + _mc_identification_msg_str(objval)
def _ref_earlier_str(self, objval):
return "#ref" + self._ref_item_str(objval)
def _ref_later_str(self, objval):
return "#ref later" + self._ref_item_str(objval)
def _ref_self_str(self, objval):
return "#ref self" + self._ref_item_str(objval)
def _ref_outside_str(self, objval):
# A reference to an item which is outside of the currently dumped hierarchy.
# Showing self.ref_repr(obj) does not help here as the object is not dumped, instead try to show some attributes which may identify the object
return "#ref outside: " + _mc_identification_msg_str(objval)
def _ref_mc_item_str(self, objval):
if ref_id(objval) in self.seen:
return self._ref_earlier_str(objval)
return self._ref_later_str(objval)
def _check_nesting(self, obj, child_obj):
# Returns child_obj or reference info string
# Check that object being dumped is actually contained in self
        # We don't want to display an outer/sibling object as nested under an inner object
# Check for reference to parent or sibling object (in case we dump from a lower level than root)
if child_obj is obj:
return self._ref_self_str(child_obj)
if self.seen.get(ref_id(child_obj)):
return self._ref_earlier_str(child_obj)
if isinstance(child_obj, self.multiconf_base_type):
contained_in = child_obj._mc_contained_in
while contained_in is not None:
if contained_in is self.start_obj:
# We know we are referencing a later object, because it was not in 'seen'
return self._ref_later_str(child_obj)
contained_in = contained_in._mc_contained_in
# We found a reference to an item which is outside of the currently dumped hierarchy
return self._ref_outside_str(child_obj)
return child_obj
def _handle_one_attr_one_env(self, obj, key, mc_attr, env, attributes_overriding_property, dir_entries, names_only):
attr_inf = []
try:
val = mc_attr.env_values[env]
if key in dir_entries:
attributes_overriding_property.add(key)
attr_inf = [(' #overrides @property', True)]
except KeyError as ex:
# mc_attribute overriding @property OR the value for env has not yet been set
try:
val = obj.getattr(key, env)
attr_inf = [(' #value for {env} provided by @property'.format(env=env), True)]
except AttributeError:
val = McInvalidValue.MC_NO_VALUE
if self.user_filter_callable:
try:
key, val = self.user_filter_callable(obj, key, val)
if key is False:
return key, []
except Exception as ex:
self.num_errors += 1
traceback.print_exception(*sys.exc_info())
attr_inf.append((' #json_error calling filter', self.safe_repr(ex)),)
val = self._check_nesting(obj, val)
if val == McInvalidValue.MC_NO_VALUE:
return key, [(' #no value for {env}'.format(env=env), True)]
if isinstance(val, Mapping):
new_val = {}
for inner_key, maybeitem in val.items():
if not isinstance(maybeitem, self.multiconf_base_type):
new_val[str(inner_key)] = maybeitem
continue
new_val[inner_key] = self._ref_mc_item_str(maybeitem)
return key, [('', new_val)] + attr_inf
try:
iterable = iter(val)
# TODO?: Include type of iterable in json meta info
except TypeError:
return key, [('', val)] + attr_inf
if isinstance(val, str):
return key, [('', val)] + attr_inf
new_val = []
for maybeitem in val:
if not isinstance(maybeitem, self.multiconf_base_type):
new_val.append(maybeitem)
continue
new_val.append(self._ref_mc_item_str(maybeitem))
return key, [('', new_val)] + attr_inf
def _handle_one_dir_entry_one_env(self, obj, key, _val, env, attributes_overriding_property, _dir_entries, names_only):
if key.startswith('_') or isinstance(obj.__dict__.get(key, None), (self.multiconf_base_type, RepeatableDict)) or key in _mc_filter_out_keys:
return key, ()
overridden_property = ''
if key in attributes_overriding_property:
overridden_property = ' #overridden @property'
# Figure out if the attribute is a @property or a static value
try:
# If proxy object then get proxied object, the access to __class__ does not work through the proxy
real_obj = object.__getattribute__(obj, '_mc_proxied_item')
except AttributeError:
real_obj = obj
for cls in get_bases(object.__getattribute__(real_obj, '__class__')):
try:
real_attr = object.__getattribute__(cls, key)
except AttributeError:
continue
if isinstance(real_attr, (property, self.multiconf_property_wrapper_type)):
if key in _mc_hidden_if_not_true and not getattr(obj, key):
return key, ()
calc_or_static = _calculated_value
if names_only and key not in _mc_show_if_names_only:
val = _property_method_value_hidden
break
orig_env = thread_local.env
try:
thread_local.env = env
if isinstance(real_attr, self.multiconf_property_wrapper_type):
val = real_attr.prop.__get__(obj, type(obj))
break
val = getattr(obj, key)
except InvalidUsageException as ex:
self.num_invalid_usages += 1
return key, [(overridden_property + ' #invalid usage context', self.safe_repr(ex))]
except Exception as ex:
self.num_errors += 1
traceback.print_exception(*sys.exc_info())
return key, [(overridden_property + ' #json_error trying to handle property method', self.safe_repr(ex))]
finally:
thread_local.env = orig_env
break
elif not (hasattr(real_attr, '__call__') or hasattr(real_attr, '__func__')):
calc_or_static = _static_value
val = real_attr
break
elif isinstance(real_attr, type):
calc_or_static = ''
val = real_attr
break
else:
# Ignore methods
return key, ()
property_inf = []
if self.user_filter_callable:
try:
key, val = self.user_filter_callable(obj, key, val)
if key is False:
return key, []
except Exception as ex:
self.num_errors += 1
traceback.print_exception(*sys.exc_info())
property_inf = [(' #json_error calling filter', self.safe_repr(ex))]
if type(val) == type:
return key, [(overridden_property, self.safe_repr(val))] + property_inf
val = self._check_nesting(obj, val)
if isinstance(val, (str, int, float)):
if overridden_property:
return key, [(overridden_property + calc_or_static + ' value was', val)] + property_inf
if self.compact:
return key, [('', (str(val).lower() if isinstance(val, bool) else str(val)) + calc_or_static)] + property_inf
return key, [('', val), (calc_or_static, True)] + property_inf
if isinstance(val, (list, tuple)):
new_list = []
for item in val:
new_list.append(self._check_nesting(obj, item))
return key, [(overridden_property, new_list), (calc_or_static, True)] + property_inf
if isinstance(val, Mapping):
new_dict = {}
for item_key, item in val.items():
new_dict[item_key] = self._check_nesting(obj, item)
return key, [(overridden_property, new_dict), (calc_or_static, True)] + property_inf
return key, [(overridden_property, val), (calc_or_static, True)] + property_inf
def _handle_one_value_multiple_envs(
self, dd, obj, attr_key, attr_val, env, attributes_overriding_property, dir_entries, one_env_func, multi_value_meta_inf, names_only):
if not self.show_all_envs:
attr_key, property_inf = one_env_func(obj, attr_key, attr_val, env, attributes_overriding_property, dir_entries, names_only)
for meta_key, val in property_inf:
dd[attr_key + meta_key] = val
return
env_values = {}
prev_key_property_inf = None
multiple_values = False
for env in obj.env_factory.envs.values():
key_property_inf = one_env_func(obj, attr_key, attr_val, env, attributes_overriding_property, dir_entries, names_only)
if key_property_inf != prev_key_property_inf:
if prev_key_property_inf is not None:
multiple_values = True
prev_key_property_inf = key_property_inf
attr_key, property_inf = key_property_inf
for meta_key, val in property_inf:
env_values[env.name + meta_key] = val
if env_values and multiple_values:
dd[attr_key + multi_value_meta_inf] = True
dd[attr_key] = env_values
return
for meta_key, val in property_inf:
dd[attr_key + meta_key] = val
def __call__(self, obj):
property_methods_orig = self.property_methods
if ConfigItemEncoder.recursion_check.in_default:
in_default = ConfigItemEncoder.recursion_check.in_default
ConfigItemEncoder.recursion_check.in_default = None
self.property_methods = False
if self.recursion_check.warn_nesting:
print("Warning: Nested json calls, disabling @property method value dump:", file=sys.stderr)
print("outer object type:", type(in_default), file=sys.stderr)
print("inner object type:", self.safe_repr(type(obj)) + ", inner obj:", obj.json(compact=True, property_methods=False), file=sys.stderr)
try:
ConfigItemEncoder.recursion_check.in_default = obj
if self.seen.get(ref_id(obj)) and obj is not self.start_obj.env:
return self._ref_earlier_str(obj)
self.seen[ref_id(obj)] = obj
if isinstance(obj, self.multiconf_base_type):
if self.depth is not None:
if self.start_depth is None:
self.start_depth = 0
contained_in = obj
while contained_in is not None:
self.start_depth += 1
contained_in = contained_in.contained_in
self.current_depth = 0
contained_in = obj
while contained_in is not None:
self.current_depth += 1
contained_in = contained_in.contained_in
self.current_depth = self.current_depth - self.start_depth + 1
# Handle ConfigItems", type(obj)
dd = self._mc_class_dict(obj)
if not self.start_obj:
self.start_obj = obj
# Put 'env' once on the first object
dd['env'] = obj.env
if self.show_all_envs:
not_in_envs = [str(env) for env in obj.env_factory.envs.values() if not obj._mc_exists_in_given_env(env)]
if not_in_envs:
dd["#item does not exist in"] = ', '.join(not_in_envs)
# --- Handle attributes ---
attributes_overriding_property = set()
if self.sort_attributes:
attr_dict = {}
else:
attr_dict = dd
dir_entries = obj._mc_cls_dir_entries
for attr_key, mc_attr in obj._mc_attributes.items():
self._handle_one_value_multiple_envs(
attr_dict, obj, attr_key, mc_attr, obj.env, attributes_overriding_property, dir_entries, self._handle_one_attr_one_env,
' #multiconf attribute', names_only=False)
if self.sort_attributes:
for key in sorted(attr_dict):
dd[key] = attr_dict[key]
# --- Handle child items ---
for key, item in obj.items(with_types=self.with_item_types, with_excluded=True):
if self.current_depth is not None:
if self.current_depth >= self.depth:
dd[key] = _mc_identification_msg_str(item)
continue
if self.current_depth == self.depth -1 and isinstance(item, RepeatableDict):
shallow_item = {}
for child_key, child_item in item.items():
shallow_item[child_key] = _mc_identification_msg_str(child_item)
dd[key] = shallow_item
continue
if not item and isinstance(item, self.multiconf_base_type):
if self.compact:
dd[key] = 'false #' + repr(item)
continue
dd[key] = False
dd[key + ' #' + repr(item)] = True
continue
dd[key] = item
if self.property_methods is False:
# Note: also excludes class/static members
return dd
# --- Handle results from dir() call ---
if self.sort_attributes:
property_dict = {}
else:
property_dict = dd
for attr_key in dir_entries:
self._handle_one_value_multiple_envs(
property_dict, obj, attr_key, None, obj.env, attributes_overriding_property, None, self._handle_one_dir_entry_one_env,
' #multiconf env specific @property', names_only=self.property_methods is None)
if self.sort_attributes:
for key in sorted(property_dict):
dd[key] = property_dict[key]
# --- End handle ConfigItem ---
return dd
if isinstance(obj, envs.BaseEnv):
# print "# Handle Env objects", type(obj)
dd = _class_tuple(obj)
dd['name'] = obj.name
return dd
if type(obj) == type:
return self.safe_repr(obj)
# If obj defines json_equivalent, then return the result of that
if hasattr(obj, "json_equivalent"):
try:
return obj.json_equivalent()
except Exception as ex:
self.num_errors += 1
traceback.print_exception(*sys.exc_info())
return "__json_error__ calling 'json_equivalent': " + self.safe_repr(ex)
try:
iterable = iter(obj)
except TypeError:
pass
else:
# print("Handle iterable objects", type(obj))
return list(iterable)
if self.user_fallback_callable:
obj, handled = self.user_fallback_callable(obj)
if handled:
return obj
self.num_errors += 1
return "__json_error__ # don't know how to handle obj of type: " + self.safe_repr(type(obj))
finally:
self.property_methods = property_methods_orig
ConfigItemEncoder.recursion_check.in_default = None
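# --- Illustrative sketch (not part of the original module). ---
# ConfigItemEncoder instances are created by multiconf itself and, since the
# class implements __call__, are passed to json.dumps() through the `default=`
# hook. The standalone toy below only demonstrates that same pattern.
if __name__ == '__main__':
    import json

    class _Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def _fallback(obj):
        # json.dumps calls this for objects it cannot serialize natively.
        if isinstance(obj, _Point):
            return {'__class__': type(obj).__name__, 'x': obj.x, 'y': obj.y}
        raise TypeError("don't know how to handle obj of type: %r" % type(obj))

    print(json.dumps({'origin': _Point(0, 0)}, default=_fallback))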
|
{
"content_hash": "dd77bcbf31f7dd33adf855e914d98753",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 152,
"avg_line_length": 42.11614173228347,
"alnum_prop": 0.5532601075017527,
"repo_name": "lhupfeldt/multiconf",
"id": "c7ead51c242aef4dfd16d5643308e53a3d30844d",
"size": "21526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multiconf/json_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "31"
},
{
"name": "Python",
"bytes": "760680"
}
],
"symlink_target": ""
}
|
__author__ = "@jframos"
import behave
from behave import step
from hamcrest import assert_that, is_
from qautils.dataset.dataset_utils import DatasetUtils
from commons.step_helpers import send_context_notification_step_helper
from commons.constants import IMPLICIT_WAIT_AFTER_NOTIFICATION
import time
behave.use_step_matcher("re")
_dataset_utils = DatasetUtils()
@step(u'the configured tenant-id is registered in CLOTO component')
@step(u'the main tenant-id configured is registered in CLOTO component')
def tenant_id_is_registered_in_cloto(context):
context.tenant_id_facts = context.tenant_id
print ("> A GET request is executed to CLOTO component, to init all data about that main tenant in its system.")
_, response = context.cloto_client.get_tenant_id_resource_client().get_tenant_id(context.tenant_id_facts)
assert_that(response.ok,
"TenantId '{}' for testing cannot be retrieved from CLOTO: Message: {}".format(context.tenant_id_facts,
response.text))
@step(u'a no registered Tentand-Id in CLOTO component "(?P<tenant_id>.*)"')
def tenant_id_is_not_registered_in_cloto(context, tenant_id):
context.tenant_id_facts = tenant_id
@step(u'the context notification has default context elements')
def the_context_notification_has_default_context_elements(context):
# Default parameter for the Context Notification request.
context.context_elements.update({'isPattern': 'false'})
context.context_elements.update({'type': 'vm'})
context.context_elements.update({'id': 'myServerId'})
@step(u'the context notification has these context elements')
def the_context_notification_has_these_context_elements(context):
# Prepare table data
context.context_elements = dict()
for element in context.table.rows:
data = element.as_dict()
data = _dataset_utils.generate_fixed_length_params(data)
data = _dataset_utils.remove_missing_params(data)
context.context_elements.update(data)
@step(u'the following notifications are received for "(?P<server_id>.*)" with values')
@step(u'a context notification is received for "(?P<server_id>.*)" with values')
@step(u'the following notifications are received for "(?P<server_id>.*)" and main tenant-id with values')
@step(u'a context notification is received for "(?P<server_id>.*)" and main tenant-id with values')
def a_context_update_is_received(context, server_id):
send_context_notification_step_helper(context, context.tenant_id_facts, server_id)
# Implicit Wait. We need to wait for facts processing after sending context notifications.
time.sleep(IMPLICIT_WAIT_AFTER_NOTIFICATION)
@step(u'the context is updated')
def the_context_is_updated(context):
assert_that(context.response.ok,
"Response to CB notification is not the expected one: Message: {}".format(context.response.text))
|
{
"content_hash": "0e8e44e73732e6e5ef1588b55b8a1f8a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 119,
"avg_line_length": 41.15277777777778,
"alnum_prop": 0.7124535943300708,
"repo_name": "Fiware/cloud.Facts",
"id": "79716bb4ff2a95c8681cf83e542a5f590f023371",
"size": "3757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/acceptance/features/component/steps/context_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "12835"
},
{
"name": "Cucumber",
"bytes": "33954"
},
{
"name": "Python",
"bytes": "167098"
},
{
"name": "Ruby",
"bytes": "1891"
},
{
"name": "Shell",
"bytes": "6076"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from collections import defaultdict, namedtuple
from datetime import timedelta
import six
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils import timezone
from sentry import tsdb
from sentry.api.serializers import Serializer, register, serialize
from sentry.constants import LOG_LEVELS
from sentry.models import (
Group, GroupAssignee, GroupBookmark, GroupMeta, GroupResolution,
GroupResolutionStatus, GroupSeen, GroupSnooze, GroupStatus,
GroupSubscription, GroupSubscriptionReason, GroupTagKey, UserOption,
UserOptionValue
)
from sentry.utils.db import attach_foreignkey
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
SUBSCRIPTION_REASON_MAP = {
GroupSubscriptionReason.comment: 'commented',
GroupSubscriptionReason.assigned: 'assigned',
GroupSubscriptionReason.bookmark: 'bookmarked',
GroupSubscriptionReason.status_change: 'changed_status',
}
@register(Group)
class GroupSerializer(Serializer):
def _get_subscriptions(self, item_list, user):
"""
Returns a mapping of group IDs to a two-tuple of (subscribed: bool,
subscription: GroupSubscription or None) for the provided user and
groups.
"""
results = {group.id: None for group in item_list}
# First, the easy part -- if there is a subscription record associated
# with the group, we can just use that to know if a user is subscribed
# or not.
subscriptions = GroupSubscription.objects.filter(
group__in=results.keys(),
user=user,
)
for subscription in subscriptions:
results[subscription.group_id] = (subscription.is_active, subscription)
# For any group that doesn't have a subscription associated with it,
# we'll need to fall back to the project's option value, so here we
# collect all of the projects to look up, and keep a set of groups that
# are part of that project. (Note that the common -- but not only --
# case here is that all groups are part of the same project.)
projects = defaultdict(set)
for group in item_list:
if results[group.id] is None:
projects[group.project].add(group.id)
if projects:
# NOTE: This doesn't use `values_list` because that bypasses field
# value decoding, so the `value` field would not be unpickled.
options = {
option.project_id: option.value
for option in
UserOption.objects.filter(
Q(project__in=projects.keys()) | Q(project__isnull=True),
user=user,
key='workflow:notifications',
)
}
# This is the user's default value for any projects that don't have
# the option value specifically recorded. (The default "all
# conversations" value is convention.)
default = options.get(None, UserOptionValue.all_conversations)
# If you're subscribed to all notifications for the project, that
# means you're subscribed to all of the groups. Otherwise you're
# not subscribed to any of these leftover groups.
for project, group_ids in projects.items():
is_subscribed = options.get(
project.id,
default,
) == UserOptionValue.all_conversations
for group_id in group_ids:
results[group_id] = (is_subscribed, None)
return results
def get_attrs(self, item_list, user):
from sentry.plugins import plugins
GroupMeta.objects.populate_cache(item_list)
attach_foreignkey(item_list, Group.project)
if user.is_authenticated() and item_list:
bookmarks = set(GroupBookmark.objects.filter(
user=user,
group__in=item_list,
).values_list('group_id', flat=True))
seen_groups = dict(GroupSeen.objects.filter(
user=user,
group__in=item_list,
).values_list('group_id', 'last_seen'))
subscriptions = self._get_subscriptions(item_list, user)
else:
bookmarks = set()
seen_groups = {}
subscriptions = defaultdict(lambda: (False, None))
assignees = dict(
(a.group_id, a.user)
for a in GroupAssignee.objects.filter(
group__in=item_list,
).select_related('user')
)
user_counts = dict(
GroupTagKey.objects.filter(
group__in=item_list,
key='sentry:user',
).values_list('group', 'values_seen')
)
ignore_durations = dict(
GroupSnooze.objects.filter(
group__in=item_list,
).values_list('group', 'until')
)
pending_resolutions = dict(
GroupResolution.objects.filter(
group__in=item_list,
status=GroupResolutionStatus.PENDING,
).values_list('group', 'release')
)
result = {}
for item in item_list:
active_date = item.active_at or item.first_seen
annotations = []
for plugin in plugins.for_project(project=item.project, version=1):
safe_execute(plugin.tags, None, item, annotations,
_with_transaction=False)
for plugin in plugins.for_project(project=item.project, version=2):
annotations.extend(safe_execute(plugin.get_annotations, group=item,
_with_transaction=False) or ())
result[item] = {
'assigned_to': serialize(assignees.get(item.id)),
'is_bookmarked': item.id in bookmarks,
'subscription': subscriptions[item.id],
'has_seen': seen_groups.get(item.id, active_date) > active_date,
'annotations': annotations,
'user_count': user_counts.get(item.id, 0),
'ignore_duration': ignore_durations.get(item.id),
'pending_resolution': pending_resolutions.get(item.id),
}
return result
def serialize(self, obj, attrs, user):
status = obj.status
status_details = {}
if attrs['ignore_duration']:
if attrs['ignore_duration'] < timezone.now() and status == GroupStatus.IGNORED:
status = GroupStatus.UNRESOLVED
else:
status_details['ignoreUntil'] = attrs['ignore_duration']
elif status == GroupStatus.UNRESOLVED and obj.is_over_resolve_age():
status = GroupStatus.RESOLVED
status_details['autoResolved'] = True
if status == GroupStatus.RESOLVED:
status_label = 'resolved'
if attrs['pending_resolution']:
status_details['inNextRelease'] = True
elif status == GroupStatus.IGNORED:
status_label = 'ignored'
elif status in [GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]:
status_label = 'pending_deletion'
elif status == GroupStatus.PENDING_MERGE:
status_label = 'pending_merge'
else:
status_label = 'unresolved'
        # If the user is not logged in or not a member of the organization,
        # do not return the permalink, which contains private information, i.e. the org name.
if user.is_authenticated() and user.get_orgs().filter(id=obj.organization.id).exists():
permalink = absolute_uri(reverse('sentry-group', args=[
obj.organization.slug, obj.project.slug, obj.id]))
else:
permalink = None
is_subscribed, subscription = attrs['subscription']
return {
'id': six.text_type(obj.id),
'shareId': obj.get_share_id(),
'shortId': obj.qualified_short_id,
'count': six.text_type(obj.times_seen),
'userCount': attrs['user_count'],
'title': obj.title,
'culprit': obj.culprit,
'permalink': permalink,
'firstSeen': obj.first_seen,
'lastSeen': obj.last_seen,
'logger': obj.logger or None,
'level': LOG_LEVELS.get(obj.level, 'unknown'),
'status': status_label,
'statusDetails': status_details,
'isPublic': obj.is_public,
'project': {
'name': obj.project.name,
'slug': obj.project.slug,
},
'type': obj.get_event_type(),
'metadata': obj.get_event_metadata(),
'numComments': obj.num_comments,
'assignedTo': attrs['assigned_to'],
'isBookmarked': attrs['is_bookmarked'],
'isSubscribed': is_subscribed,
'subscriptionDetails': {
'reason': SUBSCRIPTION_REASON_MAP.get(
subscription.reason,
'unknown',
),
} if is_subscribed and subscription is not None else None,
'hasSeen': attrs['has_seen'],
'annotations': attrs['annotations'],
}
StatsPeriod = namedtuple('StatsPeriod', ('segments', 'interval'))
class StreamGroupSerializer(GroupSerializer):
STATS_PERIOD_CHOICES = {
'14d': StatsPeriod(14, timedelta(hours=24)),
'24h': StatsPeriod(24, timedelta(hours=1)),
}
def __init__(self, stats_period=None, matching_event_id=None):
if stats_period is not None:
assert stats_period in self.STATS_PERIOD_CHOICES
self.stats_period = stats_period
self.matching_event_id = matching_event_id
def get_attrs(self, item_list, user):
attrs = super(StreamGroupSerializer, self).get_attrs(item_list, user)
if self.stats_period:
# we need to compute stats at 1d (1h resolution), and 14d
group_ids = [g.id for g in item_list]
segments, interval = self.STATS_PERIOD_CHOICES[self.stats_period]
now = timezone.now()
stats = tsdb.get_range(
model=tsdb.models.group,
keys=group_ids,
end=now,
start=now - ((segments - 1) * interval),
rollup=int(interval.total_seconds()),
)
for item in item_list:
attrs[item].update({
'stats': stats[item.id],
})
return attrs
def serialize(self, obj, attrs, user):
result = super(StreamGroupSerializer, self).serialize(obj, attrs, user)
if self.stats_period:
result['stats'] = {
self.stats_period: attrs['stats'],
}
if self.matching_event_id:
result['matchingEventId'] = self.matching_event_id
return result
class SharedGroupSerializer(GroupSerializer):
def serialize(self, obj, attrs, user):
result = super(SharedGroupSerializer, self).serialize(obj, attrs, user)
del result['annotations']
return result
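# --- Illustrative sketch (not part of the original module). ---
# It only spells out the time-window arithmetic that StreamGroupSerializer
# feeds to tsdb.get_range(): `segments` points ending at `now`, spaced
# `interval` apart, with the rollup equal to the interval in seconds.
if __name__ == '__main__':
    from datetime import datetime

    segments, interval = StreamGroupSerializer.STATS_PERIOD_CHOICES['24h']
    now = datetime(2017, 1, 1, 12, 0, 0)
    start = now - ((segments - 1) * interval)
    print(start, int(interval.total_seconds()))  # 2016-12-31 13:00:00 3600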
|
{
"content_hash": "a14b6744e793b29334c6d5605cf0ba80",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 95,
"avg_line_length": 38.320945945945944,
"alnum_prop": 0.5800934497046637,
"repo_name": "JamesMura/sentry",
"id": "f9a5760a0a7e730360bf1b2a34a6779164fc6873",
"size": "11343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/serializers/models/group.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "264356"
},
{
"name": "HTML",
"bytes": "306533"
},
{
"name": "JavaScript",
"bytes": "1101462"
},
{
"name": "Lua",
"bytes": "51972"
},
{
"name": "Makefile",
"bytes": "6425"
},
{
"name": "Python",
"bytes": "15082023"
},
{
"name": "Ruby",
"bytes": "3867"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from mock import patch, call, Mock
from ...testcases import DustyTestCase
from dusty.commands.setup import (_get_mac_username, setup_dusty_config, complete_setup,
_get_vm_size)
from dusty.payload import Payload
from dusty import constants
class TestSetupCommands(DustyTestCase):
@patch('dusty.commands.setup.subprocess.check_output')
@patch('dusty.commands.setup._get_raw_input')
def test_get_mac_username_no(self, fake_raw_input, fake_check_output):
fake_raw_input.return_value = 'n'
fake_check_output.return_value = 'user\n'
result = _get_mac_username()
self.assertEqual(result, 'n')
@patch('pwd.getpwnam')
@patch('dusty.commands.setup.subprocess.check_output')
@patch('dusty.commands.setup._get_raw_input')
def test_get_mac_username_yes(self, fake_raw_input, fake_check_output, fake_pwnam):
fake_raw_input.return_value = 'y'
fake_check_output.return_value = 'user\n'
result = _get_mac_username()
self.assertEqual(result, 'user')
def factory_file_side_effect(self, file_compare_name):
def is_file_side_effect(file_name):
if file_name == file_compare_name:
return True
return False
return is_file_side_effect
@patch('pwd.getpwnam')
@patch('dusty.commands.setup._get_specs_repo')
@patch('dusty.commands.setup._get_mac_username')
@patch('dusty.commands.setup._get_vm_size')
def test_setup_dusty_config(self, fake_get_vm_size, fake_get_mac, fake_get_default_specs, fake_pwnam):
fake_get_mac.return_value = 'user'
fake_get_default_specs.return_value = 'github.com/gamechanger/dusty'
fake_get_vm_size.return_value = 6
expected_dict_argument = {constants.CONFIG_MAC_USERNAME_KEY: 'user',
constants.CONFIG_SPECS_REPO_KEY: 'github.com/gamechanger/dusty',
constants.CONFIG_VM_MEM_SIZE: '6'}
return_payload = setup_dusty_config()
self.assertEqual(return_payload.fn, complete_setup)
self.assertEqual(return_payload.args[0], expected_dict_argument)
@patch('pwd.getpwnam')
@patch('dusty.commands.setup._get_specs_repo')
@patch('dusty.commands.setup._get_mac_username')
@patch('dusty.commands.setup._get_vm_size')
def test_setup_dusty_config_pass_arguments_1(self, fake_get_vm_size, fake_get_mac, fake_get_default_specs, fake_pwnam):
setup_dusty_config(mac_username='1',
specs_repo='2')
fake_get_vm_size.assert_has_calls([call()])
fake_get_mac.assert_has_calls([])
fake_get_default_specs.assert_has_calls([])
@patch('pwd.getpwnam')
@patch('dusty.commands.setup._get_specs_repo')
@patch('dusty.commands.setup._get_mac_username')
@patch('dusty.commands.setup._get_vm_size')
def test_setup_dusty_config_pass_arguments_2(self, fake_get_vm_size, fake_get_mac, fake_get_default_specs, fake_pwnam):
setup_dusty_config(mac_username='1')
fake_get_vm_size.assert_has_calls([call()])
fake_get_mac.assert_has_calls([])
fake_get_default_specs.assert_has_calls([call()])
@patch('pwd.getpwnam')
@patch('dusty.commands.setup._get_specs_repo')
@patch('dusty.commands.setup._get_mac_username')
@patch('dusty.commands.setup._get_vm_size')
def test_setup_dusty_config_pass_arguments_3(self, fake_get_vm_size, fake_get_mac, fake_get_default_specs, fake_pwnam):
setup_dusty_config(specs_repo='1')
fake_get_vm_size.assert_has_calls([call()])
fake_get_mac.assert_has_calls([call()])
fake_get_default_specs.assert_has_calls([])
@patch('dusty.commands.setup.update_managed_repos')
@patch('dusty.commands.setup.save_config_value')
def test_complete_setup(self, fake_save_config_value, *args):
dict_argument = {constants.CONFIG_MAC_USERNAME_KEY: 'user',
constants.CONFIG_SPECS_REPO_KEY: 'github.com/gamechanger/dusty'}
complete_setup(dict_argument)
fake_save_config_value.assert_has_calls([call(constants.CONFIG_MAC_USERNAME_KEY,'user'),
call(constants.CONFIG_SPECS_REPO_KEY, 'github.com/gamechanger/dusty'),
call(constants.CONFIG_SETUP_KEY, True)])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_16_y(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 16 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.return_value = 'y'
self.assertEqual(_get_vm_size(), 6144)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 16384 megabytes of memory. We would like to allocate 6144 to your vm. Is that ok? (y/n) ')])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_16_n(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 16 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.side_effect = ['n', 2]
self.assertEqual(_get_vm_size(), 2)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 16384 megabytes of memory. We would like to allocate 6144 to your vm. Is that ok? (y/n) '),
call('Please input the number of megabytes to allocate to the vm: ')])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_8_y(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 8 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.return_value = 'y'
self.assertEqual(_get_vm_size(), 4096)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 8192 megabytes of memory. We would like to allocate 4096 to your vm. Is that ok? (y/n) ')])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_8_n(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 8 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.side_effect = ['n', 2]
self.assertEqual(_get_vm_size(), 2)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 8192 megabytes of memory. We would like to allocate 4096 to your vm. Is that ok? (y/n) '),
call('Please input the number of megabytes to allocate to the vm: ')])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_less_8_y(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 6 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.return_value = 'y'
self.assertEqual(_get_vm_size(), 2048)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 6144 megabytes of memory. We would like to allocate 2048 to your vm. Is that ok? (y/n) ')])
@patch('dusty.commands.setup._get_raw_input')
@patch('dusty.commands.setup.virtual_memory')
def test_get_vm_size_less_8_n(self, fake_virtual_memory, fake_get_raw_input):
total_mock = Mock()
total_mock.total = 6 * 2**30
fake_virtual_memory.return_value = total_mock
fake_get_raw_input.side_effect = ['n', 1]
self.assertEqual(_get_vm_size(), 1)
fake_get_raw_input.assert_has_calls([call('Your system seems to have 6144 megabytes of memory. We would like to allocate 2048 to your vm. Is that ok? (y/n) '),
call('Please input the number of megabytes to allocate to the vm: ')])
@patch('dusty.commands.setup._get_raw_input')
def test_enter_is_accepted_as_yes(self, fake_get_raw_input):
fake_get_raw_input.return_value = ''
setup_dusty_config()
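# --- Illustrative sketch (not part of the original tests). ---
# A standalone restatement of the sizing heuristic the tests above assert for
# dusty.commands.setup._get_vm_size (boundary behaviour is inferred from the
# test inputs only; the real implementation may differ in detail):
def _suggested_vm_megabytes(total_memory_bytes):
    total_megabytes = total_memory_bytes // 2**20
    if total_megabytes >= 16 * 1024:
        return 6144
    if total_megabytes >= 8 * 1024:
        return 4096
    return 2048


if __name__ == '__main__':
    assert _suggested_vm_megabytes(16 * 2**30) == 6144
    assert _suggested_vm_megabytes(8 * 2**30) == 4096
    assert _suggested_vm_megabytes(6 * 2**30) == 2048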
|
{
"content_hash": "820548e19bde74e3026a9a4ccf35fed1",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 169,
"avg_line_length": 53.52903225806452,
"alnum_prop": 0.6373387971555984,
"repo_name": "gamechanger/dusty",
"id": "62a23330efd2ea8f9b69e8a864c91719bb505938",
"size": "8297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/commands/setup_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "493669"
},
{
"name": "Ruby",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "3875"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import pytest
Tree = namedtuple('Tree', 'x l r')
def is_balanced(T):
"""
Time: O(n log n), Space worst O(n) best/avg O(log n)
"""
if T is None:
return True
diff_height = abs(height(T.l) - height(T.r))
if diff_height > 1:
return False
if not is_balanced(T.l) or not is_balanced(T.r):
return False
return True
def height(T):
"""
Time O(n), Space worst O(n) best/avg O(log n)
"""
if T is None:
return 0
return max(height(T.l), height(T.r)) + 1
def improved_is_balanced(T):
"""
Time O(n), Space worst O(n) best/avg O(log n)
"""
if T is None:
return True
if improved_height(T) == -1:
return False
return True
def improved_height(T):
"""
Returns -1 if tree is not balanced, returns the height of the tree otherwise.
Time O(n), Space worst O(n) best/avg O(log n)
"""
if T is None:
return 0
left_h = improved_height(T.l)
if left_h == -1:
return -1
right_h = improved_height(T.r)
if right_h == -1:
return -1
if abs(left_h - right_h) > 1:
return -1
return max(left_h, right_h) + 1
BALANCED = Tree(10,
Tree(1,
Tree(2, None, None),
Tree(6, None, None)),
Tree(15,
Tree(16, None,
Tree(3, None, None)),
Tree(3, None, None)))
NOT_BALANCED = Tree(10,
Tree(1,
Tree(2,
Tree(4,
Tree(19, None, None),
None),
Tree(8, None, None)),
Tree(7, None, None)),
None)
@pytest.mark.parametrize('input, expected', [
(BALANCED, True),
(NOT_BALANCED, False)
])
def test_is_balanced(input, expected):
assert is_balanced(input) is expected
@pytest.mark.parametrize('input, expected', [
(BALANCED, True),
(NOT_BALANCED, False)
])
def test_improved_is_balanced(input, expected):
assert improved_is_balanced(input) is expected
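# --- Illustrative check (not part of the original file). ---
# improved_height() folds the balance check into the height computation so each
# node is visited once; -1 is the sentinel for "an unbalanced subtree was found".
if __name__ == '__main__':
    print(improved_height(BALANCED))      # 4: the height of the balanced tree
    print(improved_height(NOT_BALANCED))  # -1: an unbalanced subtree was found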
|
{
"content_hash": "fc015568927db9d9b863b7913f642fae",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 81,
"avg_line_length": 21.901960784313726,
"alnum_prop": 0.4941808415398389,
"repo_name": "daviur/py-cracking-the-coding-interview",
"id": "7baa6c7a346047eacb0f6776b22174aa27b0d9da",
"size": "3353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trees-and-graphs/tg41.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42382"
}
],
"symlink_target": ""
}
|
from .templates import SortTemplate
class SelectionSort(SortTemplate):
"""Creates a class to implement the selection sort.
"""
def sort(self):
"""Uses the section sort algorithm to sort.
This is a basic section sort algorithm.
"""
for i in range(self.length):
min = i
for j in range(i+1, self.length):
if self.items[min] > self.items[j]:
min = j
self.exchange(i, min)
def quick_sort(self):
"""Uses the quick sort algorithm to sort.
        This is a basic quick sort algorithm.
"""
self.__quick_sort_helper(0, self.length-1)
def __quick_sort_helper(self, first, last):
"""Does quick sort recursively.
"""
if first < last:
j = self.__partition(first, last)
self.__quick_sort_helper(first, j-1)
self.__quick_sort_helper(j+1, last)
def __partition(self, first, last):
"""Sorts one partition's element.
"""
pivot = self.items[first]
left = first + 1
right = last
while True:
while left <= right and self.items[left] <= pivot: left += 1
while self.items[right] >= pivot and left <= right: right -= 1
if left > right: break
self.exchange(left, right)
self.exchange(first, right)
return right
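# --- Illustrative usage sketch (not part of the original module). ---
# SortTemplate is not shown here; the stub below assumes the algorithms above
# only need `items`, `length`, and an index-swapping `exchange()` helper.
if __name__ == '__main__':
    class _ListSorter(SelectionSort):
        def __init__(self, items):
            self.items = list(items)
            self.length = len(self.items)

        def exchange(self, i, j):
            self.items[i], self.items[j] = self.items[j], self.items[i]

    sorter = _ListSorter([5, 2, 9, 1, 7])
    sorter.sort()          # basic selection sort
    print(sorter.items)    # [1, 2, 5, 7, 9]

    sorter = _ListSorter([5, 2, 9, 1, 7])
    sorter.quick_sort()    # the quick sort variant
    print(sorter.items)    # [1, 2, 5, 7, 9]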
|
{
"content_hash": "d77f9afd9a718444f280dd0b06ee2a54",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 30.29787234042553,
"alnum_prop": 0.5393258426966292,
"repo_name": "OctavianLee/Algorithm-Python",
"id": "328a19ccc1ea0c9733ae6ccc8632d5037dcfc5ad",
"size": "1448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dahlia/sorts/selection_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "13787"
}
],
"symlink_target": ""
}
|
"""Runs kubernetes e2e test with specified config"""
import argparse
import hashlib
import os
import random
import re
import shutil
import subprocess
import sys
import urllib2
import time
ORIG_CWD = os.getcwd() # Checkout changes cwd
# The zones below are the zones available in the CNCF account (in theory, zones vary by account)
# We aim for 3 zones per region to try to maintain even spreading.
# We also remove a few zones where our preferred instance type is not available,
# though really this needs a better fix (likely in kops)
DEFAULT_AWS_ZONES = [
'ap-northeast-1a',
'ap-northeast-1c',
'ap-northeast-1d',
'ap-northeast-2a',
    #'ap-northeast-2b' - AZ does not exist, so we're breaking the 3 AZs per region target here
'ap-northeast-2c',
#'ap-south-1a', # no c4.large instances available
#'ap-south-1b', # no c4.large instances available
'ap-southeast-1a',
'ap-southeast-1b',
'ap-southeast-1c',
'ap-southeast-2a',
'ap-southeast-2b',
'ap-southeast-2c',
#'ca-central-1a', no c4.large capacity 2018-04-25
#'ca-central-1b', no c4.large capacity 2018-04-25
'eu-central-1a',
'eu-central-1b',
'eu-central-1c',
'eu-west-1a',
'eu-west-1b',
'eu-west-1c',
#'eu-west-2a', no c4.large capacity 2018-04-24
#'eu-west-2b', no c4.large capacity 2018-04-24
#'eu-west-2c', no c4.large capacity 2018-04-24
#'eu-west-3a', documented to not support c4 family
#'eu-west-3b', documented to not support c4 family
#'eu-west-3c', documented to not support c4 family
'sa-east-1a',
#'sa-east-1b', AZ does not exist, so we're breaking the 3 AZs per region target here
'sa-east-1c',
'us-east-1a',
'us-east-1b',
'us-east-1c',
#'us-east-1d', # limiting to 3 zones to not overallocate
#'us-east-1e', # limiting to 3 zones to not overallocate
#'us-east-1f', # limiting to 3 zones to not overallocate
#'us-east-2a', # no c4.large instances available
#'us-east-2b', # no c4.large instances available
#'us-east-2c', # no c4.large instances available
'us-west-1a',
'us-west-1b',
#'us-west-1c', AZ does not exist, so we're breaking the 3 AZs per region target here
'us-west-2a',
'us-west-2b',
'us-west-2c'
]
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def check_output(*cmd):
"""Log and run the command, raising on errors, return output"""
print >>sys.stderr, 'Run:', cmd
return subprocess.check_output(cmd)
def check_env(env, *cmd):
"""Log and run the command with a specific env, raising on errors."""
print >>sys.stderr, 'Environment:'
for key, value in sorted(env.items()):
print >>sys.stderr, '%s=%s' % (key, value)
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd, env=env)
def kubekins(tag):
"""Return full path to kubekins-e2e:tag."""
return 'gcr.io/k8s-testimages/kubekins-e2e:%s' % tag
def parse_env(env):
"""Returns (FOO, BAR=MORE) for FOO=BAR=MORE."""
return env.split('=', 1)
def aws_role_config(profile, arn):
return (('[profile jenkins-assumed-role]\n' +
'role_arn = %s\n' +
'source_profile = %s\n') %
(arn, profile))
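# For illustration (profile name and ARN below are made up):
#   aws_role_config('prow', 'arn:aws:iam::123456789012:role/e2e')
# returns the config snippet:
#   [profile jenkins-assumed-role]
#   role_arn = arn:aws:iam::123456789012:role/e2e
#   source_profile = prow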
def kubeadm_version(mode, shared_build_gcs_path):
"""Return string to use for kubeadm version, given the job's mode (ci/pull/periodic)."""
version = ''
if mode in ['ci', 'periodic']:
# This job only runs against the kubernetes repo, and bootstrap.py leaves the
# current working directory at the repository root. Grab the SCM_REVISION so we
# can use the .debs built during the bazel-build job that should have already
# succeeded.
status = re.search(
r'STABLE_BUILD_SCM_REVISION ([^\n]+)',
check_output('hack/print-workspace-status.sh')
)
if not status:
raise ValueError('STABLE_BUILD_SCM_REVISION not found')
version = status.group(1)
# The path given here should match ci-kubernetes-bazel-build
return 'gs://kubernetes-release-dev/ci/%s-bazel/bin/linux/amd64/' % version
elif mode == 'pull':
# The format of shared_build_gcs_path looks like:
# gs://kubernetes-release-dev/bazel/<git-describe-output>
        # Append bin/linux/amd64 to that path so it points to the dir with the debs
return '%s/bin/linux/amd64/' % shared_build_gcs_path
elif mode == 'stable':
# This job need not run against the kubernetes repo and uses the stable version
# of kubeadm packages. This mode may be desired when kubeadm itself is not the
# SUT (System Under Test).
return 'stable'
else:
raise ValueError("Unknown kubeadm mode given: %s" % mode)
class LocalMode(object):
"""Runs e2e tests by calling kubetest."""
def __init__(self, workspace, artifacts):
self.command = 'kubetest'
self.workspace = workspace
self.artifacts = artifacts
self.env = []
self.os_env = []
self.env_files = []
self.add_environment(
'HOME=%s' % workspace,
'WORKSPACE=%s' % workspace,
'PATH=%s' % os.getenv('PATH'),
)
def add_environment(self, *envs):
"""Adds FOO=BAR to the list of environment overrides."""
self.env.extend(parse_env(e) for e in envs)
def add_os_environment(self, *envs):
"""Adds FOO=BAR to the list of os environment overrides."""
self.os_env.extend(parse_env(e) for e in envs)
def add_file(self, env_file):
"""Reads all FOO=BAR lines from env_file."""
with open(env_file) as fp:
for line in fp:
line = line.rstrip()
if not line or line.startswith('#'):
continue
self.env_files.append(parse_env(line))
def add_env(self, env):
self.env_files.append(parse_env(env))
def add_aws_cred(self, priv, pub, cred):
"""Sets aws keys and credentials."""
ssh_dir = os.path.join(self.workspace, '.ssh')
if not os.path.isdir(ssh_dir):
os.makedirs(ssh_dir)
cred_dir = os.path.join(self.workspace, '.aws')
if not os.path.isdir(cred_dir):
os.makedirs(cred_dir)
aws_ssh = os.path.join(ssh_dir, 'kube_aws_rsa')
aws_pub = os.path.join(ssh_dir, 'kube_aws_rsa.pub')
aws_cred = os.path.join(cred_dir, 'credentials')
shutil.copy(priv, aws_ssh)
shutil.copy(pub, aws_pub)
shutil.copy(cred, aws_cred)
self.add_environment(
'JENKINS_AWS_SSH_PRIVATE_KEY_FILE=%s' % priv,
'JENKINS_AWS_SSH_PUBLIC_KEY_FILE=%s' % pub,
'JENKINS_AWS_CREDENTIALS_FILE=%s' % cred,
)
def add_aws_role(self, profile, arn):
with open(os.path.join(self.workspace, '.aws', 'config'), 'w') as cfg:
cfg.write(aws_role_config(profile, arn))
self.add_environment('AWS_SDK_LOAD_CONFIG=true')
return 'jenkins-assumed-role'
def add_gce_ssh(self, priv, pub):
"""Copies priv, pub keys to $WORKSPACE/.ssh."""
ssh_dir = os.path.join(self.workspace, '.ssh')
if not os.path.isdir(ssh_dir):
os.makedirs(ssh_dir)
gce_ssh = os.path.join(ssh_dir, 'google_compute_engine')
gce_pub = os.path.join(ssh_dir, 'google_compute_engine.pub')
shutil.copy(priv, gce_ssh)
shutil.copy(pub, gce_pub)
self.add_environment(
'JENKINS_GCE_SSH_PRIVATE_KEY_FILE=%s' % gce_ssh,
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE=%s' % gce_pub,
)
@staticmethod
def add_service_account(path):
"""Returns path."""
return path
def add_k8s(self, *a, **kw):
"""Add specified k8s.io repos (noop)."""
pass
def add_aws_runner(self):
"""Start with kops-e2e-runner.sh"""
# TODO(Krzyzacy):retire kops-e2e-runner.sh
self.command = os.path.join(self.workspace, 'kops-e2e-runner.sh')
def start(self, args):
"""Starts kubetest."""
print >>sys.stderr, 'starts with local mode'
env = {}
env.update(self.os_env)
env.update(self.env_files)
env.update(self.env)
check_env(env, self.command, *args)
def cluster_name(cluster):
"""Return or select a cluster name."""
if cluster:
return cluster
# Create a suffix based on the build number and job name.
# This ensures no conflict across runs of different jobs (see #7592).
# For PR jobs, we use PR number instead of build number to ensure the
# name is constant across different runs of the presubmit on the PR.
    # This helps clean up resources potentially leaked by an earlier run
    # that got evicted midway (see #7673).
job_type = os.getenv('JOB_TYPE')
if job_type == 'batch':
suffix = 'batch-%s' % os.getenv('BUILD_ID', 0)
elif job_type == 'presubmit':
suffix = '%s' % os.getenv('PULL_NUMBER', 0)
else:
suffix = '%s' % os.getenv('BUILD_ID', 0)
if len(suffix) > 10:
suffix = hashlib.md5(suffix).hexdigest()[:10]
job_hash = hashlib.md5(os.getenv('JOB_NAME', '')).hexdigest()[:5]
return 'e2e-%s-%s' % (suffix, job_hash)
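# Illustrative names (env values made up): a periodic run with BUILD_ID=123
# yields something like 'e2e-123-a1b2c', a presubmit with PULL_NUMBER=7777
# yields 'e2e-7777-a1b2c', where the trailing part is the first 5 hex chars of
# md5(JOB_NAME). A suffix longer than 10 characters is replaced by the first
# 10 hex chars of its own md5 digest.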
# TODO(krzyzacy): Move this into kubetest
def build_kops(kops, mode):
"""Build kops, set kops related envs."""
if not os.path.basename(kops) == 'kops':
raise ValueError(kops)
version = 'pull-' + check_output('git', 'describe', '--always').strip()
job = os.getenv('JOB_NAME', 'pull-kops-e2e-kubernetes-aws')
gcs = 'gs://kops-ci/pulls/%s' % job
gapi = 'https://storage.googleapis.com/kops-ci/pulls/%s' % job
mode.add_environment(
'KOPS_BASE_URL=%s/%s' % (gapi, version),
'GCS_LOCATION=%s' % gcs
)
check('make', 'gcs-publish-ci', 'VERSION=%s' % version, 'GCS_LOCATION=%s' % gcs)
def set_up_kops_gce(workspace, args, mode, cluster, runner_args):
"""Set up kops on GCE envs."""
for path in [args.gce_ssh, args.gce_pub]:
if not os.path.isfile(os.path.expandvars(path)):
raise IOError(path, os.path.expandvars(path))
mode.add_gce_ssh(args.gce_ssh, args.gce_pub)
gce_ssh = os.path.join(workspace, '.ssh', 'google_compute_engine')
zones = args.kops_zones or random.choice([
'us-central1-a',
'us-central1-b',
'us-central1-c',
'us-central1-f',
])
runner_args.extend([
'--kops-cluster=%s' % cluster,
'--kops-zones=%s' % zones,
'--kops-state=%s' % args.kops_state_gce,
'--kops-nodes=%s' % args.kops_nodes,
'--kops-ssh-key=%s' % gce_ssh,
])
def set_up_kops_aws(workspace, args, mode, cluster, runner_args):
"""Set up aws related envs for kops. Will replace set_up_aws."""
for path in [args.aws_ssh, args.aws_pub, args.aws_cred]:
if not os.path.isfile(os.path.expandvars(path)):
raise IOError(path, os.path.expandvars(path))
mode.add_aws_cred(args.aws_ssh, args.aws_pub, args.aws_cred)
aws_ssh = os.path.join(workspace, '.ssh', 'kube_aws_rsa')
profile = args.aws_profile
if args.aws_role_arn:
profile = mode.add_aws_role(profile, args.aws_role_arn)
    # kubetest for kops now supports selecting random regions and zones.
# For initial testing we are not sending in zones when the
# --kops-multiple-zones flag is set. If the flag is not set then
# we use the older functionality of passing in zones.
if args.kops_multiple_zones:
runner_args.extend(["--kops-multiple-zones"])
else:
# TODO(@chrislovecnm): once we have tested we can remove the zones
# and region logic from this code and have kubetest handle that
# logic
zones = args.kops_zones or random.choice(DEFAULT_AWS_ZONES)
regions = ','.join([zone[:-1] for zone in zones.split(',')])
runner_args.extend(['--kops-zones=%s' % zones])
mode.add_environment(
'KOPS_REGIONS=%s' % regions,
)
mode.add_environment(
'AWS_PROFILE=%s' % profile,
'AWS_DEFAULT_PROFILE=%s' % profile,
)
if args.aws_cluster_domain:
cluster = '%s.%s' % (cluster, args.aws_cluster_domain)
runner_args.extend([
'--kops-cluster=%s' % cluster,
'--kops-state=%s' % args.kops_state,
'--kops-nodes=%s' % args.kops_nodes,
'--kops-ssh-key=%s' % aws_ssh,
"--kops-ssh-user=admin",
])
def set_up_aws(workspace, args, mode, cluster, runner_args):
"""Set up aws related envs. Legacy; will be replaced by set_up_kops_aws."""
for path in [args.aws_ssh, args.aws_pub, args.aws_cred]:
if not os.path.isfile(os.path.expandvars(path)):
raise IOError(path, os.path.expandvars(path))
mode.add_aws_cred(args.aws_ssh, args.aws_pub, args.aws_cred)
aws_ssh = os.path.join(workspace, '.ssh', 'kube_aws_rsa')
profile = args.aws_profile
if args.aws_role_arn:
profile = mode.add_aws_role(profile, args.aws_role_arn)
zones = args.kops_zones or random.choice(DEFAULT_AWS_ZONES)
regions = ','.join([zone[:-1] for zone in zones.split(',')])
mode.add_environment(
'AWS_PROFILE=%s' % profile,
'AWS_DEFAULT_PROFILE=%s' % profile,
'KOPS_REGIONS=%s' % regions,
)
if args.aws_cluster_domain:
cluster = '%s.%s' % (cluster, args.aws_cluster_domain)
runner_args.extend([
'--kops-cluster=%s' % cluster,
'--kops-zones=%s' % zones,
'--kops-state=%s' % args.kops_state,
'--kops-nodes=%s' % args.kops_nodes,
'--kops-ssh-key=%s' % aws_ssh,
"--kops-ssh-user=admin",
])
    # TODO(krzyzacy): Remove after retiring kops-e2e-runner.sh
mode.add_aws_runner()
def read_gcs_path(gcs_path):
"""reads a gcs path (gs://...) by HTTP GET to storage.googleapis.com"""
link = gcs_path.replace('gs://', 'https://storage.googleapis.com/')
loc = urllib2.urlopen(link).read()
print >>sys.stderr, "Read GCS Path: %s" % loc
return loc
def get_shared_gcs_path(gcs_shared, use_shared_build):
"""return the shared path for this set of jobs using args and $PULL_REFS."""
build_file = ''
if use_shared_build:
build_file += use_shared_build + '-'
build_file += 'build-location.txt'
return os.path.join(gcs_shared, os.getenv('PULL_REFS', ''), build_file)
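# For example (PULL_REFS value made up), with the default
# --gcs-shared=gs://kubernetes-jenkins/shared-results/ and
# --use-shared-build=bazel, PULL_REFS='master:abc123,1234:def456' resolves to
#   gs://kubernetes-jenkins/shared-results/master:abc123,1234:def456/bazel-build-location.txt
# With a bare --use-shared-build the file name is just 'build-location.txt'.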
def main(args):
"""Set up env, start kubekins-e2e, handle termination. """
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
# Rules for env var priority here in docker:
# -e FOO=a -e FOO=b -> FOO=b
# --env-file FOO=a --env-file FOO=b -> FOO=b
# -e FOO=a --env-file FOO=b -> FOO=a(!!!!)
# --env-file FOO=a -e FOO=b -> FOO=b
#
# So if you overwrite FOO=c for a local run it will take precedence.
#
# Set up workspace/artifacts dir
workspace = os.environ.get('WORKSPACE', os.getcwd())
artifacts = os.path.join(workspace, '_artifacts')
if not os.path.isdir(artifacts):
os.makedirs(artifacts)
mode = LocalMode(workspace, artifacts)
for env_file in args.env_file:
mode.add_file(test_infra(env_file))
for env in args.env:
mode.add_env(env)
# TODO(fejta): remove after next image push
mode.add_environment('KUBETEST_MANUAL_DUMP=y')
runner_args = [
'--dump=%s' % mode.artifacts,
]
if args.service_account:
runner_args.append(
'--gcp-service-account=%s' % mode.add_service_account(args.service_account))
shared_build_gcs_path = ""
if args.use_shared_build is not None:
# find shared build location from GCS
gcs_path = get_shared_gcs_path(args.gcs_shared, args.use_shared_build)
print >>sys.stderr, 'Getting shared build location from: '+gcs_path
# retry loop for reading the location
attempts_remaining = 12
while True:
attempts_remaining -= 1
try:
# tell kubetest to extract from this location
shared_build_gcs_path = read_gcs_path(gcs_path)
args.kubetest_args.append('--extract=' + shared_build_gcs_path)
args.build = None
break
except urllib2.URLError as err:
print >>sys.stderr, 'Failed to get shared build location: %s' % err
if attempts_remaining > 0:
print >>sys.stderr, 'Waiting 5 seconds and retrying...'
time.sleep(5)
else:
raise RuntimeError('Failed to get shared build location too many times!')
elif args.build is not None:
if args.build == '':
# Empty string means --build was passed without any arguments;
# if --build wasn't passed, args.build would be None
runner_args.append('--build')
else:
runner_args.append('--build=%s' % args.build)
k8s = os.getcwd()
if not os.path.basename(k8s) == 'kubernetes':
raise ValueError(k8s)
mode.add_k8s(os.path.dirname(k8s), 'kubernetes', 'release')
if args.build_federation is not None:
if args.build_federation == '':
runner_args.append('--build-federation')
else:
runner_args.append('--build-federation=%s' % args.build_federation)
fed = os.getcwd()
if not os.path.basename(fed) == 'federation':
raise ValueError(fed)
mode.add_k8s(os.path.dirname(fed), 'federation', 'release')
if args.kops_build:
build_kops(os.getcwd(), mode)
if args.stage is not None:
runner_args.append('--stage=%s' % args.stage)
if args.aws:
for line in check_output('hack/print-workspace-status.sh').split('\n'):
if 'gitVersion' in line:
_, version = line.strip().split(' ')
break
else:
raise ValueError('kubernetes version not found in workspace status')
runner_args.append('--kops-kubernetes-version=%s/%s' % (
args.stage.replace('gs://', 'https://storage.googleapis.com/'),
version))
# TODO(fejta): move these out of this file
if args.up == 'true':
runner_args.append('--up')
if args.down == 'true':
runner_args.append('--down')
if args.test == 'true':
runner_args.append('--test')
# Passthrough some args to kubetest
if args.deployment:
runner_args.append('--deployment=%s' % args.deployment)
if args.provider:
runner_args.append('--provider=%s' % args.provider)
cluster = cluster_name(args.cluster)
runner_args.append('--cluster=%s' % cluster)
runner_args.append('--gcp-network=%s' % cluster)
runner_args.extend(args.kubetest_args)
if args.use_logexporter:
# TODO(fejta): Take the below value through a flag instead of env var.
runner_args.append('--logexporter-gcs-path=%s' % os.environ.get('GCS_ARTIFACTS_DIR', ''))
if args.kubeadm:
version = kubeadm_version(args.kubeadm, shared_build_gcs_path)
runner_args.extend([
'--kubernetes-anywhere-path=%s' % os.path.join(workspace, 'kubernetes-anywhere'),
'--kubernetes-anywhere-phase2-provider=kubeadm',
'--kubernetes-anywhere-cluster=%s' % cluster,
'--kubernetes-anywhere-kubeadm-version=%s' % version,
])
if args.kubeadm == "pull":
            # If this is a pull job, the kubelet version should equal
            # the kubeadm version here: we should use the debs from the PR build
runner_args.extend([
'--kubernetes-anywhere-kubelet-version=%s' % version,
])
if args.aws:
# Legacy - prefer passing --deployment=kops, --provider=aws,
# which does not use kops-e2e-runner.sh
set_up_aws(mode.workspace, args, mode, cluster, runner_args)
elif args.deployment == 'kops' and args.provider == 'aws':
set_up_kops_aws(mode.workspace, args, mode, cluster, runner_args)
elif args.deployment == 'kops' and args.provider == 'gce':
set_up_kops_gce(mode.workspace, args, mode, cluster, runner_args)
elif args.gce_ssh:
mode.add_gce_ssh(args.gce_ssh, args.gce_pub)
# TODO(fejta): delete this?
mode.add_os_environment(*(
'%s=%s' % (k, v) for (k, v) in os.environ.items()))
mode.add_environment(
# Boilerplate envs
# Skip gcloud update checking
'CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true',
# Use default component update behavior
'CLOUDSDK_EXPERIMENTAL_FAST_COMPONENT_UPDATE=false',
# AWS
'KUBE_AWS_INSTANCE_PREFIX=%s' % cluster,
# GCE
'INSTANCE_PREFIX=%s' % cluster,
'KUBE_GCE_INSTANCE_PREFIX=%s' % cluster,
)
mode.start(runner_args)
def create_parser():
"""Create argparser."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--env-file', default=[], action="append",
help='Job specific environment file')
parser.add_argument(
'--env', default=[], action="append",
help='Job specific environment setting ' +
'(usage: "--env=VAR=SETTING" will set VAR to SETTING).')
parser.add_argument(
'--gce-ssh',
default=os.environ.get('JENKINS_GCE_SSH_PRIVATE_KEY_FILE'),
help='Path to .ssh/google_compute_engine keys')
parser.add_argument(
'--gce-pub',
default=os.environ.get('JENKINS_GCE_SSH_PUBLIC_KEY_FILE'),
help='Path to pub gce ssh key')
parser.add_argument(
'--service-account',
default=os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'),
help='Path to service-account.json')
parser.add_argument(
'--build', nargs='?', default=None, const='',
help='Build kubernetes binaries if set, optionally specifying strategy')
parser.add_argument(
'--build-federation', nargs='?', default=None, const='',
help='Build federation binaries if set, optionally specifying strategy')
parser.add_argument(
'--use-shared-build', nargs='?', default=None, const='',
help='Use prebuilt kubernetes binaries if set, optionally specifying strategy')
parser.add_argument(
'--gcs-shared',
default='gs://kubernetes-jenkins/shared-results/',
help='Get shared build from this bucket')
parser.add_argument(
'--cluster', default='bootstrap-e2e', help='Name of the cluster')
parser.add_argument(
'--kubeadm', choices=['ci', 'periodic', 'pull', 'stable'])
parser.add_argument(
'--stage', default=None, help='Stage release to GCS path provided')
parser.add_argument(
'--test', default='true', help='If we need to run any actual test within kubetest')
parser.add_argument(
'--down', default='true', help='If we need to tear down the e2e cluster')
parser.add_argument(
        '--up', default='true', help='If we need to bring up an e2e cluster')
parser.add_argument(
'--use-logexporter',
action='store_true',
help='If we need to use logexporter tool to upload logs from nodes to GCS directly')
parser.add_argument(
'--kubetest_args',
action='append',
default=[],
help='Send unrecognized args directly to kubetest')
# kops & aws
# TODO(justinsb): replace with --provider=aws --deployment=kops
parser.add_argument(
'--aws', action='store_true', help='E2E job runs in aws')
parser.add_argument(
'--aws-profile',
default=(
os.environ.get('AWS_PROFILE') or
os.environ.get('AWS_DEFAULT_PROFILE') or
'default'
),
help='Profile within --aws-cred to use')
parser.add_argument(
'--aws-role-arn',
default=os.environ.get('KOPS_E2E_ROLE_ARN'),
help='Use --aws-profile to run as --aws-role-arn if set')
parser.add_argument(
'--aws-ssh',
default=os.environ.get('JENKINS_AWS_SSH_PRIVATE_KEY_FILE'),
help='Path to private aws ssh keys')
parser.add_argument(
'--aws-pub',
default=os.environ.get('JENKINS_AWS_SSH_PUBLIC_KEY_FILE'),
help='Path to pub aws ssh key')
parser.add_argument(
'--aws-cred',
default=os.environ.get('JENKINS_AWS_CREDENTIALS_FILE'),
help='Path to aws credential file')
parser.add_argument(
'--aws-cluster-domain', help='Domain of the aws cluster for aws-pr jobs')
parser.add_argument(
'--kops-nodes', default=4, type=int, help='Number of nodes to start')
parser.add_argument(
'--kops-state', default='s3://k8s-kops-prow/',
help='Name of the aws state storage')
parser.add_argument(
'--kops-state-gce', default='gs://k8s-kops-gce/',
help='Name of the kops state storage for GCE')
parser.add_argument(
'--kops-zones', help='Comma-separated list of zones else random choice')
parser.add_argument(
'--kops-build', action='store_true', help='If we need to build kops locally')
parser.add_argument(
'--kops-multiple-zones', action='store_true', help='Use multiple zones')
# kubetest flags that also trigger behaviour here
parser.add_argument(
'--provider', help='provider flag as used by kubetest')
parser.add_argument(
'--deployment', help='deployment flag as used by kubetest')
return parser
def parse_args(args=None):
"""Return args, adding unrecognized args to kubetest_args."""
parser = create_parser()
args, extra = parser.parse_known_args(args)
args.kubetest_args += extra
if args.aws or args.provider == 'aws':
# If aws keys are missing, try to fetch from HOME dir
if not args.aws_ssh or not args.aws_pub or not args.aws_cred:
home = os.environ.get('HOME')
if not home:
raise ValueError('HOME dir not set!')
if not args.aws_ssh:
args.aws_ssh = '%s/.ssh/kube_aws_rsa' % home
                print >>sys.stderr, '--aws-ssh key not set. Defaulting to %s' % args.aws_ssh
if not args.aws_pub:
args.aws_pub = '%s/.ssh/kube_aws_rsa.pub' % home
print >>sys.stderr, '--aws-pub key not set. Defaulting to %s' % args.aws_pub
if not args.aws_cred:
args.aws_cred = '%s/.aws/credentials' % home
print >>sys.stderr, '--aws-cred not set. Defaulting to %s' % args.aws_cred
return args
if __name__ == '__main__':
main(parse_args())
|
{
"content_hash": "3aa8f0bd5e4ad4fe2a77a554998843fc",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 97,
"avg_line_length": 37.874647887323945,
"alnum_prop": 0.6032501580454427,
"repo_name": "foxish/test-infra",
"id": "5bc1f21185f9d137d95a3ab536834123d8e405e7",
"size": "27589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenarios/kubernetes_e2e.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26680"
},
{
"name": "Go",
"bytes": "3516095"
},
{
"name": "HTML",
"bytes": "73212"
},
{
"name": "JavaScript",
"bytes": "207614"
},
{
"name": "Makefile",
"bytes": "61977"
},
{
"name": "Python",
"bytes": "958919"
},
{
"name": "Roff",
"bytes": "5462"
},
{
"name": "Shell",
"bytes": "96590"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
}
|
import os
from oslo.config import cfg
from nova import config
from nova import ipv6
from nova.openstack.common.fixture import config as config_fixture
from nova import paths
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
"""Fixture to manage global conf settings."""
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('state_path', os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..')))
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[])
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
|
{
"content_hash": "5e159c0578445a04f5a892ab8bdd4557",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 44.21568627450981,
"alnum_prop": 0.6607538802660754,
"repo_name": "petrutlucian94/nova_dev",
"id": "0503d740b1e45ef6a0d5125556ffae264ca33872",
"size": "2987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/conf_fixture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13338689"
},
{
"name": "Shell",
"bytes": "16180"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from .views import CurrentUserView
urlpatterns = [
url(r'^me$', CurrentUserView.as_view(), name='me')
]
|
{
"content_hash": "e13d7f21c48346c372fc95cd6e568009",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 18,
"alnum_prop": 0.7013888888888888,
"repo_name": "locarise/locarise-drf-oauth2-support",
"id": "046b974c768cd410fc4d739e7651b1e5fd880958",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "locarise_drf_oauth2_support/users/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4402"
},
{
"name": "Python",
"bytes": "33298"
}
],
"symlink_target": ""
}
|
__author__ = 'sulantha'
import pandas as pd
import numpy
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve, auc, confusion_matrix
from Python.RUSRandomForest import RUSRandomForestClassifier
from Python.RUSRandomForest import Config
mci_df = pd.read_csv('../../Classification_Table.csv', delimiter=',')
mci_df = mci_df.drop('ID', axis=1)
Y = mci_df.Conversion.values
mci_df = mci_df.drop('Conversion', axis=1)
csf_cols = ['Age_bl', 'PTGENDER', 'APOE_bin', 'PTAU181P_bl', 'PTAU_Pos', 'ABETA142', 'ABETA142_Pos', 'PTAU_AB142_Ratio']
av45_cols = ['Age_bl', 'PTGENDER', 'APOE_bin', 'AV45_bl_Global_SUVR_NEW', 'AV45_region1', 'AV45_region2',
'AV45_region3', 'AV45_region4']
fdg_cols = ['Age_bl', 'PTGENDER', 'APOE_bin', 'FDG_bl_Global_SUVR_NEW', 'FDG_region1', 'FDG_region2', 'FDG_region3',
'FDG_region4', 'FDG_region5']
csf_av45_cols = ['Age_bl', 'PTGENDER', 'APOE_bin', 'PTAU181P_bl', 'PTAU_Pos', 'ABETA142', 'ABETA142_Pos',
'PTAU_AB142_Ratio', 'AV45_bl_Global_SUVR_NEW', 'AV45_region1', 'AV45_region2',
'AV45_region3', 'AV45_region4']
csf_fdg_cols = ['Age_bl', 'PTGENDER', 'APOE_bin', 'PTAU181P_bl', 'PTAU_Pos', 'ABETA142', 'ABETA142_Pos',
'PTAU_AB142_Ratio', 'FDG_bl_Global_SUVR_NEW', 'FDG_region1', 'FDG_region2', 'FDG_region3',
'FDG_region4', 'FDG_region5']
all_list = ['Age_bl', 'PTGENDER', 'APOE_bin', 'PTAU181P_bl', 'PTAU_Pos', 'ABETA142', 'ABETA142_Pos',
'PTAU_AB142_Ratio', 'AV45_bl_Global_SUVR_NEW', 'FDG_bl_Global_SUVR_NEW', 'AV45_region1', 'AV45_region2',
'AV45_region3', 'AV45_region4', 'FDG_region1', 'FDG_region2', 'FDG_region3',
'FDG_region4', 'FDG_region5']
X_CSF_ONLY = mci_df[csf_cols].as_matrix()
X_AV45_ONLY = mci_df[av45_cols].as_matrix()
X_FDG_ONLY = mci_df[fdg_cols].as_matrix()
X_CSF_AV45 = mci_df[csf_av45_cols].as_matrix()
X_CSF_FDG = mci_df[csf_fdg_cols].as_matrix()
X_ALL = mci_df.as_matrix()
def writeSensAndSpec(fpr, tpr, thresh, filename):
specificity = 1-fpr
a = numpy.vstack([specificity, tpr, thresh])
b = numpy.transpose(a)
numpy.savetxt(filename, b, fmt='%.5f', delimiter=',')
print('CSF_ONLY')
RUSRFC_CSF_ONLY = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_CSF_ONLY, classProb_CSF_ONLY, featureImp_CSF_ONLY, featureImpSD_CSF_ONLY = RUSRFC_CSF_ONLY.CVJungle(X_CSF_ONLY, Y, shuffle=True, print_v=True)
cm_CSF_ONLY = confusion_matrix(Y, predClasses_CSF_ONLY)
print('Final Accuracy')
print(cm_CSF_ONLY)
print(featureImp_CSF_ONLY)
featureImpScale_CSF_ONLY = [featureImp_CSF_ONLY[csf_cols.index(i)] if i in csf_cols else 0 for i in all_list]
featureImpScaleSD_CSF_ONLY = [featureImpSD_CSF_ONLY[csf_cols.index(i)] if i in csf_cols else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance CSF ONLY')
plt.bar(range(len(all_list)), featureImpScale_CSF_ONLY, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'CSF_ONLY_FEATURE_IMP.png')
false_positive_rate_CSF_ONLY, true_positive_rate_CSF_ONLY, thresholds_CSF_ONLY = roc_curve(Y, classProb_CSF_ONLY[:, 1])
writeSensAndSpec(false_positive_rate_CSF_ONLY, true_positive_rate_CSF_ONLY, thresholds_CSF_ONLY, Config.figOutputPath+'CSF_ONLY_SensSpec.out')
roc_auc_CSF_ONLY = auc(false_positive_rate_CSF_ONLY, true_positive_rate_CSF_ONLY)
print('AV45_ONLY')
RUSRFC_AV45_ONLY = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_AV45_ONLY, classProb_AV45_ONLY, featureImp_AV45_ONLY, featureImpSD_AV45_ONLY = RUSRFC_AV45_ONLY.CVJungle(X_AV45_ONLY, Y, shuffle=True, print_v=True)
cm_AV45_ONLY = confusion_matrix(Y, predClasses_AV45_ONLY)
print('Final Accuracy')
print(cm_AV45_ONLY)
print(featureImp_AV45_ONLY)
featureImpScale_AV45_ONLY = [featureImp_AV45_ONLY[av45_cols.index(i)] if i in av45_cols else 0 for i in all_list]
featureImpScaleSD_AV45_ONLY = [featureImpSD_AV45_ONLY[av45_cols.index(i)] if i in av45_cols else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance AV45 ONLY')
plt.bar(range(len(all_list)), featureImpScale_AV45_ONLY, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'AV45_ONLY_FEATURE_IMP.png')
false_positive_rate_AV45_ONLY, true_positive_rate_AV45_ONLY, thresholds_AV45_ONLY = roc_curve(Y,classProb_AV45_ONLY[:, 1])
writeSensAndSpec(false_positive_rate_AV45_ONLY, true_positive_rate_AV45_ONLY, thresholds_AV45_ONLY, Config.figOutputPath+'AV45_ONLY_SensSpec.out')
roc_auc_AV45_ONLY = auc(false_positive_rate_AV45_ONLY, true_positive_rate_AV45_ONLY)
print('FDG_ONLY')
RUSRFC_FDG_ONLY = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_FDG_ONLY, classProb_FDG_ONLY, featureImp_FDG_ONLY, featureImpSD_FDG_ONLY = RUSRFC_FDG_ONLY.CVJungle(X_FDG_ONLY, Y, shuffle=True, print_v=True)
cm_FDG_ONLY = confusion_matrix(Y, predClasses_FDG_ONLY)
print('Final Accuracy')
print(cm_FDG_ONLY)
print(featureImp_FDG_ONLY)
featureImpScale_FDG_ONLY = [featureImp_FDG_ONLY[fdg_cols.index(i)] if i in fdg_cols else 0 for i in all_list]
featureImpScaleSD_FDG_ONLY = [featureImpSD_FDG_ONLY[fdg_cols.index(i)] if i in fdg_cols else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance FDG ONLY')
plt.bar(range(len(all_list)), featureImpScale_FDG_ONLY, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'FDG_ONLY_FEATURE_IMP.png')
false_positive_rate_FDG_ONLY, true_positive_rate_FDG_ONLY, thresholds_FDG_ONLY = roc_curve(Y, classProb_FDG_ONLY[:, 1])
writeSensAndSpec(false_positive_rate_FDG_ONLY, true_positive_rate_FDG_ONLY, thresholds_FDG_ONLY, Config.figOutputPath+'FDG_ONLY_SensSpec.out')
roc_auc_FDG_ONLY = auc(false_positive_rate_FDG_ONLY, true_positive_rate_FDG_ONLY)
print('CSF_AV45')
RUSRFC_CSF_AV45 = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_CSF_AV45, classProb_CSF_AV45, featureImp_CSF_AV45, featureImpSD_CSF_AV45 = RUSRFC_CSF_AV45.CVJungle(X_CSF_AV45, Y, shuffle=True, print_v=True)
cm_CSF_AV45 = confusion_matrix(Y, predClasses_CSF_AV45)
print('Final Accuracy')
print(cm_CSF_AV45)
print(featureImp_CSF_AV45)
featureImpScale_CSF_AV45 = [featureImp_CSF_AV45[csf_av45_cols.index(i)] if i in csf_av45_cols else 0 for i in all_list]
featureImpScaleSD_CSF_AV45 = [featureImpSD_CSF_AV45[csf_av45_cols.index(i)] if i in csf_av45_cols else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance CSF & AV45')
plt.bar(range(len(all_list)), featureImpScale_CSF_AV45, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'CSF_AV45_FEATURE_IMP.png')
false_positive_rate_CSF_AV45, true_positive_rate_CSF_AV45, thresholds_CSF_AV45 = roc_curve(Y, classProb_CSF_AV45[:, 1])
writeSensAndSpec(false_positive_rate_CSF_AV45, true_positive_rate_CSF_AV45, thresholds_CSF_AV45, Config.figOutputPath+'CSF_AV45_SensSpec.out')
roc_auc_CSF_AV45 = auc(false_positive_rate_CSF_AV45, true_positive_rate_CSF_AV45)
print('CSF_FDG')
RUSRFC_CSF_FDG = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_CSF_FDG, classProb_CSF_FDG, featureImp_CSF_FDG, featureImpSD_CSF_FDG = RUSRFC_CSF_FDG.CVJungle(X_CSF_FDG, Y, shuffle=True, print_v=True)
cm_CSF_FDG = confusion_matrix(Y, predClasses_CSF_FDG)
print('Final Accuracy')
print(cm_CSF_FDG)
print(featureImp_CSF_FDG)
featureImpScale_CSF_FDG = [featureImp_CSF_FDG[csf_fdg_cols.index(i)] if i in csf_fdg_cols else 0 for i in all_list]
featureImpScaleSD_CSF_FDG = [featureImpSD_CSF_FDG[csf_fdg_cols.index(i)] if i in csf_fdg_cols else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance CSF & FDG')
plt.bar(range(len(all_list)), featureImpScale_CSF_FDG, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'CSF_FDG_FEATURE_IMP.png')
false_positive_rate_CSF_FDG, true_positive_rate_CSF_FDG, thresholds_CSF_FDG = roc_curve(Y, classProb_CSF_FDG[:, 1])
writeSensAndSpec(false_positive_rate_CSF_FDG, true_positive_rate_CSF_FDG, thresholds_CSF_FDG, Config.figOutputPath+'CSF_FDG_SensSpec.out')
roc_auc_CSF_FDG = auc(false_positive_rate_CSF_FDG, true_positive_rate_CSF_FDG)
print('ALL')
RUSRFC_ALL = RUSRandomForestClassifier.RUSRandomForestClassifier(n_Forests=200, n_TreesInForest=500)
predClasses_ALL, classProb_ALL, featureImp_ALL, featureImpSD_ALL = RUSRFC_ALL.CVJungle(X_ALL, Y, shuffle=True, print_v=True)
cm_ALL = confusion_matrix(Y, predClasses_ALL)
print('Final Accuracy')
print(cm_ALL)
print(featureImp_ALL)
featureImpScale_ALL = [featureImp_ALL[all_list.index(i)] if i in all_list else 0 for i in all_list]
featureImpScaleSD_ALL = [featureImpSD_ALL[all_list.index(i)] if i in all_list else 0 for i in all_list]
plt.figure()
plt.title('Feature Importance ALL VARS')
plt.bar(range(len(all_list)), featureImpScale_ALL, color='r', align='center', orientation='vertical')
plt.xticks(range(len(all_list)), [Config.xticks_dict[tick] for tick in all_list])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(Config.figOutputPath+'ALL_FEATURE_IMP.png')
false_positive_rate_ALL, true_positive_rate_ALL, thresholds_ALL = roc_curve(Y, classProb_ALL[:, 1])
writeSensAndSpec(false_positive_rate_ALL, true_positive_rate_ALL, thresholds_ALL, Config.figOutputPath+'ALL_SensSpec.out')
roc_auc_ALL = auc(false_positive_rate_ALL, true_positive_rate_ALL)
plt.figure()
plt.plot(false_positive_rate_CSF_ONLY, true_positive_rate_CSF_ONLY, 'g',
label='ROC curve {0} (area = {1:0.2f})'.format('CSF', roc_auc_CSF_ONLY))
plt.plot(false_positive_rate_AV45_ONLY, true_positive_rate_AV45_ONLY, 'b',
label='ROC curve {0} (area = {1:0.2f})'.format('AV45', roc_auc_AV45_ONLY))
plt.plot(false_positive_rate_FDG_ONLY, true_positive_rate_FDG_ONLY, 'c',
label='ROC curve {0} (area = {1:0.2f})'.format('FDG', roc_auc_FDG_ONLY))
plt.plot(false_positive_rate_CSF_AV45, true_positive_rate_CSF_AV45, 'm',
label='ROC curve {0} (area = {1:0.2f})'.format('CSF & AV45', roc_auc_CSF_AV45))
plt.plot(false_positive_rate_CSF_FDG, true_positive_rate_CSF_FDG, 'y',
label='ROC curve {0} (area = {1:0.2f})'.format('CSF & FDG', roc_auc_CSF_FDG))
plt.plot(false_positive_rate_ALL, true_positive_rate_ALL, 'r',
label='ROC curve {0} (area = {1:0.2f})'.format('ALL', roc_auc_ALL))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC for MCI Converters')
plt.legend(loc="lower right")
plt.tight_layout()
plt.savefig(Config.figOutputPath+'full.png')
|
{
"content_hash": "cff745e6a0e80ddd75b0d32c5c6dfb14",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 160,
"avg_line_length": 61.09782608695652,
"alnum_prop": 0.7312755737413271,
"repo_name": "sulantha2006/Conversion",
"id": "220b6e5b1ae4a768a7142529ad2e66b5e422bb5e",
"size": "11242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/RUSRandomForest/runClassification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "17618"
},
{
"name": "Python",
"bytes": "45849"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr
import mimetypes, json
from werkzeug.wrappers import Response
from frappe.website.context import get_context
from frappe.website.utils import scrub_relative_urls, get_home_page, can_cache, delete_page_cache
from frappe.website.permissions import clear_permissions
from frappe.website.router import clear_sitemap
class PageNotFoundError(Exception): pass
def render(path, http_status_code=None):
"""render html page"""
path = resolve_path(path.strip("/"))
try:
data = render_page(path)
except frappe.DoesNotExistError, e:
doctype, name = get_doctype_from_path(path)
if doctype and name:
path = "print"
frappe.local.form_dict.doctype = doctype
frappe.local.form_dict.name = name
elif doctype:
path = "list"
frappe.local.form_dict.type = doctype
else:
path = "404"
http_status_code = e.http_status_code
try:
data = render_page(path)
except frappe.PermissionError, e:
data, http_status_code = render_403(e, path)
except frappe.PermissionError, e:
data, http_status_code = render_403(e, path)
except Exception:
path = "error"
data = render_page(path)
http_status_code = 500
return build_response(path, data, http_status_code or 200)
def render_403(e, pathname):
path = "message"
frappe.local.message = """<p><strong>{error}</strong></p>
<p>
<a href="/login?redirect-to=/{pathname}" class="btn btn-primary>{login}</a>
</p>""".format(error=cstr(e), login=_("Login"), pathname=pathname)
frappe.local.message_title = _("Not Permitted")
return render_page(path), e.http_status_code
def get_doctype_from_path(path):
doctypes = frappe.db.sql_list("select name from tabDocType")
parts = path.split("/")
doctype = parts[0]
name = parts[1] if len(parts) > 1 else None
if doctype in doctypes:
return doctype, name
# try scrubbed
doctype = doctype.replace("_", " ").title()
if doctype in doctypes:
return doctype, name
return None, None
def build_response(path, data, http_status_code):
# build response
response = Response()
response.data = set_content_type(response, data, path)
response.status_code = http_status_code
response.headers[b"X-Page-Name"] = path.encode("utf-8")
response.headers[b"X-From-Cache"] = frappe.local.response.from_cache or False
return response
def render_page(path):
"""get page html"""
cache_key = ("page_context:{}" if is_ajax() else "page:{}").format(path)
out = None
# try memcache
if can_cache():
out = frappe.cache().get_value(cache_key)
if out and is_ajax():
out = out.get("data")
if out:
frappe.local.response.from_cache = True
return out
return build(path)
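	# For illustration (path made up): an AJAX request for "blog/my-post" is
	# looked up under "page_context:blog/my-post" and only its "data" payload
	# is returned, while a regular page view is looked up under
	# "page:blog/my-post" and returns the cached HTML.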
def build(path):
if not frappe.db:
frappe.connect()
build_method = (build_json if is_ajax() else build_page)
try:
return build_method(path)
except frappe.DoesNotExistError:
hooks = frappe.get_hooks()
if hooks.website_catch_all:
path = hooks.website_catch_all[0]
return build_method(path)
else:
raise
def build_json(path):
return get_context(path).data
def build_page(path):
context = get_context(path)
html = frappe.get_template(context.base_template_path).render(context)
html = scrub_relative_urls(html)
if can_cache(context.no_cache):
frappe.cache().set_value("page:" + path, html)
return html
def is_ajax():
return getattr(frappe.local, "is_ajax", False)
def resolve_path(path):
if not path:
path = "index"
if path.endswith('.html'):
path = path[:-5]
if path == "index":
path = get_home_page()
return path
def set_content_type(response, data, path):
if isinstance(data, dict):
response.headers[b"Content-Type"] = b"application/json; charset: utf-8"
data = json.dumps(data)
return data
response.headers[b"Content-Type"] = b"text/html; charset: utf-8"
if "." in path:
content_type, encoding = mimetypes.guess_type(path)
if not content_type:
raise frappe.UnsupportedMediaType("Cannot determine content type of {}".format(path))
response.headers[b"Content-Type"] = content_type.encode("utf-8")
return data
def clear_cache(path=None):
if path:
delete_page_cache(path)
else:
clear_sitemap()
frappe.clear_cache("Guest")
clear_permissions()
for method in frappe.get_hooks("website_clear_cache"):
frappe.get_attr(method)(path)
|
{
"content_hash": "b640c3debd173b89a0df766d72bc3568",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 97,
"avg_line_length": 24.685714285714287,
"alnum_prop": 0.705324074074074,
"repo_name": "gangadharkadam/johnfrappe",
"id": "17c9af9398d3ad4aa291ca18e204bef3d43fe7e6",
"size": "4424",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/website/render.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87199"
},
{
"name": "JavaScript",
"bytes": "1501561"
},
{
"name": "Python",
"bytes": "942367"
}
],
"symlink_target": ""
}
|
import array
import logging
import datetime
import time
from argparse import ArgumentParser
import os
import sys
import traceback
from ant.fs.manager import (
Application,
AntFSAuthenticationException,
AntFSTimeException,
AntFSDownloadException,
)
from ant.fs.manager import AntFSUploadException
from ant.fs.file import File
from . import utilities
from . import scripting
_logger = logging.getLogger()
_directories = {
".": File.Identifier.DEVICE,
"activities": File.Identifier.ACTIVITY,
"courses": File.Identifier.COURSE,
"waypoints": File.Identifier.WAYPOINTS,
"monitoring_b": File.Identifier.MONITORING_B,
# "profile": File.Identifier.?
# "goals?": File.Identifier.GOALS,
# "bloodprs": File.Identifier.BLOOD_PRESSURE,
# "summaries": File.Identifier.ACTIVITY_SUMMARY,
"settings": File.Identifier.SETTING,
"sports": File.Identifier.SPORT,
"totals": File.Identifier.TOTALS,
"weight": File.Identifier.WEIGHT,
"workouts": File.Identifier.WORKOUT,
}
_filetypes = dict((v, k) for (k, v) in _directories.items())
class Device:
class ProfileVersionException(Exception):
pass
_PROFILE_VERSION = 1
_PROFILE_VERSION_FILE = "profile_version"
def __init__(self, basedir, serial, name):
self._path = os.path.join(basedir, str(serial))
self._serial = serial
self._name = name
# Check profile version, if not a new device
if os.path.isdir(self._path):
if self.get_profile_version() < self._PROFILE_VERSION:
raise Device.ProfileVersionException(
"Profile version mismatch, too old"
)
elif self.get_profile_version() > self._PROFILE_VERSION:
raise Device.ProfileVersionException(
"Profile version mismatch, too new"
)
# Create directories
utilities.makedirs_if_not_exists(self._path)
for directory in _directories:
directory_path = os.path.join(self._path, directory)
utilities.makedirs_if_not_exists(directory_path)
# Write profile version (If none)
path = os.path.join(self._path, self._PROFILE_VERSION_FILE)
if not os.path.exists(path):
with open(path, "w") as f:
f.write(str(self._PROFILE_VERSION))
def get_path(self):
return self._path
def get_serial(self):
return self._serial
def get_name(self):
return self._name
def get_profile_version(self):
path = os.path.join(self._path, self._PROFILE_VERSION_FILE)
try:
with open(path, "rb") as f:
return int(f.read())
except IOError as e:
# TODO
return 0
def read_passkey(self):
try:
with open(os.path.join(self._path, "authfile"), "rb") as f:
d = array.array("B", f.read())
_logger.debug("loaded authfile: %r", d)
return d
except:
return None
def write_passkey(self, passkey):
with open(os.path.join(self._path, "authfile"), "wb") as f:
passkey.tofile(f)
_logger.debug("wrote authfile: %r, %r", self._serial, passkey)
class AntFSCLI(Application):
PRODUCT_NAME = "antfs-cli"
def __init__(self, config_dir, args):
Application.__init__(self)
self.config_dir = config_dir
# Set up scripting
scripts_dir = os.path.join(self.config_dir, "scripts")
utilities.makedirs_if_not_exists(scripts_dir)
self.scriptr = scripting.Runner(scripts_dir)
self._device = None
self._uploading = args.upload
self._pair = args.pair
self._skip_archived = args.skip_archived
def setup_channel(self, channel):
channel.set_period(4096)
channel.set_search_timeout(255)
channel.set_rf_freq(50)
channel.set_search_waveform([0x53, 0x00])
channel.set_id(0, 0x01, 0)
channel.open()
# channel.request_message(Message.ID.RESPONSE_CHANNEL_STATUS)
print("Searching...")
def on_link(self, beacon):
_logger.debug("on link, %r, %r", beacon.get_serial(), beacon.get_descriptor())
self.link()
return True
def on_authentication(self, beacon):
_logger.debug("on authentication")
serial, name = self.authentication_serial()
self._device = Device(self.config_dir, serial, name)
passkey = self._device.read_passkey()
print("Authenticating with", name, "(" + str(serial) + ")")
_logger.debug("serial %s, %r, %r", name, serial, passkey)
if passkey is not None and not self._pair:
try:
print(" - Passkey:", end=" ")
sys.stdout.flush()
self.authentication_passkey(passkey)
print("OK")
return True
except AntFSAuthenticationException as e:
print("FAILED")
return False
else:
try:
print(" - Pairing:", end=" ")
sys.stdout.flush()
passkey = self.authentication_pair(self.PRODUCT_NAME)
self._device.write_passkey(passkey)
print("OK")
return True
except AntFSAuthenticationException as e:
print("FAILED")
return False
def on_transport(self, beacon):
# Adjust time
print(" - Set time:", end=" ")
try:
result = self.set_time()
except (AntFSTimeException, AntFSDownloadException, AntFSUploadException) as e:
print("FAILED")
_logger.exception("Could not set time")
else:
print("OK")
directory = self.download_directory()
# directory.print_list()
# Map local filenames to FIT file types
local_files = []
for folder, filetype in _directories.items():
path = os.path.join(self._device.get_path(), folder)
for filename in os.listdir(path):
if os.path.splitext(filename)[1].lower() == ".fit":
local_files.append((filename, filetype))
# Map remote filenames to FIT file objects
remote_files = []
for fil in directory.get_files():
if fil.get_fit_sub_type() in _filetypes and fil.is_readable():
remote_files.append((self.get_filename(fil), fil))
# Calculate remote and local file diff
local_names = set(name for (name, filetype) in local_files)
remote_names = set(name for (name, fil) in remote_files)
downloading = [
fil
for name, fil in remote_files
if name not in local_names or not fil.is_archived()
]
uploading = [
(name, filetype)
for name, filetype in local_files
if name not in remote_names
]
# Remove archived files from the list
if self._skip_archived:
downloading = [fil for fil in downloading if not fil.is_archived()]
print("Downloading", len(downloading), "file(s)")
if self._uploading:
print(" and uploading", len(uploading), "file(s)")
# Download missing files:
for fileobject in downloading:
self.download_file(fileobject)
# Upload missing files:
if uploading and self._uploading:
# Upload
results = {}
for filename, typ in uploading:
index = self.upload_file(typ, filename)
results[index] = (filename, typ)
# Rename uploaded files locally
directory = self.download_directory()
for index, (filename, typ) in results.items():
try:
file_object = next(
f for f in directory.get_files() if f.get_index() == index
)
src = os.path.join(
self._device.get_path(), _filetypes[typ], filename
)
dst = self.get_filepath(file_object)
print(" - Renamed", src, "to", dst)
os.rename(src, dst)
except Exception as e:
print(" - Failed", index, filename, e)
def get_filename(self, fil):
return "{0}_{1}_{2}.fit".format(
fil.get_date().strftime("%Y-%m-%d_%H-%M-%S"),
fil.get_fit_sub_type(),
fil.get_fit_file_number(),
)
def get_filepath(self, fil):
return os.path.join(
self._device.get_path(),
_filetypes[fil.get_fit_sub_type()],
self.get_filename(fil),
)
def download_file(self, fil):
sys.stdout.write("Downloading {0}: ".format(self.get_filename(fil)))
sys.stdout.flush()
data = self.download(fil.get_index(), AntFSCLI._get_progress_callback())
with open(self.get_filepath(fil), "wb") as fd:
data.tofile(fd)
sys.stdout.write("\n")
sys.stdout.flush()
self.scriptr.run_download(self.get_filepath(fil), fil.get_fit_sub_type())
def upload_file(self, typ, filename):
sys.stdout.write("Uploading {0}: ".format(filename))
sys.stdout.flush()
with open(
os.path.join(self._device.get_path(), _filetypes[typ], filename), "rb"
) as fd:
data = array.array("B", fd.read())
index = self.create(typ, data, AntFSCLI._get_progress_callback())
sys.stdout.write("\n")
sys.stdout.flush()
return index
@staticmethod
def _get_progress_callback():
start_time = time.time()
def callback(new_progress):
s = "[{0:<30}]".format("." * int(new_progress * 30))
if new_progress == 0:
s += " started"
else:
delta = time.time() - start_time
eta = datetime.timedelta(seconds=int(delta / new_progress - delta))
s += " ETA: {0}".format(eta)
sys.stdout.write(s)
sys.stdout.flush()
sys.stdout.write("\b" * len(s))
return callback
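        # Worked example of the ETA arithmetic above (numbers illustrative):
        # if 25% of the transfer is done after 10 s, delta = 10 and
        # delta / new_progress - delta = 10 / 0.25 - 10 = 30, i.e. roughly
        # 30 s remain assuming a constant transfer rate.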
def main():
parser = ArgumentParser(
description="Extracts FIT files from ANT-FS based sport watches."
)
parser.add_argument("--upload", action="store_true", help="enable uploading")
parser.add_argument("--debug", action="store_true", help="enable debug")
parser.add_argument(
"--pair", action="store_true", help="force pairing even if already paired"
)
parser.add_argument(
"-a",
"--skip-archived",
action="store_true",
help="don't download files marked as 'archived' on the watch",
)
args = parser.parse_args()
# Set up config dir
config_dir = utilities.XDG(AntFSCLI.PRODUCT_NAME).get_config_dir()
logs_dir = os.path.join(config_dir, "logs")
utilities.makedirs_if_not_exists(config_dir)
utilities.makedirs_if_not_exists(logs_dir)
# Set up logging
_logger.setLevel(logging.DEBUG)
# If you add new module/logger name longer than the 16 characters
# just increase the value after %(name).
# The longest module/logger name now is "ant.easy.channel".
formatter = logging.Formatter(
fmt="%(threadName)-10s %(asctime)s %(name)-16s"
" %(levelname)-8s %(message)s (%(filename)s:%(lineno)d)"
)
log_filename = os.path.join(
logs_dir,
"{0}-{1}.log".format(time.strftime("%Y%m%d-%H%M%S"), AntFSCLI.PRODUCT_NAME),
)
handler = logging.FileHandler(log_filename, "w")
handler.setFormatter(formatter)
_logger.addHandler(handler)
if args.debug:
_logger.addHandler(logging.StreamHandler())
try:
g = AntFSCLI(config_dir, args)
try:
g.start()
finally:
g.stop()
except Device.ProfileVersionException as e:
print(
"\nError: %s\n\nThis means that %s found that your data directory "
"structure was too old or too new. The best option is "
"probably to let %s recreate your "
"folder by deleting your data folder, after backing it up, "
"and let all your files be redownloaded from your sports "
"watch." % (e, AntFSCLI.PRODUCT_NAME, AntFSCLI.PRODUCT_NAME)
)
except (Exception, KeyboardInterrupt) as e:
traceback.print_exc()
for line in traceback.format_exc().splitlines():
_logger.error("%r", line)
print("Interrupted:", str(e))
return 1
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "b4fbbde0f23fb0a2320db2cf16c5de99",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 87,
"avg_line_length": 33.56955380577428,
"alnum_prop": 0.5666927286942924,
"repo_name": "Tigge/antfs-cli",
"id": "d24d3e1ec5284826663d104b856a8e81e6994ead",
"size": "13935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antfs_cli/program.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33554"
}
],
"symlink_target": ""
}
|
"""Utility functions for Windows builds.
This file is copied to the build directory as part of toolchain setup and
is used to set up calls to tools used by the build that need wrappers.
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import stat
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
exit_code = WinTool().Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
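    # For example (purely illustrative): a link command whose arguments include
    # '/OUT:obj/chrome/chrome.dll', running as pid 1234, gets the endpoint
    # 'objchromechromedll_1234' (the re.sub above strips non-word characters),
    # so that instance of link.exe talks to its own mspdbsrv.exe rather than
    # the shared default endpoint.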
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
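    # Sketch of the expected on-disk format (contents illustrative): a file
    # containing 'PATH=C:\\tools\0INCLUDE=C:\\sdk\\include\0\0' parses to
    # {'PATH': 'C:\\tools', 'INCLUDE': 'C:\\sdk\\include'}; the final two NULs
    # are dropped by the [:-2] slice above.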
def ExecDeleteFile(self, path):
"""Simple file delete command."""
if os.path.exists(path):
os.unlink(path)
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, dummy_excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
# Try to diagnose crbug.com/741603
if not os.path.exists(dest):
raise Exception("Copying of %s to %s failed" % (source, dest))
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
if sys.platform == 'win32':
args = list(args) # *args is a tuple by default, which is read-only.
args[0] = args[0].replace('/', '\\')
# https://docs.python.org/2/library/subprocess.html:
# "On Unix with shell=True [...] if args is a sequence, the first item
# specifies the command string, and any additional items will be treated as
# additional arguments to the shell itself. That is to say, Popen does the
# equivalent of:
# Popen(['/bin/sh', '-c', args[0], args[1], ...])"
# For that reason, since going through the shell doesn't seem necessary on
# non-Windows don't do that there.
pe_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
pe_name = m.group('out')
link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Read output one line at a time as it shows up to avoid OOM failures when
# GBs of output is produced.
for line in link.stdout:
if (not line.startswith(b' Creating library ')
and not line.startswith(b'Generating code')
and not line.startswith(b'Finished generating code')):
print(line)
return link.wait()
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
if sys.platform == 'win32':
# Windows ARM64 uses clang-cl as assembler which has '/' as path
# separator, convert it to '\\' when running on Windows.
args = list(args) # *args is a tuple by default, which is read-only
args[0] = args[0].replace('/', '\\')
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.decode('utf8').splitlines():
if not line.startswith(' Assembling: '):
print(line)
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith(b'Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith(b'Copyright (C) Microsoft Corporation') and line):
print(line)
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dirname):
"""Runs an action command line from a response file using the environment
for |arch|. If |dirname| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after GN-time. http://crbug.com/333738.
for k, v in os.environ.items():
if k not in env:
env[k] = v
args = open(rspfile).read()
dirname = dirname[0] if dirname else None
return subprocess.call(args, shell=True, env=env, cwd=dirname)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "4c8f64b526eca03a7db04b089f35ce3a",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 81,
"avg_line_length": 38.22222222222222,
"alnum_prop": 0.6450719822812846,
"repo_name": "flutter/buildroot",
"id": "b4fc8485ffa17057ad70706a99865fc36d9127a4",
"size": "7391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/toolchain/win/tool_wrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "507"
},
{
"name": "C++",
"bytes": "30195"
},
{
"name": "Python",
"bytes": "291265"
},
{
"name": "Shell",
"bytes": "85178"
},
{
"name": "sed",
"bytes": "1677"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.enums",
marshal="google.ads.googleads.v12",
manifest={"SkAdNetworkUserTypeEnum",},
)
class SkAdNetworkUserTypeEnum(proto.Message):
r"""Container for enumeration of SkAdNetwork user types.
"""
class SkAdNetworkUserType(proto.Enum):
r"""Enumerates SkAdNetwork user types"""
UNSPECIFIED = 0
UNKNOWN = 1
UNAVAILABLE = 2
NEW_INSTALLER = 3
REINSTALLER = 4
__all__ = tuple(sorted(__protobuf__.manifest))
|
{
"content_hash": "1acd4d8a43a364aa883cb17509e99f88",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6496478873239436,
"repo_name": "googleads/google-ads-python",
"id": "e5e9549f820a24fb53bff58553b1ee3fdbfbb0f7",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/enums/types/sk_ad_network_user_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
"""
test_synonym_inducers
----------------------------------
Tests for `synonym_inducers` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from wikipediabase import synonym_inducers as si
class TestSynonymInducers(unittest.TestCase):
def setUp(self):
pass
def test_reduce(self):
self.assertEqual(si.string_reduce("An awesome \"thing\""),
"awesome thing")
def test_forward_redirect(self):
fr = si.ForwardRedirectInducer()
self.assertIn('barack obama', fr.induce("Barack Hussein Obama"))
def test_lexical(self):
fl = si.LexicalInducer()
self.assertIn('awesome', fl.induce('awesome (singer)'))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a7c79e22c3de7d3a1866c062ee7cb6a6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 22.63888888888889,
"alnum_prop": 0.6073619631901841,
"repo_name": "fakedrake/WikipediaBase",
"id": "a64f2fa40d2f2f6cb4d2c035c3e49c11791e8614",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_synonym_inducers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "321"
},
{
"name": "Python",
"bytes": "156664"
}
],
"symlink_target": ""
}
|
"""Model setup for photos and albums."""
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import python_2_unicode_compatible
from sorl.thumbnail import ImageField
from taggit.managers import TaggableManager
PUBLISHED_STATUS = (
('PB', 'public'),
('PV', 'private'),
('SH', 'shared'),
)
@python_2_unicode_compatible
class ImagerPhoto(models.Model):
"""Photo models for Django imager app."""
user = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name='photos')
photo = ImageField(upload_to='images')
published = models.CharField(
max_length=2,
choices=PUBLISHED_STATUS,
default='PV')
date_uploaded = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
date_published = models.DateTimeField(blank=True, null=True)
description = models.TextField()
title = models.CharField(default='', max_length=50)
tags = TaggableManager(blank=True)
def __str__(self):
"""Represent."""
return "{}".format(self.title)
@python_2_unicode_compatible
class ImagerAlbum(models.Model):
"""Album models for Django imager app."""
user = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name='albums')
title = models.CharField(default='', max_length=50)
photos = models.ManyToManyField(ImagerPhoto, blank=True,
default='', related_name='albums')
cover = models.ForeignKey(ImagerPhoto, blank=True, null=True,
related_name='+')
published = models.CharField(
max_length=2,
choices=PUBLISHED_STATUS,
default='PV')
date_uploaded = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
date_published = models.DateTimeField(blank=True, null=True)
description = models.TextField()
tags = TaggableManager(blank=True)
def __str__(self):
"""Represent."""
return "{}".format(self.title)
|
{
"content_hash": "8eb40e050eb71f9d58dc7664ff351a26",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 33.921875,
"alnum_prop": 0.6282818977429756,
"repo_name": "cahudson94/django-imager",
"id": "d3296dba2effbecf558058b054df74e018e42021",
"size": "2171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49712"
},
{
"name": "HTML",
"bytes": "21382"
},
{
"name": "JavaScript",
"bytes": "97134"
},
{
"name": "Python",
"bytes": "75478"
}
],
"symlink_target": ""
}
|
class bcolors:
""" bcolors: Facilitates printing colors on terminals with support for
escape sequences. It was borrowed from the following stackoverflow answer:
<http://stackoverflow.com/a/287944>
"""
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
        self.BLUE = ''
        self.GREEN = ''
        self.YELLOW = ''
        self.RED = ''
self.ENDC = ''
def get_term_size():
""" get_term_size: Returns a tuple of the host's terminal width and size
(in that order). This code should be platform independent and was borrowed
from the following stackoverflow answer:
<http://stackoverflow.com/a/566752>
"""
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def clear_terminal():
""" clear_terminal: Clears the terminal's window.
"""
import sys
sys.stdout.write( chr(27) + "[2J" )
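# Illustrative usage sketch, not part of the original module; it only exercises
# the helpers defined above and assumes an ANSI-capable terminal.
if __name__ == '__main__':
    width, height = get_term_size()
    print(bcolors.GREEN + ("terminal size: %dx%d" % (width, height)) +
          bcolors.ENDC)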
|
{
"content_hash": "2ce842db151167fc5f4a9f0c9df50b60",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 28.59259259259259,
"alnum_prop": 0.5621761658031088,
"repo_name": "yamadapc/life",
"id": "38f6bf6325da1c0fd10135d8ca7fdcc09d89dde7",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6374"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from enum import Enum
import re
from .exceptions import ScanError
_COMMENT_TEST_RX = re.compile(r'^\s*#.*$')
_CTX_WS_RX = re.compile(r'\S')
_INT_RX = re.compile(r'-?\d+')
_WS_RX = re.compile(r'\s+')
class Keywords(object):
css_start = '$'
accessor_start = '|'
attr_accessor_start = '['
attr_accessor_end = ']'
text_accessor = 'text'
own_text_accessor = 'own_text'
field_accessor_start = '.'
statement_end = ';'
params_start = ':'
continuation = ','
regexp_delimiter = '`'
class KeywordSets(object):
query_start = Keywords.css_start + Keywords.accessor_start + Keywords.regexp_delimiter
css_query_end = Keywords.accessor_start + Keywords.statement_end
directive_id_end = Keywords.params_start + Keywords.statement_end
param_end = ' ' + Keywords.statement_end + Keywords.continuation
TokenType = Enum('TokenType', ' '.join((
'Context',
'QueryStatement',
'QueryStatementEnd',
'CSSSelector',
'TerseRegexp',
'VerboseRegexp',
'AccessorSequence',
'IndexAccessor',
'TextAccessor',
'OwnTextAccessor',
'AttrAccessor',
'FieldAccessor',
'DirectiveStatement',
'DirectiveStatementEnd',
'DirectiveIdentifier',
'DirectiveBodyItem',
'InlineSubContext'
)))
class Token(namedtuple('Token', 'type_ content line line_num start end')):
__slots__ = ()
def __repr__(self):
return ('Token: {{\n'
' type_: {!r}\n'
' content: "{}"\n'
' line num: {}\n'
' start, end: {}, {}\n'
' line: "{}"\n'
'}}').format(self.type_, self.content, self.line_num, self.start, self.end, self.line)
class Scanner(object):
def __init__(self, fobj):
self.fobj = fobj
self.line_gen = None
self.line = None
self.line_num = 0
self.start = 0
self.pos = 0
@property
def _c(self):
return self.line[self.pos]
@property
def _eol(self):
return self.pos >= len(self.line)
@property
def _to_eol_content(self):
return self.line[self.pos:]
@property
def _tok_content(self):
return self.line[self.start:self.pos]
def scan(self):
while bool(self._next_line()):
if _COMMENT_TEST_RX.match(self.line):
continue
scan_fn = self._scan_context
while scan_fn:
scan_fn, tok = scan_fn()
if tok:
yield tok
def _next_line(self):
if not self.line_gen:
self.line_gen = iter(self.fobj)
while True:
try:
self.line = next(self.line_gen).rstrip()
except StopIteration:
self.line_gen = None
self.line = None
self.start = self.pos -1
return None
self.start = self.pos = 0
self.line_num += 1
if self.line:
return self.line
def _ignore(self):
self.start = self.pos
def _make_token(self, type_):
tok = Token(type_,
self.line[self.start:self.pos],
self.line,
self.line_num,
self.start,
self.pos)
self.start = self.pos
return tok
def _make_marker_token(self, type_):
"""Make a token that has no content"""
tok = Token(type_,
'',
self.line,
self.line_num,
self.start,
self.start)
return tok
def _accept(self, valid=None, alpha=False, num=False, alphanum=False):
if self._eol:
return False
ok = False
if valid and self._c in valid:
ok = True
elif alpha and self._c.isalpha():
ok = True
elif num and self._c.isdigit():
ok = True
elif alphanum and self._c.isalnum():
ok = True
elif not valid and not alpha and not num and not alphanum:
raise ScanError.make(self, 'Invalid _accept call, valid or preset not specified')
if ok:
self.pos += 1
return ok
def _accept_run(self, valid=None, alpha=False, num=False, alphanum=False):
count = 0
tests = []
if valid:
tests.append(lambda: self._c in valid)
if alpha:
tests.append(lambda: self._c.isalpha())
if num:
tests.append(lambda: self._c.isdigit())
if alphanum:
tests.append(lambda: self._c.isalnum())
if not tests:
raise ScanError.make(self, 'Invalid _accept call, valid or preset not specified')
while not self._eol and any(t() for t in tests):
count += 1
self.pos += 1
return count
def _accept_until(self, one_of):
count = 0
while not self._eol and self._c not in one_of:
count += 1
self.pos += 1
return count
def _consume(self, val):
if self._eol:
return False
next_pos = self.pos + len(val)
if self.line[self.pos:next_pos] == val:
self.pos = next_pos
return True
return False
def _consume_re(self, test_re):
if self._eol:
return False
m = test_re.match(self._to_eol_content)
if not m:
return False
self.pos += m.end()
return True
def _scan_context(self):
m = _CTX_WS_RX.search(self.line)
if not m:
raise ScanError.make(self, 'Invalid state: error parsing context')
self.pos = m.start()
tok = self._make_token(TokenType.Context)
return self._scan_statement, tok
def _scan_statement(self):
if self._c in KeywordSets.query_start:
tok = self._make_marker_token(TokenType.QueryStatement)
return self._scan_query, tok
else:
tok = self._make_marker_token(TokenType.DirectiveStatement)
return self._scan_directive, tok
def _scan_directive(self):
# a directive without any text is valid, considered an alias to "save",
# use ":" as the directive ID
if self._accept(Keywords.params_start):
tok = self._make_token(TokenType.DirectiveIdentifier)
return self._scan_directive_body, tok
elif self._accept_until(KeywordSets.directive_id_end) < 1:
raise ScanError.make(self, 'Invalid directive, 0 length')
tok = self._make_token(TokenType.DirectiveIdentifier)
if self._eol or self._c == Keywords.statement_end:
return self._end_directive, tok
elif self._accept(Keywords.params_start):
self._ignore()
return self._scan_directive_body, tok
else:
raise ScanError.make(self, 'Invalid directive identifier terminator: '
'%r. Either %r, %r or end of the line required.' %
(self._to_eol_content,
Keywords.params_start,
Keywords.statement_end))
def _scan_directive_body(self):
self._accept_run(' \t')
self._ignore()
if self._eol or self._c == Keywords.statement_end:
return self._end_directive()
if self._c == Keywords.continuation:
self._accept(Keywords.continuation)
self._accept_run(' \t')
self._ignore()
if self._eol and not self._next_line():
raise ScanError.make(self, 'Unexpected EOF, directive parameter expected.')
self._consume_re(_WS_RX)
if self._accept_until(KeywordSets.param_end) < 1:
raise ScanError.make(self, 'Invalid directive body item, 0 length')
tok = self._make_token(TokenType.DirectiveBodyItem)
return self._scan_directive_body, tok
def _end_directive(self):
tok = self._make_marker_token(TokenType.DirectiveStatementEnd)
if self._eol:
return None, tok
elif self._c == Keywords.statement_end:
return self._scan_inline_sub_ctx, tok
else:
raise ScanError.make(self, 'Invalid end of directive statement: %r' % self._to_eol_content)
def _scan_query(self):
if self._c == Keywords.css_start:
return self._scan_css_selector()
elif self._c == Keywords.accessor_start:
return self._scan_accessor_sequence()
elif self._c == Keywords.regexp_delimiter:
return self._scan_regexp()
else:
raise ScanError.make(self, 'Invalid query statement: %r' % self._to_eol_content)
def _end_query(self):
tok = self._make_marker_token(TokenType.QueryStatementEnd)
if self._eol:
return None, tok
elif self._c == Keywords.statement_end:
return self._scan_inline_sub_ctx, tok
else:
raise ScanError.make(self, 'Invalid end of query statement: %r' % self._to_eol_content)
def _scan_css_selector(self):
self._accept(Keywords.css_start)
self._accept_run(' \t')
self._ignore()
if self._accept_until(KeywordSets.css_query_end) < 1:
raise ScanError.make(self, 'Invalid CSS Selector: %r' % self._to_eol_content)
tok = self._make_token(TokenType.CSSSelector)
if self._eol or self._c == Keywords.statement_end:
return self._end_query, tok
elif self._c == Keywords.accessor_start:
return self._scan_accessor_sequence, tok
else:
raise ScanError.make(self, 'EOL or accessor sequence expected, instead found %r' % self._to_eol_content)
def _scan_accessor_sequence(self):
# create the marker token at the start of the sequence
tok = self._make_marker_token(TokenType.AccessorSequence)
# skip the initial marker character
self._accept(Keywords.accessor_start)
self._ignore()
return self._scan_accessor, tok
def _scan_accessor(self):
self._accept_run(' \t')
self._ignore()
if self._eol or self._c == Keywords.statement_end:
return self._end_query()
# attribute accessor
elif self._c == Keywords.attr_accessor_start:
self._accept_until(Keywords.attr_accessor_end)
if not self._accept(Keywords.attr_accessor_end):
raise ScanError.make(self, 'Invalid Attr Accessor: %r' % self._to_eol_content)
tok = self._make_token(TokenType.AttrAccessor)
return self._scan_accessor, tok
# text accessor
elif self._consume(Keywords.text_accessor):
tok = self._make_token(TokenType.TextAccessor)
return self._scan_accessor, tok
# own text accessor
elif self._consume(Keywords.own_text_accessor):
tok = self._make_token(TokenType.OwnTextAccessor)
return self._scan_accessor, tok
# field accessor, ex: `| .field_name`
elif self._consume(Keywords.field_accessor_start):
if self._accept_until(' \t') < 1:
                raise ScanError.make(self, 'Invalid field accessor, expected field '
'name instead found: %r' % self._to_eol_content)
tok = self._make_token(TokenType.FieldAccessor)
return self._scan_accessor, tok
else:
            # index accessor, test for a valid number, i.e. -?\d+
if not self._consume_re(_INT_RX):
raise ScanError.make(self, 'Expected an accessor, instead found: %r' % self._to_eol_content)
tok = self._make_token(TokenType.IndexAccessor)
return self._scan_accessor, tok
def _scan_regexp(self):
self._accept(Keywords.regexp_delimiter)
self._ignore()
if not self._accept(Keywords.regexp_delimiter):
return self._scan_terse_regexp()
else:
# is a "```" verbose regexp
if not self._accept(Keywords.regexp_delimiter):
raise ScanError.make(self, 'Invalid verbose regular expression,'
' found two "`" instead of three.')
self._ignore()
return self._scan_verbose_regexp()
def _scan_terse_regexp(self):
if self._accept_until(Keywords.regexp_delimiter) < 1:
raise ScanError.make(self, 'Invalid regexp literal: %r' %
self._to_eol_content)
if self._eol:
raise ScanError.make(self, 'Unexpected EOL, closing regexp literal '
'character ("`") expected.')
tok = self._make_token(TokenType.TerseRegexp)
self._accept(Keywords.regexp_delimiter)
self._accept_run(' \t')
self._ignore()
if self._eol or self._c == Keywords.statement_end:
return self._end_query, tok
else:
raise ScanError.make(self, 'Invalid end of regexp literal: %r' %
self._to_eol_content)
def _scan_verbose_regexp(self):
rx_parts = []
total_len = 0
done = False
        # while still searching for the closing ```
while True:
# for the entire line, search for ` then check for ```
while not self._eol:
total_len += self._accept_until(Keywords.regexp_delimiter)
if self._to_eol_content.startswith('```'):
# found it, so set to exit outer loop then exit inner loop
done = True
break
else:
# is actually just a "`" char in the regexp literal
self._accept('`')
rx_parts.append(self._tok_content)
if done:
break
else:
if not self._next_line():
raise ScanError.make(self, 'Unexpected EOF, closing regexp '
'literal sequence ("```") expected.')
# found the closing ```, but was there content between them?
if not total_len:
raise ScanError.make(self, 'Invalid regexp literal: %r' %
self._to_eol_content)
tok = Token(TokenType.VerboseRegexp,
'\n'.join(rx_parts),
self.line,
self.line_num,
-len(''.join(rx_parts)),
self.pos)
self.start = self.pos
self._accept('`')
self._accept('`')
self._accept('`')
self._accept_run(' \t')
self._ignore()
if self._eol or self._c == Keywords.statement_end:
return self._end_query, tok
else:
raise ScanError.make(self, 'Invalid end of regexp literal: %r' %
self._to_eol_content)
def _scan_inline_sub_ctx(self):
self._accept_run(Keywords.statement_end)
tok = self._make_token(TokenType.InlineSubContext)
self._accept_run(' \t')
self._ignore()
return self._scan_statement, tok
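# Illustrative usage sketch, not part of the original module: the scanner is
# driven from any file-like object and yields Token namedtuples. The sample
# source text below is hypothetical and only meant to show the calling
# convention, not the full DSL.
if __name__ == '__main__':
    import io

    sample = u'$ h1 | text\n    save: title\n'
    for tok in Scanner(io.StringIO(sample)).scan():
        print('%s %r' % (tok.type_, tok.content))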
|
{
"content_hash": "badae6e2ae5a4553e87ec256ab7e6722",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 116,
"avg_line_length": 36.27764705882353,
"alnum_prop": 0.5407964716565054,
"repo_name": "tiffon/take",
"id": "297d70bb6d9cf22a744d14345169a3c88185bee8",
"size": "15418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "take/scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "952"
},
{
"name": "Python",
"bytes": "82639"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
class SlotPickleMixin(object):
    # SlotPickleMixin is originally from:
# http://code.activestate.com/recipes/578433-mixin-for-pickling-objects-with-__slots__/
# Copyright (c) 2013 Created by Oren Tirosh
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
def __getstate__(self):
return {slot: getattr(self, slot) for slot in self.__slots__ if hasattr(self, slot)}
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
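# Illustrative usage sketch, not part of the original module: the mixin builds
# __getstate__/__setstate__ from __slots__ rather than __dict__, so slot-based
# attributes survive a pickle round trip. The Point class below is hypothetical.
if __name__ == '__main__':
    import pickle

    class Point(SlotPickleMixin):
        __slots__ = ('x', 'y')

        def __init__(self, x, y):
            self.x = x
            self.y = y

    point = pickle.loads(pickle.dumps(Point(1, 2)))
    print(point.x, point.y)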
|
{
"content_hash": "4d0f9cbc0e5096512360b091a9fc0d42",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 51.18181818181818,
"alnum_prop": 0.6897572528123149,
"repo_name": "marcreyesph/scancode-toolkit",
"id": "703e50e94efb30d3ce55971e6a7a974852ca0bd2",
"size": "3047",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "src/commoncode/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "492"
},
{
"name": "Ada",
"bytes": "1251"
},
{
"name": "AppleScript",
"bytes": "168"
},
{
"name": "Assembly",
"bytes": "35437"
},
{
"name": "Awk",
"bytes": "248"
},
{
"name": "Batchfile",
"bytes": "5336"
},
{
"name": "C",
"bytes": "1679475"
},
{
"name": "C#",
"bytes": "5901"
},
{
"name": "C++",
"bytes": "357750"
},
{
"name": "CMake",
"bytes": "142"
},
{
"name": "CSS",
"bytes": "4684"
},
{
"name": "GAP",
"bytes": "579"
},
{
"name": "HTML",
"bytes": "3002101"
},
{
"name": "Inno Setup",
"bytes": "235"
},
{
"name": "Java",
"bytes": "169604"
},
{
"name": "JavaScript",
"bytes": "32733"
},
{
"name": "M4",
"bytes": "45516"
},
{
"name": "Makefile",
"bytes": "19150"
},
{
"name": "Matlab",
"bytes": "148"
},
{
"name": "Objective-C",
"bytes": "203172"
},
{
"name": "Objective-C++",
"bytes": "950"
},
{
"name": "PHP",
"bytes": "621154"
},
{
"name": "Perl",
"bytes": "279362"
},
{
"name": "PostScript",
"bytes": "562"
},
{
"name": "Protocol Buffer",
"bytes": "374"
},
{
"name": "Python",
"bytes": "3458894"
},
{
"name": "Ragel",
"bytes": "27606"
},
{
"name": "Roff",
"bytes": "209319"
},
{
"name": "Ruby",
"bytes": "164946"
},
{
"name": "Scala",
"bytes": "4500"
},
{
"name": "Shell",
"bytes": "1592320"
},
{
"name": "Smalltalk",
"bytes": "603"
},
{
"name": "TeX",
"bytes": "3126"
},
{
"name": "Vim script",
"bytes": "1129"
},
{
"name": "Visual Basic",
"bytes": "23"
},
{
"name": "XSLT",
"bytes": "474"
},
{
"name": "Yacc",
"bytes": "1497"
}
],
"symlink_target": ""
}
|
import types
import tempfile
from .utils import needs_uge
from .utils import generate_random_string
from .utils import create_config_file
from .utils import load_values
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
create_config_file()
API = QconfApi()
PROJECT_NAME = '%s' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
VALUES_DICT = load_values('test_values.json')
print(VALUES_DICT)
@needs_uge
def test_object_not_found():
try:
project = API.get_prj('__non_existent_project__')
assert (False)
except ObjectNotFound as ex:
# ok
pass
def test_generate_prj():
project = API.generate_prj(PROJECT_NAME)
assert (project.data['name'] == PROJECT_NAME)
def test_add_prj():
try:
project_list = API.list_prjs()
except ObjectNotFound as ex:
# no projects defined
project_list = []
project = API.add_prj(name=PROJECT_NAME)
assert (project.data['name'] == PROJECT_NAME)
project_list2 = API.list_prjs()
assert (len(project_list2) == len(project_list) + 1)
assert (project_list2.count(PROJECT_NAME) == 1)
def test_list_prjs():
project_list = API.list_prjs()
assert (project_list is not None)
def test_object_already_exists():
try:
project = API.add_prj(name=PROJECT_NAME)
assert (False)
except ObjectAlreadyExists as ex:
# ok
pass
def test_get_prj():
project = API.get_prj(PROJECT_NAME)
assert (project.data['name'] == PROJECT_NAME)
def test_generate_prj_from_json():
prj = API.get_prj(PROJECT_NAME)
json = prj.to_json()
prj2 = API.generate_object(json)
assert (prj2.__class__.__name__ == prj.__class__.__name__)
for key in list(prj.data.keys()):
v = prj.data[key]
v2 = prj2.data[key]
if type(v) == list:
assert (len(v) == len(v2))
for s in v:
assert (v2.count(s) == 1)
elif type(v) == dict:
for key in list(v.keys()):
assert (str(v[key]) == str(v2[key]))
else:
assert (str(v) == str(v2))
def test_modify_prj():
project = API.get_prj(PROJECT_NAME)
oticket = project.data['oticket']
project = API.modify_prj(name=PROJECT_NAME, data={'oticket': oticket + 1})
oticket2 = project.data['oticket']
assert (oticket2 == oticket + 1)
def test_delete_prj():
project_list = API.list_prjs()
API.delete_prj(PROJECT_NAME)
try:
project_list2 = API.list_prjs()
except ObjectNotFound as ex:
# no projects defined
project_list2 = []
assert (len(project_list2) == len(project_list) - 1)
assert (project_list2.count(PROJECT_NAME) == 0)
def test_get_prjs():
prjl = API.list_prjs()
print("Project Names: " + prjl)
prjs = API.get_prjs()
print("Projects: " + str(prjs))
for prj in prjs:
print("#############################################")
print(prj.to_uge())
assert (prj.data['name'] in prjl)
def test_write_prjs():
try:
tdir = tempfile.mkdtemp()
print("*************************** " + tdir)
prjs = API.get_prjs()
for prj in prjs:
print("Before #############################################")
print(prj.to_uge())
new_prjs = []
prj_names = VALUES_DICT['prj_names']
for name in prj_names:
nprj = API.generate_prj(name=name)
new_prjs.append(nprj)
API.mk_prjs_dir(tdir)
API.write_prjs(new_prjs, tdir)
API.add_prjs_from_dir(tdir)
API.modify_prjs_from_dir(tdir)
prjs = API.get_prjs()
for prj in prjs:
print("After #############################################")
print(prj.to_uge())
prjs = API.list_prjs()
for name in prj_names:
assert (name in prjs)
print("project found: " + name)
finally:
API.delete_prjs(prj_names)
API.rm_prjs_dir(tdir)
def test_add_prjs():
try:
new_prjs = []
prj_names = VALUES_DICT['prj_names']
for name in prj_names:
nprj = API.generate_prj(name=name)
new_prjs.append(nprj)
# print all projects currently in the cluster
prjs = API.get_prjs()
for prj in prjs:
print("Before #############################################")
print(prj.to_uge())
# add projects
API.add_prjs(new_prjs)
API.modify_prjs(new_prjs)
# print all projects currently in the cluster
prjs = API.get_prjs()
for prj in prjs:
print("After #############################################")
print(prj.to_uge())
# check that projects have been added
prjs = API.list_prjs()
for name in prj_names:
assert (name in prjs)
print("project found: " + name)
finally:
API.delete_prjs(prj_names)
def test_modify_prjs():
try:
add_projects = []
prj_names = VALUES_DICT['prj_names']
# prj_names = ['tp1', 'tp2']
for name in prj_names:
nprj = API.generate_prj(name=name)
add_projects.append(nprj)
# print all projects currently in the cluster
print("Before #############################################")
prjs = API.get_prjs()
for prj in prjs:
print(prj.to_uge())
# add projects
API.add_prjs(add_projects)
# modify added projects
print("Before modify #############################################")
prjs = API.get_prjs()
for prj in prjs:
if prj.data['name'] in prj_names:
prj.data['oticket'] += 1
else:
print("project not found: " + prj.data['name'])
print(prj.to_uge())
API.modify_prjs(prjs)
# check that projects have been changed
print("After #############################################")
prjs = API.get_prjs()
for p in prjs:
print(p.to_uge())
if p.data['name'] in prj_names:
print("project found: " + p.data['name'] + " with oticket=" + str(p.data['oticket']))
# assert(p.data['oticket'] == 1)
finally:
API.delete_prjs(prj_names)
|
{
"content_hash": "75c2a2f7c1fa8be1a92619b1f7c95b08",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 101,
"avg_line_length": 29.18141592920354,
"alnum_prop": 0.534950720242608,
"repo_name": "gridengine/config-api",
"id": "560dfa2f92ada2d0011b9c77477b4709a600b368",
"size": "7437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7063"
},
{
"name": "Python",
"bytes": "660511"
}
],
"symlink_target": ""
}
|
"""
Hello world example client.
@see: U{HelloWorld<http://pyamf.org/wiki/HelloWorld>} wiki page.
@since: 0.1.0
"""
from pyamf.remoting.client import RemotingService
gateway = RemotingService('http://demo.pyamf.org/gateway/helloworld')
echo_service = gateway.getService('echo.echo')
print echo_service('Hello world!')
|
{
"content_hash": "ebc32a9cccbc106ac540243281195ecc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 69,
"avg_line_length": 21.533333333333335,
"alnum_prop": 0.7430340557275542,
"repo_name": "cardmagic/PyAMF",
"id": "9750954bbecefdae007018a58812d3dbc575876d",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/tutorials/examples/general/helloworld/python/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "87097"
},
{
"name": "C",
"bytes": "635399"
},
{
"name": "Java",
"bytes": "374"
},
{
"name": "Python",
"bytes": "955083"
}
],
"symlink_target": ""
}
|