text stringlengths 4 1.02M | meta dict |
|---|---|
import struct
from .errors import *
#
# Protocol-related constants
#
# TCP port the Moira server listens on.
MOIRA_PORT = 775
# Protocol version expected in (and written into) every packet header.
MOIRA_PROTOCOL_VERSION = 2
# Pre-built challenge/response packets used during connection setup.
MOIRA_PROTOCOL_CHALLENGE = "\0\0\0\066\0\0\0\004\001\001\001\001server_id\0parms\0host\0user\0\0\0\0\001\0\0\0\0\001\0\0\0\0\001\0\0\0\0\001\0" # You don't want to know why those two strings look the way they look
MOIRA_PROTOCOL_RESPONSE = "\0\0\0\061\0\0\0\003\0\001\001disposition\0server_id\0parms\0\0\0\0\001\0\0\0\001\0\0\0\0\001\0" # You really don't. Not convinced? See mr_connect.c
# Identifier this client reports to the server.
MOIRA_CLIENT_IDSTRING = "PyMoira"
MOIRA_KERBEROS_SERVICE_NAME = "moira"
MOIRA_QUERY_VERSION = 14
MOIRA_MAX_LIST_DEPTH = 3072 # server/qsupport.pc, line 206
#
# Utility functions
#
def _fmt_u32(n):
return struct.pack("!I", n)
def _read_u32(s):
r, = struct.unpack("!I", s[0:4])
return r
#
# The following object represents a packet in Moira dialogue.
#
# Moira packet looks in the following way:
# 0) All numbers are in the network byte order
# 1) Packet header (16 bytes)
# - Message length, including header (4 bytes)
# - Version (4 bytes), have to be equal MOIRA_PROTOCOL_VERSION
# - Opcode (client) / status (server) (4 bytes)
# In general, will be one of the constants. However, it may also
# be a negative Kerberos error code or something else.
# - Amount of fields (4 bytes)
# 2) Fields, each has the following form:
# - Length, *without* padding (4 bytes)
# - Value, padded with zeroes to four-byte boundary. It is supposed to be
# zero-terminated string, and I hope it is so.
#
class Packet(object):
    """Represents a basic Moira packet, sent either way (from client to
    server or from server to client).

    Wire format (see the comment block above): a 16-byte header (total
    length, protocol version, opcode/status, field count) followed by
    length-prefixed, zero-terminated string fields, each padded with
    zeroes to a four-byte boundary.
    """

    # Opcode (client -> server) or status code (server -> client)
    opcode = None
    # Tuple of string fields carried by the packet
    data = ()
    # Raw wire representation; set by build() or parse()
    raw = None

    def build(self):
        """Constructs a binary packet which may be sent to Moira server."""
        # First construct the body: each field is its length including
        # the terminating NUL but *excluding* padding, then the value
        # padded with zeroes to a four-byte boundary.
        body = ""
        for item in self.data:
            item += "\0"
            lenstr = _fmt_u32( len(item) )
            while len(item) % 4 != 0:
                item += "\0"
            body += lenstr
            body += item

        # Now that we know the length of the body, construct header
        header = struct.pack("!IIiI",
            16 + len(body),          # Total length
            MOIRA_PROTOCOL_VERSION,  # Protocol version
            self.opcode,             # Operation
            len(self.data)           # Field count
        )

        self.raw = header + body
        return self.raw

    def parse(self, orig):
        """Parses the packet from the network.

        Raises ConnectionError if the packet is malformed or carries an
        unexpected protocol version.
        """
        # Separate header and body
        length, version, status, argc = struct.unpack("!IIiI", orig[:16])
        body = orig[16:]

        # Sanity checks for the header
        if length % 4 != 0:
            raise ConnectionError("Malformed Moira package: the length is not a multiple of four")
        # Compare against the shared constant instead of a magic number,
        # matching build() above.
        if version != MOIRA_PROTOCOL_VERSION:
            raise ConnectionError("Moira protocol version mismatch")
        # argc is parsed as unsigned, hence argc is always >= 0

        # Read fields and truncate the body as we read
        fields = []
        for i in range(0, argc):
            if len(body) < 4:
                # Was previously mis-reported as a protocol version mismatch
                raise ConnectionError("Malformed Moira package: field header is truncated")
            field_len = _read_u32(body)
            body = body[4:]
            # Values are padded with zeroes to a four-byte boundary;
            # field_len itself excludes the padding.
            if field_len % 4 == 0:
                actual_len = field_len
            else:
                actual_len = field_len + (4 - field_len % 4)
            # Bounds check uses the padded length so an over-running
            # value is rejected rather than silently truncated.
            if actual_len > len(body):
                raise ConnectionError("Malformed Moira package: field value is truncated")
            field = body[:actual_len].rstrip("\0")
            body = body[actual_len:]
            fields.append(field)

        if len(body) > 0:
            raise ConnectionError("Moira has sent package with out-of-field information")

        self.raw_len = length
        self.opcode = status
        self.data = tuple(fields)
        self.raw = orig
| {
"content_hash": "19a9df43afb3a012498815c8dc9e45b8",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 214,
"avg_line_length": 33.19834710743802,
"alnum_prop": 0.5767986059248195,
"repo_name": "vasilvv/pymoira",
"id": "f116277a7cc67c65a554b1e69a1550b1c0691caa",
"size": "4146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymoira/protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47084"
}
],
"symlink_target": ""
} |
def extractEnkirostranslationsHomeBlog(item):
    '''
    Parser for 'enkirostranslations.home.blog'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip posts with no chapter/volume information and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (feed tag, canonical series name, translation type)
    series_map = (
        ('arafoo kenja', 'Arafoo Kenja no Isekai Seikatsu Nikki', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, translation_type in series_map:
        if tag not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, series_name, vol, chp,
                                           frag=frag, postfix=postfix,
                                           tl_type=translation_type)
    # No known tag matched.
    return False
"content_hash": "ff50996e96d47e0d7cdcc35b9171e233",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.624813153961136,
"repo_name": "fake-name/ReadableWebProxy",
"id": "da1aea4ac72b55e3e07b4cff29a670873b7894a3",
"size": "669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractEnkirostranslationsHomeBlog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import filer.fields.image
import filer.fields.file
class Migration(migrations.Migration):
    """Adds the auto-generated 'mini_citation' field to Publication and
    refreshes help text / related names on its filer-backed file and
    image fields."""

    dependencies = [
        ('cms_lab_publications', '0001_initial'),
    ]

    operations = [
        # Order publication sets alphabetically by name.
        migrations.AlterModelOptions(
            name='publicationset',
            options={'ordering': ('name',)},
        ),
        # Short citation string, filled in automatically on PubMed query;
        # recommended as the base name for uploaded files.
        migrations.AddField(
            model_name='publication',
            name='mini_citation',
            field=models.CharField(blank=True, max_length=255, help_text='<strong>This field is auto-generated when a PubMed query is made.</strong><br>It is recommended to use this text when adding custom names for uploaded files. See examples below.', verbose_name='mini citation'),
            preserve_default=True,
        ),
        # Help-text / related-name updates on the existing filer fields.
        migrations.AlterField(
            model_name='publication',
            name='image',
            field=filer.fields.image.FilerImageField(related_name='cms_lab_publications_publication_image', help_text="Upload/select a representative image or figure for this publication.<br>Recommended naming format: '[mini citation] - Figure X'.", to='filer.Image', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='publication',
            name='pdf',
            field=filer.fields.file.FilerFileField(related_name='cms_lab_publications_publication_pdf', help_text="Upload/select a PDF for this publication.<br>Recommended naming format: '[mini citation]'.", to='filer.File', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='publication',
            name='supplemental_pdf',
            field=filer.fields.file.FilerFileField(related_name='cms_lab_publications_publication_supplemental_pdf', help_text="Upload/select a supplemental PDF for this publication.<br>Recommended naming format: '[mini citation] - Supplement'.", to='filer.File', blank=True, null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "453deb7cfb7496ee6ea6fd804c8c5781",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 291,
"avg_line_length": 48.83720930232558,
"alnum_prop": 0.6552380952380953,
"repo_name": "mfcovington/djangocms-lab-publications",
"id": "d02cbc38be8990dc0f0d20036654c5dd100e1cf4",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms_lab_publications/migrations/0002_auto_20150527_1148.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1826"
},
{
"name": "HTML",
"bytes": "7642"
},
{
"name": "JavaScript",
"bytes": "4293"
},
{
"name": "Python",
"bytes": "34629"
}
],
"symlink_target": ""
} |
import os
import sys
import copy
import time
import tarfile
import base64
sys.path.append('../../')
from externals.simple_oss import SimpleOss
from batchcompute import (
Client, JobDescription, TaskDag, TaskDescription, ResourceDescription,
)
import config as cfg
def upload_worker(bucket, local_dir, oss_path):
    '''
    A function to help upload worker package to oss.

    Tars the contents of `local_dir` into 'worker.tar.gz' and uploads
    the archive to `oss_path` in the given OSS `bucket`.
    '''
    oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
    local_tarfile = 'worker.tar.gz'

    # Remove any stale archive left over from a previous run.
    if os.path.exists(local_tarfile):
        os.remove(local_tarfile)

    def do_tar(worker_dir, tar_file):
        '''
        A function to tar worker package.
        '''
        # Capture cwd and open the archive *before* chdir so the archive
        # path stays relative to the caller's directory.
        cwd = os.getcwd()
        tar = tarfile.open(tar_file, 'w:gz')
        try:
            # chdir so archive member names are relative ('./...').
            os.chdir(worker_dir)
            for root, dirs, files in os.walk('.'):
                for name in files:
                    tar.add(os.path.join(root, name))
        finally:
            # Restore the working directory and close the archive even
            # if tarring fails part-way (previously both were leaked on
            # error).
            os.chdir(cwd)
            tar.close()

    do_tar(local_dir, local_tarfile)
    oss_clnt.upload(bucket, local_tarfile, oss_path)
def get_job_desc(package_path, verbose=True):
    """Assemble the JobDescription for the find-prime job.

    `package_path` is the OSS location of the worker package; `verbose`
    is accepted for interface compatibility and currently unused.
    """
    desc = JobDescription()
    task = TaskDescription()

    # Describe the single worker task that finds the primes.
    task.PackageUri = package_path
    task.ProgramArguments = ''
    task.ProgramName = 'find_prime_worker.py'
    task.ProgramType = 'python'
    task.ImageId = cfg.IMAGE_ID
    task.InstanceCount = 1
    task.EnvironmentVariables = {}
    task.StdoutRedirectPath = cfg.LOG_PATH
    task.StderrRedirectPath = cfg.LOG_PATH

    # Single-node DAG containing only the 'Find' task.
    dag = TaskDag()
    dag.add_task(task_name='Find', task=task)

    # Wire the DAG into the job description.
    desc.TaskDag = dag
    desc.JobName = 'find-prime'
    desc.Priority = 1
    return desc
def main():
    """Upload the worker package, run the find-prime job and print its output."""
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_json = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_json)

    t = 10
    print('Sleep %s second, please wait.'%t)
    time.sleep(t)

    # Poll until the job leaves the Waiting/Running states.
    while True:
        s = clnt.get_job(job)
        if s.State in ['Waiting', 'Running']:
            print('Job %s is now %s'%(job, s.State))
            time.sleep(3)
            continue
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s'%(job, s.State))
            if s.State == 'Finished':
                oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
                result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.OUTPUT_PATH)
                # Print out all prime numbers from 0 to 10000.
                # Was a Python 2 `print` statement; parenthesized form
                # behaves identically on Python 2 (single argument) and
                # lets the module at least parse under Python 3.
                print(result.splitlines())
            break

    clnt.delete_job(job)

if __name__ == '__main__':
    main()
| {
"content_hash": "79579e65c9e0eb93debd1c9b3aab852e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 29.1875,
"alnum_prop": 0.6102783725910065,
"repo_name": "luzhijun/Optimization",
"id": "e0bf678e83d8531ee8061d21eb3b2790dd55bf3a",
"size": "2802",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "cma-es/batchcompute_python_sdk/examples/find_prime.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11592"
},
{
"name": "C++",
"bytes": "41273"
},
{
"name": "CSS",
"bytes": "8912"
},
{
"name": "HTML",
"bytes": "845295"
},
{
"name": "JavaScript",
"bytes": "185036"
},
{
"name": "Jupyter Notebook",
"bytes": "1680887"
},
{
"name": "Makefile",
"bytes": "166"
},
{
"name": "Matlab",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "1912745"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ContainersOperations(object):
"""ContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2021_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Store the pipeline client, configuration and (de)serializers."""
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
def list_by_storage_account(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ContainerList"]
    """Lists all the containers of a storage Account in a Data Box Edge/Data Box Gateway device.

    Lists all the containers of a storage Account in a Data Box Edge/Data Box Gateway device.

    :param device_name: The device name.
    :type device_name: str
    :param storage_account_name: The storage Account name.
    :type storage_account_name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ContainerList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2021_02_01_preview.models.ContainerList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContainerList"]
    # Map auth/not-found/conflict status codes to typed exceptions;
    # callers may extend the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: build the URL from the operation metadata template.
            url = self.list_by_storage_account.metadata['url']  # type: ignore
            path_format_arguments = {
                'deviceName': self._serialize.url("device_name", device_name, 'str'),
                'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service hands back a fully-formed next link.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (next link, iterator of items).
        deserialized = self._deserialize('ContainerList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    # Lazily pages through the results as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list_by_storage_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers'}  # type: ignore
def get(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Container"
    """Gets a container by name.

    Gets a container by name.

    :param device_name: The device name.
    :type device_name: str
    :param storage_account_name: The Storage Account Name.
    :type storage_account_name: str
    :param container_name: The container Name.
    :type container_name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Container, or the result of cls(response)
    :rtype: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.Container
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Container"]
    # Map auth/not-found/conflict status codes to typed exceptions;
    # callers may extend the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01-preview"
    accept = "application/json"

    # Construct URL from the operation metadata template.
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Container', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}'}  # type: ignore
def _create_or_update_initial(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    container,  # type: "_models.Container"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.Container"]
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the deserialized Container when the service answers 200, or
    None on 202 (operation accepted, still in progress).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Container"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation metadata template.
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the Container model as the PUT request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(container, 'Container')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    # 202 Accepted carries no resource body; only 200 returns one.
    if response.status_code == 200:
        deserialized = self._deserialize('Container', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}'}  # type: ignore
def begin_create_or_update(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    container,  # type: "_models.Container"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Container"]
    """Creates a new container or updates an existing container on the device.

    Creates a new container or updates an existing container on the device.

    :param device_name: The device name.
    :type device_name: str
    :param storage_account_name: The Storage Account Name.
    :type storage_account_name: str
    :param container_name: The container name.
    :type container_name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param container: The container properties.
    :type container: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.Container
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Container or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2021_02_01_preview.models.Container]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Container"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only send the initial PUT when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            device_name=device_name,
            storage_account_name=storage_account_name,
            container_name=container_name,
            resource_group_name=resource_group_name,
            container=container,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into a Container.
        deserialized = self._deserialize('Container', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }

    # Choose the polling strategy: default ARM polling, none, or user-supplied.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}'}  # type: ignore
def _delete_initial(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE of the delete long-running operation.

    Success is 202 (accepted) or 204 (already gone); no body is returned.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01-preview"
    accept = "application/json"

    # Construct URL from the operation metadata template.
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}'}  # type: ignore
def begin_delete(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the container on the Data Box Edge/Data Box Gateway device.

    :param device_name: The device name.
    :type device_name: str
    :param storage_account_name: The Storage Account Name.
    :type storage_account_name: str
    :param container_name: The container name.
    :type container_name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only send the initial DELETE when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._delete_initial(
            device_name=device_name,
            storage_account_name=storage_account_name,
            container_name=container_name,
            resource_group_name=resource_group_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the optional callback.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }

    # Choose the polling strategy: default ARM polling, none, or user-supplied.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}'}  # type: ignore
def _refresh_initial(
    self,
    device_name,  # type: str
    storage_account_name,  # type: str
    container_name,  # type: str
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial POST of the refresh long-running operation.

    Success is 200 or 202; no body is returned.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01-preview"
    accept = "application/json"

    # Construct URL from the operation metadata template (note the
    # trailing '/refresh' action segment).
    url = self._refresh_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
        'containerName': self._serialize.url("container_name", container_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_refresh_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}/refresh'}  # type: ignore
    def begin_refresh(
        self,
        device_name,  # type: str
        storage_account_name,  # type: str
        container_name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Refreshes the container metadata with the data from the cloud.

        Starts a long-running refresh operation on the service and returns a
        poller that tracks it to completion.

        :param device_name: The device name.
        :type device_name: str
        :param storage_account_name: The Storage Account Name.
        :type storage_account_name: str
        :param container_name: The container name.
        :type container_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved
        # continuation token; `cls` is overridden so the raw pipeline
        # response is preserved for the poller.
        if cont_token is None:
            raw_result = self._refresh_initial(
                device_name=device_name,
                storage_account_name=storage_account_name,
                container_name=container_name,
                resource_group_name=resource_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only apply to the initial request, not to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # The operation has no response body; only a custom `cls`
            # produces a non-None result.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'containerName': self._serialize.url("container_name", container_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_refresh.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}/containers/{containerName}/refresh'}  # type: ignore
| {
"content_hash": "1ce62ff28105bef0f05ee183cca0d77a",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 262,
"avg_line_length": 49.873504273504274,
"alnum_prop": 0.6407663833287633,
"repo_name": "Azure/azure-sdk-for-python",
"id": "82bffe37fd93e305632795ae452b6f7f0e09bf88",
"size": "29643",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2021_02_01_preview/operations/_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import logging
import urllib
import urllib.error

from homeassistant.components.notify import (
    ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-telegram-bot==3.4']
def get_service(hass, config):
    """Get the Telegram notification service.

    Validates that the config provides an API key and a chat id, then
    verifies the access token by asking the bot for its own identity.
    Returns None when validation or token verification fails.
    """
    import telegram
    # ``import urllib`` alone does not make the ``urllib.error`` submodule
    # available; import it explicitly so the except clause below cannot
    # itself raise AttributeError.
    import urllib.error

    if not validate_config({DOMAIN: config},
                           {DOMAIN: [CONF_API_KEY, 'chat_id']},
                           _LOGGER):
        return None

    try:
        bot = telegram.Bot(token=config[CONF_API_KEY])
        username = bot.getMe()['username']
        _LOGGER.info("Telegram bot is '%s'.", username)
    except urllib.error.HTTPError:
        _LOGGER.error("Please check your access token.")
        return None

    return TelegramNotificationService(config[CONF_API_KEY], config['chat_id'])
# pylint: disable=too-few-public-methods
class TelegramNotificationService(BaseNotificationService):
    """Implement the notification service for Telegram."""

    def __init__(self, api_key, chat_id):
        """Initialize the service with the bot token and target chat."""
        import telegram
        self._api_key = api_key
        self._chat_id = chat_id
        self.bot = telegram.Bot(token=self._api_key)

    def send_message(self, message="", **kwargs):
        """Send a message to the configured chat.

        The optional title (ATTR_TITLE) is prepended to the message body.
        """
        import telegram
        title = kwargs.get(ATTR_TITLE)
        # A missing title would make ``title + " " + message`` raise
        # TypeError (None + str); fall back to the bare message instead.
        text = title + " " + message if title else message
        try:
            self.bot.sendMessage(chat_id=self._chat_id, text=text)
        except telegram.error.TelegramError:
            _LOGGER.exception("Error sending message.")
| {
"content_hash": "2478359f0c51bfd92eeecf9efb077f28",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 30.338709677419356,
"alnum_prop": 0.6406166932482722,
"repo_name": "justyns/home-assistant",
"id": "b26306405c18f2fb61fd1feb475f0792e60ce151",
"size": "1881",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/telegram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1515430"
},
{
"name": "Python",
"bytes": "2051483"
},
{
"name": "Shell",
"bytes": "3674"
}
],
"symlink_target": ""
} |
"""Parallelization utility optimizer."""
from __future__ import absolute_import
__all__ = ['split_data', 'split_and_load', 'clip_global_norm',
'check_sha1', 'download']
import os
import sys
import hashlib
import uuid
import warnings
import collections
import weakref
import requests
import numpy as np
from .. import ndarray
from ..util import is_np_shape, is_np_array
from .. import numpy as _mx_np # pylint: disable=reimported
def split_data(data, num_slice, batch_axis=0, even_split=True):
    """Split an NDArray into `num_slice` slices along `batch_axis`.

    Typically used for data parallelism, where each slice is dispatched to
    one device (e.g. one GPU).

    Parameters
    ----------
    data : NDArray
        A batch of data.
    num_slice : int
        Number of desired slices.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.
        If `True`, an error is raised when `num_slice` does not evenly
        divide `data.shape[batch_axis]`.

    Returns
    -------
    list of NDArray
        Always a list, even when `num_slice` is 1.
    """
    size = data.shape[batch_axis]
    if even_split and size % num_slice != 0:
        raise ValueError(
            "data with shape %s cannot be evenly split into %d slices along axis %d. " \
            "Use a batch size that's multiple of %d or set even_split=False to allow " \
            "uneven partitioning of data."%(
                str(data.shape), num_slice, batch_axis, num_slice))

    step = size // num_slice
    # With uneven splitting allowed and fewer samples than requested
    # slices, degrade gracefully to one sample per slice.
    if not even_split and size < num_slice:
        step, num_slice = 1, size

    if batch_axis == 0:
        # Basic slicing works for any array type; the final slice absorbs
        # the remainder when the split is uneven.
        bounds = [(k * step, (k + 1) * step if k + 1 < num_slice else size)
                  for k in range(num_slice)]
        return [data[lo:hi] for lo, hi in bounds]

    if even_split:
        if is_np_array():
            return _mx_np.split(data, indices_or_sections=num_slice, axis=batch_axis)
        return ndarray.split(data, num_outputs=num_slice, axis=batch_axis)

    if is_np_array():
        cut_points = [step * k for k in range(1, num_slice)]
        return _mx_np.split(data, indices_or_sections=cut_points, axis=batch_axis)

    pieces = []
    for k in range(num_slice):
        stop = (k + 1) * step if k + 1 < num_slice else size
        pieces.append(ndarray.slice_axis(data, batch_axis, k * step, stop))
    return pieces
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
    """Split an NDArray into `len(ctx_list)` slices along `batch_axis` and
    load each slice onto the corresponding context in `ctx_list`.

    Parameters
    ----------
    data : NDArray or ndarray
        A batch of data.
    ctx_list : list of Context
        A list of Contexts.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.

    Returns
    -------
    list of NDArrays or ndarrays
        One slice per context in `ctx_list`.
    """
    to_array = _mx_np.array if is_np_array() else ndarray.array
    if not isinstance(data, ndarray.NDArray):
        data = to_array(data, ctx=ctx_list[0])

    # Single context: nothing to split, just make sure the data lives there.
    if len(ctx_list) == 1:
        return [data.as_in_context(ctx_list[0])]

    parts = split_data(data, len(ctx_list), batch_axis, even_split)
    return [part.as_in_context(ctx) for part, ctx in zip(parts, ctx_list)]
def clip_global_norm(arrays, max_norm, check_isfinite=True):
    """Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.

    The arrays are scaled **in place** by a common factor so that the global
    2-norm of the whole collection does not exceed `max_norm`.

    Parameters
    ----------
    arrays : list of NDArray
        Arrays to rescale in place; must be non-empty.
    max_norm : float
        Upper bound for the global 2-norm.
    check_isfinite : bool, default True
        If True, check that the total_norm is finite (not nan or inf). This
        requires a blocking .asscalar() call.

    Returns
    -------
    NDArray or float
        Total norm. Return type is NDArray of shape (1,) if check_isfinite is
        False. Otherwise a float is returned.
    """
    def _norm(array):
        # Dense storage: dot(x, x) on a flattened view gives the squared
        # 2-norm directly; other storage types go through norm() and square.
        if array.stype == 'default':
            x = array.reshape((-1,))
            return ndarray.dot(x, x)
        return array.norm().square()
    assert len(arrays) > 0
    # Accumulate all squared norms on the first array's context.
    ctx = arrays[0].context
    total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays])
    total_norm = ndarray.sqrt(total_norm)
    if check_isfinite:
        # .asscalar() blocks until the norm is computed.
        if not np.isfinite(total_norm.asscalar()):
            warnings.warn(
                UserWarning('nan or inf is detected. '
                            'Clipping results will be undefined.'), stacklevel=2)
    # 1e-8 guards against division by zero when the total norm is 0.
    scale = max_norm / (total_norm + 1e-8)
    # Cap the factor at 1 so arrays already within the bound are untouched.
    scale = ndarray.min(ndarray.concat(scale, ndarray.ones(1, ctx=ctx), dim=0))
    for arr in arrays:
        arr *= scale.as_in_context(arr.context)
    if check_isfinite:
        return total_norm.asscalar()
    else:
        return total_norm
def _indent(s_, numSpaces):
"""Indent string
"""
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
def check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.

    Parameters
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits.

    Returns
    -------
    bool
        Whether the file content matches the expected hash.
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as stream:
        # Stream in 1 MiB chunks so large files use bounded memory.
        for chunk in iter(lambda: stream.read(1048576), b''):
            digest.update(chunk)
    return digest.hexdigest() == sha1_hash
# Define a platform-specific atomic file replacement helper.  POSIX gets a
# plain os.rename (which atomically replaces the destination); Windows needs
# MoveFileExW via ctypes because os.rename cannot overwrite an existing file
# there.
if not sys.platform.startswith('win32'):
    # refer to https://github.com/untitaker/python-atomicwrites
    def _replace_atomic(src, dst):
        """Implement atomic os.replace with linux and OSX. Internal use only"""
        try:
            os.rename(src, dst)
        except OSError:
            # Clean up the temp file before surfacing the failure.
            try:
                os.remove(src)
            except OSError:
                pass
            finally:
                raise OSError(
                    'Moving downloaded temp file - {}, to {} failed. \
                    Please retry the download.'.format(src, dst))
else:
    import ctypes

    _MOVEFILE_REPLACE_EXISTING = 0x1
    # Setting this value guarantees that a move performed as a copy
    # and delete operation is flushed to disk before the function returns.
    # The flush occurs at the end of the copy operation.
    _MOVEFILE_WRITE_THROUGH = 0x8
    _windows_default_flags = _MOVEFILE_WRITE_THROUGH

    # Python 2 compatibility: MoveFileExW needs unicode path arguments.
    text_type = unicode if sys.version_info[0] == 2 else str  # pylint: disable=undefined-variable

    def _str_to_unicode(x):
        """Handle text decoding. Internal use only"""
        if not isinstance(x, text_type):
            return x.decode(sys.getfilesystemencoding())
        return x

    def _handle_errors(rv, src):
        """Handle WinError. Internal use only"""
        if not rv:
            msg = ctypes.FormatError(ctypes.GetLastError())
            # if the MoveFileExW fails(e.g. fail to acquire file lock), removes the tempfile
            try:
                os.remove(src)
            except OSError:
                pass
            finally:
                raise OSError(msg)

    def _replace_atomic(src, dst):
        """Implement atomic os.replace with windows.
        refer to https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-movefileexw
        The function fails when one of the process(copy, flush, delete) fails.
        Internal use only"""
        _handle_errors(ctypes.windll.kernel32.MoveFileExW(
            _str_to_unicode(src), _str_to_unicode(dst),
            _windows_default_flags | _MOVEFILE_REPLACE_EXISTING
        ), src)
def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
    """Download a given URL.

    The file is first written to a uniquely-named temporary file and then
    moved into place atomically, so concurrent downloads of the same file
    cannot corrupt each other.

    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    retries : integer, default 5
        The number of times to attempt the download in case of failure or non 200 return codes
    verify_ssl : bool, default True
        Verify SSL certificates.

    Returns
    -------
    str
        The file path of the downloaded file.
    """
    if path is None:
        fname = url.split('/')[-1]
        # Empty filenames are invalid
        assert fname, 'Can\'t construct file-name from this URL. ' \
                      'Please set the `path` option manually.'
    else:
        path = os.path.expanduser(path)
        # A directory path keeps the file name from the URL.
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path
    assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
        retries)

    if not verify_ssl:
        warnings.warn(
            'Unverified HTTPS request is being made (verify_ssl=False). '
            'Adding certificate verification is strongly advised.')

    # Skip the download entirely when the file already exists and its hash
    # (if given) matches, unless overwrite is requested.
    if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        while retries + 1 > 0:
            # Disable pylint too broad Exception
            # pylint: disable=W0703
            try:
                print('Downloading {} from {}...'.format(fname, url))
                r = requests.get(url, stream=True, verify=verify_ssl)
                if r.status_code != 200:
                    raise RuntimeError('Failed downloading url {}'.format(url))
                # create uuid for temporary files
                random_uuid = str(uuid.uuid4())
                with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk: # filter out keep-alive new chunks
                            f.write(chunk)
                # if the target file exists(created by other processes)
                # and has the same hash as the target file,
                # delete the temporary file
                if not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
                    # atomic operation in the same file system
                    _replace_atomic('{}.{}'.format(fname, random_uuid), fname)
                else:
                    try:
                        os.remove('{}.{}'.format(fname, random_uuid))
                    except OSError:
                        pass
                    finally:
                        warnings.warn(
                            'File {} exists in file system so the downloaded file is deleted'.format(fname))
                if sha1_hash and not check_sha1(fname, sha1_hash):
                    raise UserWarning(
                        'File {} is downloaded but the content hash does not match.'
                        ' The repo may be outdated or download may be incomplete. '
                        'If the "repo_url" is overridden, consider switching to '
                        'the default repo.'.format(fname))
                # Success: stop retrying.
                break
            except Exception as e:
                retries -= 1
                if retries <= 0:
                    raise e
                print('download failed due to {}, retrying, {} attempt{} left'
                      .format(repr(e), retries, 's' if retries > 1 else ''))

    return fname
def _get_repo_url():
"""Return the base URL for Gluon dataset and model repository."""
default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url+'/'
return repo_url
def _get_repo_file_url(namespace, filename):
    """Return the URL for a hosted file in the Gluon repository.

    Parameters
    ----------
    namespace : str
        Namespace of the file.
    filename : str
        Name of the file

    Returns
    -------
    str
        Full URL of the file under the repository base URL.
    """
    # The format string must reference {filename}; the previous literal
    # placeholder text meant the filename argument was silently ignored and
    # every file in a namespace resolved to the same (broken) URL.
    return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(),
                                                     namespace=namespace,
                                                     filename=filename)
def _brief_print_list(lst, limit=7):
"""Print at most `limit` elements of list."""
lst = list(lst)
if len(lst) > limit:
return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \
_brief_print_list(lst[-limit//2:], limit)
return ', '.join(["'%s'"%str(i) for i in lst])
class HookHandle(object):
    """A handle that can attach/detach a hook.

    Holds only a weak reference to the hook registry, so an outstanding
    handle never keeps the registry (or its owner) alive.
    """

    def __init__(self):
        self._hooks_dict_ref = None
        self._id = None

    def attach(self, hooks_dict, hook):
        """Register *hook* in *hooks_dict* and remember where it lives."""
        assert not self._hooks_dict_ref, 'The same handle cannot be attached twice.'
        self._id = id(hook)
        hooks_dict[self._id] = hook
        self._hooks_dict_ref = weakref.ref(hooks_dict)

    def detach(self):
        """Remove the hook, if the registry dict is still alive."""
        registry = self._hooks_dict_ref()
        if registry is not None and self._id in registry:
            del registry[self._id]

    def __getstate__(self):
        # Dereference for pickling; weakrefs themselves are not picklable.
        return (self._hooks_dict_ref(), self._id)

    def __setstate__(self, state):
        target = state[0]
        if target is None:
            # The registry died before pickling; point at a fresh empty one.
            target = collections.OrderedDict()
        self._hooks_dict_ref = weakref.ref(target)
        self._id = state[1]

    def __enter__(self):
        return self

    def __exit__(self, ptype, value, trace):
        self.detach()
def shape_is_known(shape):
    """Check whether a shape is completely known, with or without np semantics.

    Please see the doc of is_np_shape for more details.
    """
    if shape is None:
        return False
    # Under numpy semantics -1 marks an unknown dimension; legacy MXNet
    # semantics use 0.
    unknown = -1 if is_np_shape() else 0
    # A zero-dim shape (scalar) is only valid under numpy semantics.
    if not shape:
        return unknown == -1
    for dim in shape:
        if dim == unknown:
            return False
        assert dim > unknown, "shape dimension size cannot be less than {}, while " \
                              "received {}".format(unknown, dim)
    return True
def _check_same_symbol_type(symbols):
    """Check whether all the symbols in the list are of the same type.
    Raise type error if the types are different. Return the class of
    the symbols."""
    from ..symbol.numpy import _Symbol as np_symbol
    from ..symbol import Symbol as nd_symbol
    # The first symbol's flavor (numpy vs. classic) is the reference;
    # every remaining symbol must match it.
    is_np_sym = isinstance(symbols[0], np_symbol)
    for s in symbols[1:]:
        if is_np_sym != isinstance(s, np_symbol):
            raise TypeError('Found both classic symbol (mx.sym.Symbol) and numpy symbol '
                            '(mx.sym.np._Symbol) in outputs. This will prevent you from building '
                            'a computation graph by grouping them since different types of symbols '
                            'are not allowed to be grouped in Gluon to form a computation graph. '
                            'You will need to convert them to the same type of symbols, either '
                            'classic or numpy following this rule: if you want numpy ndarray '
                            'output(s) from the computation graph, please convert all the classic '
                            'symbols in the list to numpy symbols by calling `as_np_ndarray()` '
                            'on each of them; if you want classic ndarray output(s) from the '
                            'computation graph, please convert all the numpy symbols in the list '
                            'to classic symbols by calling `as_nd_ndarray()` on each of them.')
    return np_symbol if is_np_sym else nd_symbol
def _check_all_np_ndarrays(out):
    """Check if ndarrays/symbols in out are all np.ndarray/np._Symbol.

    Recurses into lists and tuples; raises TypeError on the first legacy
    (non-numpy) ndarray or symbol encountered.
    """
    from ..numpy import ndarray as np_ndarray
    from ..symbol.numpy import _Symbol as np_symbol
    from ..symbol import Symbol as nd_symbol
    from ..ndarray import NDArray as nd_ndarray

    # pylint: disable=no-else-raise
    is_legacy = isinstance(out, (nd_ndarray, nd_symbol))
    is_numpy = isinstance(out, (np_ndarray, np_symbol))
    if is_legacy and not is_numpy:
        raise TypeError("Block's output ndarrays/symbols must be of type `mxnet.numpy.ndarray`"
                        " or `mxnet.symbol.numpy._Symbol`, while got output type {}"
                        .format(str(type(out))))
    if isinstance(out, (list, tuple)):
        for element in out:
            _check_all_np_ndarrays(element)
    # pylint: enable=no-else-raise
| {
"content_hash": "9ac99025e9cad11ca3c793cadbd3af34",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 108,
"avg_line_length": 37.20779220779221,
"alnum_prop": 0.5795229784758581,
"repo_name": "reminisce/mxnet",
"id": "81a8dbaa486bb06170e3bf2dde40c1cf24cec051",
"size": "18011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/gluon/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "215572"
},
{
"name": "C++",
"bytes": "7680259"
},
{
"name": "CMake",
"bytes": "99958"
},
{
"name": "Clojure",
"bytes": "622688"
},
{
"name": "Cuda",
"bytes": "970884"
},
{
"name": "Dockerfile",
"bytes": "85151"
},
{
"name": "Groovy",
"bytes": "122800"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "436326"
},
{
"name": "Jupyter Notebook",
"bytes": "3660387"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "201597"
},
{
"name": "Perl",
"bytes": "1550163"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "13786"
},
{
"name": "Python",
"bytes": "7842403"
},
{
"name": "R",
"bytes": "357807"
},
{
"name": "Scala",
"bytes": "1305036"
},
{
"name": "Shell",
"bytes": "427407"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
} |
'''
Given a table-oriented text data file, a descriptive XML file and an outputFile name
convert the text data file into a netCDF file.
Created on Feb 27, 2017
@author: cyoung
'''
import sys
from gov.noaa.gmd.table_2_netcdf.TableDataDesc import TableDataDesc
from gov.noaa.gmd.table_2_netcdf.TableDataSet import TableDataSet
from gov.noaa.gmd.table_2_netcdf.NetCDFWriter import NetCDFWriter
class Table2NetCDF:
    """Convert a table-oriented text data file into a netCDF file, driven
    by a descriptive XML file."""

    def __init__(self, inputFile, xmlFile, outputFile):
        self.inputFile = inputFile
        self.xmlFile = xmlFile
        self.outputFile = outputFile
        # Echo the configuration so batch runs leave a trace in the log.
        for label, value in (('inputFile ', self.inputFile),
                             ('xmlFile ', self.xmlFile),
                             ('outputFile ', self.outputFile)):
            print(label, value)

    def convert(self):
        """Parse the XML description and the data file, then write netCDF."""
        # Parse the XML file.
        description = TableDataDesc(self.xmlFile)
        description.parse()
        # Parse the data file against that description.
        dataset = TableDataSet(self.inputFile, description)
        dataset.parse()
        # Write the netCDF file.
        NetCDFWriter(dataset, self.outputFile).write()
if __name__ == "__main__":
    # Usage: Table2NetCDF.py <inputFile> <xmlFile> <outputFile>
    table2NetCDF=Table2NetCDF(sys.argv[1],sys.argv[2],sys.argv[3])
    table2NetCDF.convert()
| {
"content_hash": "5769f0ff7091b89bea8b8e73ebc348b6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 84,
"avg_line_length": 32.89473684210526,
"alnum_prop": 0.6728,
"repo_name": "charles-g-young/Table2NetCDF",
"id": "31042d56e39aa7a4010186977af3908aecca4307",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gov/noaa/gmd/table_2_netcdf/Table2NetCDF.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22648"
}
],
"symlink_target": ""
} |
"""trydjango19 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
# Root URL routing table: the Django admin plus the ``posts`` app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^posts/', include("posts.urls", namespace='posts')),
    #url(r'^posts/$', "<appname>.views.<function_name>"),
]

# Serve static and media files from Django in development only; in
# production these are expected to be handled by the web server.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"content_hash": "3bf9e77109980b1afe4565b44c0423b5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 82,
"avg_line_length": 40.4,
"alnum_prop": 0.7112211221122112,
"repo_name": "codingforentrepreneurs/try-django-19",
"id": "c0237862eda5c33888ec7eebf79c5e2ef9ce1fde",
"size": "1212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/trydjango19/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45815"
},
{
"name": "HTML",
"bytes": "5416"
},
{
"name": "JavaScript",
"bytes": "88987"
},
{
"name": "Python",
"bytes": "18186"
}
],
"symlink_target": ""
} |
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
import grpc # type: ignore
from google.cloud.tasks_v2.types import cloudtasks
from google.cloud.tasks_v2.types import queue
from google.cloud.tasks_v2.types import queue as gct_queue
from google.cloud.tasks_v2.types import task
from google.cloud.tasks_v2.types import task as gct_task
from .base import DEFAULT_CLIENT_INFO, CloudTasksTransport
class CloudTasksGrpcTransport(CloudTasksTransport):
"""gRPC backend transport for CloudTasks.
Cloud Tasks allows developers to manage the execution of
background work in their applications.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "cloudtasks.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel supplied: work out which SSL credentials to use,
            # honoring the deprecated mTLS arguments for compatibility.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "cloudtasks.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Delegate to the shared helper; the class-level defaults supply the
        # auth scopes and host when the caller does not specify them.
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.

        The channel is either the one passed to ``__init__`` or the one
        created there via ``create_channel`` when none was supplied.
        """
        return self._grpc_channel
@property
def list_queues(
self,
) -> Callable[[cloudtasks.ListQueuesRequest], cloudtasks.ListQueuesResponse]:
r"""Return a callable for the list queues method over gRPC.
Lists queues.
Queues are returned in lexicographical order.
Returns:
Callable[[~.ListQueuesRequest],
~.ListQueuesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_queues" not in self._stubs:
self._stubs["list_queues"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/ListQueues",
request_serializer=cloudtasks.ListQueuesRequest.serialize,
response_deserializer=cloudtasks.ListQueuesResponse.deserialize,
)
return self._stubs["list_queues"]
@property
def get_queue(self) -> Callable[[cloudtasks.GetQueueRequest], queue.Queue]:
r"""Return a callable for the get queue method over gRPC.
Gets a queue.
Returns:
Callable[[~.GetQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_queue" not in self._stubs:
self._stubs["get_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/GetQueue",
request_serializer=cloudtasks.GetQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["get_queue"]
@property
def create_queue(
self,
) -> Callable[[cloudtasks.CreateQueueRequest], gct_queue.Queue]:
r"""Return a callable for the create queue method over gRPC.
Creates a queue.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.CreateQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_queue" not in self._stubs:
self._stubs["create_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/CreateQueue",
request_serializer=cloudtasks.CreateQueueRequest.serialize,
response_deserializer=gct_queue.Queue.deserialize,
)
return self._stubs["create_queue"]
@property
def update_queue(
self,
) -> Callable[[cloudtasks.UpdateQueueRequest], gct_queue.Queue]:
r"""Return a callable for the update queue method over gRPC.
Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a
maximum of 31 days. After a task is 31 days old, the task will
be deleted regardless of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.UpdateQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_queue" not in self._stubs:
self._stubs["update_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/UpdateQueue",
request_serializer=cloudtasks.UpdateQueueRequest.serialize,
response_deserializer=gct_queue.Queue.deserialize,
)
return self._stubs["update_queue"]
@property
def delete_queue(
self,
) -> Callable[[cloudtasks.DeleteQueueRequest], empty_pb2.Empty]:
r"""Return a callable for the delete queue method over gRPC.
Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be
created for 7 days.
WARNING: Using this method may have unintended side effects if
you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
to manage your queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
before using this method.
Returns:
Callable[[~.DeleteQueueRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_queue" not in self._stubs:
self._stubs["delete_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/DeleteQueue",
request_serializer=cloudtasks.DeleteQueueRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_queue"]
@property
def purge_queue(self) -> Callable[[cloudtasks.PurgeQueueRequest], queue.Queue]:
r"""Return a callable for the purge queue method over gRPC.
Purges a queue by deleting all of its tasks.
All tasks created before this method is called are
permanently deleted.
Purge operations can take up to one minute to take
effect. Tasks might be dispatched before the purge takes
effect. A purge is irreversible.
Returns:
Callable[[~.PurgeQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "purge_queue" not in self._stubs:
self._stubs["purge_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/PurgeQueue",
request_serializer=cloudtasks.PurgeQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["purge_queue"]
@property
def pause_queue(self) -> Callable[[cloudtasks.PauseQueueRequest], queue.Queue]:
r"""Return a callable for the pause queue method over gRPC.
Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2.CloudTasks.ResumeQueue].
Tasks can still be added when the queue is paused. A queue is
paused if its [state][google.cloud.tasks.v2.Queue.state] is
[PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED].
Returns:
Callable[[~.PauseQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "pause_queue" not in self._stubs:
self._stubs["pause_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/PauseQueue",
request_serializer=cloudtasks.PauseQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["pause_queue"]
@property
def resume_queue(self) -> Callable[[cloudtasks.ResumeQueueRequest], queue.Queue]:
r"""Return a callable for the resume queue method over gRPC.
Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2.Queue.State.DISABLED]. The
state of a queue is stored in the queue's
[state][google.cloud.tasks.v2.Queue.state]; after calling this
method it will be set to
[RUNNING][google.cloud.tasks.v2.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can lead
to target overloading. If you are resuming high-QPS queues,
follow the 500/50/5 pattern described in `Managing Cloud Tasks
Scaling
Risks <https://cloud.google.com/tasks/docs/manage-cloud-task-scaling>`__.
Returns:
Callable[[~.ResumeQueueRequest],
~.Queue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resume_queue" not in self._stubs:
self._stubs["resume_queue"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/ResumeQueue",
request_serializer=cloudtasks.ResumeQueueRequest.serialize,
response_deserializer=queue.Queue.deserialize,
)
return self._stubs["resume_queue"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a
[Queue][google.cloud.tasks.v2.Queue]. Returns an empty policy if
the resource exists and does not have a policy set.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.getIamPolicy``
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy for a
[Queue][google.cloud.tasks.v2.Queue]. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM
permissions yet. Project-level permissions are required to use
the Cloud Console.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the
specified resource parent:
- ``cloudtasks.queues.setIamPolicy``
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has on a
[Queue][google.cloud.tasks.v2.Queue]. If the resource does not
exist, this will return an empty set of permissions, not a
[NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def list_tasks(
self,
) -> Callable[[cloudtasks.ListTasksRequest], cloudtasks.ListTasksResponse]:
r"""Return a callable for the list tasks method over gRPC.
Lists the tasks in a queue.
By default, only the
[BASIC][google.cloud.tasks.v2.Task.View.BASIC] view is retrieved
due to performance considerations;
[response_view][google.cloud.tasks.v2.ListTasksRequest.response_view]
controls the subset of information which is returned.
The tasks may be returned in any order. The ordering may change
at any time.
Returns:
Callable[[~.ListTasksRequest],
~.ListTasksResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_tasks" not in self._stubs:
self._stubs["list_tasks"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/ListTasks",
request_serializer=cloudtasks.ListTasksRequest.serialize,
response_deserializer=cloudtasks.ListTasksResponse.deserialize,
)
return self._stubs["list_tasks"]
@property
def get_task(self) -> Callable[[cloudtasks.GetTaskRequest], task.Task]:
r"""Return a callable for the get task method over gRPC.
Gets a task.
Returns:
Callable[[~.GetTaskRequest],
~.Task]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_task" not in self._stubs:
self._stubs["get_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/GetTask",
request_serializer=cloudtasks.GetTaskRequest.serialize,
response_deserializer=task.Task.deserialize,
)
return self._stubs["get_task"]
@property
def create_task(self) -> Callable[[cloudtasks.CreateTaskRequest], gct_task.Task]:
r"""Return a callable for the create task method over gRPC.
Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask
command.
- The maximum task size is 100KB.
Returns:
Callable[[~.CreateTaskRequest],
~.Task]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_task" not in self._stubs:
self._stubs["create_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/CreateTask",
request_serializer=cloudtasks.CreateTaskRequest.serialize,
response_deserializer=gct_task.Task.deserialize,
)
return self._stubs["create_task"]
@property
def delete_task(self) -> Callable[[cloudtasks.DeleteTaskRequest], empty_pb2.Empty]:
r"""Return a callable for the delete task method over gRPC.
Deletes a task.
A task can be deleted if it is scheduled or dispatched.
A task cannot be deleted if it has executed successfully
or permanently failed.
Returns:
Callable[[~.DeleteTaskRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_task" not in self._stubs:
self._stubs["delete_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/DeleteTask",
request_serializer=cloudtasks.DeleteTaskRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_task"]
@property
def run_task(self) -> Callable[[cloudtasks.RunTaskRequest], task.Task]:
r"""Return a callable for the run task method over gRPC.
Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task,
even if the task is already running, the queue has reached its
[RateLimits][google.cloud.tasks.v2.RateLimits] or is
[PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED].
This command is meant to be used for manual debugging. For
example, [RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] can
be used to retry a failed task after a fix has been made or to
manually force a task to be dispatched now.
The dispatched task is returned. That is, the task that is
returned contains the [status][Task.status] after the task is
dispatched but before the task is received by its target.
If Cloud Tasks receives a successful response from the task's
target, then the task will be deleted; otherwise the task's
[schedule_time][google.cloud.tasks.v2.Task.schedule_time] will
be reset to the time that
[RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] was called
plus the retry delay specified in the queue's
[RetryConfig][google.cloud.tasks.v2.RetryConfig].
[RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] returns
[NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
task that has already succeeded or permanently failed.
Returns:
Callable[[~.RunTaskRequest],
~.Task]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_task" not in self._stubs:
self._stubs["run_task"] = self.grpc_channel.unary_unary(
"/google.cloud.tasks.v2.CloudTasks/RunTask",
request_serializer=cloudtasks.RunTaskRequest.serialize,
response_deserializer=task.Task.deserialize,
)
return self._stubs["run_task"]
    def close(self):
        """Close the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def kind(self) -> str:
        """Return the transport kind identifier for this class ("grpc")."""
        return "grpc"
__all__ = ("CloudTasksGrpcTransport",)
| {
"content_hash": "6b80f931c95c6dd5eb5a70952f0399f0",
"timestamp": "",
"source": "github",
"line_count": 767,
"max_line_length": 94,
"avg_line_length": 43.13689700130378,
"alnum_prop": 0.6174212657921779,
"repo_name": "googleapis/python-tasks",
"id": "107bb94b5e40116c1789f24ae7e3e2366dd5913e",
"size": "33686",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/tasks_v2/services/cloud_tasks/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1867840"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
from django.db import migrations
from django.db.models import Count, Q
from wagtail.core.models import Page as RealPage
def ancestor_of_q(page):
    """Return a Q filter matching *page* and all of its ancestors.

    Each ancestor's path is a prefix of the page's materialised path,
    ``steplen`` characters per tree level, so we match on every such
    prefix (including the page's own full path).
    """
    prefixes = [
        page.path[:length]
        for length in range(page.steplen, len(page.path) + 1, page.steplen)
    ]
    return Q(path__in=prefixes)
def create_default_workflows(apps, schema_editor):
    """Recreate the existing publish-permission based moderation setup in
    the new workflow system, by creating new workflows.

    For each 'publish' GroupPagePermission, find (or create) a
    GroupApprovalTask covering exactly the groups with publish rights over
    the page, wrap it in a single-task Workflow, and link that workflow to
    the page via WorkflowPage.
    """
    # Get models
    ContentType = apps.get_model('contenttypes.ContentType')
    Workflow = apps.get_model('wagtailcore.Workflow')
    GroupApprovalTask = apps.get_model('wagtailcore.GroupApprovalTask')
    GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
    WorkflowPage = apps.get_model('wagtailcore.WorkflowPage')
    WorkflowTask = apps.get_model('wagtailcore.WorkflowTask')
    Page = apps.get_model('wagtailcore.Page')
    Group = apps.get_model('auth.Group')
    # Get this from real page model just in case it has been overridden
    Page.steplen = RealPage.steplen
    # Create content type for GroupApprovalTask model
    group_approval_content_type, __ = ContentType.objects.get_or_create(
        model='groupapprovaltask', app_label='wagtailcore')
    publish_permissions = GroupPagePermission.objects.filter(permission_type='publish')
    for permission in publish_permissions:
        # find groups with publish permission over this page or its ancestors (and therefore this page by descent)
        page = permission.page
        # Re-fetch via the historical model so ancestor_of_q sees consistent fields
        page = Page.objects.get(pk=page.pk)
        ancestors = Page.objects.filter(ancestor_of_q(page))
        ancestor_permissions = publish_permissions.filter(page__in=ancestors)
        groups = Group.objects.filter(Q(page_permissions__in=ancestor_permissions) | Q(page_permissions__pk=permission.pk)).distinct()
        # get a GroupApprovalTask with groups matching these publish permission groups (and no others)
        task = GroupApprovalTask.objects.filter(groups__id__in=groups.all()).annotate(count=Count('groups')).filter(count=groups.count()).filter(active=True).first()
        if not task:
            # if no such task exists, create it
            group_names = ' '.join([group.name for group in groups])
            task = GroupApprovalTask.objects.create(
                name=group_names + " approval",
                content_type=group_approval_content_type,
                active=True,
            )
            task.groups.set(groups)
        # get a Workflow containing only this task if it exists, otherwise create it
        workflow = Workflow.objects.annotate(task_number=Count('workflow_tasks')).filter(task_number=1).filter(workflow_tasks__task=task).filter(active=True).first()
        if not workflow:
            workflow = Workflow.objects.create(
                name=task.name,
                active=True
            )
            WorkflowTask.objects.create(
                workflow=workflow,
                task=task,
                sort_order=0,
            )
        # if the workflow is not linked by a WorkflowPage to the permission's linked page, link it by creating a new WorkflowPage now
        if not WorkflowPage.objects.filter(workflow=workflow, page=page).exists():
            WorkflowPage.objects.create(
                workflow=workflow,
                page=page
            )
class Migration(migrations.Migration):
    """Create default workflows mirroring existing publish permissions."""

    dependencies = [
        ('wagtailcore', '0047_add_workflow_models'),
    ]
    operations = [
        # Reverse migration is a no-op: generated workflows are left in place.
        migrations.RunPython(create_default_workflows, migrations.RunPython.noop),
    ]
| {
"content_hash": "9dcea9f17aa69a14a736b03880c8e98a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 165,
"avg_line_length": 41.23863636363637,
"alnum_prop": 0.6643703499586663,
"repo_name": "jnns/wagtail",
"id": "73e8f1eb1d7857ba70ec115ebeacecf9d9843ffc",
"size": "3653",
"binary": false,
"copies": "8",
"ref": "refs/heads/patch-2",
"path": "wagtail/core/migrations/0048_add_default_workflows.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "150882"
},
{
"name": "HTML",
"bytes": "243839"
},
{
"name": "JavaScript",
"bytes": "87980"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1528233"
},
{
"name": "Shell",
"bytes": "7241"
}
],
"symlink_target": ""
} |
import os, sys, json, yaml
import optparse
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--output', dest='output', default=None, help='output JSON filename')
parser.add_option('--class-names', dest='class_names', help='YAML metadata for class names')
parser.add_option('--debug-commands', dest='debug_commands', help='YAML metadata for debug commands')
parser.add_option('--debug-errors', dest='debug_errors', help='YAML metadata for debug protocol error codes')
parser.add_option('--opcodes', dest='opcodes', help='YAML metadata for opcodes')
(opts, args) = parser.parse_args()
res = {}
def merge(fn):
with open(fn, 'rb') as f:
doc = yaml.load(f)
for k in doc.keys():
res[k] = doc[k]
merge(opts.class_names)
merge(opts.debug_commands)
merge(opts.debug_errors)
merge(opts.opcodes)
with open(opts.output, 'wb') as f:
f.write(json.dumps(res, indent=4) + '\n')
print('Wrote merged debugger metadata to ' + str(opts.output))
| {
"content_hash": "0471c04345727ffa30282d1fc37cc904",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 110,
"avg_line_length": 36.48148148148148,
"alnum_prop": 0.6873096446700507,
"repo_name": "xsmart/opencvr",
"id": "ba9b38e3df45304ef9988ca94d756591d5a9c1b9",
"size": "1091",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "3rdparty/civetweb/src/third_party/duktape-1.5.2/debugger/merge_debug_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22997"
},
{
"name": "C",
"bytes": "52278"
},
{
"name": "C++",
"bytes": "2433166"
},
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "JavaScript",
"bytes": "13654"
},
{
"name": "Makefile",
"bytes": "3705541"
},
{
"name": "Objective-C",
"bytes": "1821"
},
{
"name": "Prolog",
"bytes": "2697"
},
{
"name": "Protocol Buffer",
"bytes": "5494"
},
{
"name": "Python",
"bytes": "18441"
},
{
"name": "QMake",
"bytes": "12789"
},
{
"name": "Shell",
"bytes": "4704"
}
],
"symlink_target": ""
} |
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common import importutils
from nova.openstack.common import periodic_task
from nova.openstack.common import timeutils
# Configuration options for the nova-cells manager, registered under the
# [cells] group of the nova configuration.
cell_manager_opts = [
        cfg.StrOpt('driver',
                default='nova.cells.rpc_driver.CellsRPCDriver',
                help='Cells communication driver to use'),
        cfg.IntOpt("instance_updated_at_threshold",
                default=3600,
                help="Number of seconds after an instance was updated "
                        "or deleted to continue to update cells"),
        cfg.IntOpt("instance_update_num_instances",
                default=1,
                help="Number of instances to update per periodic task run")
]

CONF = cfg.CONF
CONF.register_opts(cell_manager_opts, group='cells')
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the messaging module. The
MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.9'
def __init__(self, *args, **kwargs):
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(service_name='cells',
*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
Also ask our child cells for their capacities and capabilities so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop consumers cleanly.
self.driver.start_consumers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.

        Runs as a periodic task, and is also called directly from
        post_start_hook when this cell has no children.
        """
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)
    @periodic_task.periodic_task
    def _heal_instances(self, ctxt):
        """Periodic task to send updates for a number of instances to
        parent cells.

        On every run of the periodic task, we will attempt to sync
        'CONF.cells.instance_update_num_instances' number of instances.
        When we get the list of instances, we shuffle them so that multiple
        nova-cells services aren't attempting to sync the same instances
        in lockstep.

        If CONF.cells.instance_updated_at_threshold is set, only attempt
        to sync instances that have been updated recently.  The CONF
        setting defines the maximum number of seconds old the updated_at
        can be.  Ie, a threshold of 3600 means to only update instances
        that have modified in the last hour.
        """
        if not self.state_manager.get_parent_cells():
            # No need to sync up if we have no parents.
            return
        # Tracks whether we've already re-fetched the instance list during
        # this run, so we only hit the DB once per periodic invocation.
        info = {'updated_list': False}
        def _next_instance():
            # Return the next instance UUID to sync, or None when done.
            # When the cached iterator is exhausted, refill it (at most
            # once per run) from the DB, shuffled and filtered by the
            # updated_at threshold.
            try:
                instance = self.instances_to_heal.next()
            except StopIteration:
                if info['updated_list']:
                    return
                threshold = CONF.cells.instance_updated_at_threshold
                updated_since = None
                if threshold > 0:
                    updated_since = timeutils.utcnow() - datetime.timedelta(
                            seconds=threshold)
                self.instances_to_heal = cells_utils.get_instances_to_sync(
                        ctxt, updated_since=updated_since, shuffle=True,
                        uuids_only=True)
                info['updated_list'] = True
                try:
                    instance = self.instances_to_heal.next()
                except StopIteration:
                    return
            return instance
        # read_deleted so that deleted instances are still propagated
        # upward (as destroys) by _sync_instance.
        rd_context = ctxt.elevated(read_deleted='yes')
        for i in xrange(CONF.cells.instance_update_num_instances):
            while True:
                # Yield to other greenthreads
                time.sleep(0)
                instance_uuid = _next_instance()
                if not instance_uuid:
                    return
                try:
                    instance = self.db.instance_get_by_uuid(rd_context,
                            instance_uuid)
                except exception.InstanceNotFound:
                    # Instance vanished; move on to the next UUID without
                    # consuming this slot.
                    continue
                self._sync_instance(ctxt, instance)
                break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def build_instances(self, ctxt, build_inst_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s) and
forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
def get_cell_info_for_neighbors(self, _ctxt):
    """Return cell information for our neighbor cells."""
    # The context argument is unused; the state manager holds the data.
    info = self.state_manager.get_cell_info_for_neighbors()
    return info
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
    """Call a compute API method in a specific cell.

    Returns the remote result only when 'call' is true; cast-style
    (fire and forget) invocations return None.
    """
    response = self.msg_runner.run_compute_api_method(
        ctxt, cell_name, method_info, call)
    if not call:
        return None
    return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
    """Broadcast an instance update up to the top level cell."""
    runner = self.msg_runner
    runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
    """Broadcast an instance destroy up to the top level cell."""
    runner = self.msg_runner
    runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
    """Broadcast a delete/soft-delete request for an instance to all cells.

    Used by the API cell when it didn't know which cell an instance was
    in, but the instance was requested to be deleted or soft_deleted.
    """
    self.msg_runner.instance_delete_everywhere(
        ctxt, instance, delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
    """Create an instance fault at the top level cell."""
    runner = self.msg_runner
    runner.instance_fault_create_at_top(ctxt, instance_fault)
def bw_usage_update_at_top(self, ctxt, bw_update_info):
    """Update bandwidth usage at top level cell."""
    runner = self.msg_runner
    runner.bw_usage_update_at_top(ctxt, bw_update_info)
def sync_instances(self, ctxt, project_id, updated_since, deleted):
    """Force a sync of all instances, optionally restricted by
    project_id and/or by an updated_since date/time.
    """
    self.msg_runner.sync_instances(
        ctxt, project_id, updated_since, deleted)
def service_get_all(self, ctxt, filters):
    """Return services in this cell and in all child cells."""
    responses = self.msg_runner.service_get_all(ctxt, filters)
    ret_services = []
    # One response per cell; each carries that cell's service list.
    for response in responses:
        cell = response.cell_name
        for service in response.value_or_raise():
            # Tag the entry with its originating cell before returning.
            cells_utils.add_cell_to_service(service, cell)
            ret_services.append(service)
    return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
    """Return a service entry for a compute host in a certain cell."""
    # The host argument encodes the target cell as well.
    cell_name, host_name = cells_utils.split_cell_and_item(host_name)
    response = self.msg_runner.service_get_by_compute_host(
        ctxt, cell_name, host_name)
    service = response.value_or_raise()
    cells_utils.add_cell_to_service(service, response.cell_name)
    return service
def service_update(self, ctxt, host_name, binary, params_to_update):
    """Enable/disable a service.

    For compute services, setting to disabled stops new builds arriving
    on that host.

    :param host_name: the name of the host machine that the service is
                      running
    :param binary: The name of the executable that the service runs as
    :param params_to_update: eg. {'disabled': True}
    :returns: the service reference
    """
    # The host argument encodes the target cell as well.
    cell_name, host_name = cells_utils.split_cell_and_item(host_name)
    response = self.msg_runner.service_update(
        ctxt, cell_name, host_name, binary, params_to_update)
    service = response.value_or_raise()
    cells_utils.add_cell_to_service(service, response.cell_name)
    return service
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
    """Proxy an RPC message as-is to a manager."""
    # The topic starts with the compute topic followed by one separator
    # character and then the 'cell!host' portion; strip the prefix.
    target = topic[len(CONF.compute_topic) + 1:]
    cell_name, host_name = cells_utils.split_cell_and_item(target)
    response = self.msg_runner.proxy_rpc_to_manager(
        ctxt, cell_name, host_name, topic, rpc_message, call, timeout)
    return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
                     period_ending, host=None, state=None):
    """Get task logs from the DB from all cells or a particular
    cell.

    If 'host' is not None, host will be of the format 'cell!name@host',
    with '@host' being optional. The query will be directed to the
    appropriate cell and return all task logs, or task logs matching
    the host if specified.

    'state' also may be None. If it's not, filter by the state as well.
    """
    if host is None:
        cell_name = None
    else:
        cell_name, host = cells_utils.split_cell_and_item(host)
        # If no cell name was given, assume that the host name is the
        # cell_name and that the target is all hosts
        if cell_name is None:
            cell_name, host = host, cell_name
    # cell_name of None broadcasts to all cells; otherwise the message
    # is targeted at the single cell parsed above.
    responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
        task_name, period_beginning, period_ending,
        host=host, state=state)
    # 1 response per cell. Each response is a list of task log
    # entries.
    ret_task_logs = []
    for response in responses:
        task_logs = response.value_or_raise()
        for task_log in task_logs:
            # Tag each entry with the cell it came from before
            # returning it to the caller.
            cells_utils.add_cell_to_task_log(task_log,
                                             response.cell_name)
            ret_task_logs.append(task_log)
    return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
    """Get a compute node by ID in a specific cell."""
    cell_name, compute_id = cells_utils.split_cell_and_item(compute_id)
    response = self.msg_runner.compute_node_get(
        ctxt, cell_name, compute_id)
    node = response.value_or_raise()
    # NOTE(review): sibling methods tag with response.cell_name; this
    # one uses the parsed cell_name. Preserved as-is.
    cells_utils.add_cell_to_compute_node(node, cell_name)
    return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
    """Return list of compute nodes in all cells."""
    responses = self.msg_runner.compute_node_get_all(
        ctxt, hypervisor_match=hypervisor_match)
    ret_nodes = []
    # One response per cell; each carries that cell's node list.
    for response in responses:
        cell = response.cell_name
        for node in response.value_or_raise():
            # Tag the entry with its originating cell before returning.
            cells_utils.add_cell_to_compute_node(node, cell)
            ret_nodes.append(node)
    return ret_nodes
def compute_node_stats(self, ctxt):
    """Return compute node stats totals from all cells."""
    totals = {}
    # Sum each stat key across the per-cell responses.
    for response in self.msg_runner.compute_node_stats(ctxt):
        data = response.value_or_raise()
        for key, val in data.iteritems():
            totals[key] = totals.get(key, 0) + val
    return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
    """Return instance actions for the given instance in a cell."""
    response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
    return response.value_or_raise()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                             request_id):
    """Return the instance action matching request_id in a cell."""
    response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                        instance_uuid,
                                                        request_id)
    return response.value_or_raise()
def action_events_get(self, ctxt, cell_name, action_id):
    """Return events for an instance action in the given cell."""
    response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                 action_id)
    return response.value_or_raise()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
    """Delete consoleauth tokens for an instance in API cells."""
    runner = self.msg_runner
    runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
                          console_type):
    """Validate console port with child cell compute node."""
    instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
    cell_name = instance['cell_name']
    if not cell_name:
        # Without a cell we cannot route the validation request.
        raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
    response = self.msg_runner.validate_console_port(
        ctxt, cell_name, instance_uuid, console_port, console_type)
    return response.value_or_raise()
def get_capacities(self, ctxt, cell_name):
    """Return capacity info from the state manager for cell_name."""
    return self.state_manager.get_capacities(cell_name)
| {
"content_hash": "daa39c324b5182d52b66375b1214e3a9",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 78,
"avg_line_length": 43.777188328912466,
"alnum_prop": 0.5948254968492487,
"repo_name": "DirectXMan12/nova-hacking",
"id": "f776c542e4aebb0dfd3ee097fa590389039c1c4d",
"size": "17141",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/cells/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
} |
"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
import os
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
array_function_like_doc = (
"""like : array_like, optional
Reference object to allow the creation of arrays which are not
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
compatible with that passed in via this argument."""
)
def set_array_function_like_doc(public_api):
    """Substitute the ``like`` parameter documentation into *public_api*.

    Replaces the ``${ARRAY_FUNCTION_LIKE}`` placeholder in the function's
    docstring, if it has one (docstrings are absent under ``python -OO``).
    Returns the function itself so this can be used as a decorator.
    """
    doc = public_api.__doc__
    if doc is not None:
        public_api.__doc__ = doc.replace(
            "${ARRAY_FUNCTION_LIKE}",
            array_function_like_doc,
        )
    return public_api
add_docstring(
implement_array_function,
"""
Implement a function with checks for __array_function__ overrides.
All arguments are required, and can only be passed by position.
Parameters
----------
implementation : function
Function that implements the operation on NumPy array without
overrides when called like ``implementation(*args, **kwargs)``.
public_api : function
Function exposed by NumPy's public API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
""")
# exposed for testing purposes; used internally by implement_array_function
add_docstring(
_get_implementing_args,
"""
Collect arguments on which to call __array_function__.
Parameters
----------
relevant_args : iterable of array-like
Iterable of possibly array-like arguments to check for
__array_function__ methods.
Returns
-------
Sequence of arguments with __array_function__ methods, in the order in
which they should be called.
""")
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
def verify_matching_signatures(implementation, dispatcher):
    """Verify that a dispatcher function has the right signature.

    The dispatcher must mirror the implementation's argument names,
    *args/**kwargs usage, and number of defaulted arguments; every
    default on the dispatcher must be None.
    """
    impl_spec = ArgSpec(*getargspec(implementation))
    disp_spec = ArgSpec(*getargspec(dispatcher))

    mismatched = (
        impl_spec.args != disp_spec.args
        or impl_spec.varargs != disp_spec.varargs
        or impl_spec.keywords != disp_spec.keywords
        or bool(impl_spec.defaults) != bool(disp_spec.defaults)
        or (impl_spec.defaults is not None
            and len(impl_spec.defaults) != len(disp_spec.defaults))
    )
    if mismatched:
        raise RuntimeError('implementation and dispatcher for %s have '
                           'different function signatures' % implementation)

    if impl_spec.defaults is not None:
        # Dispatchers never act on default values, so they must all be None.
        all_none = (None,) * len(disp_spec.defaults)
        if disp_spec.defaults != all_none:
            raise RuntimeError('dispatcher functions can only use None for '
                               'default argument values')
def set_module(module):
    """Decorator for overriding __module__ on a function or class.

    A ``module`` of None leaves the object untouched.

    Example usage::

        @set_module('numpy')
        def example():
            pass

        assert example.__module__ == 'numpy'
    """
    def decorator(obj):
        if module is None:
            return obj
        obj.__module__ = module
        return obj
    return decorator
def array_function_dispatch(dispatcher, module=None, verify=True,
                            docs_from_dispatcher=False):
    """Decorator for adding dispatch with the __array_function__ protocol.

    See NEP-18 for example usage.

    Parameters
    ----------
    dispatcher : callable
        Function that when called like ``dispatcher(*args, **kwargs)`` with
        arguments from the NumPy function call returns an iterable of
        array-like arguments to check for ``__array_function__``.
    module : str, optional
        __module__ attribute to set on new function, e.g., ``module='numpy'``.
        By default, module is copied from the decorated function.
    verify : bool, optional
        If True, verify the that the signature of the dispatcher and decorated
        function signatures match exactly: all required and optional arguments
        should appear in order with the same names, but the default values for
        all optional arguments should be ``None``. Only disable verification
        if the dispatcher's signature needs to deviate for some particular
        reason, e.g., because the function has a signature like
        ``func(*args, **kwargs)``.
    docs_from_dispatcher : bool, optional
        If True, copy docs from the dispatcher function onto the dispatched
        function, rather than from the implementation. This is useful for
        functions defined in C, which otherwise don't have docstrings.

    Returns
    -------
    Function suitable for decorating the implementation of a NumPy function.
    """
    if not ARRAY_FUNCTION_ENABLED:
        # Dispatch disabled via environment variable: return a decorator
        # that leaves the implementation unwrapped (docs/module still set).
        def decorator(implementation):
            if docs_from_dispatcher:
                add_docstring(implementation, dispatcher.__doc__)
            if module is not None:
                implementation.__module__ = module
            return implementation
        return decorator

    def decorator(implementation):
        if verify:
            verify_matching_signatures(implementation, dispatcher)

        if docs_from_dispatcher:
            add_docstring(implementation, dispatcher.__doc__)

        @functools.wraps(implementation)
        def public_api(*args, **kwargs):
            relevant_args = dispatcher(*args, **kwargs)
            return implement_array_function(
                implementation, public_api, relevant_args, args, kwargs)

        # Rewrite the wrapper's code object so tracebacks and profilers
        # report the implementation's name and a recognizable synthetic
        # filename instead of this generic wrapper.
        public_api.__code__ = public_api.__code__.replace(
                co_name=implementation.__name__,
                co_filename='<__array_function__ internals>')
        if module is not None:
            public_api.__module__ = module

        # Expose the undecorated implementation for introspection/testing.
        public_api._implementation = implementation

        return public_api

    return decorator
def array_function_from_dispatcher(
        implementation, module=None, verify=True, docs_from_dispatcher=True):
    """Like array_function_dispatcher, but with function arguments flipped."""
    def decorator(dispatcher):
        # Build the usual decorator, then apply it to the fixed
        # implementation supplied up front.
        dispatch = array_function_dispatch(
            dispatcher, module, verify=verify,
            docs_from_dispatcher=docs_from_dispatcher)
        return dispatch(implementation)
    return decorator
| {
"content_hash": "86d347cae2cfa2ac1b042da44d81aa3e",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 35.76960784313726,
"alnum_prop": 0.6516376593120461,
"repo_name": "seberg/numpy",
"id": "cb550152ebede227848a8b503bedc26d3fec331f",
"size": "7297",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "numpy/core/overrides.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5837467"
},
{
"name": "C++",
"bytes": "190416"
},
{
"name": "Cython",
"bytes": "146283"
},
{
"name": "D",
"bytes": "19"
},
{
"name": "Dockerfile",
"bytes": "5201"
},
{
"name": "Forth",
"bytes": "3787"
},
{
"name": "Fortran",
"bytes": "24042"
},
{
"name": "Makefile",
"bytes": "1697"
},
{
"name": "Python",
"bytes": "10325819"
},
{
"name": "Shell",
"bytes": "25419"
},
{
"name": "Smarty",
"bytes": "4104"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
"""Script for running all test files (except memory leaks tests)."""
import os
import sys
from psutil.tests import unittest
from psutil.tests import VERBOSITY
def get_suite():
    """Build a suite out of every test_* module in this directory,
    skipping the memory-leak tests.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    suite = unittest.TestSuite()
    for fname in os.listdir(here):
        modname, ext = os.path.splitext(fname)
        if ext != '.py':
            continue
        if not modname.startswith('test_'):
            continue
        if modname.startswith('test_memory_leaks'):
            continue
        # Load via dotted path so that "make test" prints full test paths.
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(
            "psutil.tests.%s" % modname))
    return suite
def main():
    """Run the whole suite; exit 0 on success, 1 on any failure."""
    runner = unittest.TextTestRunner(verbosity=VERBOSITY)
    result = runner.run(get_suite())
    sys.exit(0 if result.wasSuccessful() else 1)


if __name__ == '__main__':
    main()
| {
"content_hash": "4c6eb7f321aeef75f27560910e275620",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 28.870967741935484,
"alnum_prop": 0.6435754189944134,
"repo_name": "jules185/IoT_Hackathon",
"id": "88bcd6208900eeb0563bc7ed93c07d06bc403796",
"size": "1079",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".homeassistant/deps/psutil/tests/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12396"
},
{
"name": "HTML",
"bytes": "1557"
},
{
"name": "JavaScript",
"bytes": "2843"
},
{
"name": "Python",
"bytes": "8347316"
}
],
"symlink_target": ""
} |
"""The Ironic Service API."""
import logging
import sys
from oslo.config import cfg
from six.moves import socketserver
from wsgiref import simple_server
from ironic.api import app
from ironic.common.i18n import _LI
from ironic.common import service as ironic_service
from ironic.openstack.common import log
# Global oslo.config object; api.host_ip / api.port are read in main().
CONF = cfg.CONF


class ThreadedSimpleServer(socketserver.ThreadingMixIn,
                           simple_server.WSGIServer):
    """WSGIServer that handles each request in its own thread.

    ThreadingMixIn supplies the per-request threading behaviour.
    """
    pass
def main():
    """Parse config/CLI options, then serve the Ironic API forever."""
    # Parse config file and command line options, then start logging.
    ironic_service.prepare_service(sys.argv)

    # Build and start the WSGI app.
    host, port = CONF.api.host_ip, CONF.api.port
    wsgi = simple_server.make_server(
        host, port,
        app.VersionSelectorApplication(),
        server_class=ThreadedSimpleServer)

    LOG = log.getLogger(__name__)
    LOG.info(_LI("Serving on http://%(host)s:%(port)s"),
             {'host': host, 'port': port})
    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    try:
        wsgi.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        pass
| {
"content_hash": "af0f9d0706ce07c866b55dd699fc03ce",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 26.244444444444444,
"alnum_prop": 0.6689246401354784,
"repo_name": "debayanray/ironic_backup",
"id": "14c5dbdb3ec149c9f10f2acbe14cf6891a792e2e",
"size": "1866",
"binary": false,
"copies": "4",
"ref": "refs/heads/fix_for_bug_1418327_node_boot_mode",
"path": "ironic/cmd/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2208014"
}
],
"symlink_target": ""
} |
"""consent file validation results
Revision ID: e9a549d1882d
Revises: 6de7e8a83d66, 1db87855f77c
Create Date: 2021-06-09 16:35:26.683816
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.consent_file import ConsentSyncStatus, ConsentType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'e9a549d1882d'
# Two down revisions: this migration is an Alembic merge point of the
# two parent heads.
down_revision = ('6de7e8a83d66', '1db87855f77c')
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the upgrade_<engine_name>() function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the downgrade_<engine_name>() function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Create the consent_file table holding per-participant results of
    consent file validation.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('consent_file',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('created', rdr_service.model.utils.UTCDateTime(), nullable=True),
    sa.Column('modified', rdr_service.model.utils.UTCDateTime(), nullable=True),
    sa.Column('participant_id', sa.Integer(), nullable=True),
    sa.Column('type', rdr_service.model.utils.Enum(ConsentType), nullable=True),
    sa.Column('file_exists', sa.Boolean(), nullable=True),
    sa.Column('is_signature_valid', sa.Boolean(), nullable=True),
    sa.Column('is_signing_date_valid', sa.Boolean(), nullable=True),
    sa.Column('signature_str', sa.String(length=200), nullable=True),
    sa.Column('is_signature_image', sa.Boolean(), nullable=True),
    sa.Column('signing_date', sa.Date(), nullable=True),
    sa.Column('other_errors', sa.String(length=200), nullable=True),
    sa.Column('sync_status', rdr_service.model.utils.Enum(ConsentSyncStatus), nullable=True),
    sa.ForeignKeyConstraint(['participant_id'], ['participant.participant_id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the consent_file table created by upgrade_rdr()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('consent_file')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"content_hash": "a62b83edfb49e1936bd75c0318a35295",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 125,
"avg_line_length": 38.94805194805195,
"alnum_prop": 0.735245081693898,
"repo_name": "all-of-us/raw-data-repository",
"id": "664c7c9a46682142fd46f539768074ed82accaf0",
"size": "2999",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/alembic/versions/e9a549d1882d_consent_file_validation_results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from .common import * | {
"content_hash": "49ff64c7550820cfccb80266786a46d8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 21,
"alnum_prop": 0.7619047619047619,
"repo_name": "abirafdirp/inventory",
"id": "1e2ec17cd85897f3c46e5026e75c1765f6d69db3",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/config/settings/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10398"
},
{
"name": "HTML",
"bytes": "50346"
},
{
"name": "JavaScript",
"bytes": "198706"
},
{
"name": "Python",
"bytes": "56912"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
"""The following code should emit a raising-non-exception.
Previously, it didn't, due to a bug in the check for bad-exception-context,
which prevented further checking on the Raise node.
"""
# pylint: disable=import-error, too-few-public-methods, useless-object-inheritance
from missing_module import missing
class Exc(object):
"""Not an actual exception."""
raise Exc from missing # [raising-non-exception]
| {
"content_hash": "4d1bf9c152777cb3079a64616f193210",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 32,
"alnum_prop": 0.7596153846153846,
"repo_name": "ruchee/vimrc",
"id": "7961c52206690ac50be57a2bcbcb745b3d89ef89",
"size": "416",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/functional/r/raise/raising_non_exception_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
__all__ = ["staff"]
# Selenium WebDriver
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
#from selenium.webdriver.common.keys import Keys
from gluon import current
from s3 import s3_debug
from tests import *
# -----------------------------------------------------------------------------
def staff():
    """ Tests for Staff: anonymous access to HRM must be denied,
    authenticated access must work, and a Staff member can be created. """

    config = current.test_config
    browser = config.browser

    # Logout
    logout()

    # Open HRM module
    url = "%s/hrm" % config.url
    browser.get(url)

    # Check no unauthenticated access
    try:
        elem = browser.find_element_by_xpath("//div[@class='error']")
    except NoSuchElementException:
        # No error div was rendered: either the module leaked to an
        # anonymous user, or something unexpected happened.
        if "Staff" in browser.title:
            assert 0, "HRM accessible to unauthenticated users!"
        else:
            raise RuntimeError
    else:
        # Expected path: the access-denied message was shown.
        s3_debug(elem.text)

    # Login
    login()

    # Open HRM module
    browser.get(url)

    # Check authenticated access
    if "Staff" not in browser.title:
        assert 0, "HRM inaccessible to authenticated user!"

    # Create a Staff member
    _create()
# -----------------------------------------------------------------------------
def _create():
    """ Create a Staff member via the HRM create form (form filling
    still TODO). """

    config = current.test_config
    browser = config.browser

    # Login
    login()

    # Open Create form
    url = "%s/hrm/human_resource/create?group=staff" % config.url
    browser.get(url)

    # Check authenticated access
    try:
        elem = browser.find_element_by_xpath("//div[@class='error']")
    except NoSuchElementException:
        # ok, continue
        pass
    else:
        # An error div here means the logged-in user may not open the form.
        #assert 0, elem.text
        assert 0, "Insufficient Privileges"

    # Fill in sample data
    # @ToDo
# END =========================================================================
| {
"content_hash": "6c5ad2814c7984de59f6e3bd55393f8c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 23.367088607594937,
"alnum_prop": 0.5471289274106176,
"repo_name": "flavour/cedarbluff",
"id": "748388e71cf7922fa476fb2bc7164aac9188afb7",
"size": "1846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tests/hrm/staff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9763403"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21560680"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
import numpy
import h5py
import matplotlib.pyplot as plt
from basemapInterp2D import basemapInterp2D
# Source image and output location for the generated shear sequence.
inImageFileName = '../syntheticCuspVortex/image001.h5'
outFolder = '.'

# Read the source image, its spatial bounds and its validity mask.
h5File = h5py.File(inImageFileName, 'r')
bounds = h5File["bounds"][...]
inImageData = h5File["data"][...]
inMask = numpy.array(h5File["mask"][...], float)
h5File.close()

# Coordinate grids spanning the image bounds.
inx = numpy.linspace(bounds[0], bounds[1], inImageData.shape[1])
iny = numpy.linspace(bounds[2], bounds[3], inImageData.shape[0])
(inX, inY) = numpy.meshgrid(inx, iny)

# Shear velocity field: vx varies linearly with y, vy is zero everywhere.
vx = 0.4*(inY - 0.5)
vy = numpy.zeros(inImageData.shape)

# Shared grayscale limits so all output frames use the same scale.
vmax = numpy.amax(inImageData)
vmin = numpy.amin(inImageData)

ts = numpy.linspace(0., 1., 4)
for tIndex in range(len(ts)):
    t = ts[tIndex]
    # find the position backwards in time
    outX = -vx*t + inX
    outY = -vy*t + inY
    # Sentinel marking samples that fall outside the source image.
    maskValue = -1e8
    outImageData = basemapInterp2D(inImageData, inx, iny, outX, outY,
                                   masked=maskValue, order=1)
    outMask = basemapInterp2D(inMask, inx, iny, outX, outY,
                              masked=maskValue, order=1)
    outImageData[outImageData == maskValue] = 0.0
    # Keep only pixels whose interpolated mask is (nearly) fully valid.
    outMask = numpy.array(outMask > 0.99, numpy.uint8)
    outImageData *= outMask

    # Write the advected frame, mask and time stamp to HDF5...
    h5File = h5py.File('%s/image%03i.h5' % (outFolder, tIndex+1), 'w')
    dataset = h5File.create_dataset("bounds", data=bounds)
    dataset = h5File.create_dataset("data", data=outImageData)
    dataset = h5File.create_dataset("mask", data=outMask)
    dataset = h5File.create_dataset("time", data=numpy.array(t))
    h5File.close()

    # ...and also as a flipped-row PNG preview.
    plt.imsave('%s/image%03i.png' % (outFolder, tIndex+1), outImageData[::-1, :],
               vmin=vmin, vmax=vmax, cmap='gray')

# fig = plt.figure(1)
# ax = fig.add_subplot(111)
# plt.imshow(inMask, extent=[bounds[0],bounds[1],bounds[3],bounds[2]], cmap='gray')
# ax.set_ylim(ax.get_ylim()[::-1])
# plt.axis('tight')
#
# fig = plt.figure(2)
# ax = fig.add_subplot(111)
# plt.imshow(outMask, extent=[bounds[0],bounds[1],bounds[3],bounds[2]], cmap='gray')
# ax.set_ylim(ax.get_ylim()[::-1])
# plt.axis('tight')
#
# plt.show()
| {
"content_hash": "fe5e1f19dd2679f1c7a218e4e2284d80",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 89,
"avg_line_length": 28.114285714285714,
"alnum_prop": 0.6808943089430894,
"repo_name": "gabyx/acciv",
"id": "651d61ca336cde28d7d18b9a68aed3eba699c8b1",
"size": "1986",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/syntheticShear/makeImages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1358"
},
{
"name": "C++",
"bytes": "286340"
},
{
"name": "CMake",
"bytes": "5926"
},
{
"name": "Makefile",
"bytes": "649"
},
{
"name": "Matlab",
"bytes": "17000"
},
{
"name": "Python",
"bytes": "60835"
},
{
"name": "Shell",
"bytes": "7525"
}
],
"symlink_target": ""
} |
from functools import update_wrapper
from django.http import Http404, HttpResponseRedirect
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import logout as auth_logout, REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
# Marker key identifying the admin login form POST -- used by login
# handling elsewhere in the admin; confirm against the login view.
LOGIN_FORM_KEY = 'this_is_the_login_form'


class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is already registered."""
    pass


class NotRegistered(Exception):
    """Raised by AdminSite.unregister() when a model was never registered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """

    # Overridable hooks; None means "use the admin default".
    login_form = None
    # Template overrides for the corresponding admin views.
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
def __init__(self, name='admin', app_name='admin'):
    """Create an empty site registry with the default bulk actions."""
    self.name = name
    self.app_name = app_name
    # Maps model class -> ModelAdmin instance.
    self._registry = {}
    # 'delete_selected' is always available; keep a copy of the
    # built-in actions separate from later per-site additions.
    self._actions = {'delete_selected': actions.delete_selected}
    self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
admin_class.validate(model)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if LOGIN_FORM_KEY in request.POST and request.user.is_authenticated():
auth_logout(request)
if not self.has_permission(request):
if request.path == reverse('admin:logout',
current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut),
name='view_on_site'),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in six.iteritems(self._registry):
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_label': app_label,
'app_url': reverse('admin:app_list', kwargs={'app_label': app_label}, current_app=self.name),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(six.itervalues(app_dict))
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
}
context.update(extra_context or {})
return TemplateResponse(request,self.index_template or
'admin/index.html', context,
current_app=self.name)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
app_dict['models'].append(model_dict),
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_label': app_label,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
}
context.update(extra_context or {})
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context, current_app=self.name)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
| {
"content_hash": "9af229c9de5dc680c2b13dd00f276536",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 121,
"avg_line_length": 41.38274336283186,
"alnum_prop": 0.5609730018711574,
"repo_name": "edisonlz/fruit",
"id": "b7f643b7f46688abff39e1e98993face94f58d97",
"size": "18705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_project/base/site-packages/django/contrib/admin/sites.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1482"
},
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "C",
"bytes": "3085"
},
{
"name": "C++",
"bytes": "4823"
},
{
"name": "CSS",
"bytes": "660927"
},
{
"name": "DIGITAL Command Language",
"bytes": "27853"
},
{
"name": "GAP",
"bytes": "6045"
},
{
"name": "Go",
"bytes": "13616"
},
{
"name": "Groff",
"bytes": "7199"
},
{
"name": "HTML",
"bytes": "7678961"
},
{
"name": "Java",
"bytes": "208173"
},
{
"name": "JavaScript",
"bytes": "2626051"
},
{
"name": "Makefile",
"bytes": "16810"
},
{
"name": "Nginx",
"bytes": "19215"
},
{
"name": "PHP",
"bytes": "205978"
},
{
"name": "Perl",
"bytes": "27627"
},
{
"name": "Python",
"bytes": "15609476"
},
{
"name": "Shell",
"bytes": "13663"
},
{
"name": "TeX",
"bytes": "60714"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import time
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.core.resources import Execute
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
# Alert label templates ({0} = elapsed seconds / host, {1} = error text).
OK_MESSAGE = "Metastore OK - Hive command took {0:.3f}s"
CRITICAL_MESSAGE = "Metastore on {0} failed ({1})"

# Configuration keys in {{site/property}} token form, resolved by the Ambari
# alert framework before execute() is called.
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
HIVE_METASTORE_URIS_KEY = '{{hive-site/hive.metastore.uris}}'

# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'

# NOTE(review): these two thresholds are not referenced anywhere in this
# script — confirm whether the alert framework reads them by name.
PERCENT_WARNING = 200
PERCENT_CRITICAL = 200

# default keytab location
SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'

# default smoke principal
SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'

# default smoke user
SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
SMOKEUSER_DEFAULT = 'ambari-qa'

# Preferred (IOP) and legacy Hive install locations; execute() checks at
# runtime which layout is present.
HIVE_CONF_DIR = '/usr/iop/current/hive-metastore/conf/conf.server'
HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'

HIVE_BIN_DIR = '/usr/iop/current/hive-metastore/bin'
HIVE_BIN_DIR_LEGACY = '/usr/lib/hive/bin'

HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
HADOOPUSER_DEFAULT = 'hadoop'
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_tokens():
  """
  Returns a tuple of tokens in the format {{site/property}} that will be used
  to build the dictionary passed into execute
  """
  # One token per configuration value the alert needs resolved.
  return (
    SECURITY_ENABLED_KEY,
    SMOKEUSER_KEYTAB_KEY,
    SMOKEUSER_PRINCIPAL_KEY,
    HIVE_METASTORE_URIS_KEY,
    SMOKEUSER_KEY,
    KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
  )
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running

  NOTE(review): the mutable default arguments ({}) are shared across calls;
  harmless here because they are only read, never mutated.
  """
  if configurations is None:
    return (('UNKNOWN', ['There were no configurations supplied to the script.']))

  if not HIVE_METASTORE_URIS_KEY in configurations:
    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))

  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')

  security_enabled = False
  if SECURITY_ENABLED_KEY in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'

  # defaults
  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
  smokeuser = SMOKEUSER_DEFAULT

  # check script params
  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]

  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]

  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]

  # check configurations last as they should always take precedence
  if SMOKEUSER_PRINCIPAL_KEY in configurations:
    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]

  if SMOKEUSER_KEY in configurations:
    smokeuser = configurations[SMOKEUSER_KEY]

  result_code = None

  try:
    if security_enabled:
      if SMOKEUSER_KEYTAB_KEY in configurations:
        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]

      # Get the configured Kerberos executable search paths, if any
      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
      else:
        kerberos_executable_search_paths = None

      # Obtain a Kerberos ticket as the smoke user before invoking Hive.
      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")

      Execute(kinitcmd, user=smokeuser,
        path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
        timeout=10)

    if host_name is None:
      host_name = socket.getfqdn()

    # Pick the metastore URI that refers to this host.
    # NOTE(review): if no URI matches, metastore_uri stays unbound and the
    # resulting NameError is swallowed by the outer except, producing an
    # UNKNOWN result — confirm this is the intended behavior.
    for uri in metastore_uris:
      if host_name in uri:
        metastore_uri = uri

    # Prefer the IOP install layout; fall back to the legacy locations.
    conf_dir = HIVE_CONF_DIR_LEGACY
    bin_dir = HIVE_BIN_DIR_LEGACY
    if os.path.exists(HIVE_CONF_DIR):
      conf_dir = HIVE_CONF_DIR
      bin_dir = HIVE_BIN_DIR

    # Minimal-retry hive invocation so a broken metastore fails fast.
    cmd = format("export HIVE_CONF_DIR='{conf_dir}' ; "
                 "hive --hiveconf hive.metastore.uris={metastore_uri}\
                 --hiveconf hive.metastore.client.connect.retry.delay=1s\
                 --hiveconf hive.metastore.failure.retries=1\
                 --hiveconf hive.metastore.connect.retries=1\
                 --hiveconf hive.metastore.client.socket.timeout=20s\
                 --hiveconf hive.execution.engine=mr -e 'show databases;'")

    start_time = time.time()

    try:
      # Time a trivial metastore query ('show databases') as the health check.
      Execute(cmd, user=smokeuser,
        path=["/bin/", "/usr/bin/", "/usr/sbin/", bin_dir],
        timeout=60 )

      total_time = time.time() - start_time

      result_code = 'OK'
      label = OK_MESSAGE.format(total_time)
    except Exception, exception:
      result_code = 'CRITICAL'
      label = CRITICAL_MESSAGE.format(host_name, str(exception))

  except Exception, e:
    label = str(e)
    result_code = 'UNKNOWN'

  return ((result_code, [label]))
"content_hash": "6faee70fb535ab8f0ea4cdd99eeef284",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 95,
"avg_line_length": 35.85164835164835,
"alnum_prop": 0.724904214559387,
"repo_name": "alexryndin/ambari",
"id": "9d4d6a2c8ec9afc4b8ccd7505386b1818732bbc4",
"size": "6548",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_metastore.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import struct
import re
import base64
import sys
import os
import os.path
# Global config parsed from the key=value file given on the command line.
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    # Walk the string two hex digits (one byte) at a time, then reassemble
    # the byte pairs in reverse order.
    byte_pairs = [s[idx:idx + 2].encode() for idx in range(0, len(s), 2)]
    return b''.join(reversed(byte_pairs)).decode()
class BitcoinRPC:
    """Minimal JSON-RPC-over-HTTP client for bitcoind with basic auth."""

    def __init__(self, host, port, username, password):
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST *obj* as JSON and return the decoded reply, or None on failure."""
        try:
            self.conn.request('POST', '/', json.dumps(obj),
                              {'Authorization': self.authhdr,
                               'Content-type': 'application/json'})
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        body = resp.read().decode('utf-8')
        return json.loads(body)

    @staticmethod
    def build_request(idx, method, params):
        """Build one JSON-RPC 1.1 request object with the given id."""
        request = {'version': '1.1',
                   'method': method,
                   'id': idx}
        request['params'] = [] if params is None else params
        return request

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null 'error' member."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print block hashes for min_height..max_height via batched getblockhash."""
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    end_height = settings['max_height']
    while height <= end_height:
        # Batch as many requests as remain, capped per RPC round-trip.
        num_blocks = min(end_height + 1 - height, max_blocks_per_call)
        batch = [rpc.build_request(offset, 'getblockhash', [height + offset])
                 for offset in range(num_blocks)]

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for offset, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height + offset, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == offset)  # assume replies are in-sequence
            block_hash = resp_obj['result']
            if settings['rev_hash_bytes'] == 'true':
                block_hash = hex_switchEndian(block_hash)
            print(block_hash)

        height += num_blocks
def get_rpc_cookie():
    """Load RPC credentials from the ``.cookie`` file in settings['datadir'].

    bitcoind writes a single ``user:password`` line to that file on startup;
    this populates settings['rpcuser'] and settings['rpcpassword'] from it.
    """
    cookie_path = os.path.join(os.path.expanduser(settings['datadir']), '.cookie')
    # Open the cookie file
    with open(cookie_path, 'r') as f:
        # BUG FIX: readline() keeps the trailing newline; without strip() the
        # stored password ended in '\n' and authentication failed.
        combined = f.readline().strip()
    combined_split = combined.split(":")
    settings['rpcuser'] = combined_split[0]
    settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Parse the simple key=value config file.
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue

            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Fill in connection and block-range defaults.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'

    # Credentials come either from rpcuser/rpcpassword or from the cookie
    # file under datadir; at least one source is required.
    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        # BUG FIX: was `file=stderr` (a NameError — only `sys` is imported);
        # the intended stream is sys.stderr.
        print("Missing datadir or username and/or password in cfg file",
              file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()

    get_block_hashes(settings)
| {
"content_hash": "aad3d6d0a26201a6261ee3c2858ab1b4",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 90,
"avg_line_length": 28.812080536912752,
"alnum_prop": 0.6671325413463778,
"repo_name": "dagurval/bitcoinxt",
"id": "d73fcb9d5740a4b78a500d6383b19f3ccecd39c7",
"size": "4589",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/linearize/linearize-hashes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "717784"
},
{
"name": "C++",
"bytes": "4762139"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "180068"
},
{
"name": "Makefile",
"bytes": "103364"
},
{
"name": "Objective-C",
"bytes": "26456"
},
{
"name": "Objective-C++",
"bytes": "7234"
},
{
"name": "Python",
"bytes": "894288"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "18453"
},
{
"name": "Shell",
"bytes": "53938"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .constants import MSG_REMOVED
from .removed_message import RemovedMessage
from .server_message_parser import ServerMessageParser
__all__ = ['RemovedMessageParser']
class RemovedMessageParser(ServerMessageParser):
    """Parses DDP 'removed' server messages into RemovedMessage objects."""

    # Message type this parser is registered for.
    MESSAGE_TYPE = MSG_REMOVED

    def parse(self, pod):
        # `pod` is the decoded message dict; 'removed' carries only the
        # collection name and the removed document's id.
        return RemovedMessage(pod['collection'], pod['id'])
| {
"content_hash": "29affa172e0b567790a3c8f66f30062f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 26.529411764705884,
"alnum_prop": 0.7516629711751663,
"repo_name": "foxdog-studios/pyddp",
"id": "bfcc184f305d7a510d9c98e0d3e07d4a42489023",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ddp/messages/server/removed_message_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "182982"
},
{
"name": "Shell",
"bytes": "3484"
}
],
"symlink_target": ""
} |
"""
calc.py
Implements a very simple calculator
"""
import math
def sum(a, b):
    """Return the sum of a and b.

    NOTE(review): shadows the builtin ``sum``; the name is kept for
    interface compatibility.
    """
    total = a + b
    return total
def sub(a, b):
    """Return the difference a - b."""
    difference = a - b
    return difference
def div(a, b):
    """Return the quotient a / b.

    Raises:
        ValueError: if b is zero.
    """
    if b == 0:
        # Removed the dead `return 0` that followed this raise in the
        # original — it was unreachable.
        raise ValueError("division by zero")
    return a / b
def exp(a, b):
    """Return a raised to the power b, as a float (delegates to math.pow)."""
    power = math.pow(a, b)
    return power
def fibonacci(i):
    """Return the i-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Raises:
        ValueError: if i is negative.
    """
    if i < 0:
        raise ValueError("i must be non-negative")
    # Iterative computation: O(i) time and O(1) space, replacing the
    # original naive double recursion which was exponential in i.
    a, b = 0, 1
    for _ in range(i):
        a, b = b, a + b
    return a
| {
"content_hash": "d6a9228e9cd13d0f55c4e656111f56e7",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 14.75609756097561,
"alnum_prop": 0.547107438016529,
"repo_name": "albarji/qatest",
"id": "cfaaca4c0f32bdb32547e7d635cb0fba41ae5890",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1035"
}
],
"symlink_target": ""
} |
from django_bulk_update.helper import bulk_update as bulk_update_helper
from django.core.management import BaseCommand
from casexml.apps.phone.models import SyncLogSQL, LOG_FORMAT_SIMPLIFIED, \
properly_wrap_sync_log
class Command(BaseCommand):
    """
    Forces a 412 for a given user by creating bad state in the all synclogs
    for the given user after the given date
    """
    def add_arguments(self, parser):
        # Both arguments are required positionals.
        parser.add_argument('user_id')
        parser.add_argument('date')

    def handle(self, user_id, date, **options):
        # SQL
        # NOTE(review): the class docstring says "after the given date", but
        # this filter matches the exact date only — confirm which is intended.
        synclogs_sql = SyncLogSQL.objects.filter(
            user_id=user_id,
            date=date,
            log_format=LOG_FORMAT_SIMPLIFIED
        )
        for synclog in synclogs_sql:
            doc = properly_wrap_sync_log(synclog.doc)
            # Poison the case list so the next sync fails its state check and
            # the client gets a 412, forcing a fresh restore.
            doc.case_ids_on_phone = {'broken to force 412'}
            synclog.doc = doc.to_json()
        # Persist all modified rows in one bulk operation.
        bulk_update_helper(synclogs_sql)
| {
"content_hash": "dca8d7f120bdcbbb945e7b65f6380917",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 32.86206896551724,
"alnum_prop": 0.6411332633788038,
"repo_name": "dimagi/commcare-hq",
"id": "c78e19e2df2a73f6395fbb06e88e9c600e848b4d",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/casexml/apps/phone/management/commands/invalidate_sync_heads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import subprocess
import os
import argparse
# CLI contract used by the DLab provisioning engine; every argument is a
# plain string and defaults to '' so the script can run with partial input.
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', type=str, default='')
parser.add_argument('--user_name', type=str, default='')
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--dataproc_version', type=str, default='')
parser.add_argument('--nb_user', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
    spark_def_path = "/usr/lib/spark/conf/spark-defaults.conf"

    def capture(command, path):
        """Run `command` in a shell and store its stdout in `path`.

        subprocess.check_output() returns bytes on Python 3, so decode
        before the text-mode write (the original wrote raw bytes, which
        raises TypeError on Python 3; decoding is also safe on Python 2).
        open(path, 'w') creates the file, so the former `touch` calls
        are unnecessary. Returns the captured text.
        """
        output = subprocess.check_output(command, shell=True).decode()
        with open(path, 'w') as outfile:
            outfile.write(output)
        return output

    # Record toolchain versions for later provisioning steps.
    capture("R --version | awk '/version / {print $3}'", '/tmp/r_version')
    python_ver = capture("python3.5 -V 2>/dev/null | awk '{print $2}'", '/tmp/python_version')
    if python_ver == '':
        # python3.5 is absent -- fall back to python3.4.
        capture("python3.4 -V 2>/dev/null | awk '{print $2}'", '/tmp/python_version')
    capture("dpkg -l | grep spark-core | tr -s ' ' '-' | cut -f 4 -d '-'", '/tmp/spark_version')
    capture("dpkg -l | grep hadoop | head -n 1 | tr -s ' ' '-' | cut -f 3 -d '-'", '/tmp/hadoop_version')

    # Archive the Hadoop client jars and the Spark installation, plus checksums.
    os.system('/bin/tar -zhcvf /tmp/jars.tar.gz --no-recursion --absolute-names --ignore-failed-read /usr/lib/hadoop/* /usr/lib/hadoop/client/*')
    os.system('/bin/tar -zhcvf /tmp/spark.tar.gz -C /usr/lib/ spark')
    capture('md5sum /tmp/jars.tar.gz', '/tmp/jars-checksum.chk')
    capture('md5sum /tmp/spark.tar.gz', '/tmp/spark-checksum.chk')

    # Upload cluster configuration to the per-cluster config folder.
    os.system('gsutil -m cp /etc/hive/conf/hive-site.xml gs://{0}/{1}/{2}/config/hive-site.xml'.format(args.bucket, args.user_name, args.cluster_name))
    os.system('gsutil -m cp /etc/hadoop/conf/* gs://{0}/{1}/{2}/config/'.format(args.bucket, args.user_name, args.cluster_name))

    # Prepare the notebook user's HDFS home directory.
    os.system('sudo -u {0} hdfs dfs -mkdir /user/{0}'.format(args.nb_user))
    os.system('sudo -u {0} hdfs dfs -chown -R {0}:{0} /user/{0}'.format(args.nb_user))

    # Jars are keyed by Dataproc version; everything else is per-cluster.
    os.system('gsutil -m cp /tmp/jars.tar.gz gs://{0}/jars/{1}/'.format(args.bucket, args.dataproc_version))
    os.system('gsutil -m cp /tmp/jars-checksum.chk gs://{0}/jars/{1}/'.format(args.bucket, args.dataproc_version))
    for artifact in (spark_def_path, '/tmp/python_version', '/tmp/spark_version',
                     '/tmp/r_version', '/tmp/hadoop_version', '/tmp/spark.tar.gz',
                     '/tmp/spark-checksum.chk'):
        os.system('gsutil -m cp {0} gs://{1}/{2}/{3}/'.format(artifact, args.bucket, args.user_name, args.cluster_name))
| {
"content_hash": "3bb55c025108a3aca793209bcf307104",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 151,
"avg_line_length": 59.44444444444444,
"alnum_prop": 0.642456608811749,
"repo_name": "epam/DLab",
"id": "ebf69905a3bdbac574d401eded9b205b063b7a5d",
"size": "4514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_jars_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81633"
},
{
"name": "HTML",
"bytes": "110323"
},
{
"name": "Java",
"bytes": "2473499"
},
{
"name": "Jupyter Notebook",
"bytes": "80955"
},
{
"name": "Python",
"bytes": "1861086"
},
{
"name": "R",
"bytes": "4894"
},
{
"name": "Ruby",
"bytes": "62731"
},
{
"name": "Shell",
"bytes": "18826"
},
{
"name": "TypeScript",
"bytes": "363308"
}
],
"symlink_target": ""
} |
import bpy
import os
import time
import struct
from mathutils import Vector
from subprocess import Popen, PIPE
from time import localtime, strftime
# Global constants
FORMAT_VERSION = 1  # version tag written into the RMA file header
DEFAULT_PART_NAME = 'default'  # part name used when a mesh has no materials
PART_NAME_LENGTH = 32  # fixed on-disk byte length of part name fields
def cutName(name, maxLength):
    """Return `name` truncated or null-padded to exactly `maxLength` chars.

    The original spelled this as two adjacent string literals, so only the
    first line was actually the docstring (the second was a dead statement);
    merged here. Free space is filled with null terminators.
    """
    # str.ljust with a '\0' fill covers both the truncate and the pad case.
    return name[:maxLength].ljust(maxLength, '\0')
def getMeshObjects(objects):
    """Return the 'MESH' objects that neither define collision geometry
    nor are explicitly excluded from export."""
    return [
        candidate for candidate in objects
        if candidate.type == 'MESH'
        and not candidate.get('rvDefinesCollision', False)
        and not candidate.get('rvExclude', False)
    ]
def getDefiningObjects(objects):
    """Return the 'MESH' objects flagged as collision definitions that are
    not excluded from export."""
    defining = []
    for candidate in objects:
        if candidate.type != 'MESH':
            continue
        if not candidate.get('rvDefinesCollision', False):
            continue
        if candidate.get('rvExclude', False):
            continue
        defining.append(candidate)
    return defining
def saveVertexType4(fh, co, normal, uv, weights):
    """Write one type-4 vertex record (16 little-endian-native floats):
    position with Y/Z swapped, normal with Y/Z swapped, UV, four zero
    padding floats, then four bone weights."""
    record = (
        co.x, co.z, co.y,
        normal.x, normal.z, normal.y,
        uv.x, uv.y,
        0.0, 0.0, 0.0, 0.0,
        weights[0], weights[1], weights[2], weights[3],
    )
    fh.write(struct.pack('=16f', *record))
def saveVertex(fh, obj, fnormal, useSmooth, uvLayer, vtx, vInx):
    """Write a single face-vertex to the binary stream via saveVertexType4.

    fh        -- open binary file handle
    obj       -- Blender object whose location offsets the vertex position
    fnormal   -- face normal, used when the face is flat-shaded
    useSmooth -- if True, use the per-vertex normal instead of the face normal
    uvLayer   -- tessface UV layer data for this face, or None for default UVs
    vtx       -- the mesh vertex to write
    vInx      -- index of the vertex within the face (0-3), addresses uv_raw
    """
    uv = Vector((0.0, 1.0))
    # Copy the face normal so the negation below never touches the original.
    normal = Vector((fnormal.x, fnormal.y, fnormal.z))
    weights = (0.0, 0.0, 0.0, 0.0)  # bone weights are not exported here
    co = obj.location + vtx.co;
    co.x = -co.x  # mirror X to convert the coordinate system
    if uvLayer != None:
        uv.x = uvLayer.uv_raw[vInx * 2 + 0]
        uv.y = -uvLayer.uv_raw[vInx * 2 + 1]  # V axis is flipped on export
    if useSmooth:
        # NOTE(review): this aliases vtx.normal and then negates its X in
        # place, which looks like it mutates the mesh's vertex normal --
        # confirm whether vtx.normal returns a copy in this Blender API.
        normal = vtx.normal
        normal.x = -normal.x
    saveVertexType4(fh, co, normal, uv, weights)
def analyzeMeshObject(obj, meshFaces):
    """Collect export statistics for one mesh object.

    `meshFaces` must already be sorted by material index so that each part
    covers a contiguous triangle range.

    Returns (centerOfMass, halfSize, trianglesCount, parts) where `parts`
    is a list of {'name', 'start', 'count'} dicts, one per used material
    (or a single default part when the mesh has no materials); 'start' and
    'count' are measured in face-vertex indices (3 per triangle).
    """
    global DEFAULT_PART_NAME
    mesh = obj.data
    parts = []
    halfSize = obj.dimensions * 0.5
    candidParts = []
    centerOfMass = Vector((0.0, 0.0, 0.0))
    trianglesCount = 0
    meshVerticesCount = len(mesh.vertices)  # NOTE: currently unused
    meshMaterialCount = len(mesh.materials)
    if meshMaterialCount > 0:
        # Create parts. It is important to iterate it manually
        # so material names order is preserved.
        for i in range(meshMaterialCount):
            candidParts.append({'name': mesh.materials[i].name, 'start': 0, 'count': 0})
    else:
        # If there are no materials defined, create default part placeholder.
        candidParts.append({'name': DEFAULT_PART_NAME, 'start': 0, 'count': 0})
    for f in meshFaces:
        # Some faces can be quads - values have to doubled then.
        modifier = 2 if len(f.vertices) == 4 else 1
        candidParts[f.material_index]['count'] += 3 * modifier
        trianglesCount += 1 * modifier
    # Update part`s start attribute so they take other parts into account.
    for i in range(0, len(candidParts)):
        if i > 0:
            candidParts[i]['start'] = candidParts[i - 1]['start'] + candidParts[i - 1]['count']
    # Only export parts that have any triangles assigned.
    for p in candidParts:
        if p['count'] > 0:
            parts.append(p)
    # Axis-aligned bounds of the raw (untransformed) vertex coordinates.
    centerMax = Vector((-9999.999, -9999.999, -9999.999))
    centerMin = Vector(( 9999.999, 9999.999, 9999.999))
    for v in mesh.vertices:
        centerMax.x = max(centerMax.x, v.co.x)
        centerMin.x = min(centerMin.x, v.co.x)
        centerMax.y = max(centerMax.y, v.co.y)
        centerMin.y = min(centerMin.y, v.co.y)
        centerMax.z = max(centerMax.z, v.co.z)
        centerMin.z = min(centerMin.z, v.co.z)
    # NOTE(review): this takes abs() of each extreme rather than the plain
    # midpoint (max + min) / 2; for bounds straddling the origin the result
    # differs from the geometric centre -- confirm the engine expects this.
    centerOfMass.x = abs(centerMax.x) - abs(centerMin.x)
    centerOfMass.y = abs(centerMax.y) - abs(centerMin.y)
    centerOfMass.z = abs(centerMax.z) - abs(centerMin.z)
    centerOfMass *= 0.5
    return centerOfMass, halfSize, trianglesCount, parts
def saveDefining(obj, fh):
    """Write a collision-defining mesh: vertex/triangle counts, mirrored
    vertex positions, triangle indices (quads split into two triangles),
    and one zero flag byte per triangle."""
    mesh = obj.data
    # Tessellate so triangle data is available on this mesh.
    mesh.calc_tessface()
    # Quads contribute two triangles each, all other faces one.
    triangleTotal = sum(2 if len(face.vertices) == 4 else 1
                        for face in mesh.tessfaces)
    fh.write(struct.pack('=II', len(mesh.vertices), triangleTotal))
    for vert in mesh.vertices:
        position = obj.location + vert.co
        # Mirror X to convert the coordinate system; swap Y and Z.
        fh.write(struct.pack('=fff', -position.x, position.z, position.y))
    for face in mesh.tessfaces:
        fh.write(struct.pack('=III', face.vertices[0], face.vertices[1], face.vertices[2]))
        if len(face.vertices) == 4:
            # Second triangle of the quad.
            fh.write(struct.pack('=III', face.vertices[2], face.vertices[3], face.vertices[0]))
    for face in mesh.tessfaces:
        fh.write(struct.pack('=B', 0))
        if len(face.vertices) == 4:
            fh.write(struct.pack('=B', 0))
def saveMesh(obj, fh):
    """Write one exportable mesh: 32-byte name, counts, part table,
    face-vertices (quads split into two triangles), per-triangle
    smoothness flags, then every child collision mesh."""
    mesh = obj.data
    defining = getDefiningObjects(obj.children)
    # Create tessfaces for this mesh so triangles can be exported
    mesh.calc_tessface()
    # Sort local copy of faces by material index (ascending)
    meshFaces = sorted(mesh.tessfaces, key=lambda x: x.material_index, reverse=False)
    # NOTE: centerOfMass and halfSize are computed but not written here.
    centerOfMass, halfSize, trianglesCount, parts = analyzeMeshObject(obj, meshFaces)
    # Save mesh header
    fh.write(cutName(obj.name, 32).encode('ascii'))  # 32 matches PART_NAME_LENGTH, kept literal
    fh.write(struct.pack('=III', len(defining), trianglesCount, len(parts)))
    # Save parts information
    for p in parts:
        fh.write(cutName(p['name'], PART_NAME_LENGTH).encode('ascii'))
        fh.write(struct.pack('=II', p['start'], p['count']))
    # Save vertices. Split quads into triangles.
    for f in meshFaces:
        if len(mesh.tessface_uv_textures) > 0:
            uvLayer = mesh.tessface_uv_textures[0].data[f.index]
        else:
            uvLayer = None
        v0 = mesh.vertices[f.vertices[0]]
        v1 = mesh.vertices[f.vertices[1]]
        v2 = mesh.vertices[f.vertices[2]]
        saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v0, 0)
        saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v1, 1)
        saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v2, 2)
        # If face was a quad...
        if len(f.vertices) == 4:
            v3 = mesh.vertices[f.vertices[3]]
            saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v2, 2)
            saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v3, 3)
            saveVertex(fh, obj, f.normal, f.use_smooth, uvLayer, v0, 0)
    # Save face smoothness information. Split quads into triangles.
    for f in meshFaces:
        fh.write(struct.pack('=B', f.use_smooth))
        # If face was a quad...
        if len(f.vertices) == 4:
            fh.write(struct.pack('=B', f.use_smooth))
    for o in defining:
        saveDefining(o, fh)
def save(operator, context, maaFilePath, deleteRma):
    """Export the scene's exportable meshes to `maaFilePath`.

    Writes an intermediate .rma file next to the target, runs the bundled
    `processing` tool to convert it to .maa, optionally deletes the
    intermediate file, and returns {'FINISHED'} for Blender's operator API.
    """
    global FORMAT_VERSION
    # time.clock() was deprecated and removed in Python 3.8;
    # perf_counter() (available since 3.3) is the documented replacement.
    startTime = time.perf_counter()
    objectsToExport = getMeshObjects(context.scene.objects)
    rmaFilePath = os.path.splitext(maaFilePath)[0] + '.rma'
    # `with` guarantees the file is closed even if an exporter raises.
    with open(rmaFilePath, 'wb') as fh:
        fh.write('RMA\0'.encode('ascii'))
        fh.write(struct.pack('=II', FORMAT_VERSION, len(objectsToExport)))
        for o in objectsToExport:
            saveMesh(o, fh)
    # Execute processing on the RMA file to produce the final MAA file
    executablePath = os.path.dirname(__file__) + '/processing/processing'
    process = Popen([executablePath, rmaFilePath, maaFilePath], stdout=PIPE)
    (log, err) = process.communicate()
    processExitCode = process.wait()
    if deleteRma:
        os.remove(rmaFilePath)
    finishMoment = strftime("%H:%M:%S", localtime())
    duration = time.perf_counter() - startTime
    if processExitCode == 0:
        print('[{0}] MAA file "{1}" exported in: {2:.2f} s'.format(finishMoment, maaFilePath,
            duration))
    else:
        # Typo fix in the user-facing message: "occured" -> "occurred".
        print('Error occurred while exporting file "{0}". Log:{1}'.format(maaFilePath, log))
    return {'FINISHED'}
| {
"content_hash": "9c669016938520d411ef80e1acda8212",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 95,
"avg_line_length": 31.58984375,
"alnum_prop": 0.617410659082478,
"repo_name": "creepydragon/revision1",
"id": "9b5aa37236cb07c5b970a9c429d942319c52710e",
"size": "8178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blender/addons/rv_io_mesh_maa/export_maa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "65252"
},
{
"name": "C++",
"bytes": "1256414"
},
{
"name": "Lex",
"bytes": "2552"
},
{
"name": "Python",
"bytes": "37861"
},
{
"name": "QMake",
"bytes": "30527"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
import re
import sys
import os
import os.path
import logging
from shutil import copy
from datetime import datetime
from mako.template import Template
from mako.lookup import TemplateLookup
#: Location within source directory to cache compiled mako modules
MAKO_MODULES_DIR = '.mako_modules'

#: Paths to ignore when building (pre-compiled regexes).
#: Replaces the former index-mutating `for i in range(len(...))` loop with
#: a comprehension that compiles the patterns in one pass.
IGNORE_PATTERNS = [re.compile(pattern) for pattern in (
    r'\.DS_Store$',
    r'\./\.hgignore$',
    r'\./\.gitignore$',
    r'\.swp$',
    r'^\./templates',
    r'^\./\.hg',
    r'^\./\.git',
    r'^\./{0}'.format(MAKO_MODULES_DIR),
)]
logger = logging.getLogger(__package__)
class AttributeDict(dict):
    """A dict whose entries can also be read and written as attributes.

    Missing attributes raise KeyError (delegated to item access), matching
    the original's `__getattr__ = dict.__getitem__` behaviour.
    """

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value
class Project(object):
    """A static-site project: renders every Mako template under `src_root`
    into `build_root`, and copies or symlinks every other file."""
    def __init__(self, src_root, build_root, **kwargs):
        """Accepted keyword: `copy` -- copy non-template files into the
        build tree instead of symlinking them."""
        self.src_root = os.path.abspath(src_root)
        self.build_root = os.path.abspath(build_root)
        self.copy_flag = kwargs.get('copy')
        self.lookup = TemplateLookup(
            directories=[self.src_root],
            module_directory=os.path.join(self.src_root, MAKO_MODULES_DIR),
            output_encoding='utf-8',
            input_encoding='utf-8',
        )
        # Make any python modules in src_root/templates/ available for import
        sys.path.insert(0, os.path.join(self.src_root, 'templates'))
    def get_template_metadata(self, root_relative_src_path):
        """Opens a template and collects its __meta__ module-level dictionary"""
        # Skip non-templates
        if os.path.splitext(root_relative_src_path)[1] not in ['.mako']:
            return None
        # Implicit entries; the template's own __meta__ may add to or
        # override them.
        metadata = {'src_path': os.path.normpath(root_relative_src_path),
                    'build_path': os.path.normpath(root_relative_src_path).replace('.mako', ''),
                    }
        template = Template(filename=root_relative_src_path,
                            output_encoding='utf-8',
                            input_encoding='utf-8')
        metadata.update(getattr(template.module, '__meta__', {}))
        return AttributeDict(metadata)
    def build_template(self, root_relative_src_path):
        """Builds a single template"""
        def relative_path(path):
            """Translates a root-relative path to a path relative to this file"""
            backout_path = os.path.relpath('.', os.path.dirname(root_relative_src_path))
            return os.path.normpath(os.path.join(backout_path, path))
        # Absolute paths to source and build objects
        src_path = os.path.normpath(os.path.join(self.src_root, root_relative_src_path))
        build_path = os.path.normpath(os.path.join(self.build_root, root_relative_src_path))
        # Remove any stale build product (possibly a symlink from a prior run)
        if os.path.exists(build_path):
            os.unlink(build_path)
        extension = os.path.splitext(src_path)[1]
        # Skip non-templates, and just link them to the source file
        if extension not in ['.mako']:
            if self.copy_flag:
                logger.info("Copying %s" % root_relative_src_path)
                copy(src_path, build_path)
            else:
                logger.info("Symlinking %s" % root_relative_src_path)
                os.symlink(src_path, build_path)
            return
        # Determine output filename
        build_path = build_path.replace('.mako', '')
        # Render the template to the output path
        logger.info("Rendering %s" % root_relative_src_path.replace('.mako', ''))
        # Find this template's metadata; exactly one entry must match or the
        # tuple unpack raises (so _scan() must have run first).
        (md,) = [t for t in self.templates
                 if t['src_path'] == os.path.normpath(root_relative_src_path)]
        with open(build_path, 'w') as fh:
            mako_src = self.lookup.get_template(root_relative_src_path)
            fh.write(mako_src.render(
                relative_path=relative_path,
                templates=self.templates,
                articles=self.articles,
                meta=md,
            ))
    def _scan(self):
        """Collects metadata from all templates found in the source directory"""
        logger.info("Scanning templates")
        self.templates = []
        os.chdir(self.src_root)
        for path, dirs, files in os.walk('.'):
            # Prune ignored directories in place so os.walk never descends
            # into them.
            for d in list(dirs):
                if any([ignore.search(os.path.join(path, d)) for ignore in IGNORE_PATTERNS]):
                    dirs.remove(d)
            for f in files:
                if not any([ignore.search(os.path.join(path, f)) for ignore in IGNORE_PATTERNS]):
                    md = self.get_template_metadata(os.path.join(path, f))
                    if md:
                        self.templates.append(md)
    def _link_articles(self):
        """For all templates that have date metadata, sort them and link them back/forth"""
        logger.info("Sorting templates")
        # If 'date' is present, sort by it
        def sort_by_date(template):
            # Undated templates sort to the front with a fixed epoch date.
            if 'date' not in template:
                return datetime.strptime('1900-01-01', '%Y-%m-%d')
            else:
                return datetime.strptime(template['date'], '%Y-%m-%d')
        self.articles = [t for t in self.templates if t.get('date')]
        self.articles.sort(key=sort_by_date)
        # Link them back/forth
        previous = None
        for t in self.articles:
            if previous:
                t['previous'] = previous
                previous['next'] = t
            previous = t
    def build(self):
        """Recursively builds all Mako templates in the source directory,
        and copies/symlinks all other files"""
        self._scan()
        self._link_articles()
        if not os.path.exists(self.build_root):
            os.mkdir(self.build_root)
        os.chdir(self.src_root)
        for path, dirs, files in os.walk('.'):
            for d in list(dirs):
                if any([ignore.search(os.path.join(path, d)) for ignore in IGNORE_PATTERNS]):
                    dirs.remove(d)
                    logger.debug("Ignoring {0}".format(os.path.join(path, d)))
                elif not os.path.exists(os.path.join(self.build_root, path, d)):
                    os.mkdir(os.path.join(self.build_root, path, d))
            for f in files:
                if not any([ignore.search(os.path.join(path, f)) for ignore in IGNORE_PATTERNS]):
                    try:
                        self.build_template(os.path.join(path, f))
                    except Exception:
                        # Log and keep building the rest of the site.
                        logger.exception('Exception in {0}'.format(os.path.join(path, f)))
                else:
                    logger.debug("Ignoring {0}".format(os.path.join(path, f)))
        logger.info("Build complete")
    def clean(self):
        """Delete everything in the build directory"""
        # Bottom-up walk so each directory is empty before rmdir.
        for root, dirs, files in os.walk(self.build_root, topdown=False):
            for f in files:
                os.remove(os.path.join(root, f))
            for d in dirs:
                os.rmdir(os.path.join(root, d))
        logger.info("Clean complete")
| {
"content_hash": "3c0dce0dbb4097d02f8baa837678450e",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 97,
"avg_line_length": 35.231155778894475,
"alnum_prop": 0.5662530309513621,
"repo_name": "lukecyca/woodcut",
"id": "394a9e20d7c370e6c356434d67d198a979432351",
"size": "7030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woodcut/project.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10164"
}
],
"symlink_target": ""
} |
"""github_explorer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
# Route the Django admin and the explorer app; redirect the bare site root
# to the explorer landing page.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^explorer/', include('explorer.urls')),
    url(r'^$', RedirectView.as_view(url='/explorer')),
]
| {
"content_hash": "4e6b21a2a97af88658f3e7d016f0b443",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.7006437768240343,
"repo_name": "navierula/DivHacks2017",
"id": "23d981045ca32efdecf5b9fbeec6f898c8ce3161",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github_explorer/github_explorer/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "6182"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Jupyter Notebook",
"bytes": "124606"
},
{
"name": "Python",
"bytes": "18353"
}
],
"symlink_target": ""
} |
def _check_for_script(data):
for key, value in data.items():
if value:
if isinstance(value, dict):
if _check_for_script(value):
return True
elif isinstance(value, str):
if "<script>" in value:
return True
elif isinstance(value, list):
for x in value:
if isinstance(x, str):
if "<script>" in value:
return True
return False
| {
"content_hash": "08e4ee540a7e6041bcdd5869bea4f5ac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 47,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.4392523364485981,
"repo_name": "DOAJ/doaj",
"id": "5e598fa3ebec8cacadbc9f4d53cdf172ec6b1f1f",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/api/current/data_objects/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
from openpnm.algorithms import ReactiveTransport
from openpnm.utils import logging, Docorator, GenericSettings
docstr = Docorator()
logger = logging.getLogger(__name__)
@docstr.get_sectionsf('NernstPlanckSettings', sections=['Parameters'])
@docstr.dedent
class NernstPlanckSettings(GenericSettings):
    r"""
    Parameters
    ----------
    %(ReactiveTransportSettings.parameters)s
    quantity : string (default = 'pore.concentration')
        The quantity to solve for. Note that this will have the 'ion' name
        appended to the end (i.e. ``'pore.concentration.Na'``)
    conductance : string (default is 'throat.ad_dif_mig_conductance')
        The conductance of the ion.
    Other Parameters
    ----------------
    s_scheme : string (default = 'exponential')
    ##
    ----
    **The following parameters pertain to the ReactiveTransport class**
    %(ReactiveTransportSettings.other_parameters)s
    ----
    **The following parameters pertain to the GenericTransport class**
    %(GenericTransportSettings.other_parameters)s
    """
    # Name of the ion species; filled in by NernstPlanck.__init__
    ion = ''
    # Quantity to solve for; the ion name gets appended (e.g. 'pore.concentration.Na')
    quantity = 'pore.concentration'
    # Conductance model key; the ion name gets appended the same way
    conductance = 'throat.ad_dif_mig_conductance'
class NernstPlanck(ReactiveTransport):
    r"""
    A class to simulate transport of charged species (such as ions) in dilute
    solutions.
    """

    def __init__(self, ion, settings=None, **kwargs):
        """
        Parameters
        ----------
        ion : str or OpenPNM object
            The ion being transported; its name is appended to the
            ``quantity`` and ``conductance`` settings.
        settings : dict, optional
            Overrides for the default ``NernstPlanckSettings`` values.
            (Default changed from a shared mutable ``{}`` to ``None``;
            callers passing a dict are unaffected.)
        """
        super().__init__(**kwargs)
        # self.name = electrolyte # This interfers with component name
        self.settings._update_settings_and_docs(NernstPlanckSettings())
        self.settings.update(settings or {})
        # Parse the given ion and append name to quantity and conductance
        if ion:
            if not isinstance(ion, str):  # Convert ion object to str
                ion = ion.name
            self.settings['ion'] = ion
            quantity = self.settings['quantity']
            if not quantity.endswith(ion):
                quantity = '.'.join(quantity.split('.')[:2])
                quantity += ('.' + ion)  # Re-add ion name
            self.settings['quantity'] = quantity  # Add full value to settings
            conductance = self.settings['conductance']
            if not conductance.endswith(ion):
                conductance = '.'.join(conductance.split('.')[:2])
                conductance += ('.' + ion)  # Re-add ion name
            self.settings['conductance'] = conductance

    def setup(self, phase=None, quantity='', conductance='', ion='', **kwargs):
        r"""
        Parameters
        ----------
        %(NernstPlanckSettings.parameters)s
        """
        if phase:
            self.settings['phase'] = phase.name
        if quantity:
            self.settings['quantity'] = quantity
        if conductance:
            self.settings['conductance'] = conductance
        if ion:
            # Bug fix: this branch previously re-assigned `quantity`
            # (clobbering the quantity setting, possibly with ''); it must
            # store the ion name instead.
            self.settings['ion'] = ion
        super().setup(**kwargs)
| {
"content_hash": "562daa45f63aad42356c51028aa79aff",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 33.206896551724135,
"alnum_prop": 0.6147455867082036,
"repo_name": "TomTranter/OpenPNM",
"id": "817f68bc6877e4de980f7c33dc9a8afe0636f47b",
"size": "2889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openpnm/algorithms/NernstPlanck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "900998"
}
],
"symlink_target": ""
} |
"""Nsxv Mock Test terms for nsxv.py"""
INET_TERM="""\
term permit-mail-services {
destination-address:: MAIL_SERVERS
protocol:: tcp
destination-port:: MAIL_SERVICES
action:: accept
}
"""
INET6_TERM="""\
term test-icmpv6 {
protocol:: icmpv6
icmp-type:: echo-request echo-reply
action:: accept
}
"""
INET_FILTER= """\
header {
comment:: "Sample inet NSXV filter"
target:: nsxv inet
}
term allow-ntp-request {
comment::"Allow ntp request"
source-address:: NTP_SERVERS
source-port:: NTP
destination-address:: INTERNAL
destination-port:: NTP
protocol:: udp
action:: accept
}
"""
INET6_FILTER= """\
header {
comment:: "Sample inet6 NSXV filter"
target:: nsxv inet6
}
term test-icmpv6 {
#destination-address:: WEB_SERVERS
protocol:: icmpv6
icmp-type:: echo-request echo-reply
action:: accept
}
"""
MIXED_FILTER= """\
header {
comment:: "Sample mixed NSXV filter"
target:: nsxv mixed 1009
}
term accept-to-honestdns {
comment:: "Allow name resolution using honestdns."
destination-address:: GOOGLE_DNS
destination-port:: DNS
protocol:: udp
action:: accept
}
"""
POLICY= """\
header {
comment:: "Sample NSXV filter"
target:: nsxv inet 1007
}
term reject-imap-requests {
destination-address:: MAIL_SERVERS
destination-port:: IMAP
protocol:: tcp
action:: reject-with-tcp-rst
}
"""
POLICY_NO_SECTION_ID= """\
header {
comment:: "NSXV filter without section id"
target:: nsxv inet
}
term accept-icmp {
protocol:: icmp
action:: accept
}
"""
POLICY_NO_FILTERTYPE= """\
header {
comment:: "Sample NSXV filter"
target:: nsxv
}
term accept-icmp {
protocol:: icmp
action:: accept
}
"""
POLICY_INCORRECT_FILTERTYPE= """\
header {
comment:: "Sample NSXV filter"
target:: nsxv inet1
}
term accept-icmp {
protocol:: icmp
action:: accept
}
"""
POLICY_OPTION_KYWD= """\
header {
comment:: "Sample NSXV filter"
target:: nsxv inet 1009
}
term accept-bgp-replies {
comment:: "Allow inbound replies to BGP requests."
source-port:: BGP
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
| {
"content_hash": "abb43b27542a78fc4efee3b7f469817c",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 54,
"avg_line_length": 18.182539682539684,
"alnum_prop": 0.6137058053251855,
"repo_name": "sneakywombat/capirca",
"id": "df109d16ea56ff7bcf287a22789a673c0648359d",
"size": "2905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/lib/nsxv_mocktest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "736"
},
{
"name": "Python",
"bytes": "896398"
},
{
"name": "Shell",
"bytes": "909"
}
],
"symlink_target": ""
} |
import os
import tempfile
import time
from unittest import mock
from urllib.request import urlopen
import pytest
from requests.structures import CaseInsensitiveDict
from httpie.downloads import (
parse_content_range, filename_from_content_disposition, filename_from_url,
get_unique_filename, ContentRangeError, Downloader, PARTIAL_CONTENT
)
from .utils import http, MockEnvironment
class Response:
    """Minimal stand-in for ``requests.Response`` used by the download tests."""

    def __init__(self, url, headers=None, status_code=200):
        """Fix the mutable default argument: the original used ``headers={}``
        (flagged by its own ``noinspection`` pragma); a ``None`` sentinel
        guarantees no dict is ever shared between calls."""
        self.url = url
        self.headers = CaseInsensitiveDict(headers or {})
        self.status_code = status_code
class TestDownloadUtils:
    """Unit tests for the pure helper functions in ``httpie.downloads``."""
    def test_Content_Range_parsing(self):
        """parse_content_range returns the total length or raises
        ContentRangeError for malformed / inconsistent headers."""
        parse = parse_content_range
        assert parse('bytes 100-199/200', 100) == 200
        assert parse('bytes 100-199/*', 100) == 200
        # single byte
        assert parse('bytes 100-100/*', 100) == 101
        # missing
        pytest.raises(ContentRangeError, parse, None, 100)
        # syntax error
        pytest.raises(ContentRangeError, parse, 'beers 100-199/*', 100)
        # unexpected range
        pytest.raises(ContentRangeError, parse, 'bytes 100-199/*', 99)
        # invalid instance-length
        pytest.raises(ContentRangeError, parse, 'bytes 100-199/199', 100)
        # invalid byte-range-resp-spec
        pytest.raises(ContentRangeError, parse, 'bytes 100-99/199', 100)
    @pytest.mark.parametrize('header, expected_filename', [
        ('attachment; filename=hello-WORLD_123.txt', 'hello-WORLD_123.txt'),
        ('attachment; filename=".hello-WORLD_123.txt"', 'hello-WORLD_123.txt'),
        ('attachment; filename="white space.txt"', 'white space.txt'),
        (r'attachment; filename="\"quotes\".txt"', '"quotes".txt'),
        ('attachment; filename=/etc/hosts', 'hosts'),
        ('attachment; filename=', None)
    ])
    def test_Content_Disposition_parsing(self, header, expected_filename):
        """Filenames are extracted, unquoted, and stripped of directories."""
        assert filename_from_content_disposition(header) == expected_filename
    def test_filename_from_url(self):
        """The filename comes from the URL path; the extension comes from
        the Content-Type (or is omitted for unknown/missing types)."""
        assert 'foo.txt' == filename_from_url(
            url='http://example.org/foo',
            content_type='text/plain'
        )
        assert 'foo.html' == filename_from_url(
            url='http://example.org/foo',
            content_type='text/html; charset=UTF-8'
        )
        assert 'foo' == filename_from_url(
            url='http://example.org/foo',
            content_type=None
        )
        assert 'foo' == filename_from_url(
            url='http://example.org/foo',
            content_type='x-foo/bar'
        )
    @pytest.mark.parametrize(
        'orig_name, unique_on_attempt, expected',
        [
            # Simple
            ('foo.bar', 0, 'foo.bar'),
            ('foo.bar', 1, 'foo.bar-1'),
            ('foo.bar', 10, 'foo.bar-10'),
            # Trim
            ('A' * 20, 0, 'A' * 10),
            ('A' * 20, 1, 'A' * 8 + '-1'),
            ('A' * 20, 10, 'A' * 7 + '-10'),
            # Trim before ext
            ('A' * 20 + '.txt', 0, 'A' * 6 + '.txt'),
            ('A' * 20 + '.txt', 1, 'A' * 4 + '.txt-1'),
            # Trim at the end
            ('foo.' + 'A' * 20, 0, 'foo.' + 'A' * 6),
            ('foo.' + 'A' * 20, 1, 'foo.' + 'A' * 4 + '-1'),
            ('foo.' + 'A' * 20, 10, 'foo.' + 'A' * 3 + '-10'),
        ]
    )
    @mock.patch('httpie.downloads.get_filename_max_length')
    def test_unique_filename(self, get_filename_max_length,
                             orig_name, unique_on_attempt,
                             expected):
        """get_unique_filename appends -N suffixes and trims to the
        (mocked, 10-char) filesystem filename limit."""
        def attempts(unique_on_attempt=0):
            # Build an `exists` stub that reports the name as taken until
            # the configured attempt number is reached.
            # noinspection PyUnresolvedReferences,PyUnusedLocal
            def exists(filename):
                if exists.attempt == unique_on_attempt:
                    return False
                exists.attempt += 1
                return True
            exists.attempt = 0
            return exists
        get_filename_max_length.return_value = 10
        actual = get_unique_filename(orig_name, attempts(unique_on_attempt))
        assert expected == actual
class TestDownloads:
def test_actual_download(self, httpbin_both, httpbin):
robots_txt = '/robots.txt'
body = urlopen(httpbin + robots_txt).read().decode()
env = MockEnvironment(stdin_isatty=True, stdout_isatty=False)
r = http('--download', httpbin_both.url + robots_txt, env=env)
assert 'Downloading' in r.stderr
assert '[K' in r.stderr
assert 'Done' in r.stderr
assert body == r
def test_download_with_Content_Length(self, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(output_file=devnull, progress_file=devnull)
downloader.start(
initial_url='/',
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 10}
)
)
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
downloader.finish()
assert not downloader.interrupted
downloader._progress_reporter.join()
def test_download_no_Content_Length(self, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(output_file=devnull, progress_file=devnull)
downloader.start(
final_response=Response(url=httpbin_both.url + '/'),
initial_url='/'
)
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
downloader.finish()
assert not downloader.interrupted
downloader._progress_reporter.join()
def test_download_output_from_content_disposition(self, httpbin_both):
with tempfile.TemporaryDirectory() as tmp_dirname, open(os.devnull, 'w') as devnull:
orig_cwd = os.getcwd()
os.chdir(tmp_dirname)
try:
assert not os.path.isfile('filename.bin')
downloader = Downloader(progress_file=devnull)
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={
'Content-Length': 5,
'Content-Disposition': 'attachment; filename="filename.bin"',
}
),
initial_url='/'
)
downloader.chunk_downloaded(b'12345')
downloader.finish()
downloader.failed() # Stop the reporter
assert not downloader.interrupted
downloader._progress_reporter.join()
# TODO: Auto-close the file in that case?
downloader._output_file.close()
assert os.path.isfile('filename.bin')
finally:
os.chdir(orig_cwd)
def test_download_interrupted(self, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(output_file=devnull, progress_file=devnull)
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 5}
),
initial_url='/'
)
downloader.chunk_downloaded(b'1234')
downloader.finish()
assert downloader.interrupted
downloader._progress_reporter.join()
    def test_download_resumed(self, httpbin_both):
        """Interrupt a transfer, then resume it with a Range request.

        Phase 1 downloads 3 of 5 bytes and is flagged interrupted; phase 2
        reopens with resume=True and fetches the remaining 2 bytes via 206.
        """
        with tempfile.TemporaryDirectory() as tmp_dirname:
            file = os.path.join(tmp_dirname, 'file.bin')
            # Create the (empty) target file up front.
            with open(file, 'a'):
                pass
            with open(os.devnull, 'w') as devnull, open(file, 'a+b') as output_file:
                # Start and interrupt the transfer after 3 bytes written
                downloader = Downloader(output_file=output_file, progress_file=devnull)
                downloader.start(
                    final_response=Response(
                        url=httpbin_both.url + '/',
                        headers={'Content-Length': 5}
                    ),
                    initial_url='/'
                )
                downloader.chunk_downloaded(b'123')
                downloader.finish()
                downloader.failed()
                assert downloader.interrupted
                downloader._progress_reporter.join()
            # Write bytes
            with open(file, 'wb') as fh:
                fh.write(b'123')
            with open(os.devnull, 'w') as devnull, open(file, 'a+b') as output_file:
                # Resume the transfer
                downloader = Downloader(output_file=output_file, progress_file=devnull, resume=True)
                # Ensure `pre_request()` is working as expected too
                headers = {}
                downloader.pre_request(headers)
                # Resume must disable compression and request only the tail.
                assert headers['Accept-Encoding'] == 'identity'
                assert headers['Range'] == 'bytes=3-'
                downloader.start(
                    final_response=Response(
                        url=httpbin_both.url + '/',
                        headers={'Content-Length': 5, 'Content-Range': 'bytes 3-4/5'},
                        status_code=PARTIAL_CONTENT
                    ),
                    initial_url='/'
                )
                downloader.chunk_downloaded(b'45')
                downloader.finish()
                downloader._progress_reporter.join()
def test_download_with_redirect_original_url_used_for_filename(self, httpbin):
# Redirect from `/redirect/1` to `/get`.
expected_filename = '1.json'
orig_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp_dirname:
os.chdir(tmp_dirname)
try:
assert os.listdir('.') == []
http('--download', httpbin.url + '/redirect/1')
assert os.listdir('.') == [expected_filename]
finally:
os.chdir(orig_cwd)
| {
"content_hash": "1f0d92f9aa3c48dcaea607eadd0d3814",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 100,
"avg_line_length": 38.007462686567166,
"alnum_prop": 0.5322010602788141,
"repo_name": "jakubroztocil/httpie",
"id": "9b6d38f980286bc89e8980dc15998733f3e14c15",
"size": "10186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_downloads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5084"
},
{
"name": "Python",
"bytes": "214825"
},
{
"name": "Ruby",
"bytes": "3083"
},
{
"name": "Shell",
"bytes": "3565"
}
],
"symlink_target": ""
} |
# GDB auto-load script (shipped next to libstdc++.a by the GCC build) that
# registers the libstdc++ pretty-printers for the current objfile.
import sys
import gdb
import os
import os.path
# Absolute install locations baked in at toolchain build time.
pythondir = '/tmp/jenkins-GCC-6-build_build-toolchain-mac-143_20170216_1487268977/install-native/share/gcc-arm-none-eabi'
libdir = '/tmp/jenkins-GCC-6-build_build-toolchain-mac-143_20170216_1487268977/install-native/arm-none-eabi/lib/thumb/v6-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path. We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE: prefix is never empty here because both paths above are
    # absolute, so commonprefix() yields at least '/'.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    # Rebase pythondir relative to the objfile's directory so a relocated
    # toolchain tree still finds its python modules.
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| {
"content_hash": "f56506ee244feee8eb13c897e5bd27bb",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 123,
"avg_line_length": 41.111111111111114,
"alnum_prop": 0.7135135135135136,
"repo_name": "ChangsoonKim/STM32F7DiscTutor",
"id": "cd8244ac373a66181a03f33057e917e3dd7a0a9b",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolchain/osx/gcc-arm-none-eabi-6-2017-q1-update/arm-none-eabi/lib/thumb/v6-m/libstdc++.a-gdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4483749"
},
{
"name": "C",
"bytes": "155831052"
},
{
"name": "C++",
"bytes": "14522753"
},
{
"name": "HTML",
"bytes": "22473152"
},
{
"name": "Logos",
"bytes": "9680"
},
{
"name": "Makefile",
"bytes": "25498"
},
{
"name": "Objective-C",
"bytes": "285838"
},
{
"name": "Python",
"bytes": "288546"
},
{
"name": "Roff",
"bytes": "2842557"
},
{
"name": "Shell",
"bytes": "20768"
},
{
"name": "XC",
"bytes": "9187"
},
{
"name": "XS",
"bytes": "9137"
}
],
"symlink_target": ""
} |
"""A factory-pattern class which returns models."""
import autoconverter
import change_gan
# Registry mapping a public model name to the module that implements it.
models_map = {
    'autoconverter': autoconverter,
    'change_gan': change_gan,
}
def get_model(model_name):
    """Return the model module registered under *model_name*.

    Raises:
        ValueError: if *model_name* is not a key in ``models_map``.
    """
    if model_name not in models_map:
        # Fixed message: the original said "dataset", but this factory
        # resolves models (see the module docstring).
        raise ValueError('Name of model unknown %s' % model_name)
    return models_map[model_name]
| {
"content_hash": "59d299d7cb07cedf0dcab8d6b73fa306",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.6947674418604651,
"repo_name": "shygiants/ChangeGAN",
"id": "28e2a2ff2a619b9d4505f6320d69af6af5ad0e06",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "change-gan/change-gan/models/models_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "551"
},
{
"name": "HTML",
"bytes": "1590"
},
{
"name": "JavaScript",
"bytes": "6910"
},
{
"name": "Jupyter Notebook",
"bytes": "360387"
},
{
"name": "Python",
"bytes": "100218"
},
{
"name": "Shell",
"bytes": "2318"
}
],
"symlink_target": ""
} |
"""
This module test for counterpartyd compability with Ethereum's Smart Contracts.
"""
"""
import os
import pytest
from pyethereum import tester
import serpent
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
pblogger = tester.pb.pblogger
pblogger.log_pre_state = True # dump storage at account before execution
pblogger.log_post_state = True # dump storage at account after execution
pblogger.log_block = False # dump block after TX was applied
pblogger.log_memory = False # dump memory before each op
pblogger.log_stack = True # dump stack before each op
pblogger.log_op = True # log op, gas, stack before each op
pblogger.log_apply_op = True # log op, gas, stack before each op
pblogger.log_json = False # generate machine readable output
gasprice = 0
startgas = 10000
"""
### Counterparty compatibility ###
import server
from counterpartylib.lib import (util, config, database)
from counterpartylib.lib.messages import execute
from counterpartylib.lib.messages.scriptlib import (blocks, rlp, processblock)
import subprocess # Serpent is Python 3‐incompatible.
import binascii
import os
import sys
import logging
logger = logging.getLogger(__name__)
import tempfile
# Directory containing this test module; used to put the package root on sys.path.
CURR_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(CURR_DIR, '..')))
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
# Fixed fake block timestamp; advanced by tester.state.mine().
TIMESTAMP = 1410973349
# Monotonically increasing fake transaction index (see tester.state.do_send).
tx_index = 1
class serpent(object):
    """Shim replacing pyethereum's serpent bindings: shells out to the
    `serpent` binary (Serpent is Python-3 incompatible, see import note above)."""
    def compile(code):
        # Compile Serpent source text to EVM bytecode via the CLI tool.
        evmcode = subprocess.check_output(['serpent', 'compile', code])
        evmcode = evmcode[:-1] # Strip newline.
        return binascii.unhexlify(bytes(evmcode))
    def encode_datalist(vals):
        """Hex-encode a list of call arguments as 32-byte big-endian words."""
        def enc(n):
            if type(n) == int:
                return n.to_bytes(32, byteorder='big')
            elif type(n) == str and len(n) == 40:
                # A 40-char string is treated as a hex address, left-padded to 32 bytes.
                return b'\x00' * 12 + binascii.unhexlify(n)
            elif type(n) == str:
                return b'\x00' * (32 - len(n)) + n.encode('utf-8') # TODO: ugly (and multi‐byte characters)
            elif n is True:
                # NOTE(review): returns an int, not bytes, unlike the branches
                # above -- util.hexlify below presumably expects bytes; confirm.
                return 1
            elif n is False or n is None:
                return 0
        def henc(n):
            return util.hexlify(enc(n))
        if isinstance(vals, (tuple, list)):
            return ''.join(map(henc, vals))
        elif vals == '':
            # NOTE(review): empty string yields bytes while the list branch
            # yields str -- callers appear to unhexlify either way.
            return b''
        else:
            assert False
        # Assume you're getting in numbers or 0x...
        # return ''.join(map(enc, list(map(numberize, vals.split(' ')))))
class tester(object):
    """Drop-in replacement for pyethereum's `tester` module that routes
    contract creation/calls through counterpartylib's EVM implementation
    backed by the module-global `db` (set up in setup_function)."""
    # Per-transaction gas limit used by do_send(); tests may temporarily lower it.
    gas_limit = 100000
    class serpent(object):
        def compile_lll(lll_code):
            # Compile LLL source via the external `serpent` binary.
            code = subprocess.check_output(['serpent', 'compile_lll', lll_code])
            code = code[:-1] # Strip newline.
            return binascii.unhexlify(bytes(code))
    class state(object):
        # N/A
        def mine (self, n, coinbase):
            # "Mining" just advances the shared fake timestamp.
            global TIMESTAMP
            TIMESTAMP += 50000
        def snapshot(self):
            # Snapshot/revert are implemented with SQLite savepoints.
            cursor = db.cursor()
            name = 'xyz'
            cursor.execute('''SAVEPOINT {}'''.format(name))
            return name
        def revert(self, name):
            cursor = db.cursor()
            cursor.execute('''ROLLBACK TO SAVEPOINT {}'''.format(name))
        def create_contract(self, code, endowment=0, sender=''):
            if not sender:
                sender = '82a978b3f5962a5b0957d9ee9eef472ee55b42f1' # PyEthereum uses ECDSA to derive this string from `sender = 0`.
            # Fund the sender so the creation transaction cannot fail on balance.
            util.credit(db, sender, config.XCP, max(endowment*2, 100000000), action='unit test', event='facefeed')
            success, data = tester.state.do_send(self, sender, '', endowment, data=code)
            contract_id = data
            return contract_id
        def evm(self, evmcode, endowment=0, sender=''):
            # Publish code.
            contract_id = tester.state.create_contract(self, evmcode, endowment=endowment, sender=sender)
            # Return contract_id.
            return contract_id
        def contract(self, code, sender='', endowment=0):
            # Compile fake code.
            if code:
                evmcode = serpent.compile(code)
            else:
                evmcode = b''
            return tester.state.evm(self, evmcode, endowment=endowment, sender=sender)
        def do_send (self, sender, to, value, data=[]):
            # NOTE(review): mutable default `data=[]` -- harmless only while
            # no caller mutates it; confirm.
            global tx_index
            if not sender:
                sender = util.contract_sha3('foo'.encode('utf-8'))
            # Construct `tx`.
            tx = { 'source': sender,
                   'block_index': 0,
                   'tx_hash': '{}{}'.format(to, tx_index),
                   'block_time': TIMESTAMP,
                   'tx_index': tx_index
                 }
            tx_index += 1
            tx_obj = execute.Transaction(tx, to, 1, tester.gas_limit, value, data)
            # Force block init.
            # NOTE(review): globally monkey-patches blocks.Block.__init__ for
            # the remainder of the process.
            def fake_block_init(self, db):
                self.db = db
                self.timestamp = TIMESTAMP
                self.number = 9001
                self.prevhash = 'facefeed'
                self.difficulty = 1337
                return
            blocks.Block.__init__ = fake_block_init
            block_obj = blocks.Block(db)
            # Run.
            processblock.MULTIPLIER_CONSTANT_FACTOR = 1
            success, output, gas_remained = processblock.apply_transaction(db, tx_obj, block_obj)
            # Decode, return result.
            return success, output
        def send (self, sender, to, value, data=[]):
            # print('tuple', sender, to, value, data)
            # Encode data.
            data = serpent.encode_datalist(data)
            data = binascii.unhexlify(data)
            # Execute contract.
            # print('qux', data, type(data))
            util.credit(db, sender, config.XCP, value + 100000000, action='unit test', event='facefeed')
            success, output = tester.state.do_send(self, sender, to, value, data=data)
            if output:
                return rlp.decode_datalist(bytes(output))
            else:
                return []
    class block(object):
        """Static stand-in for pyethereum's block object, reading from `db`."""
        def to_dict():
            return True # NOTE: Not applicable.
        def set_code(contract_id, code):
            # Directly overwrite a contract's bytecode in the database.
            cursor = db.cursor()
            bindings = {'block_index': 0, 'code': code, 'contract_id': contract_id}
            sql='''update contracts set code = :code where contract_id = :contract_id'''
            cursor.execute(sql, bindings)
            cursor.close()
        def get_storage_data(contract_id, key):
            block = blocks.Block(db)
            return block.get_storage_data(contract_id, key)
        def get_balance(address):
            block = blocks.Block(db)
            return block.get_balance(address)
# Fixed key/address fixtures mirroring pyethereum's tester.k*/a* constants.
# Here keys and addresses are plain hex strings (no real ECDSA involved).
tester.k0 = '82a978b3f5962a5b0957d9ee9eef472ee55b42f1'
tester.k1 = '7d577a597b2742b498cb5cf0c26cdcd726d39e6e'
tester.k2 = '82a978b3f5962a5b0957d9ee9eef472ee55b42f1'
tester.a0 = '82a978b3f5962a5b0957d9ee9eef472ee55b42f1'
tester.a1 = 'dceceaf3fc5c0a63d195d69b1a90011b7b19650d'
tester.a2 = 'dceceaf3fc5c0a63d195d69b1a90011b7b19650d'
tester.a3 = '598443f1880ef585b21f1d7585bd0577402861e5'
def setup_function(function):
    """pytest per-test setup: (re)create a fresh testnet database in `db`."""
    server.initialise(database_file=tempfile.gettempdir()+'/counterpartyd.unittest.db',
                        rpc_port=9999, rpc_password='pass',
                        backend_password='pass',
                        testnet=True, testcoin=False)
    # Remove any leftover database from a previous run; absence is fine.
    try:
        os.remove(config.DATABASE)
    except:
        pass
    # Connect to database.
    global db
    db = database.get_connection(read_only=False, foreign_keys=False)
    from counterpartylib.lib import blocks
    blocks.initialise(db)
def teardown_function(function):
    """pytest per-test teardown: drop the connection and delete the database file."""
    global db
    del db
    os.remove(config.DATABASE)
### Counterparty compatibility ###
# Test EVM contracts
# In Serpent, `^` is exponentiation: the assert below expects 2 ^ 5 == 32.
serpent_code = 'return(msg.data[0] ^ msg.data[1])'
evm_code = serpent.compile(serpent_code)
def test_evm():
    """Deploy raw EVM bytecode and call it with two arguments."""
    s = tester.state()
    c = s.evm(evm_code)
    o = s.send(tester.k0, c, 0, [2, 5])
    assert o == [32]
# Test serpent compilation of variables using _with_, doing a simple
# arithmetic calculation 20 * 30 + 10 = 610
# LLL program computing 20 * 30 + 10 = 610 with `with`-bound variables.
sixten_code =\
'''
(with 'x 10
    (with 'y 20
        (with 'z 30
            (seq
                (set 'a (add (mul (get 'y) (get 'z)) (get 'x)))
                (return (ref 'a) 32)
            )
        )
    )
)
'''
def test_sixten():
    """Install LLL-compiled code into an empty contract and run it."""
    s = tester.state()
    c = s.contract('')
    s.block.set_code(c, tester.serpent.compile_lll(sixten_code))
    o1 = s.send(tester.k0, c, 0, [])
    assert o1 == [610]
# Test Serpent's import mechanism
# Child contract: doubles its single argument.
mul2_code = \
'''
return(msg.data[0]*2)
'''
# On-disk filename used by Serpent's create("...") import mechanism.
filename = "mul2_qwertyuioplkjhgfdsa.se"
# Parent contract: creates the child from the file and calls it with 5.
returnten_code = \
'''
x = create("%s")
return(call(x, 5))
''' % filename
def test_returnten():
    """Serpent `create()` imports a contract from a file; call(child, 5) == 10."""
    s = tester.state()
    # Write the child contract source where create() can find it, closing
    # the handle promptly (the original `open(...).write(...)` leaked it).
    with open(filename, 'w') as source_file:
        source_file.write(mul2_code)
    c = s.contract(returnten_code)
    o1 = s.send(tester.k0, c, 0, [])
    os.remove(filename)
    assert o1 == [10]
# Test a simple namecoin implementation
# Minimal name registry: first write to a key wins, later writes are rejected.
namecoin_code =\
'''
if !contract.storage[msg.data[0]]:
    contract.storage[msg.data[0]] = msg.data[1]
    return(1)
else:
    return(0)
'''
def test_namecoin():
    """Register a name, reject a re-registration, accept a fresh name."""
    s = tester.state()
    c = s.contract(namecoin_code)
    o1 = s.send(tester.k0, c, 0, ['"george"', 45])
    assert o1 == [1]
    o2 = s.send(tester.k0, c, 0, ['"george"', 20])
    assert o2 == [0]
    o3 = s.send(tester.k0, c, 0, ['"harry"', 60])
    assert o3 == [1]
    assert s.block.to_dict()
# Test a simple currency implementation
# Token contract: deployer gets 1000 units; 1-arg call reads a balance,
# 2-arg call transfers (to, value) from the sender.
currency_code = '''
init:
    contract.storage[msg.sender] = 1000
code:
    if msg.datasize == 1:
        addr = msg.data[0]
        return(contract.storage[addr])
    else:
        from = msg.sender
        fromvalue = contract.storage[from]
        to = msg.data[0]
        value = msg.data[1]
        if fromvalue >= value:
            contract.storage[from] = fromvalue - value
            contract.storage[to] = contract.storage[to] + value
            return(1)
        else:
            return(0)
'''
def test_currency():
    """Transfer succeeds within balance, fails beyond it; balances update."""
    s = tester.state()
    c = s.contract(currency_code, sender=tester.k0)
    o1 = s.send(tester.k0, c, 0, [tester.a2, 200])
    assert o1 == [1]
    o2 = s.send(tester.k0, c, 0, [tester.a2, 900])
    assert o2 == [0]
    o3 = s.send(tester.k0, c, 0, [tester.a0])
    assert o3 == [800]
    o4 = s.send(tester.k0, c, 0, [tester.a2])
    assert o4 == [200]
# Test a data feed
# Data feed: first call claims ownership; owner sets key/value pairs,
# anyone reads them back with a 1-arg call.
data_feed_code = '''
if !contract.storage[1000]:
    contract.storage[1000] = 1
    contract.storage[1001] = msg.sender
    return(20)
elif msg.datasize == 2:
    if msg.sender == contract.storage[1001]:
        contract.storage[msg.data[0]] = msg.data[1]
        return(1)
    else:
        return(0)
else:
    return(contract.storage[msg.data[0]])
'''
def test_data_feeds():
    """Owner-only writes, public reads; returns (state, contract) for reuse."""
    s = tester.state()
    c = s.contract(data_feed_code, sender=tester.k0)
    o1 = s.send(tester.k0, c, 0)
    assert o1 == [20]
    o2 = s.send(tester.k0, c, 0, [500])
    assert o2 == [0]
    o3 = s.send(tester.k0, c, 0, [500, 19])
    assert o3 == [1]
    o4 = s.send(tester.k0, c, 0, [500])
    assert o4 == [19]
    o5 = s.send(tester.k1, c, 0, [500, 726])
    assert o5 == [0]
    o6 = s.send(tester.k0, c, 0, [500, 726])
    assert o6 == [1]
    # Reused by test_hedge() below.
    return s, c
# Test an example hedging contract, using the data feed. This tests
# contracts calling other contracts
# Hedging contract: party 1 deposits ether, party 2 matches it; settlement
# consults the data-feed contract for the exchange rate (codes 3/4/5 below).
hedge_code = '''
if !contract.storage[1000]:
    contract.storage[1000] = msg.sender
    contract.storage[1002] = msg.value
    contract.storage[1003] = msg.data[0]
    contract.storage[1004] = msg.data[1]
    return(1)
elif !contract.storage[1001]:
    ethvalue = contract.storage[1002]
    if msg.value >= ethvalue:
        contract.storage[1001] = msg.sender
        c = call(contract.storage[1003],[contract.storage[1004]],1)
        othervalue = ethvalue * c
        contract.storage[1005] = othervalue
        contract.storage[1006] = block.timestamp + 500
        return([2,othervalue],2)
else:
    othervalue = contract.storage[1005]
    ethvalue = othervalue / call(contract.storage[1003],contract.storage[1004])
    if ethvalue >= contract.balance:
        send(contract.storage[1000],contract.balance)
        return(3)
    elif block.timestamp > contract.storage[1006]:
        send(contract.storage[1001],contract.balance - ethvalue)
        send(contract.storage[1000],ethvalue)
        return(4)
    else:
        return(5)
'''
def test_hedge():
    """Cross-contract calls: hedge contract consulting the data feed."""
    s, c = test_data_feeds()
    c2 = s.contract(hedge_code, sender=tester.k0)
    # Have the first party register, sending 10^16 wei and
    # asking for a hedge using currency code 500
    o1 = s.send(tester.k0, c2, 10**16, [c, 500])
    assert o1 == [1]
    # Have the second party register. It should receive the
    # amount of units of the second currency that it is
    # entitled to. Note that from the previous test this is
    # set to 726
    o2 = s.send(tester.k2, c2, 10**16)
    assert o2 == [2, 7260000000000000000]
    snapshot = s.snapshot()
    # Set the price of the asset down to 300 wei
    o3 = s.send(tester.k0, c, 0, [500, 300])
    assert o3 == [1]
    # Finalize the contract. Expect code 3, meaning a margin call
    o4 = s.send(tester.k0, c2, 0)
    assert o4 == [3]
    s.revert(snapshot)
    # Don't change the price. Finalize, and expect code 5, meaning
    # the time has not expired yet
    o5 = s.send(tester.k0, c2, 0)
    assert o5 == [5]
    s.mine(100, tester.a3)
    # Mine ten blocks, and try. Expect code 4, meaning a normal execution
    # where both get their share
    o6 = s.send(tester.k0, c2, 0)
    assert o6 == [4]
# Test the LIFO nature of call and the FIFO nature of post
# Distinguishes synchronous call() (LIFO, runs immediately) from postcall()
# (FIFO, runs after the current frame): 10 -> 1010 vs 10 -> 1001.
arither_code = '''
init:
    contract.storage[0] = 10
code:
    if msg.data[0] == 0:
        contract.storage[0] += 1
    elif msg.data[0] == 1:
        contract.storage[0] *= 10
        call(contract.address, 0)
        contract.storage[0] *= 10
    elif msg.data[0] == 2:
        contract.storage[0] *= 10
        postcall(tx.gas / 2, contract.address, 0)
        contract.storage[0] *= 10
    elif msg.data[0] == 3:
        return(contract.storage[0])
'''
def test_post():
    """call() increments mid-sequence (1010); postcall() increments last (1001)."""
    s = tester.state()
    c = s.contract(arither_code)
    s.send(tester.k0, c, 0, [1])
    o2 = s.send(tester.k0, c, 0, [3])
    assert o2 == [1010]
    c = s.contract(arither_code)
    s.send(tester.k0, c, 0, [2])
    o2 = s.send(tester.k0, c, 0, [3])
    assert o2 == [1001]
# Test suicides and suicide reverts
suicider_code = '''
if msg.data[0] == 0:
contract.storage[15] = 40
call(contract.address, 3)
i = 0
while i < msg.data[1]:
i += 1
elif msg.data[0] == 1:
contract.storage[15] = 20
msg(tx.gas - 100, contract.address, 0, [0, msg.data[1]], 2)
elif msg.data[0] == 2:
return(10)
elif msg.data[0] == 3:
suicide(0)
elif msg.data[0] == 4:
return(contract.storage[15])
'''
def test_suicider():
    """A suicide commits normally but is reverted when the outer call runs out of gas."""
    s = tester.state()
    c = s.contract(suicider_code)
    prev_gas_limit = tester.gas_limit
    tester.gas_limit = 4000
    try:
        # Run normally: suicide processes, so the attempt to ping the
        # contract fails
        s.send(tester.k0, c, 0, [1, 10])
        o2 = s.send(tester.k0, c, 0, [2])
        assert o2 == []
        c = s.contract(suicider_code)
        # Run the suicider in such a way that it suicides in a sub-call,
        # then runs out of gas, leading to a revert of the suicide and the
        # storage mutation
        s.send(tester.k0, c, 0, [1, 4000])
        # Check that the suicide got reverted
        o2 = s.send(tester.k0, c, 0, [2])
        assert o2 == [10]
        # Check that the storage op got reverted
        o3 = s.send(tester.k0, c, 0, [4])
        assert o3 == [20]
    finally:
        # Restore the shared gas limit even if an assertion fails, so later
        # tests do not inherit the reduced limit (the original leaked it).
        tester.gas_limit = prev_gas_limit
# Test reverts
# Branch 0 fires two sub-messages: branch 1 commits, branch 2 recurses into
# itself and wastes gas so that its send/storage writes are reverted.
reverter_code = '''
if msg.data[0] == 0:
    msg(1000, contract.address, 0, 1)
    msg(1000, contract.address, 0, 2)
elif msg.data[0] == 1:
    send(1, 9)
    contract.storage[8080] = 4040
    contract.storage[160160] = 2020
elif msg.data[0] == 2:
    send(2, 9)
    contract.storage[8081] = 4039
    contract.storage[160161] = 2019
    call(contract.address, 2)
    contract.storage["waste_some_gas"] = 0
'''
def test_reverter():
    """Branch 1's effects persist; branch 2's are rolled back."""
    s = tester.state()
    c = s.contract(reverter_code, endowment=10**15)
    s.send(tester.k0, c, 0, [0])
    assert s.block.get_storage_data(c, 8080) == 4040
    assert s.block.get_balance('0'*39+'1') == 9
    assert s.block.get_storage_data(c, 8081) == 0
    assert s.block.get_balance('0'*39+'2') == 0
# Test stateless contracts
# Child contract: accumulates its argument into storage slot 1.
add1_code = \
'''
contract.storage[1] += msg.data[0]
'''
# On-disk filename for Serpent's create("...") import.
filename2 = "stateless_qwertyuioplkjhgfdsa.se"
# call_stateless() runs the child against the *parent's* storage, so only
# those two calls (4 + 60) land in the parent's slot 1.
stateless_test_code = \
'''
x = create("%s")
call(x, 6)
call_stateless(x, 4)
call_stateless(x, 60)
call(x, 40)
return(contract.storage[1])
''' % filename2
def test_stateless():
    """call_stateless() mutates the caller's storage: 4 + 60 == 64 in slot 1."""
    s = tester.state()
    # Write the child source with a context manager, closing the handle
    # promptly (the original `open(...).write(...)` leaked it).
    with open(filename2, 'w') as source_file:
        source_file.write(add1_code)
    c = s.contract(stateless_test_code)
    o1 = s.send(tester.k0, c, 0, [])
    os.remove(filename2)
    assert o1 == [64]
# https://github.com/ethereum/serpent/issues/8
# Regression tests for serpent's array() (https://github.com/ethereum/serpent/issues/8).
array_code = '''
a = array(1)
a[0] = 1
return(a, 1)
'''
def test_array():
    """A 1-element array round-trips its stored value."""
    s = tester.state()
    c = s.contract(array_code)
    assert [1] == s.send(tester.k0, c, 0, [])
# Same as above but with an unrelated local assigned in between.
array_code2 = '''
a = array(1)
something = 2
a[0] = 1
return(a, 1)
'''
def test_array2():
    """An intervening local variable must not corrupt the array."""
    s = tester.state()
    c = s.contract(array_code2)
    assert [1] == s.send(tester.k0, c, 0, [])
array_code3="""
a = array(3)
return(a, 3)
"""
def test_array3():
    """A fresh array is zero-initialised."""
    s = tester.state()
    c = s.contract(array_code3)
    assert [0,0,0] == s.send(tester.k0, c, 0, [])
| {
"content_hash": "c5d8af943ad09cc0cce6ce7baa8308e9",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 132,
"avg_line_length": 28.866129032258065,
"alnum_prop": 0.5909370285522714,
"repo_name": "gotcha/testcounter",
"id": "a8b84b3d53c12755e2d20947af4943c1696a7689",
"size": "17901",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "counterpartylib/test/contracts_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "611962"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: updates help_text on two Markdown-enabled
    TextFields (discussion.comment and issue.description)."""
    dependencies = [
        ('feedback', '0001_squashed_0010_auto_20170205_2230'),
    ]
    operations = [
        migrations.AlterField(
            model_name='discussion',
            name='comment',
            field=models.TextField(help_text='<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled'),
        ),
        migrations.AlterField(
            model_name='issue',
            name='description',
            field=models.TextField(help_text='<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled'),
        ),
    ]
| {
"content_hash": "90305fa32e27748aa990bc4e1d316e1d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 154,
"avg_line_length": 33.17391304347826,
"alnum_prop": 0.6277850589777195,
"repo_name": "PeteAndersen/swarfarm",
"id": "5cc882187bc8b97ca99ff4ab38224af142ad0c08",
"size": "834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "feedback/migrations/0002_auto_20170502_1518.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31891"
},
{
"name": "HTML",
"bytes": "352588"
},
{
"name": "JavaScript",
"bytes": "79075"
},
{
"name": "Python",
"bytes": "982216"
},
{
"name": "Shell",
"bytes": "3403"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
import os
import math
import pytest
import numpy as np
from numpy.testing import assert_raises, assert_equal
from . import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestIntentInOut(util.F2PyTest):
    # Check that intent(in out) translates as intent(inout)
    sources = [_path('src', 'regression', 'inout.f90')]
    @pytest.mark.slow
    def test_inout(self):
        """foo() rejects non-contiguous input and mutates a contiguous array in place."""
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float32)[::2]
        assert_raises(ValueError, self.module.foo, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float32)
        self.module.foo(x)
        assert_equal(x, [3, 1, 2])
| {
"content_hash": "0b149576987d76d0d32fa0faa780f383",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 64,
"avg_line_length": 25.866666666666667,
"alnum_prop": 0.6533505154639175,
"repo_name": "tynn/numpy",
"id": "d695de61b75cd9e7052fcc37772acf1389a82247",
"size": "776",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "numpy/f2py/tests/test_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8111444"
},
{
"name": "C++",
"bytes": "165060"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6639339"
}
],
"symlink_target": ""
} |
from flask import Blueprint, request, jsonify, g
from flask_restful import Api, Resource
from horse import models
from .schemas.user import user_schema, users_schema, user_action_schema
users_bp = Blueprint('users_api', __name__)
users_api = Api(users_bp)
class User(Resource):
    """Detail endpoint for a single user."""
    def get(self, user_pk):
        # Fetch and serialize the requested user.
        found = g.repos.users.get(user_pk)
        return jsonify(user_schema.dump(found).data)
users_api.add_resource(User, '/users/<string:user_pk>')
class UserList(Resource):
    """Collection endpoint: list all users or create a new one."""
    def get(self):
        everyone = g.repos.users.all()
        serialized = users_schema.dump(everyone)
        return jsonify({
            'items': serialized.data
        })
    def post(self):
        payload, errors = user_schema.load(request.get_json())
        if errors:
            # Schema validation failed; report the field errors.
            return {'errors': errors}, 400
        created = models.User(name=payload['name'])
        g.repos.users.store(created)
        return user_schema.dump(created).data, 201
users_api.add_resource(UserList, '/users')
class UserFollow(Resource):
    """Make the user identified by the URL follow another user."""
    def post(self, user_pk):
        payload, errors = user_action_schema.load(request.get_json())
        if errors:
            return {'errors': errors}, 400
        follower = g.repos.users.get(user_pk)
        followee = g.repos.users.get(payload['pk'])
        follower.add_to_followed_users(followee)
        # NOTE(review): no explicit return, so Flask-RESTful answers with a
        # 200 and a null body -- presumably intentional; confirm.
users_api.add_resource(UserFollow, '/users/<string:user_pk>/follow')
class UserLikesMovie(Resource):
    """Add a movie to a user's liked-movies list."""
    def post(self, user_pk):
        payload, errors = user_action_schema.load(request.get_json())
        if errors:
            return {'errors': errors}, 400
        liker = g.repos.users.get(user_pk)
        liked = g.repos.movies.get(payload['pk'])
        liker.add_to_liked_movies(liked)
        # NOTE(review): no explicit return (200 with null body) -- confirm.
users_api.add_resource(UserLikesMovie, '/users/<string:user_pk>/liked_movies')
class UserUnlikesMovie(Resource):
    """Remove a movie from a user's liked-movies list."""
    def delete(self, user_pk, movie_pk):
        owner = g.repos.users.get(user_pk)
        unliked = g.repos.movies.get(movie_pk)
        owner.remove_from_liked_movies(unliked)
        # 204: deleted, nothing to return.
        return None, 204
users_api.add_resource(
    UserUnlikesMovie, '/users/<string:user_pk>/liked_movies/<string:movie_pk>'
)
class UserUnfollow(Resource):
    """Stop one user from following another."""
    def delete(self, user_pk, other_user_pk):
        follower = g.repos.users.get(user_pk)
        unfollowed = g.repos.users.get(other_user_pk)
        follower.remove_from_followed_users(unfollowed)
        # 204: deleted, nothing to return.
        return None, 204
users_api.add_resource(
    UserUnfollow, '/users/<string:user_pk>/follow/<string:other_user_pk>'
)
| {
"content_hash": "a6355e282c56ede084b4a5214566ee75",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 27.150537634408604,
"alnum_prop": 0.6376237623762376,
"repo_name": "pragmaticcoders/horse",
"id": "ddde29de1f1fb60a44b3f8167e82f9a8523fbb9c",
"size": "2525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horse/web/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31460"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import urllib
import urllib2
import shlex
from django.template.loader import render_to_string
from django.conf import settings
from file_system import File
from subprocess import check_call, CalledProcessError
class TemplateProcessor:
    """Renders a resource's source file as a Django template, in place.

    Python 2 module (print statement / old except syntax used elsewhere in
    this file)."""
    @staticmethod
    def process(resource):
        try:
            rendered = render_to_string(resource.source_file.path, settings.CONTEXT)
            resource.source_file.write(rendered)
        except:
            # Report which page failed before re-raising the original error.
            print >> sys.stderr, \
            "***********************\nError while rendering page %s\n***********************" % \
            resource.url
            raise
## aym-cms code refactored into processors.
class CleverCSS:
    """Compiles a CleverCSS source into a sibling .css file and removes the original."""
    @staticmethod
    def process(resource):
        import clevercss
        source = resource.source_file
        converted = clevercss.convert(source.read_all())
        # Write next to the source with a .css extension, then drop the source.
        File(source.path_without_extension + ".css").write(converted)
        source.delete()
class HSS:
    """Runs the external HSS preprocessor and replaces the source with its CSS output."""
    @staticmethod
    def process(resource):
        out_file = File(resource.source_file.path_without_extension + ".css")
        hss = settings.HSS_PATH
        if not hss or not os.path.exists(hss):
            raise ValueError("HSS Processor cannot be found at [%s]" % hss)
        try:
            # HSS names its output itself; point it at the parent directory.
            check_call([hss, resource.source_file.path, "-output", out_file.parent.path + '/'])
        except CalledProcessError, e:
            print 'Syntax Error when calling HSS Processor:', e
            return None
        # Move the generated CSS back over the original source path.
        resource.source_file.delete()
        out_file.copy_to(resource.source_file.path)
        out_file.delete()
class SASS:
    """Compiles a SASS source to CSS via the external `sass` binary."""
    @staticmethod
    def process(resource):
        out_file = File(resource.source_file.path_without_extension + ".css")
        # Resolve @imports relative to the original file's directory.
        load_path = os.path.dirname(resource.file.path)
        sass = settings.SASS_PATH
        if not sass or not os.path.exists(sass):
            raise ValueError("SASS Processor cannot be found at [%s]" % sass)
        try:
            check_call([sass, "-I", load_path, resource.source_file.path,
                        out_file.path])
        except CalledProcessError, e:
            print 'Syntax Error when calling SASS Processor:', e
            return None
        # Hand the .css file on to the next processor in the chain.
        resource.source_file.delete()
        resource.source_file = out_file
class SASSMake:
    """make(1)-style SASS compilation: only recompile when the target .css is
    older than the source or any file it (transitively) @imports."""
    @staticmethod
    def p(s):
        # Unbuffered progress output.
        sys.stdout.write(s)
        sys.stdout.flush()
    @staticmethod
    def process(resource):
        """
        MT: "make" the target: don't compile if target is not out of date.
        This is determined by checking the file dates of all dependant files.
        """
        source_fn = resource.file.path
        target_fn = resource.source_file.path_without_extension + ".css"
        # target_fn points to temp directory; make it point to deploy directory
        target_fn = target_fn.replace(settings.TMP_DIR, settings.DEPLOY_DIR)
        if not os.path.exists(target_fn):
            # target does not exist - compile it
            SASS.process(resource)
            SASSMake.p("target %s does not exist - compiling %s\n" % (target_fn, str(resource)))
            return
        # get existing target last mod time
        target_mtime = os.path.getmtime(target_fn)
        def check_file(pfn):
            """ parse file for @imports; returns True if target is out of date """
            SASSMake.p("SASSMake: " + pfn + "\n")
            if os.path.getmtime(pfn) > target_mtime:
                return True
            with open(pfn, "r") as f:
                for line in f:
                    # check for @import lines
                    parts = shlex.split(line)
                    if len(parts) == 2 and parts[0] == "@import":
                        ifn = parts[1].strip(";")
                        # prepend current file path
                        ifn = os.path.join(os.path.dirname(os.path.abspath(pfn)),
                                           ifn)
                        # append .scss if this file is not found
                        if not os.path.exists(ifn):
                            ifn += '.scss'
                        # check file (recursive: imports of imports count too)
                        if check_file(ifn):
                            # if one needs to be built the others don't bother
                            return True
            return False
        if check_file(source_fn):
            # out of date; build it
            SASS.process(resource)
            return
class LessCSS:
    """Compiles a Less source to CSS via the external `lessc` binary."""
    @staticmethod
    def process(resource):
        out_file = File(resource.source_file.path_without_extension + ".css")
        if not out_file.parent.exists:
            out_file.parent.make()
        less = settings.LESS_CSS_PATH
        if not less or not os.path.exists(less):
            raise ValueError("Less CSS Processor cannot be found at [%s]" % less)
        try:
            check_call([less, resource.source_file.path, out_file.path])
        except CalledProcessError, e:
            print 'Syntax Error when calling less:', e
        else:
            # Only drop the source once compilation succeeded.
            resource.source_file.delete()
        """
        Assign our out_file as the source_file for this resource in order for
        other processors to be able to correctly process this resource too.
        This is needed because this processor changes the extension of the source file.
        See bugreport at http://support.ringce.com/ringce/topics/lesscss_yuicompressor_fail_and_sitemap_generation_broken
        """
        resource.source_file = out_file
        if not out_file.exists:
            print 'Error Occurred when processing with Less'
class Stylus:
    """Compiles a Stylus source to CSS via the external `stylus` binary."""
    @staticmethod
    def process(resource):
        stylus = settings.STYLUS_PATH
        if not stylus or not os.path.exists(stylus):
            raise ValueError("Stylus Processor cannot be found at [%s]" % stylus)
        try:
            # stylus writes the .css next to the input file on its own.
            check_call([stylus, resource.source_file.path])
        except CalledProcessError, e:
            print 'Syntax Error when calling stylus:', e
        out_file = File(resource.source_file.path_without_extension + ".css")
        if not out_file.exists:
            print 'Error Occurred when processing with Stylus'
        else:
            resource.source_file.delete()
            resource.source_file = out_file
class CSSPrefixer:
@staticmethod
def process(resource):
data = resource.source_file.read_all()
try:
import cssprefixer
out = cssprefixer.process(data, debug=False, minify=False)
except ImportError:
try:
data = urllib.urlencode({"css": resource.source_file.read_all()})
req = urllib2.Request("http://cssprefixer.appspot.com/process/", data)
out = urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
print 'HTTP Error %s when calling remote CSSPrefixer' % e.code
return False
except urllib2.URLError, e:
print 'Error when calling remote CSSPrefixer:', e.reason
return False
resource.source_file.write(out)
class CSSmin:
    """Minifies a CSS file in place using the cssmin library."""
    @staticmethod
    def process(resource):
        import cssmin
        # Read, minify, and write back over the same file.
        source = resource.source_file.read_all()
        resource.source_file.write(cssmin.cssmin(source))
class CoffeeScript:
    """Compiles a CoffeeScript file to JavaScript with the coffee binary."""
    @staticmethod
    def process(resource):
        compiler = settings.COFFEE_PATH
        if not compiler or not os.path.exists(compiler):
            raise ValueError("CoffeeScript Processor cannot be found at [%s]" % compiler)
        try:
            # -b: bare output (no top-level wrapper); -c: compile.
            check_call([compiler, "-b", "-c", resource.source_file.path])
        except CalledProcessError, e:
            print 'Syntax Error when calling CoffeeScript:', e
            return None
        # coffee writes its output beside the source with a .js extension.
        compiled = File(resource.source_file.path_without_extension + ".js")
        if compiled.exists:
            # Hand the resource over to the generated .js file.
            resource.source_file.delete()
            resource.source_file = compiled
        else:
            print 'Error Occurred when processing with CoffeeScript'
class JSmin:
    """Minifies a JavaScript file in place using the jsmin library."""
    @staticmethod
    def process(resource):
        import jsmin
        # Read, minify, and write back over the same file.
        source = resource.source_file.read_all()
        resource.source_file.write(jsmin.jsmin(source))
class UglifyJS:
    # Minifies a JavaScript file in place, preferring a locally configured
    # uglifyjs binary (settings.UGLIFYJS) and falling back to the remote
    # marijnhaverbeke.nl service.  On any failure the original file is left
    # untouched and False is returned.
    @staticmethod
    def process(resource):
        # Minify into a temp file first, then replace the original only if
        # everything succeeded.
        tmp_file = File(resource.source_file.path + ".z-tmp")
        if hasattr(settings, "UGLIFYJS"):
            compress = settings.UGLIFYJS
            if not os.path.exists(compress):
                # Treat the configured value as a path relative to the
                # package's parent directory when it is not absolute/found.
                compress = os.path.join(
                    os.path.dirname(
                        os.path.abspath(__file__)), "..", compress)
            try:
                check_call([compress, resource.source_file.path,
                            "--output", tmp_file.path])
            except CalledProcessError, e:
                print 'Syntax Error when calling UglifyJS:', e
                return False
        else:
            # No local binary configured: POST the source to the remote
            # UglifyJS service and write the minified result to the temp file.
            try:
                data = urllib.urlencode({"js_code": resource.source_file.read_all()})
                req = urllib2.Request("http://marijnhaverbeke.nl/uglifyjs", data)
                res = urllib2.urlopen(req).read()
                tmp_file.write(res)
            except urllib2.HTTPError, e:
                print 'HTTP Error %s when calling remote UglifyJS' % e.code
                return False
            except urllib2.URLError, e:
                print 'Error when calling remote UglifyJS:', e.reason
                return False
        # Success on either path: swap the minified file into place.
        resource.source_file.delete()
        tmp_file.move_to(resource.source_file.path)
class YUICompressor:
    """Minifies CSS/JS by running the YUI Compressor jar via ``java -jar``."""
    @staticmethod
    def process(resource):
        # A missing/None setting means this step is intentionally disabled.
        # (Fixed: was `== None`; identity comparison with None should use `is`.)
        if settings.YUI_COMPRESSOR is None:
            return
        compress = settings.YUI_COMPRESSOR
        if not os.path.exists(compress):
            # Treat the configured value as a path relative to the package's
            # parent directory when it is not found as given.
            compress = os.path.join(
                os.path.dirname(
                    os.path.abspath(__file__)), "..", compress)
        if not compress or not os.path.exists(compress):
            raise ValueError(
                "YUI Compressor cannot be found at [%s]" % compress)
        # Compress into a temp file, then replace the original on success.
        tmp_file = File(resource.source_file.path + ".z-tmp")
        try:
            check_call(["java", "-jar", compress,
                        resource.source_file.path, "-o",
                        tmp_file.path])
        except CalledProcessError, e:
            print 'Syntax Error when calling YUI Compressor:', e
        else:
            resource.source_file.delete()
            tmp_file.move_to(resource.source_file.path)
class ClosureCompiler:
    # Minifies a JavaScript file in place with Google's Closure Compiler,
    # preferring a locally configured jar (settings.CLOSURE_COMPILER) and
    # falling back to the hosted closure-compiler.appspot.com service.
    # Returns False on any failure, leaving the original file untouched.
    @staticmethod
    def process(resource):
        # Compile into a temp file first; replace the original only on success.
        tmp_file = File(resource.source_file.path + ".z-tmp")
        if hasattr(settings, "CLOSURE_COMPILER"):
            compress = settings.CLOSURE_COMPILER
            if not os.path.exists(compress):
                # Treat the configured value as a path relative to the
                # package's parent directory when it is not found as given.
                compress = os.path.join(
                    os.path.dirname(
                        os.path.abspath(__file__)), "..", compress)
            if not compress or not os.path.exists(compress):
                raise ValueError(
                    "Closure Compiler cannot be found at [%s]" % compress)
            try:
                check_call(["java", "-jar", compress, "--js",
                            resource.source_file.path, "--js_output_file",
                            tmp_file.path])
            except CalledProcessError, e:
                print 'Syntax Error when calling Closure Compiler:', e
                return False
        else:
            # No local jar configured: POST the source to the hosted Closure
            # service and write the compiled result to the temp file.
            try:
                data = urllib.urlencode({
                    "js_code": resource.source_file.read_all(),
                    "output_info": "compiled_code"
                })
                req = urllib2.Request("http://closure-compiler.appspot.com/compile", data)
                res = urllib2.urlopen(req).read()
                tmp_file.write(res)
            except urllib2.HTTPError, e:
                print 'HTTP Error %s when calling remote Closure Compiler' % e.code
                return False
            except urllib2.URLError, e:
                print 'Error when calling remote Closure Compiler:', e.reason
                return False
        # Success on either path: swap the compiled file into place.
        resource.source_file.delete()
        tmp_file.move_to(resource.source_file.path)
class Thumbnail:
    """Writes a thumbnail copy of an image next to the original.

    The thumbnail is bounded by settings.THUMBNAIL_MAX_WIDTH/HEIGHT and is
    named ``<original><postfix>.<ext>`` (postfix defaults to "-thumb").
    """
    @staticmethod
    def process(resource):
        from PIL import Image
        i = Image.open(resource.source_file.path)
        # Normalize the mode so thumbnails save consistently.
        if i.mode != 'RGBA':
            i = i.convert('RGBA')
        # thumbnail() resizes in place, preserving the aspect ratio.
        i.thumbnail(
            (settings.THUMBNAIL_MAX_WIDTH, settings.THUMBNAIL_MAX_HEIGHT),
            Image.ANTIALIAS
        )
        orig_path, _, orig_extension = resource.source_file.path.rpartition('.')
        # getattr with a default replaces the old `"X" in dir(settings)` probe,
        # which rebuilt the full attribute list just to test membership.
        postfix = getattr(settings, "THUMBNAIL_FILENAME_POSTFIX", "-thumb")
        thumb_path = "%s%s.%s" % (orig_path, postfix, orig_extension)
        jpeg_quality = getattr(settings, "THUMBNAIL_JPEG_QUALITY", None)
        if i.format == "JPEG" and jpeg_quality is not None:
            i.save(thumb_path, quality = jpeg_quality, optimize = True)
        else:
            i.save(thumb_path)
| {
"content_hash": "22df2b1fff45145d687aae8caf0fe85c",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 125,
"avg_line_length": 38.30994152046784,
"alnum_prop": 0.5665547244695467,
"repo_name": "m-thielen/hyde",
"id": "83e4aae8552a84aa5b316a362a9c023519e72099",
"size": "13102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hydeengine/media_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "5177"
},
{
"name": "CSS",
"bytes": "23189"
},
{
"name": "Emacs Lisp",
"bytes": "349"
},
{
"name": "HTML",
"bytes": "44531"
},
{
"name": "JavaScript",
"bytes": "19002"
},
{
"name": "Python",
"bytes": "326878"
}
],
"symlink_target": ""
} |
from PyQt4.QtCore import QRect
from PyQt4.QtGui import QApplication, QPixmap
def getScreenDimensions():
    """Returns the posx, posy, width and height of the monitor as a QRect."""
    # Grab the desktop widget once and read all four geometry values from it.
    desktop = QApplication.desktop()
    return QRect(desktop.x(), desktop.y(), desktop.width(), desktop.height())
def cropImage(qpixmap, qrect):
    """Returns a cropped QPixmap using the QRect as dimensions.

    Returns None when the rectangle is degenerate (zero width or height).
    """
    # width/height are *methods* on QRect; the original compared the bound
    # method objects to 0, which is always False, so the guard never fired.
    if qrect.width() == 0 or qrect.height() == 0:
        return None
    return qpixmap.copy(qrect.normalized())
def captureFullScreen():
    """Returns a QPixmap containing a screenshot of the entire screen."""
    # Capture the rectangle covering the whole desktop.
    return captureRectangle(getScreenDimensions())
def captureRectangle(qrect):
    """Returns a QPixmap containing a screenshot the size of qrect.

    Returns None when the rectangle is degenerate (zero width or height).
    """
    # width()/height() must be *called*; comparing the bound methods to 0
    # (as the original did) is always False, so empty rects were not rejected.
    if qrect.width() == 0 or qrect.height() == 0:
        return None
    return QPixmap.grabWindow(QApplication.desktop().winId(), *qrect.getRect())
| {
"content_hash": "7817632794943bc139a52d278a7cf626",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 33.3,
"alnum_prop": 0.6716716716716716,
"repo_name": "Catgroove/ninjapic",
"id": "b2291a429466dc5668b34d5196d30a848a681d5c",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11193"
}
],
"symlink_target": ""
} |
"""Create / interact with Google Cloud Datastore transactions."""
from google.cloud.datastore.batch import Batch
class Transaction(Batch):
    """An abstraction representing datastore Transactions.
    Transactions can be used to build up a bulk mutation and ensure all
    or none succeed (transactionally).
    For example, the following snippet of code will put the two ``save``
    operations (either ``insert`` or ``upsert``) into the same
    mutation, and execute those within a transaction:
    .. testsetup:: txn-put-multi, txn-api
        from google.cloud import datastore
        from tests.system.test_system import Config  # system tests
        client = datastore.Client()
        key1 = client.key('_Doctest')
        entity1 = datastore.Entity(key=key1)
        entity1['foo'] = 1337
        key2 = client.key('_Doctest', 'abcd1234')
        entity2 = datastore.Entity(key=key2)
        entity2['foo'] = 42
        Config.TO_DELETE.extend([entity1, entity2])
    .. doctest:: txn-put-multi
        >>> with client.transaction():
        ...     client.put_multi([entity1, entity2])
    Because it derives from :class:`~google.cloud.datastore.batch.Batch`,
    :class:`Transaction` also provides :meth:`put` and :meth:`delete` methods:
    .. doctest:: txn-api
        >>> with client.transaction() as xact:
        ...     xact.put(entity1)
        ...     xact.delete(entity2.key)
    By default, the transaction is rolled back if the transaction block
    exits with an error:
    .. testsetup:: txn-error
        from google.cloud import datastore
        client = datastore.Client()
        def do_some_work():
            return
        class SomeException(Exception):
            pass
    .. doctest:: txn-error
        >>> with client.transaction():
        ...     do_some_work()
        ...     raise SomeException  # rolls back
        Traceback (most recent call last):
          ...
        SomeException
    If the transaction block exits without an exception, it will commit
    by default.
    .. warning::
        Inside a transaction, automatically assigned IDs for
        entities will not be available at save time! That means, if you
        try:
    .. testsetup:: txn-entity-key, txn-entity-key-after, txn-manual
        from google.cloud import datastore
        from tests.system.test_system import Config  # system tests
        client = datastore.Client()
        def Entity(*args, **kwargs):
            entity = datastore.Entity(*args, **kwargs)
            Config.TO_DELETE.append(entity)
            return entity
    .. doctest:: txn-entity-key
        >>> with client.transaction():
        ...     entity = Entity(key=client.key('Thing'))
        ...     client.put(entity)
    ``entity`` won't have a complete key until the transaction is
    committed.
    Once you exit the transaction (or call :meth:`commit`), the
    automatically generated ID will be assigned to the entity:
    .. doctest:: txn-entity-key-after
        >>> with client.transaction():
        ...     entity = Entity(key=client.key('Thing'))
        ...     client.put(entity)
        ...     print(entity.key.is_partial)  # There is no ID on this key.
        ...
        True
        >>> print(entity.key.is_partial)  # There *is* an ID.
        False
    If you don't want to use the context manager you can initialize a
    transaction manually:
    .. doctest:: txn-manual
        >>> transaction = client.transaction()
        >>> transaction.begin()
        >>>
        >>> entity = Entity(key=client.key('Thing'))
        >>> transaction.put(entity)
        >>>
        >>> transaction.commit()
    :type client: :class:`google.cloud.datastore.client.Client`
    :param client: the client used to connect to datastore.
    """
    # Lifecycle status flag; the valid states are defined on the Batch base
    # class (e.g. _ABORTED, used in begin() below).
    _status = None
    def __init__(self, client):
        super(Transaction, self).__init__(client)
        # Server-assigned transaction ID; populated by begin(), cleared by
        # rollback()/commit().
        self._id = None
    @property
    def id(self):
        """Getter for the transaction ID.
        :rtype: str
        :returns: The ID of the current transaction.
        """
        return self._id
    def current(self):
        """Return the topmost transaction.
        .. note::
            If the topmost element on the stack is not a transaction,
            returns None.
        :rtype: :class:`google.cloud.datastore.transaction.Transaction` or None
        :returns: The current transaction (if any are active).
        """
        top = super(Transaction, self).current()
        # Only report a Transaction; a plain Batch on top yields None
        # (implicitly, via falling off the end of the function).
        if isinstance(top, Transaction):
            return top
    def begin(self):
        """Begins a transaction.
        This method is called automatically when entering a with
        statement, however it can be called explicitly if you don't want
        to use a context manager.
        :raises: :class:`~exceptions.ValueError` if the transaction has
            already begun.
        """
        super(Transaction, self).begin()
        try:
            response_pb = self._client._datastore_api.begin_transaction(
                self.project)
            self._id = response_pb.transaction
        except:  # noqa: E722 do not use bare except, specify exception instead
            # Mark this transaction aborted so it cannot be reused, then
            # propagate whatever went wrong during begin_transaction.
            self._status = self._ABORTED
            raise
    def rollback(self):
        """Rolls back the current transaction.
        This method has necessary side-effects:
        - Sets the current transaction's ID to None.
        """
        try:
            # No need to use the response it contains nothing.
            self._client._datastore_api.rollback(self.project, self._id)
        finally:
            super(Transaction, self).rollback()
            # Clear our own ID in case this gets accidentally reused.
            self._id = None
    def commit(self):
        """Commits the transaction.
        This is called automatically upon exiting a with statement,
        however it can be called explicitly if you don't want to use a
        context manager.
        This method has necessary side-effects:
        - Sets the current transaction's ID to None.
        """
        try:
            super(Transaction, self).commit()
        finally:
            # Clear our own ID in case this gets accidentally reused.
            self._id = None
| {
"content_hash": "f55bb60ece8a6ff821e71c660e44519b",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.5971360381861576,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "6108bd80647a6c9b787307d9adc987094b1407b8",
"size": "6861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/datastore/transaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""
Contains functionality to use a Zigbee device as a switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.zigbee/
"""
import voluptuous as vol
from homeassistant.components.switch import SwitchDevice
from homeassistant.components.zigbee import (
ZigBeeDigitalOut, ZigBeeDigitalOutConfig, PLATFORM_SCHEMA)
# Loader dependency on the shared zigbee component.
# (Fixed: this constant was assigned twice with the same value.)
DEPENDENCIES = ['zigbee']
CONF_ON_STATE = 'on_state'
DEFAULT_ON_STATE = 'high'
# Valid digital-out states for the on_state option.
STATES = ['high', 'low']
# Extend the shared zigbee platform schema with an optional on_state choice.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_ON_STATE): vol.In(STATES),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Zigbee switch platform."""
    # Wrap the validated platform config and register a single switch entity.
    add_entities([ZigBeeSwitch(hass, ZigBeeDigitalOutConfig(config))])
class ZigBeeSwitch(ZigBeeDigitalOut, SwitchDevice):
    """Representation of a Zigbee Digital Out device."""
    # All behaviour is inherited from ZigBeeDigitalOut / SwitchDevice.
    pass
| {
"content_hash": "dc0f3f0d74f78c3ea1636baef8dc0251",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.7481081081081081,
"repo_name": "PetePriority/home-assistant",
"id": "81fb8348c4e2e8f54e1d99ccfee3b04fd7b5a77d",
"size": "925",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zigbee/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
# OAuth2 endpoint routing (old-style django string-view patterns()).
urlpatterns = patterns('',
    # Shown when a client omits or misstates its redirect_uri.
    (r'^missing_redirect_uri/?$', 'myresource.apps.oauth2.views.missing_redirect_uri'),
    (r'^authorize/?$', 'myresource.apps.oauth2.views.authorize'),
    (r'^authorize/aadhaar/?$', 'myresource.apps.oauth2.views.authorize_aadhaar'),
    # Token endpoint is handled directly by the oauth2app library.
    (r'^token/?$', 'oauth2app.token.handler'),
)
| {
"content_hash": "35e265fafebd5f8630d93b9edaf79f94",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 98,
"avg_line_length": 53.25,
"alnum_prop": 0.5938967136150235,
"repo_name": "pingali/aadhaar-oauth2-server",
"id": "22693cd021be3ec5a33ba1db7e2c5555a157a281",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myresource/apps/oauth2/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "54980"
},
{
"name": "Python",
"bytes": "78518"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
    """Sphinx extension hook: register our custom static stylesheet."""
    # overrides for wide tables in RTD theme
    app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Parse Markdown sources through recommonmark's CommonMark parser.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    '.md': CommonMarkParser,
}
# NOTE: deliberately overrides the earlier `source_suffix = '.rst'` so that
# both reStructuredText and Markdown documents are picked up.
source_suffix = ['.rst', '.md']
| {
"content_hash": "fb92087b1c2ab81d97e0bd470b12d731",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 80,
"avg_line_length": 32.3598615916955,
"alnum_prop": 0.7045551753635586,
"repo_name": "dwrpayne/zulip",
"id": "b837884828c873ac208768a4ae882c855413f426",
"size": "9787",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "385505"
},
{
"name": "JavaScript",
"bytes": "1571916"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1877232"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from flask.ext.script import Manager
from flask.ext.celery import install_commands as install_celery_commands
from myapp import create_app
# Build the Flask app, then attach Flask-Script and celery management
# commands so `python manage.py <command>` works.
app = create_app()
manager = Manager(app)
install_celery_commands(manager)
if __name__ == "__main__":
    manager.run()
| {
"content_hash": "7f458133a4ecbd1a3c7ed3d194ec8952",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 72,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.7441860465116279,
"repo_name": "ask/flask-celery",
"id": "44573ec5c074599b23745b0d65c42e6a1256721a",
"size": "323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9636"
}
],
"symlink_target": ""
} |
import pickle
import pickletools
from test import support
from test.pickletester import AbstractPickleTests
from test.pickletester import AbstractPickleModuleTests
import unittest
class OptimizedPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
    # Runs the shared pickle test suites, but with every pickle passed
    # through pickletools.optimize() first, to verify that optimization
    # preserves unpickled values.
    def dumps(self, arg, proto=None):
        # Pickle, then strip PUT/GET framing via optimize().
        return pickletools.optimize(pickle.dumps(arg, proto))
    def loads(self, buf, **kwds):
        return pickle.loads(buf, **kwds)
    # Test relies on precise output of dumps()
    test_pickle_to_2x = None
    def test_optimize_long_binget(self):
        # More than 256 memo entries forces LONG_BINPUT/LONG_BINGET opcodes;
        # optimize() must drop them while keeping object identity intact.
        data = [str(i) for i in range(257)]
        data.append(data[-1])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            pickled = pickle.dumps(data, proto)
            unpickled = pickle.loads(pickled)
            self.assertEqual(unpickled, data)
            # compare two strings, xxx don't use assertIs() here:
            self.assertEqual(unpickled[-1], unpickled[-2])
            pickled2 = pickletools.optimize(pickled)
            unpickled2 = pickle.loads(pickled2)
            self.assertEqual(unpickled2, data)
            # compare two strings, xxx don't use assertIs() here:
            self.assertEqual(unpickled2[-1], unpickled2[-2])
            self.assertNotIn(pickle.LONG_BINGET, pickled2)
            self.assertNotIn(pickle.LONG_BINPUT, pickled2)
    def test_optimize_binput_and_memoize(self):
        # Hand-built protocol-4 pickle mixing BINPUT and MEMOIZE; optimize()
        # must remove the BINPUT while keeping the shared-string identity.
        pickled = (b'\x80\x04\x95\x15\x00\x00\x00\x00\x00\x00\x00'
                   b']\x94(\x8c\x04spamq\x01\x8c\x03ham\x94h\x02e.')
        #    0: \x80 PROTO      4
        #    2: \x95 FRAME      21
        #   11: ]    EMPTY_LIST
        #   12: \x94 MEMOIZE
        #   13: (    MARK
        #   14: \x8c     SHORT_BINUNICODE 'spam'
        #   20: q        BINPUT     1
        #   22: \x8c     SHORT_BINUNICODE 'ham'
        #   27: \x94     MEMOIZE
        #   28: h        BINGET     2
        #   30: e        APPENDS    (MARK at 13)
        #   31: .    STOP
        self.assertIn(pickle.BINPUT, pickled)
        unpickled = pickle.loads(pickled)
        self.assertEqual(unpickled, ['spam', 'ham', 'ham'])
        self.assertIs(unpickled[1], unpickled[2])
        pickled2 = pickletools.optimize(pickled)
        unpickled2 = pickle.loads(pickled2)
        self.assertEqual(unpickled2, ['spam', 'ham', 'ham'])
        self.assertIs(unpickled2[1], unpickled2[2])
        self.assertNotIn(pickle.BINPUT, pickled2)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Verify pickletools.__all__ covers the public API; the blacklist
        # enumerates module-level helpers that are intentionally not exported.
        blacklist = {'bytes_types',
                     'UP_TO_NEWLINE', 'TAKEN_FROM_ARGUMENT1',
                     'TAKEN_FROM_ARGUMENT4', 'TAKEN_FROM_ARGUMENT4U',
                     'TAKEN_FROM_ARGUMENT8U', 'ArgumentDescriptor',
                     'read_uint1', 'read_uint2', 'read_int4', 'read_uint4',
                     'read_uint8', 'read_stringnl', 'read_stringnl_noescape',
                     'read_stringnl_noescape_pair', 'read_string1',
                     'read_string4', 'read_bytes1', 'read_bytes4',
                     'read_bytes8', 'read_unicodestringnl',
                     'read_unicodestring1', 'read_unicodestring4',
                     'read_unicodestring8', 'read_decimalnl_short',
                     'read_decimalnl_long', 'read_floatnl', 'read_float8',
                     'read_long1', 'read_long4',
                     'uint1', 'uint2', 'int4', 'uint4', 'uint8', 'stringnl',
                     'stringnl_noescape', 'stringnl_noescape_pair', 'string1',
                     'string4', 'bytes1', 'bytes4', 'bytes8',
                     'unicodestringnl', 'unicodestring1', 'unicodestring4',
                     'unicodestring8', 'decimalnl_short', 'decimalnl_long',
                     'floatnl', 'float8', 'long1', 'long4',
                     'StackObject',
                     'pyint', 'pylong', 'pyinteger_or_bool', 'pybool', 'pyfloat',
                     'pybytes_or_str', 'pystring', 'pybytes', 'pyunicode',
                     'pynone', 'pytuple', 'pylist', 'pydict', 'pyset',
                     'pyfrozenset', 'anyobject', 'markobject', 'stackslice',
                     'OpcodeInfo', 'opcodes', 'code2op',
                     }
        support.check__all__(self, pickletools, blacklist=blacklist)
def test_main():
    # Run both unittest suites plus pickletools' own doctests.
    support.run_unittest(OptimizedPickleTests)
    support.run_unittest(MiscTestCase)
    support.run_doctest(pickletools)
if __name__ == "__main__":
    test_main()
| {
"content_hash": "7690d688cc2785131f459060cffdcceb",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 81,
"avg_line_length": 43.51960784313726,
"alnum_prop": 0.5582338364496509,
"repo_name": "yotchang4s/cafebabepy",
"id": "6bf9d182767f4b4efc194f859a3964b95e6ecd98",
"size": "4439",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/main/python/test/test_pickletools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "10148"
},
{
"name": "Java",
"bytes": "253479"
}
],
"symlink_target": ""
} |
"""
Will measure the distance from one subjects rois to all other subjects in mm on specified surfaces.
Usage:
ciftify_postPINT2_sub2sub [options] <concatenated-pint> <output_sub2sub.csv>
Arguments:
<concatenated-pint> The concatenated PINT outputs (csv file)
<output_sub2sub.csv> The outputfile name
Options:
--surfL SURFACE The left surface on to measure distances on (see details)
--surfR SURFACE The right surface to to measure distances on (see details)
--roiidx INT Measure distances for only this roi (default will loop over all ROIs)
--pvertex-col COLNAME The column [default: pvertex] to read the personlized vertices
--debug Debug logging in Erin's very verbose style
-n,--dry-run Dry run
--help Print help
DETAILS
Requires that all PINT summary files have already been combined into one
"concatenated" input by ciftify_postPINT1_concat.py.
If surfL and surfR are not given, measurements will be done on the
HCP S1200 Average mid-surface.
Will output a csv with four columns. 'subid1', 'subid2', 'roiidx', 'distance'
Written by Erin W Dickie, May 5, 2017
"""
import random
import os
import sys
import logging
import logging.config
import pandas as pd
import numpy as np
import nibabel as nib
from docopt import docopt
import ciftify
# Configure logging from the config file shipped next to this module;
# keep already-created loggers active so ciftify's own loggers still emit.
config_path = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(config_path, disable_existing_loggers=False)
logger = logging.getLogger(os.path.basename(__file__))
def main():
    """Parse CLI arguments, compute sub-to-sub ROI distances, write the csv.

    Reads the concatenated PINT output, measures pairwise inter-subject
    distances (per ROI, or for a single --roiidx) on the given or default
    surfaces, and writes a csv with subid1/subid2/roiidx/distance columns.
    """
    global DEBUG
    global DRYRUN
    arguments = docopt(__doc__)
    allvertices_csv = arguments['<concatenated-pint>']
    output_sub2sub = arguments['<output_sub2sub.csv>']
    surfL = arguments['--surfL']
    surfR = arguments['--surfR']
    roiidx = arguments['--roiidx']
    pvertex_colname = arguments['--pvertex-col']
    DEBUG = arguments['--debug']
    DRYRUN = arguments['--dry-run']

    if DEBUG:
        logger.setLevel(logging.DEBUG)
        logging.getLogger('ciftify').setLevel(logging.DEBUG)

    ciftify.utils.log_arguments(arguments)

    ## Fall back to the HCP S1200 average mid-surfaces per hemisphere.
    ## Fix: previously surfR was only defaulted when surfL was missing,
    ## so passing --surfL alone left surfR as None.
    if not surfL:
        surfL = os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
            'S1200.L.midthickness_MSMAll.32k_fs_LR.surf.gii')
    if not surfR:
        surfR = os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
            'S1200.R.midthickness_MSMAll.32k_fs_LR.surf.gii')

    ## read in the concatenated results
    vertices_df = pd.read_csv(allvertices_csv)
    vertices_df = vertices_df.loc[:, ['subid', 'hemi', 'roiidx', pvertex_colname]]

    if roiidx:
        roiidx = int(roiidx)
        # Fix: membership must be tested against the column *values* --
        # `x in series` tests the index labels, not the data.
        if roiidx in vertices_df['roiidx'].values:
            result = calc_allroiidx_distances(vertices_df, roiidx, surfL,
                                              surfR, pvertex_colname)
        else:
            logger.critical("roiidx argument given is not in the concatenated df")
            sys.exit(1)
    else:
        # No --roiidx: loop over every ROI present, lazily, then concat.
        all_rois = vertices_df.roiidx.unique()
        all_sub2sub = (calc_allroiidx_distances(vertices_df, roi, surfL,
                                                surfR, pvertex_colname)
                       for roi in all_rois)
        result = pd.concat(all_sub2sub, ignore_index=True)

    ## write out the results to a csv
    result.to_csv(output_sub2sub,
                  columns=['subid1', 'subid2', 'roiidx', 'distance'],
                  index=False)
def calc_subdistances_distances(roidf, surf, subid, pvertex_colname):
    '''Distances from one subject's personalized vertex to all other subjects, one ROI.

    Returns a dataframe with columns: subid1, subid2, roiidx, distance.
    '''
    ## look up this subject's personalized vertex for the ROI
    pvertex1 = int(roidf.loc[roidf.loc[:, 'subid'] == subid, pvertex_colname])

    ## build the pairwise frame: subid2/pvertex2 come from every row,
    ## subid1/pvertex1 are constant for this subject
    pairs = roidf.rename(columns={'subid': 'subid2',
                                  pvertex_colname: 'pvertex2'})
    pairs['subid1'] = subid
    pairs['pvertex1'] = pvertex1

    ## surface distances from pvertex1 to every vertex, indexed per pair
    dist_from_v1 = ciftify.niio.get_surf_distances(surf, pvertex1)
    pairs.loc[:, 'distance'] = dist_from_v1[pairs.loc[:, 'pvertex2'], 0]

    ## identical vertices are defined to be at distance 0
    same_vertex = pairs.loc[:, 'pvertex2'] == pairs.loc[:, 'pvertex1']
    pairs.loc[same_vertex, 'distance'] = 0

    ## keep only the reporting columns
    return pairs.loc[:, ['subid1', 'subid2', 'roiidx', 'distance']]
def calc_allroiidx_distances(vertices_df, roi, surfL, surfR, pvertex_colname):
    '''Measure, for one ROI, every subject's distances to all other subjects.'''
    ## pick the surface matching the ROI's hemisphere
    this_roi = vertices_df.roiidx == roi
    hemi = vertices_df.loc[this_roi, 'hemi'].values[0]
    if hemi == "L":
        surf = surfL
    if hemi == "R":
        surf = surfR

    ## restrict to this ROI's rows, then fan out over subjects lazily
    roidf = vertices_df.loc[this_roi, :]
    per_subject = (
        calc_subdistances_distances(roidf, surf, subject, pvertex_colname)
        for subject in vertices_df.subid.unique())

    ## one frame of all subject-to-subject rows for this ROI
    return pd.concat(per_subject, ignore_index=True)
# Script entry point when run directly (not imported).
if __name__ == "__main__":
    main()
| {
"content_hash": "eba3a839aa61de353b8bef7bd85deb07",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 120,
"avg_line_length": 36.289855072463766,
"alnum_prop": 0.6731230031948882,
"repo_name": "edickie/ciftify",
"id": "230a1944c61a005f89420012697d67298b7a6d69",
"size": "5031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ciftify/bin/ciftify_postPINT2_sub2sub.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1948"
},
{
"name": "Jupyter Notebook",
"bytes": "175297"
},
{
"name": "Python",
"bytes": "667858"
},
{
"name": "Shell",
"bytes": "2566"
}
],
"symlink_target": ""
} |
import logging
from holmes.facters import Facter
from holmes.utils import _
class JSFacter(Facter):
    """Collects facts about a page's JavaScript resources: the set of script
    URLs, the request count, and the total plain/gzipped payload size (kb).
    """

    @classmethod
    def get_fact_definitions(cls):
        """Describe the facts this facter produces (title, unit, category)."""
        return {
            'page.js': {
                'title': _('JS'),
                'description': lambda value: list(value),
                'unit': 'js',
                'category': _('Static'),
            },
            'total.requests.js': {
                'title': _('Total JS requests'),
                'description': lambda value: value,
                'category': _('HTTP'),
                'unit': 'number'
            },
            'total.size.js': {
                'title': _('Total JS size'),
                'description': lambda value: '%d' % value,
                'unit': 'kb',
                'category': _('SEO'),
            },
            'total.size.js.gzipped': {
                'title': _('Total JS size gzipped'),
                'description': lambda value: '%d' % value,
                'unit': 'kb',
                'category': _('SEO')
            }
        }

    def get_facts(self):
        """Register zero-valued JS facts, then asynchronously fetch each
        distinct script URL; totals are accumulated in handle_url_loaded."""
        js_files = self.get_js_requests()

        # Mirror the facts in review.data so later stages can read them.
        self.review.data['page.js'] = set()
        self.review.data['total.size.js'] = 0
        self.review.data['total.size.js.gzipped'] = 0

        self.add_fact(
            key='page.js',
            value=set(),
        )
        self.add_fact(
            key='total.size.js',
            value=0,
        )
        self.add_fact(
            key='total.size.js.gzipped',
            value=0,
        )

        # Count only scripts with a usable (normalizable) src; de-duplicate
        # URLs before fetching.
        num_js = 0
        js_to_get = set()
        for js_file in js_files:
            src = js_file.get('src')
            src = self.normalize_url(src)
            if src:
                js_to_get.add(src)
                num_js += 1

        self.add_fact(
            key='total.requests.js',
            value=num_js,
        )

        for url in js_to_get:
            self.async_get(url, self.handle_url_loaded)

    def handle_url_loaded(self, url, response):
        """Callback for each fetched script: record the URL and add its
        plain and gzipped sizes (kb) to the running totals."""
        # Fix: pass lazy %-args to logging instead of pre-formatting the
        # message eagerly with the % operator.
        logging.debug('Got response (%s) from %s!',
                      response.status_code, url)
        self.review.facts['page.js']['value'].add(url)
        self.review.data['page.js'].add((url, response))

        if response.text:
            size_js = len(response.text) / 1024.0
            size_gzip = len(self.to_gzip(response.text)) / 1024.0
        else:
            size_js = 0
            size_gzip = 0

        self.review.facts['total.size.js']['value'] += size_js
        self.review.data['total.size.js'] += size_js
        self.review.facts['total.size.js.gzipped']['value'] += size_gzip
        self.review.data['total.size.js.gzipped'] += size_gzip

    def get_js_requests(self):
        """Return all <script src=...> elements from the current page HTML."""
        return self.reviewer.current_html.cssselect('script[src]')
| {
"content_hash": "67b7d35b99a880c8223d977eda101077",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 75,
"avg_line_length": 28.27,
"alnum_prop": 0.4591439688715953,
"repo_name": "marcelometal/holmes-api",
"id": "0d7e125153b080bd81bffd037b97cfb3ecbb3d3a",
"size": "2870",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "holmes/facters/js.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Indexers for FeedApp, FeedBrand, FeedCollection, FeedShelf, FeedItem for
feed homepage and curation tool search.
"""
from amo.utils import attach_trans_dict
import mkt.carriers
import mkt.feed.constants as feed
import mkt.regions
from mkt.search.indexers import BaseIndexer
from mkt.translations.utils import format_translation_es
from mkt.webapps.models import Webapp
def get_slug_multifield():
    """ES mapping fragment for slugs: an analyzed ``slug`` field plus a
    ``raw`` sub-field kept not_analyzed for exact matching/sorting."""
    # TODO: convert to new syntax on ES 1.0+.
    subfields = {
        'slug': {'type': 'string'},
        'raw': {'type': 'string', 'index': 'not_analyzed'},
    }
    return {'type': 'multi_field', 'fields': subfields}
class FeedAppIndexer(BaseIndexer):
    # Elasticsearch indexer for FeedApp objects (featured-app feed entries).

    @classmethod
    def get_model(cls):
        """Returns the Django model this MappingType relates to"""
        # Imported lazily to avoid circular imports with mkt.feed.models.
        from mkt.feed.models import FeedApp
        return FeedApp

    @classmethod
    def get_mapping(cls):
        """Returns an Elasticsearch mapping for this MappingType"""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'app': {'type': 'long'},
                    'background_color': cls.string_not_analyzed(),
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'image_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    # Preview thumbnails are stored as a free-form object.
                    'preview': {'type': 'object', 'dynamic': 'true'},
                    'pullquote_attribution': cls.string_not_analyzed(),
                    'pullquote_rating': {'type': 'short'},
                    'pullquote_text': {'type': 'string',
                                       'analyzer': 'default_icu'},
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                    'type': cls.string_not_analyzed(),
                }
            }
        }
        # 'description' is translated; attach per-language sub-mappings.
        return cls.attach_translation_mappings(mapping, ('description',))

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document"""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        attach_trans_dict(Webapp, [obj.app])

        doc = {
            'id': obj.id,
            'app': obj.app_id,
            'background_color': obj.background_color,
            'created': obj.created,
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_APP,
            # NOTE(review): getattr with no default is equivalent to
            # obj.preview here; presumably 'preview' is a nullable FK that
            # always exists on the model -- confirm.
            'preview': {'id': obj.preview.id,
                        'thumbnail_size': obj.preview.thumbnail_size,
                        'thumbnail_url': obj.preview.thumbnail_url}
                       if getattr(obj, 'preview') else None,
            'pullquote_attribution': obj.pullquote_attribution,
            'pullquote_rating': obj.pullquote_rating,
            # All localized names of the app, de-duplicated.
            'search_names': list(
                set(string for _, string
                    in obj.app.translations[obj.app.name_id])),
            'slug': obj.slug,
            'type': obj.type,
        }

        # Handle localized fields.
        for field in ('description', 'pullquote_text'):
            doc.update(format_translation_es(obj, field))

        return doc
class FeedBrandIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedBrand objects (editorial brands)."""

    @classmethod
    def get_model(cls):
        """Return the Django model this indexer maps."""
        from mkt.feed.models import FeedBrand
        return FeedBrand

    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch mapping for feed brands."""
        properties = {
            'id': {'type': 'long'},
            'apps': {'type': 'long'},
            'created': {'type': 'date', 'format': 'dateOptionalTime'},
            'layout': cls.string_not_analyzed(),
            'item_type': cls.string_not_analyzed(),
            'slug': get_slug_multifield(),
            'type': {'type': 'string'},
        }
        return {cls.get_mapping_type_name(): {'properties': properties}}

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Serialize one FeedBrand (fetched by pk if not given) into an ES doc."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'created': obj.created,
            'layout': obj.layout,
            'item_type': feed.FEED_TYPE_BRAND,
            'slug': obj.slug,
            'type': obj.type,
        }
        return doc
class FeedCollectionIndexer(BaseIndexer):
    # Elasticsearch indexer for FeedCollection objects, including the
    # per-group app assignment (group_apps / group_names).

    @classmethod
    def get_model(cls):
        """Return the Django model this indexer maps."""
        from mkt.feed.models import FeedCollection
        return FeedCollection

    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch mapping for feed collections."""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'apps': {'type': 'long'},
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'background_color': cls.string_not_analyzed(),
                    # Free-form objects: group membership data (see
                    # extract_document for the exact shapes).
                    'group_apps': {'type': 'object', 'dynamic': 'true'},
                    'group_names': {'type': 'object', 'dynamic': 'true'},
                    'image_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                    'type': cls.string_not_analyzed(),
                }
            }
        }
        # 'description' and 'name' are translated fields.
        return cls.attach_translation_mappings(mapping, ('description',
                                                         'name'))

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Serialize one FeedCollection into an ES document dict."""
        from mkt.feed.models import FeedCollectionMembership

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching/localized output.
        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'background_color': obj.background_color,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_COLL,
            # All localized names of the collection, de-duplicated.
            'search_names': list(
                set(string for _, string
                    in obj.translations[obj.name_id])),
            'slug': obj.slug,
            'type': obj.type,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        # Each distinct group translation gets one slot in group_names;
        # group_apps maps every member app to its group's slot index.
        memberships = obj.feedcollectionmembership_set.all()
        attach_trans_dict(FeedCollectionMembership, memberships)
        for member in memberships:
            if member.group:
                grp_translation = format_translation_es(member, 'group')
                if grp_translation not in doc['group_names']:
                    doc['group_names'].append(grp_translation)
                doc['group_apps'][member.app_id] = (
                    doc['group_names'].index(grp_translation))

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(format_translation_es(obj, field))

        return doc
class FeedShelfIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedShelf objects (carrier/region operator
    shelves)."""

    @classmethod
    def get_model(cls):
        """Return the Django model this indexer maps."""
        from mkt.feed.models import FeedShelf
        return FeedShelf

    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch mapping for operator shelves."""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'apps': {'type': 'long'},
                    'carrier': cls.string_not_analyzed(),
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'image_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    'region': cls.string_not_analyzed(),
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                }
            }
        }
        # 'description' and 'name' are translated fields.
        return cls.attach_translation_mappings(mapping, ('description',
                                                         'name'))

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Serialize one FeedShelf into an ES document dict."""
        if obj is None:
            # Fix: was `cls.get_model().get(pk=pk)` -- Django models have no
            # `get` classmethod, so the pk-lookup path raised AttributeError.
            # Every sibling indexer goes through the default manager.
            obj = cls.get_model().objects.get(pk=pk)
        attach_trans_dict(cls.get_model(), [obj])
        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            # Store carrier/region as their slugs rather than numeric IDs.
            'carrier': mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            'created': obj.created,
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_SHELF,
            'region': mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            # All localized names of the shelf, de-duplicated.
            'search_names': list(set(string for _, string
                                     in obj.translations[obj.name_id])),
            'slug': obj.slug,
        }

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(format_translation_es(obj, field))

        return doc
class FeedItemIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedItem rows, which tie one feed element
    (app, brand, collection, or shelf) to a carrier/region/category slot."""

    @classmethod
    def get_model(cls):
        """Return the Django model this indexer maps."""
        from mkt.feed.models import FeedItem
        return FeedItem

    @classmethod
    def get_mapping(cls):
        """Return the Elasticsearch mapping for feed items."""
        properties = {
            'id': {'type': 'long'},
            'app': {'type': 'long'},
            'brand': {'type': 'long'},
            'carrier': {'type': 'integer'},
            'category': {'type': 'integer'},
            'collection': {'type': 'long'},
            'item_type': cls.string_not_analyzed(),
            'order': {'type': 'integer'},
            'region': {'type': 'integer'},
            'shelf': {'type': 'long'},
        }
        return {cls.get_mapping_type_name(): {'properties': properties}}

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Serialize one FeedItem into an ES doc; only the FK matching the
        item's type is populated, the others are None."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        item_type = obj.item_type
        # If no order, put it at end. Make sure order > 0 since we do a
        # ES reciprocal modifier query.
        order = 100 if obj.order is None else obj.order + 1

        return {
            'id': obj.id,
            'app': obj.app_id if item_type == feed.FEED_TYPE_APP else None,
            'brand': (obj.brand_id if item_type == feed.FEED_TYPE_BRAND
                      else None),
            'carrier': obj.carrier,
            'category': obj.category,
            'collection': (obj.collection_id
                           if item_type == feed.FEED_TYPE_COLL else None),
            'item_type': item_type,
            'order': order,
            'region': obj.region,
            'shelf': (obj.shelf_id if item_type == feed.FEED_TYPE_SHELF
                      else None),
        }
| {
"content_hash": "e08988935ee975abb8dec6c6f7054bd9",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 78,
"avg_line_length": 35.30529595015577,
"alnum_prop": 0.49360275302214773,
"repo_name": "andymckay/zamboni",
"id": "1503c03aa515be3d81048545e23d505294c63c19",
"size": "11333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/feed/indexers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357533"
},
{
"name": "JavaScript",
"bytes": "524153"
},
{
"name": "Python",
"bytes": "3863676"
},
{
"name": "Shell",
"bytes": "14980"
}
],
"symlink_target": ""
} |
"""Policy Network with naive board encoding"""
import os
from src.learn.bots.CommonLearn import CommonLearn
import src.learn.bots.utils as utils
class Learn(CommonLearn):
    """Policy-network learner using the naive board encoding."""

    def handle_data(self, data):
        """Convert the raw game dataframe into (X, y) training arrays,
        expanding boards with their symmetries first."""
        raw_boards = data[data.columns[3:-2]].as_matrix()
        y = utils.policy_output(data['move'])
        # Symmetry expansion must transform y and the colors in lockstep
        # with the boards.
        raw_boards, (y, colors) = self.get_symmetries(
            raw_boards, other_data=[y, data['color']])
        X = utils.encode_board(raw_boards, colors)
        print('X.shape:', X.shape)
        print('Y.shape:', y.shape)
        return X, y

    def get_path_to_self(self):
        """Absolute filesystem path of this learner module."""
        return os.path.abspath(__file__)
# Script entry point: train this learner when run directly.
if __name__ == '__main__':
    Learn().run()
| {
"content_hash": "5895f85778a002f0869bdcfbfe34746f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 53,
"avg_line_length": 23.133333333333333,
"alnum_prop": 0.5951008645533141,
"repo_name": "nathbo/GO_DILab",
"id": "079ca96a429c56a18b77d78656fa339423c100c9",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/learn/bots/_22/learn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "320762"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
} |
from unittest.mock import Mock, patch
from pyramid.config import Configurator
from opentelemetry import trace
from opentelemetry.instrumentation.propagators import (
TraceResponsePropagator,
get_global_response_propagator,
set_global_response_propagator,
)
from opentelemetry.instrumentation.pyramid import PyramidInstrumentor
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.wsgitestutil import WsgiTestBase
from opentelemetry.util.http import get_excluded_urls
# pylint: disable=import-error
from .pyramid_base_test import InstrumentationTest
def expected_attributes(override_attributes):
    """Return the default expected span attributes for a local GET request,
    with *override_attributes* taking precedence over the defaults."""
    defaults = {
        SpanAttributes.HTTP_METHOD: "GET",
        SpanAttributes.HTTP_SERVER_NAME: "localhost",
        SpanAttributes.HTTP_SCHEME: "http",
        SpanAttributes.NET_HOST_PORT: 80,
        SpanAttributes.HTTP_HOST: "localhost",
        SpanAttributes.HTTP_TARGET: "/",
        SpanAttributes.HTTP_FLAVOR: "1.1",
        SpanAttributes.HTTP_STATUS_CODE: 200,
    }
    defaults.update(override_attributes)
    return defaults
class TestProgrammatic(InstrumentationTest, WsgiTestBase):
    """Tests for programmatic (instrument_config) Pyramid instrumentation:
    span creation, attributes, exclusion lists, and tween configuration."""

    def setUp(self):
        """Instrument a fresh Configurator and patch the excluded-URL list."""
        super().setUp()
        config = Configurator()
        PyramidInstrumentor().instrument_config(config)

        self.config = config
        self._common_initialization(self.config)
        # Exclusion list is read from the environment by the instrumentation.
        self.env_patch = patch.dict(
            "os.environ",
            {
                "OTEL_PYTHON_PYRAMID_EXCLUDED_URLS": "http://localhost/excluded_arg/123,excluded_noarg"
            },
        )
        self.env_patch.start()
        # Force the callbacks module to re-read the (patched) env var.
        self.exclude_patch = patch(
            "opentelemetry.instrumentation.pyramid.callbacks._excluded_urls",
            get_excluded_urls("PYRAMID"),
        )
        self.exclude_patch.start()

    def tearDown(self):
        """Uninstrument the config so later tests start clean."""
        super().tearDown()
        with self.disable_logging():
            PyramidInstrumentor().uninstrument_config(self.config)

    def test_uninstrument(self):
        """After uninstrument_config, requests produce no new spans."""
        resp = self.client.get("/hello/123")
        self.assertEqual(200, resp.status_code)
        self.assertEqual([b"Hello: 123"], list(resp.response))
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)

        PyramidInstrumentor().uninstrument_config(self.config)
        # Need to remake the WSGI app export
        self._common_initialization(self.config)

        resp = self.client.get("/hello/123")
        self.assertEqual(200, resp.status_code)
        self.assertEqual([b"Hello: 123"], list(resp.response))
        span_list = self.memory_exporter.get_finished_spans()
        # Still 1: no additional span was recorded after uninstrumenting.
        self.assertEqual(len(span_list), 1)

    def test_simple(self):
        """A matched route yields one SERVER span named after the route."""
        expected_attrs = expected_attributes(
            {
                SpanAttributes.HTTP_TARGET: "/hello/123",
                SpanAttributes.HTTP_ROUTE: "/hello/{helloid}",
            }
        )
        self.client.get("/hello/123")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
        self.assertEqual(span_list[0].name, "/hello/{helloid}")
        self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
        self.assertEqual(span_list[0].attributes, expected_attrs)

    def test_response_headers(self):
        """The global response propagator injects trace info into headers."""
        orig = get_global_response_propagator()
        set_global_response_propagator(TraceResponsePropagator())

        response = self.client.get("/hello/500")
        self.assertTraceResponseHeaderMatchesSpan(
            response.headers, self.memory_exporter.get_finished_spans()[0]
        )

        # Restore whatever propagator was set before this test.
        set_global_response_propagator(orig)

    def test_not_recording(self):
        """A non-recording span must not get attributes or status set."""
        mock_tracer = Mock()
        mock_span = Mock()
        mock_span.is_recording.return_value = False
        mock_tracer.start_span.return_value = mock_span
        # NOTE(review): the patch is not wired to return mock_tracer
        # (no new=/return_value), so the instrumentation sees a default
        # Mock; the is_recording assertions below are satisfied by the
        # direct calls in this test -- confirm this matches the intent.
        with patch("opentelemetry.trace.get_tracer"):
            self.client.get("/hello/123")
            span_list = self.memory_exporter.get_finished_spans()
            self.assertEqual(len(span_list), 0)
            self.assertFalse(mock_span.is_recording())
            self.assertTrue(mock_span.is_recording.called)
            self.assertFalse(mock_span.set_attribute.called)
            self.assertFalse(mock_span.set_status.called)

    def test_404(self):
        """An unmatched route records a span named 'HTTP <method>'."""
        expected_attrs = expected_attributes(
            {
                SpanAttributes.HTTP_METHOD: "POST",
                SpanAttributes.HTTP_TARGET: "/bye",
                SpanAttributes.HTTP_STATUS_CODE: 404,
            }
        )

        resp = self.client.post("/bye")
        self.assertEqual(404, resp.status_code)
        resp.close()
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
        self.assertEqual(span_list[0].name, "HTTP POST")
        self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
        self.assertEqual(span_list[0].attributes, expected_attrs)

    def test_internal_error(self):
        """A handled 500 response is still recorded with its status code."""
        expected_attrs = expected_attributes(
            {
                SpanAttributes.HTTP_TARGET: "/hello/500",
                SpanAttributes.HTTP_ROUTE: "/hello/{helloid}",
                SpanAttributes.HTTP_STATUS_CODE: 500,
            }
        )
        resp = self.client.get("/hello/500")
        self.assertEqual(500, resp.status_code)
        resp.close()
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
        self.assertEqual(span_list[0].name, "/hello/{helloid}")
        self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
        self.assertEqual(span_list[0].attributes, expected_attrs)

    def test_internal_exception(self):
        """An uncaught exception in the view still finishes the span (500)."""
        expected_attrs = expected_attributes(
            {
                SpanAttributes.HTTP_TARGET: "/hello/900",
                SpanAttributes.HTTP_ROUTE: "/hello/{helloid}",
                SpanAttributes.HTTP_STATUS_CODE: 500,
            }
        )
        # The test view raises NotImplementedError for id 900.
        with self.assertRaises(NotImplementedError):
            resp = self.client.get("/hello/900")
            resp.close()

        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
        self.assertEqual(span_list[0].name, "/hello/{helloid}")
        self.assertEqual(span_list[0].kind, trace.SpanKind.SERVER)
        self.assertEqual(span_list[0].attributes, expected_attrs)

    def test_tween_list(self):
        """Instrumentation works when the trace tween is listed explicitly."""
        tween_list = "opentelemetry.instrumentation.pyramid.trace_tween_factory\npyramid.tweens.excview_tween_factory"
        config = Configurator(settings={"pyramid.tweens": tween_list})
        PyramidInstrumentor().instrument_config(config)
        self._common_initialization(config)

        resp = self.client.get("/hello/123")
        self.assertEqual(200, resp.status_code)
        self.assertEqual([b"Hello: 123"], list(resp.response))
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)

        PyramidInstrumentor().uninstrument_config(config)
        # Need to remake the WSGI app export
        self._common_initialization(config)

        resp = self.client.get("/hello/123")
        self.assertEqual(200, resp.status_code)
        self.assertEqual([b"Hello: 123"], list(resp.response))
        span_list = self.memory_exporter.get_finished_spans()
        # Still 1: no additional span after uninstrumenting.
        self.assertEqual(len(span_list), 1)

    @patch("opentelemetry.instrumentation.pyramid.callbacks._logger")
    def test_warnings(self, mock_logger):
        """A tween list missing either required factory logs a warning and
        records no spans."""
        # Case 1: trace tween factory absent from the explicit tween list.
        tween_list = "pyramid.tweens.excview_tween_factory"
        config = Configurator(settings={"pyramid.tweens": tween_list})
        PyramidInstrumentor().instrument_config(config)
        self._common_initialization(config)

        self.client.get("/hello/123")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 0)
        self.assertEqual(mock_logger.warning.called, True)

        # NOTE(review): resets the flag by assignment rather than
        # mock_logger.warning.reset_mock() -- confirm this is intended.
        mock_logger.warning.called = False

        # Case 2: trace tween present but config was never instrumented.
        tween_list = (
            "opentelemetry.instrumentation.pyramid.trace_tween_factory"
        )
        config = Configurator(settings={"pyramid.tweens": tween_list})
        self._common_initialization(config)

        self.client.get("/hello/123")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 0)
        self.assertEqual(mock_logger.warning.called, True)

    def test_exclude_lists(self):
        """URLs on the exclusion list produce no spans; others still do."""
        self.client.get("/excluded_arg/123")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 0)

        self.client.get("/excluded_arg/125")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)

        self.client.get("/excluded_noarg")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)

        # 'excluded_noarg' also matches this path as a substring pattern.
        self.client.get("/excluded_noarg2")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
| {
"content_hash": "5246ddc8d817a2772bef4a61f52d628f",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 118,
"avg_line_length": 38.727659574468085,
"alnum_prop": 0.6411383364465444,
"repo_name": "open-telemetry/opentelemetry-python-contrib",
"id": "d3a4fa91db1697e1a9e541d3353de2f31a9a6041",
"size": "9686",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "instrumentation/opentelemetry-instrumentation-pyramid/tests/test_programmatic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "118"
},
{
"name": "Python",
"bytes": "1960979"
},
{
"name": "Shell",
"bytes": "7256"
}
],
"symlink_target": ""
} |
"""
Django settings for IrrigatorPro Web Site
"""
import os
import re
import sys
###
# Where am I?
###
TEST_RUNNER = 'uga.test_runner.ManagedModelTestRunner'
ABSOLUTE_PROJECT_ROOT = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..'
)
)
###
# Number of days in future where we generate a water register.
###
WATER_REGISTER_DELTA = 7
###
# Site settings
###
# Name displayed on pages, for easy change
SITE_NAME = "IrrigatorPro (Devel)"
SITE_ID = 1
ADMINS = (
('Gregory R. Warnes', 'greg@warnes.net' ),
('Bill Edwards', 'edwardsb2001@yahoo.com'),
('Alain Leblanc', 'aalebl@gmail.com' ),
('Tony Winter', 'tony@warnes.net' ),
)
MANAGERS = ADMINS
###
# Debug settings
###
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#COMPUTE_FULL_SEASON = True # Calculate water register through the end of the season.
COMPUTE_FULL_SEASON = False # If False, only calculate water register through today + 5 days.
###
# Paths
###
ABSOLUTE_TEMPLATES_PATH = os.path.abspath(os.path.join(ABSOLUTE_PROJECT_ROOT, 'templates/'))
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(ABSOLUTE_PROJECT_ROOT, 'static/'))
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.abspath(os.path.join(ABSOLUTE_PROJECT_ROOT, 'media/'))
# URL that handles the media, static, etc.
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.abspath(os.path.join(ABSOLUTE_PROJECT_ROOT, 'staticfiles/')),
)
###
# Database settings
###
# Database configuration. Development uses a local sqlite3 file; the
# commented-out entries sketch the production postgres setup.
DATABASES = {
    'default': {
        ## Use sqlite3 for development ##
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(ABSOLUTE_PROJECT_ROOT, 'db.sqlite3'),
        # NOTE(review): for sqlite Django expects this under
        # 'OPTIONS': {'timeout': 20}; as a top-level key it is likely
        # ignored -- confirm against the Django version in use.
        'timeout': 20,
        ##
        # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        #'ENGINE': 'django.db.backends.postgresql_psycopg2',
        #'NAME': 'prod_database_name',
        # The rest is not used with sqlite3:
        # NOTE(review): credentials committed in source -- move to an
        # environment variable or secrets file before production use.
        'USER': 'prod_user',
        'PASSWORD': 'prod_p@ssword',
        'HOST': 'localhost',
        'PORT': '',
    }
}
###
# Localization
###
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Eastern'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
###
# Finders/Loaders
###
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'common.middleware.AuditMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'session_security.middleware.SessionSecurityMiddleware',
)
ROOT_URLCONF = 'irrigator_pro.urls'
# Python dotted path to the WSGI application used by Django's runserver.
# disabled - outsite the app
WSGI_APPLICATION = 'irrigator_pro.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
ABSOLUTE_TEMPLATES_PATH,
)
TEMPLATE_CONTEXT_PROCESSORS = (
'home.context_processors.sitevars',
# default template context processors
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
# required by django-admin-tools and django-allauth
'django.core.context_processors.request',
#'django.contrib.messages.context_processors.messages',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'home.context_processors.global_settings',
)
###
# APPS
###
# django debugging stuff
ADMIN_TOOL_APPS = (
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'smuggler',
'admin_steroids',
'hijack',
)
# django
CORE_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# django admin
'django.contrib.admin',
'django.contrib.admindocs',
)
EXTERNAL_APPS = (
'model_blocks',
'django_extensions',
'session_security',
'extra_views',
'mathfilters',
)
AUTHENTICATION_APPS = (
'allauth',
'allauth.account',
'allauth.socialaccount',
# ... include the providers you want to enable:
# 'allauth.socialaccount.providers.amazon',
# 'allauth.socialaccount.providers.angellist',
# 'allauth.socialaccount.providers.bitbucket',
# 'allauth.socialaccount.providers.bitly',
# 'allauth.socialaccount.providers.dropbox',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.flickr',
# 'allauth.socialaccount.providers.feedly',
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.instagram',
# 'allauth.socialaccount.providers.linkedin',
# 'allauth.socialaccount.providers.linkedin_oauth2',
# 'allauth.socialaccount.providers.openid',
# 'allauth.socialaccount.providers.persona',
# 'allauth.socialaccount.providers.soundcloud',
# 'allauth.socialaccount.providers.stackexchange',
# 'allauth.socialaccount.providers.tumblr',
# 'allauth.socialaccount.providers.twitch',
# 'allauth.socialaccount.providers.twitter',
# 'allauth.socialaccount.providers.vimeo',
# 'allauth.socialaccount.providers.vk',
# 'allauth.socialaccount.providers.weibo',
)
LOCAL_APPS = (
'extra_fixtures', # only holds /fixtures
'contact_info',
'common',
'farms',
'uga',
'irrigator_pro',
'notifications',
'home',
)
# the order is important!
INSTALLED_APPS = LOCAL_APPS \
+ ADMIN_TOOL_APPS \
+ CORE_APPS \
+ EXTERNAL_APPS \
+ AUTHENTICATION_APPS
# Routers maps which models and records to databases
DATABASE_ROUTERS = [ 'uga.router.UGARouter' ]
###
# Logging
###
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only pass records through when DEBUG is False (production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        # New in Django 1.5
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        # Email the ADMINS on errors, but only in production.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # Request-handling errors (HTTP 500s) go to the admins by mail.
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
###
# Email Settings
###
# Outgoing mail is sent through a local SMTP server by default;
# override these in local.py for other environments.
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
DEFAULT_FROM_EMAIL = 'webmaster@irrigatorpro.org'
SERVER_EMAIL = 'webmaster@irrigatorpro.org'
# EMAIL_PORT = ''
# EMAIL_HOST_USER = ''
# EMAIL_HOST_PASSWORD = ''
# EMAIL_USE_TLS = ''
###
# Authentication Settings
###
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    "django.contrib.auth.backends.ModelBackend",
    # `allauth` specific authentication methods, such as login by e-mail
    "allauth.account.auth_backends.AuthenticationBackend",
)
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = False
LOGIN_REDIRECT_URL = '/farm/report/summary_report/'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/farm/settings/contact_info/'
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/farm/settings/contact_info/'
ACCOUNT_LOGIN_REDIRECT_URL = '/farm/settings/contact_info/'
ACCOUNT_AUTHENTICATION_METHOD = "email"  # User can login using either userid or email
ACCOUNT_EMAIL_REQUIRED = True            # User is required to hand over an e-mail address when signing up.
ACCOUNT_EMAIL_VERIFICATION = "mandatory" # User is blocked from logging in until the email address is verified.
ACCOUNT_USERNAME_REQUIRED = False        # Do not prompt the user to enter a username
ACCOUNT_PASSWORD_MIN_LENGTH = 8          # Minimum password length.
ACCOUNT_LOGOUT_ON_GET = True             # User is automatically logged out by a mere GET request
ACCOUNT_USER_DISPLAY = lambda user: user.email  # show the e-mail address wherever allauth displays a user
###
# django-session-security Session Timeout Settings
###
# WARN_AFTER
#   Time (in seconds) before the user should be warned that is session will
#   expire because of inactivity. Default 540.
# SESSION_SECURITY_WARN_AFTER=540
# EXPIRE_AFTER
#   Time (in seconds) before the user should be logged out if inactive. Default
#   is 600.
# SESSION_SECURITY_EXPIRE_AFTER=600
# PASSIVE_URLS
#   List of urls that should be ignored by the middleware. For example the
#   ping ajax request of session_security is made without user intervention,
#   as such it should not be used to update the user's last activity datetime.
# SESSION_SECURITY_PASSIVE_URLS=[]
# EXPIRE_AT_BROWSER_CLOSE
#   Required for this module to operate properly
SESSION_EXPIRE_AT_BROWSER_CLOSE=True
###
## django-admin-tools Settings
###
ADMIN_TOOLS_THEMING_CSS = 'css/theming.css'
###
## Google Analytics
##
# Google Analytics Key -- This is a placeholder to avoid errors if not set.
# Set the actual value in local.py
GA_KEY = ""
###
## Setting for the notification emails. Actual values need to be set in local.py
###
# SMTP server hostname
NOTIFICATION_SMTP = "localhost"
# URL for Notification link back to server
NOTIFICATION_HOST = "http://irrigatorpro.org/farm/report/summary_report/"
# SMTP server port
NOTIFICATION_PORT = "25"
###
## Setting for the sms notification emails. Actual values need to be set in local.py
###
TWILIO_ACCOUNT_SID = ""
TWILIO_AUTH_TOKEN = ""
TWILIO_PHONE_NUMBER = ""
###
# This avoids warnings messages about old test runner
###
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
###
# Settings for django-smuggler
###
#
# List of models to be excluded from dump. Use the form 'app_label.ModelName'. Default: [].
# SMUGGLER_EXCLUDE_LIST = []
#
# Uploaded fixtures are stored in this directory (if requested). Default: None.
SMUGGLER_FIXTURE_DIR = ABSOLUTE_PROJECT_ROOT + "/fixtures"
#
# Format for dumped files. Any of the serialization formats supported by Django, json, xml and in some cases yaml. Default: 'json'.
# SMUGGLER_FORMAT = "json"
#
# Indentation for dumped files. Default: 2.
# SMUGGLER_INDENT = 2
###
# Settings for django-hijack
###
HIJACK_NOTIFY_ADMIN = True
HIJACK_LOGIN_REDIRECT_URL = "/farm/report/summary_report/"
REVERSE_HIJACK_LOGIN_REDIRECT_URL = '/farm/admin/auth/user/'
| {
"content_hash": "25c3d8265737b3ee85fd215631eb371c",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 131,
"avg_line_length": 29.621380846325167,
"alnum_prop": 0.6765413533834587,
"repo_name": "warnes/irrigatorpro",
"id": "b7267566a839d028fadae7ea56ad430ab75c8239",
"size": "13326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irrigator_pro/irrigator_pro/settings/default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8346"
},
{
"name": "CSS",
"bytes": "371081"
},
{
"name": "HTML",
"bytes": "1157566"
},
{
"name": "JavaScript",
"bytes": "557792"
},
{
"name": "Python",
"bytes": "373175"
},
{
"name": "Shell",
"bytes": "3849"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add the many-to-many ``contributors`` field to ``Collection``."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('statmaps', '0003_collection_journal_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='collection',
            name='contributors',
            # BUG FIX: related_name was the bytes literal
            # b'collection_contributors' — an artifact of running
            # makemigrations under Python 2 with `unicode_literals`.
            # A bytes related_name breaks on Python 3 / newer Django,
            # so use the equivalent str literal.
            field=models.ManyToManyField(related_name='collection_contributors',
                                         to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "27ad062b339fdcbb04c0b7ebff29425b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 111,
"avg_line_length": 28,
"alnum_prop": 0.6564625850340136,
"repo_name": "NeuroVault/NeuroVault",
"id": "e05ce510e0cc392790b59cd61668e71591171e60",
"size": "612",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neurovault/apps/statmaps/migrations/0004_collection_contributors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5862"
},
{
"name": "HTML",
"bytes": "191016"
},
{
"name": "JavaScript",
"bytes": "26595"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "598856"
},
{
"name": "Shell",
"bytes": "4437"
}
],
"symlink_target": ""
} |
"""Limited version of os module: only keep what is more or less relevant in a
browser context
"""
import abc
import sys
error = OSError
name = 'posix'
linesep = '\n'
from posix import *
import posixpath as path
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
environ = {'HOME': __BRYTHON__.curdir,
'PYTHONPATH': __BRYTHON__.brython_path
}
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    try:
        return environ[key]
    except KeyError:
        return default
# Advertise bytes-environ support to match the CPython os API surface.
supports_bytes_environ = True
def chdir(path):
    """Change the 'current directory' tracked by the Brython runtime."""
    __BRYTHON__.curdir = path
def fsencode(filename):
    """Encode *filename* to the filesystem encoding.

    ``bytes`` are returned unchanged; ``str`` is encoded with the
    'surrogateescape' error handler. On Windows, use 'strict' error
    handler if the file system encoding is 'mbcs' (which is the default
    encoding). Anything else raises TypeError.
    """
    if isinstance(filename, bytes):
        return filename
    if isinstance(filename, str):
        return filename.encode(sys.getfilesystemencoding(), 'surrogateescape')
    raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
    """Decode *filename* from the filesystem encoding.

    ``str`` is returned unchanged; ``bytes`` are decoded with the
    'surrogateescape' error handler. On Windows, use 'strict' error
    handler if the file system encoding is 'mbcs' (which is the default
    encoding). Anything else raises TypeError.
    """
    if isinstance(filename, str):
        return filename
    if isinstance(filename, bytes):
        return filename.decode(sys.getfilesystemencoding(), 'surrogateescape')
    raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fspath(path):
    """Return the file-system path representation of *path*.

    ``str`` and ``bytes`` objects are returned unchanged.  Objects
    implementing the ``__fspath__`` protocol (see the ``PathLike`` ABC
    defined in this module) are converted by calling that method, matching
    CPython's ``os.fspath``.  Any other object is returned unchanged,
    preserving this module's historically permissive behaviour (CPython
    would raise ``TypeError`` here, but existing callers may rely on the
    pass-through).
    """
    if isinstance(path, (str, bytes)):
        return path
    # Look the protocol method up on the type, as CPython does, so that an
    # instance attribute named __fspath__ is ignored.
    fspath_method = getattr(type(path), '__fspath__', None)
    if fspath_method is not None:
        return fspath_method(path)
    return path
def getcwd():
    """Return the 'current directory' tracked by the Brython runtime."""
    return __BRYTHON__.curdir
class PathLike(abc.ABC):
    """Abstract base class for implementing the file system path protocol."""
    @abc.abstractmethod
    def __fspath__(self):
        """Return the file system path representation of the object."""
        raise NotImplementedError
    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: any class providing __fspath__ is considered a
        # virtual subclass.  NOTE(review): simpler than CPython's version,
        # which only applies this hook when `cls is PathLike`.
        return hasattr(subclass, '__fspath__')
# Windows-only DLL search-path helpers, mirrored from CPython's os module.
# NOTE: `name` is hard-coded to 'posix' above, so this branch never runs in
# Brython; it is kept for source compatibility with CPython's os.py.
if name == 'nt':
    class _AddedDllDirectory:
        # Handle returned by add_dll_directory(); close() undoes the addition.
        def __init__(self, path, cookie, remove_dll_directory):
            self.path = path
            self._cookie = cookie
            self._remove_dll_directory = remove_dll_directory
        def close(self):
            self._remove_dll_directory(self._cookie)
            self.path = None
        def __enter__(self):
            return self
        def __exit__(self, *args):
            self.close()
        def __repr__(self):
            if self.path:
                return "<AddedDllDirectory({!r})>".format(self.path)
            return "<AddedDllDirectory()>"
    def add_dll_directory(path):
        """Add a path to the DLL search path.

        This search path is used when resolving dependencies for imported
        extension modules (the module itself is resolved through sys.path),
        and also by ctypes.

        Remove the directory by calling close() on the returned object or
        using it in a with statement.
        """
        import nt
        cookie = nt._add_dll_directory(path)
        return _AddedDllDirectory(
            path,
            cookie,
            nt._remove_dll_directory
        )
def scandir(*args, **kw):
    """Directory scanning is not available in the browser sandbox."""
    raise NotImplementedError
# None of the fd-based variants are supported in the browser, so all of the
# os.supports_* capability sets are the same (shared) empty set.
_set = set()
supports_dir_fd = _set
supports_effective_ids = _set
supports_fd = _set
supports_follow_symlinks = _set
| {
"content_hash": "f4636b326dd0d683e8e883614057a33a",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 27.928057553956833,
"alnum_prop": 0.639361154044307,
"repo_name": "kikocorreoso/brython",
"id": "9e736a6a4898ab591bf8af3c5ea4b57e3db9f543",
"size": "3882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/src/Lib/os.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21158"
},
{
"name": "HTML",
"bytes": "5011615"
},
{
"name": "JavaScript",
"bytes": "7230101"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19224768"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
import sys
from . import main
# Entry point for `python -m schema_matching`; propagate main()'s return
# value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "f8d958c4c9a9e31f29eb7e016ae5ac94",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 15.2,
"alnum_prop": 0.5789473684210527,
"repo_name": "davidfoerster/schema-matching",
"id": "dbb189cc5f482b54b34718d46251d49fbbd7ba6e",
"size": "76",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/schema_matching/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "719"
},
{
"name": "Python",
"bytes": "74814"
}
],
"symlink_target": ""
} |
"Test posix functions"
from test import support
# Skip these tests if there is no posix module.
posix = support.import_module('posix')
import errno
import sys
import time
import os
import platform
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
# Throw-away symlink path (in the system temp dir) used by lchown/lchflags tests.
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
                              support.TESTFN + '-dummy-symlink')
class PosixTester(unittest.TestCase):
    def setUp(self):
        """Create an empty TESTFN file and suppress chmod/chflags warnings."""
        # create empty file
        fp = open(support.TESTFN, 'w+')
        fp.close()
        self.teardown_files = [ support.TESTFN ]
        self._warnings_manager = support.check_warnings()
        self._warnings_manager.__enter__()
        warnings.filterwarnings('ignore', '.* potential security risk .*',
                                RuntimeWarning)
    def tearDown(self):
        """Remove every file registered in teardown_files; restore warnings."""
        for teardown_file in self.teardown_files:
            support.unlink(teardown_file)
        self._warnings_manager.__exit__(None, None, None)
    def testNoArgFunctions(self):
        # test posix functions which take no arguments and have
        # no side-effects which we need to cleanup (e.g., fork, wait, abort)
        NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
                             "times", "getloadavg",
                             "getegid", "geteuid", "getgid", "getgroups",
                             "getpid", "getpgrp", "getppid", "getuid", "sync",
                           ]
        for name in NO_ARG_FUNCTIONS:
            posix_func = getattr(posix, name, None)
            if posix_func is not None:
                posix_func()
                # calling with any argument must be rejected
                self.assertRaises(TypeError, posix_func, 1)
    @unittest.skipUnless(hasattr(posix, 'getresuid'),
                         'test needs posix.getresuid()')
    def test_getresuid(self):
        # real/effective/saved uid triple; all must be non-negative
        user_ids = posix.getresuid()
        self.assertEqual(len(user_ids), 3)
        for val in user_ids:
            self.assertGreaterEqual(val, 0)
    @unittest.skipUnless(hasattr(posix, 'getresgid'),
                         'test needs posix.getresgid()')
    def test_getresgid(self):
        # real/effective/saved gid triple; all must be non-negative
        group_ids = posix.getresgid()
        self.assertEqual(len(group_ids), 3)
        for val in group_ids:
            self.assertGreaterEqual(val, 0)
    @unittest.skipUnless(hasattr(posix, 'setresuid'),
                         'test needs posix.setresuid()')
    def test_setresuid(self):
        # setting the current ids back is always allowed
        current_user_ids = posix.getresuid()
        self.assertIsNone(posix.setresuid(*current_user_ids))
        # -1 means don't change that value.
        self.assertIsNone(posix.setresuid(-1, -1, -1))
    @unittest.skipUnless(hasattr(posix, 'setresuid'),
                         'test needs posix.setresuid()')
    def test_setresuid_exception(self):
        # Don't do this test if someone is silly enough to run us as root.
        current_user_ids = posix.getresuid()
        if 0 not in current_user_ids:
            new_user_ids = (current_user_ids[0]+1, -1, -1)
            self.assertRaises(OSError, posix.setresuid, *new_user_ids)
    @unittest.skipUnless(hasattr(posix, 'setresgid'),
                         'test needs posix.setresgid()')
    def test_setresgid(self):
        # setting the current ids back is always allowed
        current_group_ids = posix.getresgid()
        self.assertIsNone(posix.setresgid(*current_group_ids))
        # -1 means don't change that value.
        self.assertIsNone(posix.setresgid(-1, -1, -1))
    @unittest.skipUnless(hasattr(posix, 'setresgid'),
                         'test needs posix.setresgid()')
    def test_setresgid_exception(self):
        # Don't do this test if someone is silly enough to run us as root.
        current_group_ids = posix.getresgid()
        if 0 not in current_group_ids:
            new_group_ids = (current_group_ids[0]+1, -1, -1)
            self.assertRaises(OSError, posix.setresgid, *new_group_ids)
    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs os.initgroups()")
    def test_initgroups(self):
        # It takes a string and an integer; check that it raises a TypeError
        # for other argument lists.
        self.assertRaises(TypeError, posix.initgroups)
        self.assertRaises(TypeError, posix.initgroups, None)
        self.assertRaises(TypeError, posix.initgroups, 3, "foo")
        self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
        # If a non-privileged user invokes it, it should fail with OSError
        # EPERM.
        if os.getuid() != 0:
            try:
                name = pwd.getpwuid(posix.getuid()).pw_name
            except KeyError:
                # the current UID may not have a pwd entry
                raise unittest.SkipTest("need a pwd entry")
            try:
                posix.initgroups(name, 13)
            except OSError as e:
                self.assertEqual(e.errno, errno.EPERM)
            else:
                self.fail("Expected OSError to be raised by initgroups")
    @unittest.skipUnless(hasattr(posix, 'statvfs'),
                         'test needs posix.statvfs()')
    def test_statvfs(self):
        # filesystem stats for the current directory must be truthy
        self.assertTrue(posix.statvfs(os.curdir))
    @unittest.skipUnless(hasattr(posix, 'fstatvfs'),
                         'test needs posix.fstatvfs()')
    def test_fstatvfs(self):
        # both fstatvfs() and statvfs() must accept a file descriptor
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstatvfs(fp.fileno()))
            self.assertTrue(posix.statvfs(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'ftruncate'),
                         'test needs posix.ftruncate()')
    def test_ftruncate(self):
        fp = open(support.TESTFN, 'w+')
        try:
            # we need to have some data to truncate
            fp.write('test')
            fp.flush()
            posix.ftruncate(fp.fileno(), 0)
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
    def test_truncate(self):
        # truncate by path rather than by descriptor
        with open(support.TESTFN, 'w') as fp:
            fp.write('test')
            fp.flush()
        posix.truncate(support.TESTFN, 0)
    @unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_fexecve(self):
        # exec the interpreter via an open fd in a forked child and wait
        # for a clean (status 0) exit.
        fp = os.open(sys.executable, os.O_RDONLY)
        try:
            pid = os.fork()
            if pid == 0:
                os.chdir(os.path.split(sys.executable)[0])
                posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
            else:
                self.assertEqual(os.waitpid(pid, 0), (pid, 0))
        finally:
            os.close(fp)
    @unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    def test_waitid(self):
        # waitid() must report the pid of the forked child in si_pid
        pid = os.fork()
        if pid == 0:
            os.chdir(os.path.split(sys.executable)[0])
            posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
        else:
            res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
            self.assertEqual(pid, res.si_pid)
    @unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
    def test_lockf(self):
        # lock then unlock the first 4 bytes of the file
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            posix.lockf(fd, posix.F_LOCK, 4)
            # section is locked
            posix.lockf(fd, posix.F_ULOCK, 4)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
    def test_pread(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(b'es', posix.pread(fd, 2, 1))
            # the first pread() shouldn't disturb the file offset
            self.assertEqual(b'te', posix.read(fd, 2))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
    def test_pwrite(self):
        # positional write at offset 1 must not move the file offset
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            posix.pwrite(fd, b'xx', 1)
            self.assertEqual(b'txxt', posix.read(fd, 4))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
                         "test needs posix.posix_fallocate()")
    def test_posix_fallocate(self):
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            posix.posix_fallocate(fd, 0, 10)
        except OSError as inst:
            # issue10812, ZFS doesn't appear to support posix_fallocate,
            # so skip Solaris-based since they are likely to have ZFS.
            if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
                raise
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
                         "test needs posix.posix_fadvise()")
    def test_posix_fadvise(self):
        # advisory call only; success (no exception) is the assertion
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
    def test_utime_with_fd(self):
        now = time.time()
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            # valid forms: no times, None, 2-tuples of ints/floats, ns=
            posix.utime(fd)
            posix.utime(fd, None)
            self.assertRaises(TypeError, posix.utime, fd, (None, None))
            self.assertRaises(TypeError, posix.utime, fd, (now, None))
            self.assertRaises(TypeError, posix.utime, fd, (None, now))
            posix.utime(fd, (int(now), int(now)))
            posix.utime(fd, (now, now))
            # times= and ns= are mutually exclusive
            self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
            self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
            self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
            posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
            posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
    def test_utime_nofollow_symlinks(self):
        now = time.time()
        # same argument validation, with follow_symlinks=False
        posix.utime(support.TESTFN, None, follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
        posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
        posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
        posix.utime(support.TESTFN, follow_symlinks=False)
    @unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
    def test_writev(self):
        # scatter-gather write of three buffers, then read back as one
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            n = os.writev(fd, (b'test1', b'tt2', b't3'))
            self.assertEqual(n, 10)
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.writev(fd, [])
            except OSError:
                # writev(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
    def test_readv(self):
        # scatter-gather read into three pre-sized buffers
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test1tt2t3')
            os.lseek(fd, 0, os.SEEK_SET)
            buf = [bytearray(i) for i in [5, 3, 2]]
            self.assertEqual(posix.readv(fd, buf), 10)
            self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])
            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.readv(fd, [])
            except OSError:
                # readv(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'dup'),
                         'test needs posix.dup()')
    def test_dup(self):
        # dup() returns a fresh integer descriptor
        fp = open(support.TESTFN)
        try:
            fd = posix.dup(fp.fileno())
            self.assertIsInstance(fd, int)
            os.close(fd)
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'confstr'),
                         'test needs posix.confstr()')
    def test_confstr(self):
        # invalid name raises; a known name yields a non-empty string
        self.assertRaises(ValueError, posix.confstr, "CS_garbage")
        self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
    @unittest.skipUnless(hasattr(posix, 'dup2'),
                         'test needs posix.dup2()')
    def test_dup2(self):
        fp1 = open(support.TESTFN)
        fp2 = open(support.TESTFN)
        try:
            posix.dup2(fp1.fileno(), fp2.fileno())
        finally:
            fp1.close()
            fp2.close()
    @unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
    @support.requires_linux_version(2, 6, 23)
    def test_oscloexec(self):
        # O_CLOEXEC must make the fd non-inheritable
        fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
        self.addCleanup(os.close, fd)
        self.assertFalse(os.get_inheritable(fd))
    @unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
                         'test needs posix.O_EXLOCK')
    def test_osexlock(self):
        # an exclusive lock must block a second non-blocking open
        fd = os.open(support.TESTFN,
                     os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
        self.assertRaises(OSError, os.open, support.TESTFN,
                          os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
        os.close(fd)
        if hasattr(posix, "O_SHLOCK"):
            # a shared lock also blocks an exclusive non-blocking open
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
                         'test needs posix.O_SHLOCK')
    def test_osshlock(self):
        # two shared locks may coexist
        fd1 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        fd2 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        os.close(fd2)
        os.close(fd1)
        if hasattr(posix, "O_EXLOCK"):
            # but a shared lock blocks an exclusive non-blocking open
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'fstat'),
                         'test needs posix.fstat()')
    def test_fstat(self):
        # stat()/fstat() accept an int fd; a float fd must be rejected
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstat(fp.fileno()))
            self.assertTrue(posix.stat(fp.fileno()))
            self.assertRaisesRegex(TypeError,
                    'should be string, bytes or integer, not',
                    posix.stat, float(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'stat'),
                         'test needs posix.stat()')
    def test_stat(self):
        # stat() accepts str, bytes and bytearray paths ...
        self.assertTrue(posix.stat(support.TESTFN))
        self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
        self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))
        # ... and rejects None and lists with descriptive TypeErrors
        self.assertRaisesRegex(TypeError,
                'can\'t specify None for path argument',
                posix.stat, None)
        self.assertRaisesRegex(TypeError,
                'should be string, bytes or integer, not',
                posix.stat, list(support.TESTFN))
        self.assertRaisesRegex(TypeError,
                'should be string, bytes or integer, not',
                posix.stat, list(os.fsencode(support.TESTFN)))
    @unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
    def test_mkfifo(self):
        # replace TESTFN with a FIFO and verify its mode bits
        support.unlink(support.TESTFN)
        posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    @unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
                         "don't have mknod()/S_IFIFO")
    def test_mknod(self):
        # Test using mknod() to create a FIFO (the only use specified
        # by POSIX).
        support.unlink(support.TESTFN)
        mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
        try:
            posix.mknod(support.TESTFN, mode, 0)
        except OSError as e:
            # Some old systems don't allow unprivileged users to use
            # mknod(), or only support creating device nodes.
            self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
        else:
            self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    @unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()')
    @unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()')
    def test_makedev(self):
        # decompose st_dev into major/minor and recompose with makedev()
        st = posix.stat(support.TESTFN)
        dev = st.st_dev
        self.assertIsInstance(dev, int)
        self.assertGreaterEqual(dev, 0)
        major = posix.major(dev)
        self.assertIsInstance(major, int)
        self.assertGreaterEqual(major, 0)
        self.assertEqual(posix.major(dev), major)
        self.assertRaises(TypeError, posix.major, float(dev))
        self.assertRaises(TypeError, posix.major)
        self.assertRaises((ValueError, OverflowError), posix.major, -1)
        minor = posix.minor(dev)
        self.assertIsInstance(minor, int)
        self.assertGreaterEqual(minor, 0)
        self.assertEqual(posix.minor(dev), minor)
        self.assertRaises(TypeError, posix.minor, float(dev))
        self.assertRaises(TypeError, posix.minor)
        self.assertRaises((ValueError, OverflowError), posix.minor, -1)
        # round-trip: makedev(major, minor) == original device number
        self.assertEqual(posix.makedev(major, minor), dev)
        self.assertRaises(TypeError, posix.makedev, float(major), minor)
        self.assertRaises(TypeError, posix.makedev, major, float(minor))
        self.assertRaises(TypeError, posix.makedev, major)
        self.assertRaises(TypeError, posix.makedev)
    def _test_all_chown_common(self, chown_func, first_param, stat_func):
        """Common code for chown, fchown and lchown tests."""
        def check_stat(uid, gid):
            # verify ownership via the matching stat function, when available
            if stat_func is not None:
                stat = stat_func(first_param)
                self.assertEqual(stat.st_uid, uid)
                self.assertEqual(stat.st_gid, gid)
        uid = os.getuid()
        gid = os.getgid()
        # test a successful chown call
        chown_func(first_param, uid, gid)
        check_stat(uid, gid)
        # -1 leaves the corresponding id unchanged
        chown_func(first_param, -1, gid)
        check_stat(uid, gid)
        chown_func(first_param, uid, -1)
        check_stat(uid, gid)
        if uid == 0:
            # Try an amusingly large uid/gid to make sure we handle
            # large unsigned values.  (chown lets you use any
            # uid/gid you like, even if they aren't defined.)
            #
            # This problem keeps coming up:
            #   http://bugs.python.org/issue1747858
            #   http://bugs.python.org/issue4591
            #   http://bugs.python.org/issue15301
            # Hopefully the fix in 4591 fixes it for good!
            #
            # This part of the test only runs when run as root.
            # Only scary people run their tests as root.
            big_value = 2**31
            chown_func(first_param, big_value, big_value)
            check_stat(big_value, big_value)
            chown_func(first_param, -1, -1)
            check_stat(big_value, big_value)
            chown_func(first_param, uid, gid)
            check_stat(uid, gid)
        elif platform.system() in ('HP-UX', 'SunOS'):
            # HP-UX and Solaris can allow a non-root user to chown() to root
            # (issue #5113)
            raise unittest.SkipTest("Skipping because of non-standard chown() "
                                    "behavior")
        else:
            # non-root cannot chown to root, raises OSError
            self.assertRaises(OSError, chown_func, first_param, 0, 0)
            check_stat(uid, gid)
            self.assertRaises(OSError, chown_func, first_param, 0, -1)
            check_stat(uid, gid)
            if 0 not in os.getgroups():
                self.assertRaises(OSError, chown_func, first_param, -1, 0)
                check_stat(uid, gid)
        # test illegal types
        for t in str, float:
            self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
            check_stat(uid, gid)
            self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
            check_stat(uid, gid)
    @unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
    def test_chown(self):
        # raise an OSError if the file does not exist
        os.unlink(support.TESTFN)
        self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
        # re-create the file
        support.create_empty_file(support.TESTFN)
        self._test_all_chown_common(posix.chown, support.TESTFN,
                                    getattr(posix, 'stat', None))
    @unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
    def test_fchown(self):
        os.unlink(support.TESTFN)
        # re-create the file
        test_file = open(support.TESTFN, 'w')
        try:
            fd = test_file.fileno()
            self._test_all_chown_common(posix.fchown, fd,
                                        getattr(posix, 'fstat', None))
        finally:
            test_file.close()
    @unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
    def test_lchown(self):
        os.unlink(support.TESTFN)
        # create a symlink
        os.symlink(_DUMMY_SYMLINK, support.TESTFN)
        self._test_all_chown_common(posix.lchown, support.TESTFN,
                                    getattr(posix, 'lstat', None))
    @unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
    def test_chdir(self):
        # chdir to '.' works; chdir to a regular file raises
        posix.chdir(os.curdir)
        self.assertRaises(OSError, posix.chdir, support.TESTFN)
    def test_listdir(self):
        self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
    def test_listdir_default(self):
        # When listdir is called without argument,
        # it's the same as listdir(os.curdir).
        self.assertTrue(support.TESTFN in posix.listdir())
    def test_listdir_bytes(self):
        # When listdir is called with a bytes object,
        # the returned strings are of type bytes.
        self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
    @unittest.skipUnless(posix.listdir in os.supports_fd,
                         "test needs fd support for posix.listdir()")
    def test_listdir_fd(self):
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        self.addCleanup(posix.close, f)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
        # Check that the fd offset was reset (issue #13739)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
    @unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
    def test_access(self):
        # the file created in setUp must be readable
        self.assertTrue(posix.access(support.TESTFN, os.R_OK))
    @unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
    def test_umask(self):
        # umask returns the previous mask; restore it afterwards
        old_mask = posix.umask(0)
        self.assertIsInstance(old_mask, int)
        posix.umask(old_mask)
    @unittest.skipUnless(hasattr(posix, 'strerror'),
                         'test needs posix.strerror()')
    def test_strerror(self):
        self.assertTrue(posix.strerror(0))
    @unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
    def test_pipe(self):
        # pipe() yields a (read, write) fd pair
        reader, writer = posix.pipe()
        os.close(reader)
        os.close(writer)
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2(self):
        # argument validation
        self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
        self.assertRaises(TypeError, os.pipe2, 0, 0)
        # try calling with flags = 0, like os.pipe()
        r, w = os.pipe2(0)
        os.close(r)
        os.close(w)
        # test flags
        r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        self.assertFalse(os.get_inheritable(r))
        self.assertFalse(os.get_inheritable(w))
        self.assertFalse(os.get_blocking(r))
        self.assertFalse(os.get_blocking(w))
        # try reading from an empty pipe: this should fail, not block
        self.assertRaises(OSError, os.read, r, 1)
        # try a write big enough to fill-up the pipe: this should either
        # fail or perform a partial write, not block
        try:
            os.write(w, b'x' * support.PIPE_MAX_SIZE)
        except OSError:
            pass
    @support.cpython_only
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2_c_limits(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
    @unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
    def test_utime(self):
        now = time.time()
        # None and 2-tuples of numbers are valid; None inside a tuple is not
        posix.utime(support.TESTFN, None)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
        posix.utime(support.TESTFN, (int(now), int(now)))
        posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
    """Shared body for the chflags()/lchflags() regular-file tests."""
    st = os.stat(target_file)
    self.assertTrue(hasattr(st, 'st_flags'))

    # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
    flags = st.st_flags | stat.UF_IMMUTABLE
    try:
        chflags_func(target_file, flags, **kwargs)
    except OSError as err:
        if err.errno != errno.EOPNOTSUPP:
            raise
        msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
        self.skipTest(msg)
    try:
        new_st = os.stat(target_file)
        self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
        try:
            fd = open(target_file, 'w+')
        except OSError as e:
            # the immutable flag must make the file unwritable
            self.assertEqual(e.errno, errno.EPERM)
    finally:
        # always restore the original flags so cleanup can remove the file
        posix.chflags(target_file, st.st_flags)

@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
    self._test_chflags_regular_file(posix.chflags, support.TESTFN)

@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
    # on a regular file lchflags() and chflags(follow_symlinks=False)
    # behave exactly like chflags()
    self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
    self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)

@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
    """lchflags() must change the symlink's flags, not its target's."""
    testfn_st = os.stat(support.TESTFN)
    self.assertTrue(hasattr(testfn_st, 'st_flags'))
    os.symlink(support.TESTFN, _DUMMY_SYMLINK)
    self.teardown_files.append(_DUMMY_SYMLINK)
    dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

    def chflags_nofollow(path, flags):
        return posix.chflags(path, flags, follow_symlinks=False)

    for fn in (posix.lchflags, chflags_nofollow):
        # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
        flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
        try:
            fn(_DUMMY_SYMLINK, flags)
        except OSError as err:
            if err.errno != errno.EOPNOTSUPP:
                raise
            msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
            self.skipTest(msg)
        try:
            new_testfn_st = os.stat(support.TESTFN)
            new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
            # the target's flags are untouched; only the link's changed
            self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
            self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
                             new_dummy_symlink_st.st_flags)
        finally:
            fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
    """posix.environ holds str items on NT and bytes items elsewhere."""
    expected_type = str if os.name == "nt" else bytes
    for key, value in posix.environ.items():
        self.assertEqual(type(key), expected_type)
        self.assertEqual(type(value), expected_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
    """getcwd() must keep working as the cwd grows past typical limits."""
    dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
    curdir = os.getcwd()
    base_path = os.path.abspath(support.TESTFN) + '.getcwd'

    try:
        os.mkdir(base_path)
        os.chdir(base_path)
    except:
        # Just returning nothing instead of the SkipTest exception, because
        # the test results in Error in that case. Is that ok?
        # raise unittest.SkipTest("cannot create directory for testing")
        return

    # NOTE(review): the indentation of this span was mangled; this `try`
    # restores the structure in which the recursion below is reachable and
    # the cleanup in `finally` always runs (matches upstream CPython).
    try:
        def _create_and_do_getcwd(dirname, current_path_length=0):
            try:
                os.mkdir(dirname)
            except:
                raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")

            os.chdir(dirname)
            try:
                os.getcwd()
                # recurse until the accumulated path exceeds ~1 KiB
                if current_path_length < 1027:
                    _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
            finally:
                os.chdir('..')
                os.rmdir(dirname)

        _create_and_do_getcwd(dirname)
    finally:
        os.chdir(curdir)
        support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
    """The user's primary gid always appears in getgrouplist()'s result."""
    user = pwd.getpwuid(os.getuid())[0]   # pw_name
    group = pwd.getpwuid(os.getuid())[3]  # pw_gid
    self.assertIn(group, posix.getgrouplist(user, group))

@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
    """os.getgroups() agrees with the external `id -G` command."""
    with os.popen('id -G 2>/dev/null') as idg:
        groups = idg.read().strip()
        ret = idg.close()

    if ret is not None or not groups:
        raise unittest.SkipTest("need working 'id -G'")

    # Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
    if sys.platform == 'darwin':
        import sysconfig
        dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
        if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
            raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")

    # 'id -G' and 'os.getgroups()' should return the same
    # groups, ignoring order and duplicates.
    # #10822 - it is implementation defined whether posix.getgroups()
    # includes the effective gid so we include it anyway, since id -G does
    self.assertEqual(
        set([int(x) for x in groups.split()]),
        set(posix.getgroups() + [posix.getegid()]))
# tests for the posix *at functions follow

@unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
def test_access_dir_fd(self):
    """access() resolves a relative path against an open dir_fd."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
    finally:
        posix.close(f)

@unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
def test_chmod_dir_fd(self):
    """chmod() with dir_fd changes the mode of the relative target."""
    os.chmod(support.TESTFN, stat.S_IRUSR)
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
        s = posix.stat(support.TESTFN)
        # s[0] is st_mode
        self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
    finally:
        posix.close(f)

@unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
def test_chown_dir_fd(self):
    """chown() with dir_fd succeeds (chown to our own uid/gid is allowed)."""
    support.unlink(support.TESTFN)
    support.create_empty_file(support.TESTFN)
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
    finally:
        posix.close(f)

@unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
def test_stat_dir_fd(self):
    """stat() with dir_fd matches plain stat(); bad dir_fd values raise."""
    support.unlink(support.TESTFN)
    with open(support.TESTFN, 'w') as outfile:
        outfile.write("testline\n")

    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        s1 = posix.stat(support.TESTFN)
        s2 = posix.stat(support.TESTFN, dir_fd=f)
        self.assertEqual(s1, s2)
        # dir_fd=None means "no dir_fd"
        s2 = posix.stat(support.TESTFN, dir_fd=None)
        self.assertEqual(s1, s2)
        self.assertRaisesRegex(TypeError, 'should be integer, not',
                               posix.stat, support.TESTFN, dir_fd=posix.getcwd())
        self.assertRaisesRegex(TypeError, 'should be integer, not',
                               posix.stat, support.TESTFN, dir_fd=float(f))
        self.assertRaises(OverflowError,
                          posix.stat, support.TESTFN, dir_fd=10**20)
    finally:
        posix.close(f)
@unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
def test_utime_dir_fd(self):
    """utime() with dir_fd: valid forms succeed, malformed tuples raise."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        now = time.time()
        posix.utime(support.TESTFN, None, dir_fd=f)
        posix.utime(support.TESTFN, dir_fd=f)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
        posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
        posix.utime(support.TESTFN, (now, now), dir_fd=f)
        posix.utime(support.TESTFN,
                    (int(now), int((now - int(now)) * 1e9)), dir_fd=f)
        posix.utime(support.TESTFN, dir_fd=f,
                    times=(int(now), int((now - int(now)) * 1e9)))

        # try dir_fd and follow_symlinks together
        if os.utime in os.supports_follow_symlinks:
            try:
                posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
            except ValueError:
                # whoops! using both together not supported on this platform.
                pass
    finally:
        posix.close(f)

@unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
def test_link_dir_fd(self):
    """link() with src_dir_fd/dst_dir_fd creates a hard link."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
        # should have same inodes (index 1 of a stat result is st_ino)
        self.assertEqual(posix.stat(support.TESTFN)[1],
                         posix.stat(support.TESTFN + 'link')[1])
    finally:
        posix.close(f)
        support.unlink(support.TESTFN + 'link')

@unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
def test_mkdir_dir_fd(self):
    """mkdir() with dir_fd creates the directory relative to the fd."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
        posix.stat(support.TESTFN + 'dir') # should not raise exception
    finally:
        posix.close(f)
        support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
                     "test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
def test_mknod_dir_fd(self):
    # Test using mknodat() to create a FIFO (the only use specified
    # by POSIX).
    support.unlink(support.TESTFN)
    mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
    except OSError as e:
        # Some old systems don't allow unprivileged users to use
        # mknod(), or only support creating device nodes.
        self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
    else:
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    finally:
        posix.close(f)
@unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
def test_open_dir_fd(self):
    """open() with dir_fd reads the same file content as a plain open()."""
    support.unlink(support.TESTFN)
    with open(support.TESTFN, 'w') as outfile:
        outfile.write("testline\n")
    a = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        # Fix: if this open() raised, `a` used to leak because both fds
        # were only closed after a successful second open.
        b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
        try:
            res = posix.read(b, 9).decode(encoding="utf-8")
            self.assertEqual("testline\n", res)
        finally:
            posix.close(b)
    finally:
        posix.close(a)
@unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
def test_readlink_dir_fd(self):
    """readlink() with dir_fd matches readlink() on the absolute path."""
    os.symlink(support.TESTFN, support.TESTFN + 'link')
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        self.assertEqual(posix.readlink(support.TESTFN + 'link'),
                         posix.readlink(support.TESTFN + 'link', dir_fd=f))
    finally:
        support.unlink(support.TESTFN + 'link')
        posix.close(f)

@unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
def test_rename_dir_fd(self):
    """rename() with src_dir_fd/dst_dir_fd moves the file as expected."""
    support.unlink(support.TESTFN)
    support.create_empty_file(support.TESTFN + 'ren')
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
    except:
        # undo the partial state before re-raising
        posix.rename(support.TESTFN + 'ren', support.TESTFN)
        raise
    else:
        posix.stat(support.TESTFN) # should not raise exception
    finally:
        posix.close(f)

@unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
def test_symlink_dir_fd(self):
    """symlink() with dir_fd creates a link pointing at TESTFN."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
        self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
    finally:
        posix.close(f)
        support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
def test_unlink_dir_fd(self):
    """unlink() with dir_fd really removes the relative target."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    support.create_empty_file(support.TESTFN + 'del')
    posix.stat(support.TESTFN + 'del') # should not raise exception
    try:
        posix.unlink(support.TESTFN + 'del', dir_fd=f)
    except:
        support.unlink(support.TESTFN + 'del')
        raise
    else:
        # Fix: this asserted on TESTFN + 'link' (a file this test never
        # creates), so it passed vacuously; check the file we deleted.
        self.assertRaises(OSError, posix.stat, support.TESTFN + 'del')
    finally:
        posix.close(f)
@unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
def test_mkfifo_dir_fd(self):
    """mkfifo() with dir_fd creates a FIFO relative to the open fd."""
    support.unlink(support.TESTFN)
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    finally:
        posix.close(f)
# skip decorators shared by the scheduling tests below
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
                                       "don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
                                              "don't have sched affinity support")

@requires_sched_h
def test_sched_yield(self):
    # This has no error conditions (at least on Linux).
    posix.sched_yield()

@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
                     "requires sched_get_priority_max()")
def test_sched_priority(self):
    """min/max priorities are sane ints; invalid policies raise OSError."""
    # Round-robin usually has interesting priorities.
    pol = posix.SCHED_RR
    lo = posix.sched_get_priority_min(pol)
    hi = posix.sched_get_priority_max(pol)
    self.assertIsInstance(lo, int)
    self.assertIsInstance(hi, int)
    self.assertGreaterEqual(hi, lo)
    # OSX evidently just returns 15 without checking the argument.
    if sys.platform != "darwin":
        self.assertRaises(OSError, posix.sched_get_priority_min, -23)
        self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
    """Round-trip sched_get/setscheduler() and sched_get/setparam()."""
    possible_schedulers = [sched for name, sched in posix.__dict__.items()
                           if name.startswith("SCHED_")]
    mine = posix.sched_getscheduler(0)
    self.assertIn(mine, possible_schedulers)
    try:
        parent = posix.sched_getscheduler(os.getppid())
    except OSError as e:
        # we may not be allowed to inspect the parent process
        if e.errno != errno.EPERM:
            raise
    else:
        self.assertIn(parent, possible_schedulers)
    self.assertRaises(OSError, posix.sched_getscheduler, -1)
    self.assertRaises(OSError, posix.sched_getparam, -1)
    param = posix.sched_getparam(0)
    self.assertIsInstance(param.sched_priority, int)

    # POSIX states that calling sched_setparam() or sched_setscheduler() on
    # a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
    # is implementation-defined: NetBSD and FreeBSD can return EINVAL.
    if not sys.platform.startswith(('freebsd', 'netbsd')):
        try:
            posix.sched_setscheduler(0, mine, param)
            posix.sched_setparam(0, param)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        self.assertRaises(OSError, posix.sched_setparam, -1, param)

    self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
    self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
    self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
    param = posix.sched_param(None)
    self.assertRaises(TypeError, posix.sched_setparam, 0, param)
    large = 214748364700
    param = posix.sched_param(large)
    self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
    param = posix.sched_param(sched_priority=-large)
    self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
    """The RR time quantum is a float in a plausible range."""
    try:
        interval = posix.sched_rr_get_interval(0)
    except OSError as e:
        # This likely means that sched_rr_get_interval is only valid for
        # processes with the SCHED_RR scheduler in effect.
        if e.errno != errno.EINVAL:
            raise
        self.skipTest("only works on SCHED_RR processes")
    self.assertIsInstance(interval, float)
    # Reasonable constraints, I think.
    self.assertGreaterEqual(interval, 0.)
    self.assertLess(interval, 1.)

@requires_sched_affinity
def test_sched_getaffinity(self):
    """getaffinity() returns a non-empty set of small non-negative ints."""
    mask = posix.sched_getaffinity(0)
    self.assertIsInstance(mask, set)
    self.assertGreaterEqual(len(mask), 1)
    self.assertRaises(OSError, posix.sched_getaffinity, -1)
    for cpu in mask:
        self.assertIsInstance(cpu, int)
        self.assertGreaterEqual(cpu, 0)
        self.assertLess(cpu, 1 << 32)

@requires_sched_affinity
def test_sched_setaffinity(self):
    """setaffinity() round-trips; invalid masks and pids raise."""
    mask = posix.sched_getaffinity(0)
    if len(mask) > 1:
        # Empty masks are forbidden
        mask.pop()
    posix.sched_setaffinity(0, mask)
    self.assertEqual(posix.sched_getaffinity(0), mask)
    self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
    self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
    self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
    self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
    # check presence of major RTLD_* constants
    posix.RTLD_LAZY
    posix.RTLD_NOW
    posix.RTLD_GLOBAL
    posix.RTLD_LOCAL

@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
                     "test needs an OS that reports file holes")
def test_fs_holes(self):
    # Even if the filesystem doesn't report holes,
    # if the OS supports it the SEEK_* constants
    # will be defined and will have a consistent
    # behaviour:
    # os.SEEK_DATA = current position
    # os.SEEK_HOLE = end of file position
    with open(support.TESTFN, 'r+b') as fp:
        fp.write(b"hello")
        fp.flush()
        size = fp.tell()
        fno = fp.fileno()
        try:
            for i in range(size):
                self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
                self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
            # seeking past EOF must fail for both constants
            self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
            self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
        except OSError:
            # Some OSs claim to support SEEK_HOLE/SEEK_DATA
            # but it is not true.
            # For instance:
            # http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
            raise unittest.SkipTest("OSError raised!")
def test_path_error2(self):
    """
    Test functions that call path_error2(), providing two filenames in their exceptions.
    """
    for name in ("rename", "replace", "link"):
        function = getattr(os, name, None)
        if function is None:
            continue

        for dst in ("noodly2", support.TESTFN):
            try:
                function('doesnotexistfilename', dst)
            except OSError as e:
                # both the source and destination name must appear
                self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
                break
        else:
            self.fail("No valid path_error2() test for os." + name)
def test_path_with_null_character(self):
    """A NUL character embedded in a str path must raise ValueError."""
    fn = support.TESTFN
    fn_with_NUL = fn + '\0'
    self.addCleanup(support.unlink, fn)
    support.unlink(fn)
    fd = None
    try:
        with self.assertRaises(ValueError):
            fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
    finally:
        if fd is not None:
            os.close(fd)
    # the failed calls must not have created anything
    self.assertFalse(os.path.exists(fn))
    self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
    self.assertFalse(os.path.exists(fn))
    open(fn, 'wb').close()
    self.assertRaises(ValueError, os.stat, fn_with_NUL)

def test_path_with_null_byte(self):
    """Same as test_path_with_null_character, but with a bytes path."""
    fn = os.fsencode(support.TESTFN)
    fn_with_NUL = fn + b'\0'
    self.addCleanup(support.unlink, fn)
    support.unlink(fn)
    fd = None
    try:
        with self.assertRaises(ValueError):
            fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
    finally:
        if fd is not None:
            os.close(fd)
    # the failed calls must not have created anything
    self.assertFalse(os.path.exists(fn))
    self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
    self.assertFalse(os.path.exists(fn))
    open(fn, 'wb').close()
    self.assertRaises(ValueError, os.stat, fn_with_NUL)
class PosixGroupsTester(unittest.TestCase):
    """Tests that modify the process's supplementary groups (root only)."""

    def setUp(self):
        if posix.getuid() != 0:
            raise unittest.SkipTest("not enough privileges")
        if not hasattr(posix, 'getgroups'):
            raise unittest.SkipTest("need posix.getgroups")
        if sys.platform == 'darwin':
            raise unittest.SkipTest("getgroups(2) is broken on OSX")
        # remember the current groups so tearDown can restore them
        self.saved_groups = posix.getgroups()

    def tearDown(self):
        if hasattr(posix, 'setgroups'):
            posix.setgroups(self.saved_groups)
        elif hasattr(posix, 'initgroups'):
            name = pwd.getpwuid(posix.getuid()).pw_name
            posix.initgroups(name, self.saved_groups[0])

    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs posix.initgroups()")
    def test_initgroups(self):
        # find missing group
        g = max(self.saved_groups or [0]) + 1
        name = pwd.getpwuid(posix.getuid()).pw_name
        posix.initgroups(name, g)
        self.assertIn(g, posix.getgroups())

    @unittest.skipUnless(hasattr(posix, 'setgroups'),
                         "test needs posix.setgroups()")
    def test_setgroups(self):
        for groups in [[0], list(range(16))]:
            posix.setgroups(groups)
            self.assertListEqual(groups, posix.getgroups())
def test_main():
    """Run both posix test suites, always reaping child processes after."""
    suites = (PosixTester, PosixGroupsTester)
    try:
        support.run_unittest(*suites)
    finally:
        support.reap_children()

if __name__ == '__main__':
    test_main()
| {
"content_hash": "bcaa162b8976a444d292cf35ccc86cde",
"timestamp": "",
"source": "github",
"line_count": 1249,
"max_line_length": 122,
"avg_line_length": 41.30264211369095,
"alnum_prop": 0.583616027293698,
"repo_name": "ms-iot/python",
"id": "77e5b0c4fdf6fba9059cf6e0d9ce0d0799f3b459",
"size": "51587",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_posix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "481852"
},
{
"name": "Batchfile",
"bytes": "35616"
},
{
"name": "C",
"bytes": "15555469"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "726292"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "M4",
"bytes": "223087"
},
{
"name": "Makefile",
"bytes": "197108"
},
{
"name": "Objective-C",
"bytes": "2098686"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "24948876"
},
{
"name": "Roff",
"bytes": "254942"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from auto_gen import *
from workflow import DBWorkflow
from vistrail import DBVistrail
from log import DBLog
from id_scope import IdScope
| {
"content_hash": "03df4caed3efc412883f40398cac858f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 31,
"avg_line_length": 27.6,
"alnum_prop": 0.8333333333333334,
"repo_name": "CMUSV-VisTrails/WorkflowRecommendation",
"id": "51ea32492afd6d98bd2c7948a90ac590290963d8",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/db/versions/v0_9_4/domain/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "PHP",
"bytes": "48730"
},
{
"name": "Python",
"bytes": "12760768"
},
{
"name": "Shell",
"bytes": "33785"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0003: adds the VideoCode model."""

    def forwards(self, orm):
        # Adding model 'VideoCode'
        db.create_table(u'video_videocode', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['video.Category'])),
        ))
        db.send_create_signal(u'video', ['VideoCode'])

    def backwards(self, orm):
        # Deleting model 'VideoCode'
        db.delete_table(u'video_videocode')

    # Frozen ORM snapshot South uses to build the `orm` argument above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'video.category': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Category'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        u'video.tag': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'video.video': {
            'Meta': {'ordering': "('-publish_date',)", 'object_name': 'Video'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'author_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['video.Category']"}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'num_likes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['video.Tag']", 'null': 'True', 'blank': 'True'}),
            'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'videos'", 'to': u"orm['auth.User']"}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'video.videocode': {
            'Meta': {'object_name': 'VideoCode'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['video.Category']"}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['video']
"content_hash": "7c0e0f3b3e8e79ccfe0f73ba635cfddd",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 195,
"avg_line_length": 72.95833333333333,
"alnum_prop": 0.5466590519703027,
"repo_name": "OlegPshenichniy/upfavor-mezzanine",
"id": "2e2594346f9c836c3c376dbeab32f17064308af4",
"size": "8779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video/migrations/0003_auto__add_videocode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "306623"
},
{
"name": "JavaScript",
"bytes": "255524"
},
{
"name": "Python",
"bytes": "75117"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals

import os

from django.core.files.storage import FileSystemStorage
from django.views.generic import TemplateView

from yepes.apps import apps
from yepes.views import ListView
# Resolved through the yepes app registry rather than imported directly.
Configuration = apps.get_model('thumbnails', 'Configuration')
SourceFile = apps.get_class('thumbnails.files', 'SourceFile')
class TestMixin(object):
    """Mixin that injects a known source image into the template context."""

    def get_context_data(self, **kwargs):
        context = super(TestMixin, self).get_context_data(**kwargs)
        context['source'] = self.get_source_file()
        return context

    def get_source_file(self):
        # Copy the bundled test image from the app's static directory into
        # media storage (refreshing it when the bundled copy is newer) and
        # return it wrapped in a SourceFile.
        app_config = apps.get_app_config('thumbnails')
        app_static_dir = os.path.join(app_config.path, 'static')
        app_storage = FileSystemStorage(app_static_dir)
        media_storage = FileSystemStorage()
        path = 'thumbnails/wolf.jpg'
        if not media_storage.exists(path):
            media_storage.save(path, app_storage.open(path))
        elif app_storage.modified_time(path) > media_storage.modified_time(path):
            # bundled image changed since it was copied: replace the copy
            media_storage.delete(path)
            media_storage.save(path, app_storage.open(path))
        return SourceFile(media_storage.open(path), path, media_storage)
class ConfigurationsTestView(TestMixin, ListView):
    """List every stored thumbnail Configuration with the test image."""
    model = Configuration
    template_name = 'thumbnails/configurations.html'
class OptionsTestView(TestMixin, TemplateView):
    """Render the thumbnail options test page with the test image."""
    template_name = 'thumbnails/options.html'
| {
"content_hash": "208fde6c44fd2c471e55e55ce0681724",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 32.31818181818182,
"alnum_prop": 0.7011251758087201,
"repo_name": "samuelmaudo/yepes",
"id": "3b59f458385ac0eb41585efe7423fd40263bcb76",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yepes/contrib/thumbnails/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1485"
},
{
"name": "CSS",
"bytes": "2805"
},
{
"name": "HTML",
"bytes": "18543"
},
{
"name": "JavaScript",
"bytes": "56039"
},
{
"name": "Python",
"bytes": "2415982"
}
],
"symlink_target": ""
} |
import datetime
from jsonfield import JSONField
from django.db import models
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.core.urlresolvers import reverse
class UserProfile(models.Model):
    """Per-user extension record linking a Django ``User`` to a role group."""

    user = models.OneToOneField(User)
    group = models.CharField(max_length=255)  # role code, e.g. 'PP'/'PS' set for parents

    def __unicode__(self):
        return self.user.username
class Session(models.Model):
    """A camp session (name, location and type)."""

    i_session = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    location = models.CharField(max_length=255)
    session_type = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name

    def parse_fields(self, data):
        """Populate this session from an imported record.

        ``data`` is a mapping keyed by spreadsheet column headers; missing
        keys fall back to '' via ``dict.get``. The except clause only fires
        when ``data`` is not a mapping at all.
        """
        try:
            self.name = data.get('Session name', '')
            self.location = data.get('Session location', '')
            self.session_type = data.get('Session type', '')
        except (KeyError, TypeError) as ex:
            # Best effort: report the problem and keep whatever was parsed.
            print ex
class Parent(models.Model):
    """A parent/guardian account, linked one-to-one to a ``UserProfile``."""

    GENDER_TYPE = (('MALE', 'MALE'), ('FEMALE', 'FEMALE'))
    i_parent = models.AutoField(primary_key=True)
    user = models.OneToOneField(UserProfile)
    full_name = models.CharField(max_length=255)
    gender = models.CharField(
        max_length=6, blank=True, null=True, choices=GENDER_TYPE)
    email_address = models.EmailField(max_length=255, unique=True)
    cell_phone_number = models.CharField(max_length=255, blank=True, null=True)
    business_phone_number = models.CharField(
        max_length=255, blank=True, null=True)
    home_phone_number = models.CharField(max_length=255, blank=True, null=True)
    secret_code = models.CharField(max_length=255, blank=True, null=True)

    def __unicode__(self):
        return self.full_name

    def create_parent_by_fields(self, data, level):
        """Populate and save this parent from an imported record.

        ``data`` maps spreadsheet column headers to values; ``level`` selects
        the column prefix: 'S' reads the "Secondary P/G" columns (profile
        group 'PS'), anything else reads "Primary P/G" (group 'PP').

        Side effects on success: creates the backing ``User`` (username,
        password and email all set to the email address), the
        ``UserProfile``, saves this ``Parent``, and creates an 'Initial'
        ``Funds`` record from the 'Initial_Funds' column.

        Returns True on success, False if any step failed.
        """
        status = True
        try:
            user_profile = UserProfile()
            if level == 'S':
                full_name = data.get('Secondary P/G: Name', '')
                gender = data.get('Secondary P/G: Gender', '')
                email_address = data.get('Secondary P/G: Email address', '')
                home_phone_number = data.get(
                    'Secondary P/G: Home phone number', '')
                cell_phone_number = data.get(
                    'Secondary P/G: Cell phone number', '')
                business_phone_number = data.get(
                    'Secondary P/G: Business phone number', '')
                user_profile.group = 'PS'
            else:
                full_name = data.get('Primary P/G: Name', '')
                gender = data.get('Primary P/G: Gender', '')
                email_address = data.get('Primary P/G: Email address', '')
                home_phone_number = data.get(
                    'Primary P/G: Home phone number', '')
                cell_phone_number = data.get(
                    'Primary P/G: Cell phone number', '')
                business_phone_number = data.get(
                    'Primary P/G: Business phone number', '')
                user_profile.group = 'PP'
            self.full_name = full_name
            self.gender = gender
            self.email_address = email_address
            self.home_phone_number = home_phone_number
            self.cell_phone_number = cell_phone_number
            self.business_phone_number = business_phone_number
            try:
                print "===============", full_name
                user = User.objects.create_user(
                    username=email_address, password=email_address, email=email_address)
                user.save()
                user_profile.user = user
                user_profile.save()
                self.user = user_profile
                self.save()
                # Seed the parent's account with the imported initial balance.
                funds_obj = Funds()
                funds_obj.parent = self
                funds_obj.amount = float(data.get('Initial_Funds', 0))
                funds_obj.remaining_amount = float(
                    data.get('Initial_Funds', 0))
                funds_obj.currency = 'USD'
                funds_obj.name = 'Initial'
                funds_obj.recieved_time = datetime.datetime.now()
                funds_obj.save()
            except (ValueError, IntegrityError) as ex:
                # Bad number format or duplicate user/email: report failure.
                # NOTE(review): the failure reason is silently dropped here.
                status = False
        except KeyError as ex:
            print ex
            status = False
        return status
class Funds(models.Model):
    """A pool of money deposited into a parent's account."""

    i_funds = models.AutoField(primary_key=True)
    parent = models.ForeignKey(Parent)
    name = models.CharField(max_length=255)
    amount = models.FloatField()  # original deposit amount
    currency = models.CharField(max_length=255)
    remaining_amount = models.FloatField()  # balance left after purchases
    is_active = models.BooleanField(default=True)
    recieved_time = models.DateTimeField()  # sic: misspelled name kept for schema compatibility

    def __unicode__(self):
        return self.parent.full_name
class Cadet(models.Model):
    """A camp participant, linked to one or two ``Parent`` records."""

    GENDER_TYPE = (('MALE', 'MALE'), ('FEMALE', 'FEMALE'))
    sessions = models.ForeignKey(Session, blank=True, null=True, editable=False)
    i_cadet = models.AutoField(primary_key=True)
    full_name = models.CharField(max_length=255)
    age_today = models.IntegerField()
    gender = models.CharField(
        max_length=6, blank=True, null=True, choices=GENDER_TYPE)
    city = models.CharField(max_length=255, blank=True, null=True)
    state = models.CharField(max_length=255, blank=True, null=True)
    country = models.CharField(max_length=255, blank=True, null=True)
    zip_code = models.CharField(max_length=255, blank=True, null=True)
    email_address = models.EmailField(max_length=255, blank=True, null=True)
    age_session = models.IntegerField(blank=True, null=True)
    primary_parent = models.ForeignKey(Parent, related_name='primary_parent',
                                       verbose_name="Select Cadet's Parent")
    address = models.TextField(blank=True, null=True, verbose_name="Address")
    contact_number = models.IntegerField(blank=True, null=True)
    secondary_parent = models.ForeignKey(
        Parent, related_name='secondary_parent', blank=True, null=True, editable=False)
    usac_training_program = models.CharField(
        max_length=255, blank=True, null=True)
    goal = models.TextField(
        blank=True, null=True, verbose_name="Why join XMC Camp?")

    def __unicode__(self):
        return self.full_name

    def parse_fields(self, data):
        """Populate this cadet from an imported record.

        ``data`` maps spreadsheet column headers to values. Returns True on
        success, False if the record could not be parsed/saved.
        """
        status = True
        try:
            self.full_name = data.get('Participant: Name', '')
            self.age_today = data.get('Participant: Age as of today', '')
            self.gender = data.get('Participant: Gender', '')
            self.address = data.get('Participant: Address', '')
            self.age_session = data.get('Participant: Age as of session', '')
            self.city = data.get('Participant: City', '')
            self.country = data.get('Participant: Country', '')
            # NOTE(review): ``dob`` is not declared as a model field above,
            # so this assignment is not persisted -- confirm intent.
            self.dob = data.get('Participant: Date of birth', '')
            self.email_address = data.get('Participant: Email address', '')
            self.contact_number = data.get(
                'Participant: Home phone number', '')
            self.state = data.get('Participant: State', '')
            self.usac_training_program = data.get(
                'Participant: USAC Training Program', '')
            self.zip_code = data.get('Participant: Zip code', '')
            self.goal = data.get(
                'Participant: Please explain what you would like to have your son or daugher accomplish while at camp? Explain any special situations or other information the staff should know about your child.', '')
        except KeyError as ex:
            print ex
            status = False
        except IntegrityError:
            status = False
        return status
class PXStaff(models.Model):
    """A PX (camp store) staff member or administrator account."""

    ACCOUNT_TYPE = (('AD', 'Adminstrator'), ('CT', 'PX Staff'))
    i_px_manager = models.AutoField(primary_key=True)
    user = models.OneToOneField(UserProfile)
    full_name = models.CharField(max_length=255)
    email_address = models.EmailField(max_length=255, unique=True)
    password = models.CharField(max_length=255, blank=True, null=True)  # NOTE(review): stored alongside auth.User -- confirm why a plain password field exists
    account_type = models.CharField(max_length=2, choices=ACCOUNT_TYPE)

    def __unicode__(self):
        return self.full_name
class Product(models.Model):
    """An item sold at the PX store."""

    i_product = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, unique=True)
    description = models.TextField(blank=True, null=True)
    cost_per_unit = models.FloatField()

    def get_absolute_url(self):
        # After create/update, redirect back to the product list.
        return reverse('product_list')

    def __unicode__(self):
        return self.name
class SubTransaction(models.Model):
    """A single product line item within a ``CompleteTransaction``."""

    product = models.ForeignKey(Product)
    quantity = models.IntegerField()
    cost = models.FloatField()  # total cost for this line (quantity * unit price)

    def __unicode__(self):
        return 'Product Purchased %s with %d quantity' % (self.product.name, self.quantity)
class CompleteTransaction(models.Model):
    """A checkout grouping one or more ``SubTransaction`` line items by a cadet."""

    i_transaction = models.AutoField(primary_key=True)
    cadet = models.ForeignKey(Cadet)
    transaction = models.ManyToManyField(SubTransaction)
    total_cost = models.FloatField()
    created_time = models.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        # Bug fix: previously returned the Cadet model instance itself, which
        # is not a text object as __unicode__ requires; return the cadet's
        # display name instead.
        return self.cadet.full_name
class GeneralSettings(models.Model):
    """Singleton-style key/value application settings stored as JSON."""

    i_settings = models.AutoField(primary_key=True)
    configuration = JSONField()

    def __unicode__(self):
        return "General Settings"
class RevertTransaction(models.Model):
    """A request to undo a ``SubTransaction``, subject to approval."""

    i_trans_revert = models.AutoField(primary_key=True)
    transaction = models.ForeignKey(SubTransaction)
    created_by = models.ForeignKey(UserProfile, related_name='created_by')
    approved_by = models.ForeignKey(
        UserProfile, related_name='approved_by', blank=True, null=True)
    approved_time = models.DateTimeField(default=datetime.datetime.now)
    approval_status = models.BooleanField(default=False)  # True once approved

    def __unicode__(self):
        return 'Replica of Transaction %s' % self.transaction
class StickyNotes(models.Model):
    """A free-form staff note attached to a parent and/or a cadet."""

    i_note = models.AutoField(primary_key=True)
    i_parent = models.ForeignKey(Parent, blank=True, null=True, verbose_name='Please select Parent')
    i_cadet = models.ForeignKey(Cadet, blank=True, null=True, verbose_name='Please select Cadet')
    issued_by = models.ForeignKey(UserProfile, editable=False)
    issued_time = models.DateTimeField(default=datetime.datetime.now, editable=False)
    remarks = models.TextField()

    def __unicode__(self):
        return 'Note Issued to %s by %s' % (self.i_parent, self.issued_by)
| {
"content_hash": "afc56e8a5703ea3acfa6d29085d4ea5a",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 217,
"avg_line_length": 39.330827067669176,
"alnum_prop": 0.6250238960045881,
"repo_name": "oskgeek/xmccamp",
"id": "ec89c1e1635695908a773cd6ed263e1ac19d372d",
"size": "10462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmccamp/controller/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89842"
},
{
"name": "HTML",
"bytes": "638635"
},
{
"name": "JavaScript",
"bytes": "210838"
},
{
"name": "Python",
"bytes": "56984"
}
],
"symlink_target": ""
} |
class FunctionApproximator(object):
    """Map states and actions to values using a parametrised function.

    Subclasses implement ``summed_thetas``; ``thetas`` holds the learned
    parameters and ``features_shape`` describes the feature layout.
    """

    def __init__(self, n_actions: int) -> None:
        super(FunctionApproximator, self).__init__()
        self.n_actions = n_actions
        self.thetas = []
        # Bug fix: ``(0)`` is just the int 0 -- a one-element tuple needs a
        # trailing comma to actually describe a shape.
        self.features_shape = (0,)

    def summed_thetas(self, state, action):
        """Return the sum of the parameters active for (state, action).

        Must be overridden by concrete approximators.
        """
        raise NotImplementedError()

    def set_thetas(self, addition):
        """Extend (not replace) the parameter list with ``addition``."""
        self.thetas += addition
| {
"content_hash": "235b5581eccae6381009f666fb69cb48",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 33.23076923076923,
"alnum_prop": 0.6180555555555556,
"repo_name": "arnomoonens/DeepRL",
"id": "2c642a68aabe9e8e475910e8e20ae5904e88634e",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yarll/functionapproximation/function_approximator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6107"
},
{
"name": "Python",
"bytes": "236593"
}
],
"symlink_target": ""
} |
"""
Goes over a list of docker images, downloads them in a VM,
running boot2docker then downloads a script and executes
it on them. Filters out the results and saves them in a file.
In this version, both the address of the VM and of the script
are hardcoded on local IPs.
"""
from subprocess import Popen, PIPE, TimeoutExpired
import argparse
import datetime
import locale
import os
import psutil
import subprocess
import threading
import time
def resetVm():
    """Reset the boot2docker VM to a clean state.

    Runs the reset script with a 30-second timeout. If the script hangs,
    kills it, force-kills any lingering VirtualBox processes, and retries
    recursively before sleeping briefly.
    """
    proc = Popen("./reset_boot2docker.sh", stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
    try:
        proc.wait(30)
    except TimeoutExpired:
        proc.kill()
        log('TIMEOUT reset boot2docker')
        for process in psutil.process_iter():
            # Bug fix: psutil >= 2.0 exposes ``name`` as a method; comparing
            # the bound method object to a string was always False, so the
            # VirtualBox process was never actually killed.
            if process.name() == 'VirtualBox':
                process.kill()
        resetVm()  # retry on a clean slate
        time.sleep(2)
    log('END RESET boot2docker')
def getImageVulns(image):
    """Run the bashcheck script inside *image* and return the lines of its
    output that report a vulnerability."""
    # Note that the IP was set by boot2docker.
    docker_env = {"DOCKER_HOST": "tcp://192.168.59.105:2375"}
    proc = Popen(["docker", "run", image, "/bin/bash", "-c",
                  "apt-get update && apt-get install wget -y && wget http://192.168.59.3/bashcheck -O /tmp/bashcheck && chmod +x /tmp/bashcheck && /tmp/bashcheck"], env=docker_env, stdout=PIPE, stderr=PIPE)
    try:
        # Allow up to 20 minutes for apt-get + download + check.
        outs, errs = proc.communicate(timeout=60 * 20)
    except TimeoutExpired:
        log('TIMEOUT processing %s' % image)
        proc.kill()
        outs, errs = proc.communicate()
    encoding = locale.getdefaultlocale()[1]
    decoded_lines = (raw.decode(encoding) for raw in outs.splitlines())
    return [text for text in decoded_lines if 'Vulnerable' in text]
def log(message):
    """Print *message* prefixed with the current wall-clock timestamp."""
    timestamp = datetime.datetime.now()
    print('[{0}] {1}'.format(timestamp, message))
# Command-line interface: an output file plus either a file listing image
# names or a single image name.
parser = argparse.ArgumentParser(
    description='Tests shellshock on docker images (debian based ONLY).')
parser.add_argument('output_file', metavar='output_file',
                    help='File to write output to')
inputs = parser.add_mutually_exclusive_group(required=True)
inputs.add_argument('--image_file', metavar='image_file',
                    help='File to read a list of images from')
inputs.add_argument('--image', metavar='image',
                    help='A single image')
args = parser.parse_args()
with open(args.output_file, 'w') as output:
    # Build the list of images to test from the file and/or single flag.
    images = []
    if args.image_file:
        with open(args.image_file, 'r') as image_file:
            for line in image_file:
                images.append(line)
    if args.image:
        images.append(args.image)
    image_counter = 0
    for image in images:
        image = image.strip().strip('/')  # removes images ending with /
        log('Processed %s images' % image_counter)
        image_counter += 1
        # Reset the VM before every image so each test starts clean.
        log('START RESET boot2docker')
        resetVm()
        log('Sleeping 45 seconds to avoid triggering DOS protections')
        time.sleep(45)
        log('START TEST %s' % image)
        vulns = getImageVulns(image)
        encoding = locale.getdefaultlocale()[1]
        for vuln in vulns:
            log(vuln)
            # Output is in "CSV" format :-)
            output.write(image + ',' + vuln + '\n')
        # Flush after each image so a crash loses at most one result.
        output.flush()
        log('END TEST %s' % image)
| {
"content_hash": "caf48eef702749bde453d838ac390021",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 190,
"avg_line_length": 31.821052631578947,
"alnum_prop": 0.6705259675818723,
"repo_name": "paradoxengine/docker-tester",
"id": "da67abe42fd9a677d74d9dfd08e6622594b0fd3a",
"size": "3042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "execute_docker_image.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3042"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
} |
"""
This implements the high-level functions for SLIP-39, also called "Shamir Backup".
See https://github.com/satoshilabs/slips/blob/master/slip-0039.md.
"""
import hmac
from collections import defaultdict
from hashlib import pbkdf2_hmac
from typing import Dict, Iterable, List, Optional, Set, Tuple
from .i18n import _
from .mnemonic import Wordlist
Indices = Tuple[int, ...]
MnemonicGroups = Dict[int, Tuple[int, Set[Tuple[int, bytes]]]]
"""
## Simple helpers
"""
_RADIX_BITS = 10
"""The length of the radix in bits."""
def _bits_to_bytes(n: int) -> int:
return (n + 7) // 8
def _bits_to_words(n: int) -> int:
    """Return the number of radix-``2**_RADIX_BITS`` words holding *n* bits."""
    # Ceiling division expressed via negated floor division.
    return -(-n // _RADIX_BITS)
def _xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
"""
## Constants
"""
_ID_LENGTH_BITS = 15
"""The length of the random identifier in bits."""
_ITERATION_EXP_LENGTH_BITS = 5
"""The length of the iteration exponent in bits."""
_ID_EXP_LENGTH_WORDS = _bits_to_words(_ID_LENGTH_BITS + _ITERATION_EXP_LENGTH_BITS)
"""The length of the random identifier and iteration exponent in words."""
_CHECKSUM_LENGTH_WORDS = 3
"""The length of the RS1024 checksum in words."""
_DIGEST_LENGTH_BYTES = 4
"""The length of the digest of the shared secret in bytes."""
_CUSTOMIZATION_STRING = b"shamir"
"""The customization string used in the RS1024 checksum and in the PBKDF2 salt."""
_GROUP_PREFIX_LENGTH_WORDS = _ID_EXP_LENGTH_WORDS + 1
"""The length of the prefix of the mnemonic that is common to a share group."""
_METADATA_LENGTH_WORDS = _ID_EXP_LENGTH_WORDS + 2 + _CHECKSUM_LENGTH_WORDS
"""The length of the mnemonic in words without the share value."""
_MIN_STRENGTH_BITS = 128
"""The minimum allowed entropy of the master secret."""
_MIN_MNEMONIC_LENGTH_WORDS = _METADATA_LENGTH_WORDS + _bits_to_words(_MIN_STRENGTH_BITS)
"""The minimum allowed length of the mnemonic in words."""
_BASE_ITERATION_COUNT = 10000
"""The minimum number of iterations to use in PBKDF2."""
_ROUND_COUNT = 4
"""The number of rounds to use in the Feistel cipher."""
_SECRET_INDEX = 255
"""The index of the share containing the shared secret."""
_DIGEST_INDEX = 254
"""The index of the share containing the digest of the shared secret."""
"""
# External API
"""
class Slip39Error(RuntimeError):
    """Raised for any SLIP-39 encoding, decoding or recovery failure."""
    pass
class Share:
    """A single decoded SLIP-39 mnemonic share and its parsed metadata."""

    def __init__(
        self,
        identifier: int,
        iteration_exponent: int,
        group_index: int,
        group_threshold: int,
        group_count: int,
        member_index: int,
        member_threshold: int,
        share_value: bytes,
    ):
        # Position of the mnemonic in the user-supplied list; assigned later.
        self.index = None
        self.identifier = identifier
        self.iteration_exponent = iteration_exponent
        self.share_value = share_value
        # Group-level metadata.
        self.group_index = group_index
        self.group_threshold = group_threshold
        self.group_count = group_count
        # Member-level metadata.
        self.member_index = member_index
        self.member_threshold = member_threshold

    def common_parameters(self) -> tuple:
        """Return the values that every share of one matching set agrees on."""
        shared = (
            self.identifier,
            self.iteration_exponent,
            self.group_threshold,
            self.group_count,
        )
        return shared
class EncryptedSeed:
    """
    Represents the encrypted master seed for BIP-32.
    """

    def __init__(self, identifier: int, iteration_exponent: int, encrypted_master_secret: bytes):
        self.identifier = identifier
        self.iteration_exponent = iteration_exponent
        self.encrypted_master_secret = encrypted_master_secret

    def decrypt(self, passphrase: str) -> bytes:
        """
        Converts the Encrypted Master Secret to a Master Secret by applying the passphrase.
        This is analogous to BIP-39 passphrase derivation. We do not use the term "derive"
        here, because passphrase function is symmetric in SLIP-39. We are using the terms
        "encrypt" and "decrypt" instead.
        """
        # An empty/None passphrase is treated as the empty string.
        passphrase = (passphrase or '').encode('utf-8')
        # Split the EMS into halves and run the Feistel network backwards
        # (round indices iterate in reverse for decryption).
        ems_len = len(self.encrypted_master_secret)
        l = self.encrypted_master_secret[: ems_len // 2]
        r = self.encrypted_master_secret[ems_len // 2 :]
        salt = _get_salt(self.identifier)
        for i in reversed(range(_ROUND_COUNT)):
            (l, r) = (
                r,
                _xor(l, _round_function(i, passphrase, self.iteration_exponent, salt, r)),
            )
        # The halves come out swapped relative to the ciphertext order.
        return r + l
def recover_ems(mnemonics: List[str]) -> EncryptedSeed:
    """
    Combines mnemonic shares to obtain the encrypted master secret which was previously
    split using Shamir's secret sharing scheme.
    Returns identifier, iteration exponent and the encrypted master secret.

    Raises Slip39Error if the list is empty or too few full groups are present.
    """
    if not mnemonics:
        raise Slip39Error("The list of mnemonics is empty.")
    (
        identifier,
        iteration_exponent,
        group_threshold,
        group_count,
        groups,
    ) = _decode_mnemonics(mnemonics)
    # Use only groups that have at least the threshold number of shares.
    # group = (member_threshold, set of (member_index, share_value)).
    groups = {group_index: group for group_index, group in groups.items() if len(group[1]) >= group[0]}
    if len(groups) < group_threshold:
        raise Slip39Error(
            "Insufficient number of mnemonic groups. Expected {} full groups, but {} were provided.".format(
                group_threshold, len(groups)
            )
        )
    # Two-level recovery: first each group's secret, then the master secret
    # from the group secrets.
    group_shares = [
        (group_index, _recover_secret(group[0], list(group[1])))
        for group_index, group in groups.items()
    ]
    encrypted_master_secret = _recover_secret(group_threshold, group_shares)
    return EncryptedSeed(identifier, iteration_exponent, encrypted_master_secret)
def decode_mnemonic(mnemonic: str) -> Share:
    """Converts a share mnemonic to share data.

    Validates length, checksum and padding, then unpacks the fields per the
    SLIP-39 layout: id + iteration exponent, then group/member metadata,
    then the share value, then the RS1024 checksum. Thresholds and counts
    are stored on the wire minus one, hence the ``+ 1`` on construction.
    Raises Slip39Error on any malformed input.
    """
    mnemonic_data = tuple(_mnemonic_to_indices(mnemonic))
    if len(mnemonic_data) < _MIN_MNEMONIC_LENGTH_WORDS:
        raise Slip39Error(_('Too short.'))
    # Bits left over after packing the share value into 16-bit multiples.
    padding_len = (_RADIX_BITS * (len(mnemonic_data) - _METADATA_LENGTH_WORDS)) % 16
    if padding_len > 8:
        raise Slip39Error(_('Invalid length.'))
    if not _rs1024_verify_checksum(mnemonic_data):
        raise Slip39Error(_('Invalid mnemonic checksum.'))
    # First words: identifier and iteration exponent packed together.
    id_exp_int = _int_from_indices(mnemonic_data[:_ID_EXP_LENGTH_WORDS])
    identifier = id_exp_int >> _ITERATION_EXP_LENGTH_BITS
    iteration_exponent = id_exp_int & ((1 << _ITERATION_EXP_LENGTH_BITS) - 1)
    # Next two words: five 4-bit fields of group/member metadata.
    tmp = _int_from_indices(
        mnemonic_data[_ID_EXP_LENGTH_WORDS : _ID_EXP_LENGTH_WORDS + 2]
    )
    (
        group_index,
        group_threshold,
        group_count,
        member_index,
        member_threshold,
    ) = _int_to_indices(tmp, 5, 4)
    value_data = mnemonic_data[_ID_EXP_LENGTH_WORDS + 2 : -_CHECKSUM_LENGTH_WORDS]
    # Note: raw wire values compared here (both are stored value - 1).
    if group_count < group_threshold:
        raise Slip39Error(_('Invalid mnemonic group threshold.'))
    value_byte_count = _bits_to_bytes(_RADIX_BITS * len(value_data) - padding_len)
    value_int = _int_from_indices(value_data)
    # The padding bits in the first value word must be zero.
    if value_data[0] >= 1 << (_RADIX_BITS - padding_len):
        raise Slip39Error(_('Invalid mnemonic padding.'))
    value = value_int.to_bytes(value_byte_count, "big")
    return Share(
        identifier,
        iteration_exponent,
        group_index,
        group_threshold + 1,
        group_count + 1,
        member_index,
        member_threshold + 1,
        value,
    )
def get_wordlist() -> Wordlist:
    """Load the SLIP-39 wordlist and validate that it has exactly 1024 words."""
    wordlist = Wordlist.from_file('slip39.txt')
    expected = 2**_RADIX_BITS
    actual = len(wordlist)
    if actual != expected:
        raise Slip39Error(
            f"The wordlist should contain {expected} words, but it contains {actual} words."
        )
    return wordlist
def process_mnemonics(mnemonics: List[str]) -> Tuple[Optional[EncryptedSeed], str]:
    """Validate a list of share mnemonics and report progress.

    Returns ``(encrypted_seed, status_html)``: the seed is None until a
    complete, consistent set of shares has been entered; the status string
    is HTML describing per-group progress or an error.
    """
    # Collect valid shares.
    shares = []
    for i, mnemonic in enumerate(mnemonics):
        try:
            share = decode_mnemonic(mnemonic)
            share.index = i + 1
            shares.append(share)
        except Slip39Error:
            # Invalid mnemonics are skipped here; flagged later if the set
            # is otherwise complete.
            pass
    if not shares:
        return None, _('No valid shares.')
    # Sort shares into groups.
    groups: Dict[int, Set[Share]] = defaultdict(set)  # group idx : shares
    common_params = shares[0].common_parameters()
    for share in shares:
        # All shares must agree on identifier/exponent/group parameters.
        if share.common_parameters() != common_params:
            error_text = _("Share") + ' #%d ' % share.index + _("is not part of the current set.")
            return None, _ERROR_STYLE % error_text
        for other in groups[share.group_index]:
            if share.member_index == other.member_index:
                error_text = _("Share") + ' #%d ' % share.index + _("is a duplicate of share") + ' #%d.' % other.index
                return None, _ERROR_STYLE % error_text
        groups[share.group_index].add(share)
    # Compile information about groups.
    groups_completed = 0
    for i, group in groups.items():
        if group:
            member_threshold = next(iter(group)).member_threshold
            if len(group) >= member_threshold:
                groups_completed += 1
    identifier = shares[0].identifier
    iteration_exponent = shares[0].iteration_exponent
    group_threshold = shares[0].group_threshold
    group_count = shares[0].group_count
    status = ''
    if group_count > 1:
        # Multi-group split: show per-group progress lines.
        status += _('Completed') + ' <b>%d</b> ' % groups_completed + _('of') + ' <b>%d</b> ' % group_threshold + _('groups needed:<br/>')
        for group_index in range(group_count):
            group_prefix = _make_group_prefix(identifier, iteration_exponent, group_index, group_threshold, group_count)
            status += _group_status(groups[group_index], group_prefix)
    if groups_completed >= group_threshold:
        if len(mnemonics) > len(shares):
            # Enough groups, but some entered mnemonics failed to decode.
            status += _ERROR_STYLE % _('Some shares are invalid.')
        else:
            try:
                encrypted_seed = recover_ems(mnemonics)
                status += '<b>' + _('The set is complete!') + '</b>'
            except Slip39Error as e:
                encrypted_seed = None
                status = _ERROR_STYLE % str(e)
            return encrypted_seed, status
    return None, status
"""
## Group status helpers
"""
_FINISHED = '<span style="color:green;">✔</span>'
_EMPTY = '<span style="color:red;">✕</span>'
_INPROGRESS = '<span style="color:orange;">⚫</span>'
_ERROR_STYLE = '<span style="color:red; font-weight:bold;">' + _('Error') + ': %s</span>'
def _make_group_prefix(identifier, iteration_exponent, group_index, group_threshold, group_count):
    """Render the mnemonic-word prefix that all shares of a group start with.

    Packs identifier, iteration exponent and the group fields exactly as
    they appear at the start of a mnemonic, then converts to words.
    """
    wordlist = get_wordlist()
    val = identifier
    val <<= _ITERATION_EXP_LENGTH_BITS
    val += iteration_exponent
    val <<= 4
    val += group_index
    val <<= 4
    # Threshold and count are stored on the wire minus one.
    val += group_threshold - 1
    val <<= 4
    val += group_count - 1
    # Drop the two low bits so the value aligns to whole 10-bit words.
    val >>= 2
    prefix = ' '.join(wordlist[idx] for idx in _int_to_indices(val, _GROUP_PREFIX_LENGTH_WORDS, _RADIX_BITS))
    return prefix
def _group_status(group: Set[Share], group_prefix) -> str:
    """Render one HTML progress line for a share group.

    Shows a red cross for an empty group, an orange dot while shares are
    still missing, and a green check once the member threshold is met.
    (A stray no-op ``len(group)`` expression statement was removed.)
    """
    if not group:
        return _EMPTY + '<b>0</b> ' + _('shares from group') + ' <b>' + group_prefix + '</b>.<br/>'
    else:
        share = next(iter(group))
        icon = _FINISHED if len(group) >= share.member_threshold else _INPROGRESS
        return icon + '<b>%d</b> ' % len(group) + _('of') + ' <b>%d</b> ' % share.member_threshold + _('shares needed from group') + ' <b>%s</b>.<br/>' % group_prefix
"""
## Convert mnemonics or integers to indices and back
"""
def _int_from_indices(indices: Indices) -> int:
    """Converts a list of base 1024 indices in big endian order to an integer value."""
    total = 0
    for digit in indices:
        total = (total << _RADIX_BITS) + digit
    return total
def _int_to_indices(value: int, output_length: int, bits: int) -> Iterable[int]:
"""Converts an integer value to indices in big endian order."""
mask = (1 << bits) - 1
return ((value >> (i * bits)) & mask for i in reversed(range(output_length)))
def _mnemonic_to_indices(mnemonic: str) -> List[int]:
    """Convert a whitespace-separated mnemonic into wordlist indices.

    Lookup is case-insensitive. Raises Slip39Error for unknown words,
    truncating long words in the error message for display.
    """
    wordlist = get_wordlist()
    indices = []
    for word in mnemonic.split():
        try:
            indices.append(wordlist.index(word.lower()))
        except ValueError:
            # Shorten very long inputs before embedding them in the error.
            if len(word) > 8:
                word = word[:8] + '...'
            raise Slip39Error(_('Invalid mnemonic word') + ' "%s".' % word) from None
    return indices
"""
## Checksum functions
"""
def _rs1024_polymod(values: Indices) -> int:
    """RS1024 checksum polymod over a sequence of 10-bit values (SLIP-39)."""
    generator_coeffs = (
        0xE0E040,
        0x1C1C080,
        0x3838100,
        0x7070200,
        0xE0E0009,
        0x1C0C2412,
        0x38086C24,
        0x3090FC48,
        0x21B1F890,
        0x3F3F120,
    )
    checksum = 1
    for value in values:
        top = checksum >> 20
        checksum = ((checksum & 0xFFFFF) << 10) ^ value
        # Fold in a generator coefficient for each set bit of the top word.
        for bit, coeff in enumerate(generator_coeffs):
            if (top >> bit) & 1:
                checksum ^= coeff
    return checksum
def _rs1024_verify_checksum(data: Indices) -> bool:
    """
    Verifies a checksum of the given mnemonic, which was already parsed into Indices.

    A valid mnemonic makes the polymod over the customization-string prefix
    plus all words (including the checksum words) equal exactly 1.
    """
    return _rs1024_polymod(tuple(_CUSTOMIZATION_STRING) + data) == 1
"""
## Internal functions
"""
def _precompute_exp_log() -> Tuple[List[int], List[int]]:
exp = [0 for i in range(255)]
log = [0 for i in range(256)]
poly = 1
for i in range(255):
exp[i] = poly
log[poly] = i
# Multiply poly by the polynomial x + 1.
poly = (poly << 1) ^ poly
# Reduce poly by x^8 + x^4 + x^3 + x + 1.
if poly & 0x100:
poly ^= 0x11B
return exp, log
_EXP_TABLE, _LOG_TABLE = _precompute_exp_log()
def _interpolate(shares, x) -> bytes:
    """
    Returns f(x) given the Shamir shares (x_1, f(x_1)), ... , (x_k, f(x_k)).
    :param shares: The Shamir shares.
    :type shares: A list of pairs (x_i, y_i), where x_i is an integer and y_i is an array of
    bytes representing the evaluations of the polynomials in x_i.
    :param int x: The x coordinate of the result.
    :return: Evaluations of the polynomials in x.
    :rtype: Array of bytes.
    """
    x_coordinates = set(share[0] for share in shares)
    if len(x_coordinates) != len(shares):
        raise Slip39Error("Invalid set of shares. Share indices must be unique.")
    share_value_lengths = set(len(share[1]) for share in shares)
    if len(share_value_lengths) != 1:
        raise Slip39Error(
            "Invalid set of shares. All share values must have the same length."
        )
    # Short-circuit: if x is one of the share coordinates, return it directly.
    if x in x_coordinates:
        for share in shares:
            if share[0] == x:
                return share[1]
    # Lagrange interpolation in GF(256); products/quotients are done as
    # sums/differences of discrete logarithms. In GF(2^8), subtraction is
    # XOR, so (x_i - x) is written share[0] ^ x.
    # Logarithm of the product of (x_i - x) for i = 1, ... , k.
    log_prod = sum(_LOG_TABLE[share[0] ^ x] for share in shares)
    # Start from an all-zero accumulator of the common share-value length.
    result = bytes(share_value_lengths.pop())
    for share in shares:
        # The logarithm of the Lagrange basis polynomial evaluated at x.
        log_basis_eval = (
            log_prod
            - _LOG_TABLE[share[0] ^ x]
            - sum(_LOG_TABLE[share[0] ^ other[0]] for other in shares)
        ) % 255
        # Accumulate share_val * basis into the result, byte by byte
        # (0 has no logarithm, so it is special-cased).
        result = bytes(
            intermediate_sum
            ^ (
                _EXP_TABLE[(_LOG_TABLE[share_val] + log_basis_eval) % 255]
                if share_val != 0
                else 0
            )
            for share_val, intermediate_sum in zip(share[1], result)
        )
    return result
def _round_function(i: int, passphrase: bytes, e: int, salt: bytes, r: bytes) -> bytes:
    """The round function used internally by the Feistel cipher.

    PBKDF2-HMAC-SHA256 keyed by the round index plus passphrase, salted with
    the id-derived salt plus the other half; the iteration count scales as
    _BASE_ITERATION_COUNT << e split evenly across the rounds.
    """
    return pbkdf2_hmac(
        "sha256",
        bytes([i]) + passphrase,
        salt + r,
        (_BASE_ITERATION_COUNT << e) // _ROUND_COUNT,
        dklen=len(r),
    )
def _get_salt(identifier: int) -> bytes:
    """Build the PBKDF2 salt: customization string plus the big-endian id."""
    return _CUSTOMIZATION_STRING + identifier.to_bytes(
        _bits_to_bytes(_ID_LENGTH_BITS), "big"
    )
def _create_digest(random_data: bytes, shared_secret: bytes) -> bytes:
    """First _DIGEST_LENGTH_BYTES of HMAC-SHA256(key=random_data, msg=shared_secret)."""
    return hmac.new(random_data, shared_secret, "sha256").digest()[:_DIGEST_LENGTH_BYTES]
def _recover_secret(threshold: int, shares: List[Tuple[int, bytes]]) -> bytes:
    """Recover the shared secret from *shares* and verify its digest share.

    Raises Slip39Error when the digest check fails (i.e. wrong/corrupt shares).
    """
    # If the threshold is 1, then the digest of the shared secret is not used.
    if threshold == 1:
        return shares[0][1]
    # Interpolate the secret (index 255) and its digest share (index 254).
    shared_secret = _interpolate(shares, _SECRET_INDEX)
    digest_share = _interpolate(shares, _DIGEST_INDEX)
    digest = digest_share[:_DIGEST_LENGTH_BYTES]
    random_part = digest_share[_DIGEST_LENGTH_BYTES:]
    if digest != _create_digest(random_part, shared_secret):
        raise Slip39Error("Invalid digest of the shared secret.")
    return shared_secret
def _decode_mnemonics(
    mnemonics: List[str],
) -> Tuple[int, int, int, int, MnemonicGroups]:
    """Decode mnemonics, validate their common parameters and group them.

    Returns (identifier, iteration_exponent, group_threshold, group_count,
    groups). Raises Slip39Error when the mnemonics disagree on any common
    parameter or contain duplicate member indices within a group.
    """
    identifiers = set()
    iteration_exponents = set()
    group_thresholds = set()
    group_counts = set()
    # { group_index : [threshold, set_of_member_shares] }
    groups = {}  # type: MnemonicGroups
    for mnemonic in mnemonics:
        share = decode_mnemonic(mnemonic)
        identifiers.add(share.identifier)
        iteration_exponents.add(share.iteration_exponent)
        group_thresholds.add(share.group_threshold)
        group_counts.add(share.group_count)
        group = groups.setdefault(share.group_index, (share.member_threshold, set()))
        if group[0] != share.member_threshold:
            raise Slip39Error(
                "Invalid set of mnemonics. All mnemonics in a group must have the same member threshold."
            )
        group[1].add((share.member_index, share.share_value))
    # Each of these sets must have collapsed to exactly one value.
    if len(identifiers) != 1 or len(iteration_exponents) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must begin with the same {} words.".format(
                _ID_EXP_LENGTH_WORDS
            )
        )
    if len(group_thresholds) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must have the same group threshold."
        )
    if len(group_counts) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must have the same group count."
        )
    for group_index, group in groups.items():
        # (member_index, value) pairs with repeated indices mean conflicting
        # share values for the same slot.
        if len(set(share[0] for share in group[1])) != len(group[1]):
            raise Slip39Error(
                "Invalid set of shares. Member indices in each group must be unique."
            )
    return (
        identifiers.pop(),
        iteration_exponents.pop(),
        group_thresholds.pop(),
        group_counts.pop(),
        groups,
    )
| {
"content_hash": "ac0f3c26798f4e65b76c9e908d65dbaa",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 166,
"avg_line_length": 31.366554054054053,
"alnum_prop": 0.6081102913457914,
"repo_name": "pooler/electrum-ltc",
"id": "6ab1d776c7971fa1d657bd066202bea2d0508907",
"size": "19665",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "electrum_ltc/slip39.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13024"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "NSIS",
"bytes": "7354"
},
{
"name": "Python",
"bytes": "5325268"
},
{
"name": "QML",
"bytes": "318745"
},
{
"name": "Ruby",
"bytes": "16856"
},
{
"name": "Shell",
"bytes": "105672"
},
{
"name": "kvlang",
"bytes": "70748"
}
],
"symlink_target": ""
} |
class TextOutput(object):
    """Wrapper for text output whose repr is the text itself.
    This avoids `repr(output)` adding quotation marks around already-rendered text.
    """

    def __init__(self, output):
        # The already-rendered text, exposed verbatim via repr().
        self.output = output

    def __repr__(self):
        return self.output
class HtmlOutput:
    """Wrapper for HTML output whose repr is the markup itself.

    Mirrors ``TextOutput``: repr() returns the already-rendered markup
    verbatim. Bug fix: the original also defined an ``output()`` method with
    the same name as the ``output`` instance attribute; the attribute
    shadowed the method, making it unreachable (calling ``obj.output()``
    raised TypeError because the attribute is a string). The dead method is
    removed.
    """

    def __init__(self, output):
        # The already-rendered HTML, exposed verbatim via repr().
        self.output = output

    def __repr__(self):
        return self.output
| {
"content_hash": "6c9e1795e319293130c8b6b79faf7bb7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 23.95,
"alnum_prop": 0.6137787056367432,
"repo_name": "lresende/toree-gateway",
"id": "4c776d0d5429c9dcbe144605bc209cf529ba8510",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/wrappers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41558"
},
{
"name": "Shell",
"bytes": "3904"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BindingTestCase(IntegrationTestCase):
    def test_fetch_request(self):
        """Fetching a binding issues a GET against the expected Bindings URL."""
        # Any 500 makes the client raise; we only care about the request shape.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Bindings/BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address": "a7c658f4111ec4ff5a1a647f9d0edd819025b9f20522d2fae897049f32873e73",
"binding_type": "apn",
"credential_sid": null,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"endpoint": "26607274",
"identity": "24987039",
"notification_protocol_version": "3",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"tags": [
"26607274"
],
"links": {
"user": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/24987039"
},
"url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings/BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Bindings/BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings.create(identity="identity", binding_type="apn", address="address")
values = {'Identity': "identity", 'BindingType': "apn", 'Address': "address", }
self.holodeck.assert_has_request(Request(
'post',
'https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Bindings',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address": "a7c658f4111ec4ff5a1a647f9d0edd819025b9f20522d2fae897049f32873e73",
"binding_type": "apn",
"credential_sid": null,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"endpoint": "26607274",
"identity": "24987039",
"notification_protocol_version": "3",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"tags": [
"26607274"
],
"links": {
"user": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/24987039"
},
"url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings/BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings.create(identity="identity", binding_type="apn", address="address")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings.list()
self.holodeck.assert_has_request(Request(
'get',
'https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Bindings',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"bindings": [],
"meta": {
"first_page_url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings?Tag=tag&Identity=identity&PageSize=50&Page=0",
"key": "bindings",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings?Tag=tag&Identity=identity&PageSize=50&Page=0"
}
}
'''
))
actual = self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"bindings": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address": "a7c658f4111ec4ff5a1a647f9d0edd819025b9f20522d2fae897049f32873e73",
"binding_type": "apn",
"credential_sid": null,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"endpoint": "26607274",
"identity": "24987039",
"notification_protocol_version": "3",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"tags": [
"26607274"
],
"links": {
"user": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/24987039"
},
"url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings/BSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"first_page_url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings?Tag=tag&Identity=identity&PageSize=50&Page=0",
"key": "bindings",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://notify.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Bindings?Tag=tag&Identity=identity&PageSize=50&Page=0"
}
}
'''
))
actual = self.client.notify.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.bindings.list()
self.assertIsNotNone(actual)
| {
"content_hash": "f2b4523682e6128c5261a0bae123f690",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 167,
"avg_line_length": 41.311004784689,
"alnum_prop": 0.5418114431318045,
"repo_name": "twilio/twilio-python",
"id": "20e21915b01a855485af41b500b660f986ce3adc",
"size": "8649",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/notify/v1/service/test_binding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import socket
import sys
# ----------- Config ----------
IP_VERSION = 'IPv4'
PORT = 3333
# -------------------------------
if IP_VERSION == 'IPv4':
family_addr = socket.AF_INET
elif IP_VERSION == 'IPv6':
family_addr = socket.AF_INET6
else:
print('IP_VERSION must be IPv4 or IPv6')
sys.exit(1)
try:
sock = socket.socket(family_addr, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error as msg:
print('Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
sys.exit()
try:
sock.bind(('', PORT))
except socket.error as msg:
print('Bind failed. Error: ' + str(msg[0]) + ': ' + msg[1])
sys.exit()
while True:
try:
print('Waiting for data...')
data, addr = sock.recvfrom(1024)
if not data:
break
data = data.decode()
print('Reply[' + addr[0] + ':' + str(addr[1]) + '] - ' + data)
reply = 'OK ' + data
sock.sendto(reply.encode(), addr)
except socket.error as msg:
print('Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
sock.close()
| {
"content_hash": "20ad3c7abe2afa69369ae511099e2fa0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 25.795454545454547,
"alnum_prop": 0.5488986784140969,
"repo_name": "krzychb/rtd-test-bed",
"id": "7c3676d9f63018ea2f3aa6f3ae571897d17efe14",
"size": "1429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/protocols/sockets/scripts/udpserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "248929"
},
{
"name": "Batchfile",
"bytes": "9428"
},
{
"name": "C",
"bytes": "42611901"
},
{
"name": "C++",
"bytes": "10437923"
},
{
"name": "CMake",
"bytes": "316611"
},
{
"name": "CSS",
"bytes": "1340"
},
{
"name": "Dockerfile",
"bytes": "4319"
},
{
"name": "GDB",
"bytes": "2764"
},
{
"name": "Go",
"bytes": "146670"
},
{
"name": "HCL",
"bytes": "468"
},
{
"name": "HTML",
"bytes": "115431"
},
{
"name": "Inno Setup",
"bytes": "14977"
},
{
"name": "Lex",
"bytes": "7273"
},
{
"name": "M4",
"bytes": "189150"
},
{
"name": "Makefile",
"bytes": "439631"
},
{
"name": "Objective-C",
"bytes": "133538"
},
{
"name": "PHP",
"bytes": "498"
},
{
"name": "Pawn",
"bytes": "151052"
},
{
"name": "Perl",
"bytes": "141532"
},
{
"name": "Python",
"bytes": "1868534"
},
{
"name": "Roff",
"bytes": "102712"
},
{
"name": "Ruby",
"bytes": "206821"
},
{
"name": "Shell",
"bytes": "625528"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "Tcl",
"bytes": "110"
},
{
"name": "TeX",
"bytes": "1961"
},
{
"name": "Visual Basic",
"bytes": "294"
},
{
"name": "XSLT",
"bytes": "80335"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
} |
from pyPanair.postprocess import ffmf_converter
if __name__ == '__main__':
print("converting ffmf to csv file")
ffmf_converter.write_ffmf()
print("success!")
| {
"content_hash": "ddb00e264528b2d0a6e545f0cce37aa7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 29.5,
"alnum_prop": 0.6440677966101694,
"repo_name": "SaTa999/pyPanair",
"id": "25d91b41fccdaa047a6c74fbb9b5029e1a4fb4fa",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/convert_ffmf/convert_ffmf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46920"
}
],
"symlink_target": ""
} |
from django.contrib.auth.backends import ModelBackend
from website.models import Contributor
class CustomAuthentication (ModelBackend):
def authenticate(self, username=None, password=None):
try:
user = Contributor.objects.get (username=username)
if user.password == password:
return user
else:
# Authentication fails if None is returned
return None
except Contributor.DoesNotExist:
return None
def get_user(self, user_id):
try:
return Contributor.objects.get (pk=user_id)
except Contributor.DoesNotExist:
return None
def have_permission(authenticated_user_id, user_id):
if str(authenticated_user_id) == user_id:
return True
else:
return False
| {
"content_hash": "ad3ed91786783ad9be2e972d5408cdd5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 27.8,
"alnum_prop": 0.6223021582733813,
"repo_name": "UKS-Tim3/Issuebox",
"id": "6428ca37755f9c17f3dd375ef19798b2c82250a7",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issuebox/website/auth/backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "145726"
},
{
"name": "HTML",
"bytes": "99623"
},
{
"name": "JavaScript",
"bytes": "252084"
},
{
"name": "Python",
"bytes": "78028"
},
{
"name": "Shell",
"bytes": "181"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.login, name='login'),
url(r'^user_login/$', views.user_login, name='user_login'),
]
| {
"content_hash": "291dd24305c9ac9fe1aaa4043e93b09f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 22.5,
"alnum_prop": 0.6444444444444445,
"repo_name": "CMPUT404W16T01/CMPUT404-project-socialdistribution",
"id": "8cdcf785ea7095b597c3f72445f495cbaf04cb08",
"size": "180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "login/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "198264"
},
{
"name": "HTML",
"bytes": "28438"
},
{
"name": "JavaScript",
"bytes": "286361"
},
{
"name": "Python",
"bytes": "37087"
}
],
"symlink_target": ""
} |
"""
Django settings for Mastering Masteries project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import os
import environ
from os.path import abspath, basename, dirname, join, normpath
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('mastering_masteries')
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
SITE_ROOT = dirname(DJANGO_ROOT)
SITE_NAME = basename(DJANGO_ROOT)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'data',
'rest_framework',
'pipeline',
'webpack_loader',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'mastering_masteries.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'mastering_masteries.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Robert""", 'Your email'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
os.path.join(BASE_DIR, 'assets'),
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'IGNORE': ['.+\.hot-update.js', '.+\.map']
}
}
# browserify-specific
PIPELINE_COMPILERS = (
'pipeline_browserify.compiler.BrowserifyCompiler',
)
if DEBUG:
PIPELINE_BROWSERIFY_ARGUMENTS = '-t babelify'
PIPELINE = {
'PIPELINE_ENABLED': True,
'STYLESHEETS': {
'mycss': {
'source_filenames': (
'css/style.css',
'css/project.css',
),
'output_filename': 'css/mysite_css.css',
}
},
'JAVASCRIPT': {
'react': {
'source_filenames': (
'js/bower_components/jquery/dist/jquery.min.js',
'js/bower_components/react/JSXTransformer.js',
'js/bower_components/react/react-with-addons.js',
'js/app.browserify.js',
),
'output_filename': 'js/mysite_js.js',
}
}
}
PIPELINE['CSS_COMPRESSOR'] = 'pipeline.compressors.NoopCompressor'
PIPELINE['JS_COMPRESSOR'] = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'mastering_masteries.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'mastering_masteries.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('mastering_masteries.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
#REST FRAMEWORK
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
} | {
"content_hash": "764f2ca50046adc9bfa00d3e5bf98c09",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 114,
"avg_line_length": 34.831649831649834,
"alnum_prop": 0.618753020782987,
"repo_name": "bobcosc/masteringmastery",
"id": "9572a8a3195977be04d3855992aaaa3a6bd50460",
"size": "10369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82176"
},
{
"name": "HTML",
"bytes": "36917"
},
{
"name": "JavaScript",
"bytes": "280647"
},
{
"name": "Python",
"bytes": "276852"
},
{
"name": "Shell",
"bytes": "3675"
}
],
"symlink_target": ""
} |
"""
MoinMoin - Support Package
This package collects small third party utilities in order
to reduce the necessary steps in installing MoinMoin. Each
source file is copyrighted by its respective author. We've done
our best to assure those files are freely redistributable.
@copyright: 2001-2004 Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
| {
"content_hash": "34bc15ff564f3d7df5c21da28dacbe76",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 37.36363636363637,
"alnum_prop": 0.7128953771289538,
"repo_name": "Glottotopia/aagd",
"id": "cdf81f65a58a9d74872cadab3506a49c4b05b781",
"size": "441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/support/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
import sqlite3 as sqlite
class database:
def __init__(self, dbname):
self.con = sqlite.connect(dbname)
def __del__(self):
self.con.close()
def dbcommit( self ):
self.con.commit()
def insertData( self, query ):
result = self.con.execute( query )
self.con.commit()
return result
def getNames( self, table ):
name = self.con.execute( "select nombre from %s" % (table) )
return [ x[0] for x in name ]
def getIds( self, table ):
name = self.con.execute( "select rowid from %s" % (table) )
return [ x[0] for x in name ]
def getId( self, table, name ):
id = self.con.execute( "select rowid from %s where nombre = '%s'" % ( table, name ) )
id = id.fetchone()[0]
return id
def getName( self, table, name ):
name = self.insertData( "select nombre from %s where rowid = %s" % ( table, name ) )
return name.fetchone()[0]
def getIdWP( self, name ):
id = self.con.execute( "select rowid from workingpaper where nombrewp = '%s'" % ( name ) )
id = id.fetchone()[0]
return id
def getIdAsig( self, descripcion ):
id = self.con.execute( "select rowid from asignaciones where descripcion = '%s'" % (descripcion) )
id = id.fetchone()[0]
return id
def getWorkingPapers( self, nameId ):
workingPaper = self.con.execute( ( "select nombrewp from workingpaper where rowid in ( select wpid from linkworkingpaper where invid = %d )" ) % nameId )
workingPaper = [ x[0] for x in workingPaper ]
return workingPaper
def getWpIds( self ):
wp = self.con.execute( "select rowid from workingpaper")
return [ x[0] for x in wp ]
def getAsignments( self, nameIds ):
Asignments = self.con.execute( ( "select rowid from asignaciones where rowid in ( select asigid from linkasignaciones where asid = %d )" ) % nameIds )
Asignments = [ x[0] for x in Asignments ]
return Asignments
def getAuthorFromAsigId( self, asigId ):
Asignments = self.con.execute( ( "select nombre from investigador where rowid = ( select invid from linkworkingpaper where wpid = ( select wpid from linkasignaciones where asigid = %d ) )" ) % ( asigId ) )
Asignments = Asignments.fetchone()[0]
return Asignments
def getWPFromAsigId( self, asigId ):
WP = self.con.execute( "select nombrewp from workingpaper where rowid in ( select wpid from linkasignaciones where asigid = %d )" % (asigId ) )
WP = WP.fetchone()[0]
return WP
def getAsFromAsigId( self, asigId ):
WP = self.con.execute( "select nombre from asistente where rowid = ( select asid from linkasignaciones where asigid = %d )" % ( asigId ) )
WP = WP.fetchone()[0]
return WP
def getDataFromAsigId( self, asigId ):
data = self.con.execute( "select * from asignaciones where rowid = %d " % ( asigId ) )
return data.fetchall()[0]
def getAsigFromWpAndInv( self, invId, wpId ):
asigs = self.con.execute( "select A.rowid from linkasignaciones as A join linkworkingpaper as B on B.wpid = A.wpid where B.invid = %d and A.wpid = %d" % ( invId, wpId ) )
return [ x[0] for x in asigs ]
def deleteRow( self, asigId ):
self.con.execute( "delete from asignaciones where rowid = %d" % ( asigId ) )
self.con.execute( "delete from linkasignaciones where asigid = %d" % (asigId) )
self.con.commit()
def updateRow( self, asigId, asignado, estado, prioridad, avance, comentarios ):
self.con.execute( "update linkasignaciones set asid = '%d' where asigId = %d" % (asignado, asigId ) )
self.con.execute( "update asignaciones set estatus = '%s' where rowid = %d" % (estado, asigId ) )
self.con.execute( "update asignaciones set prioridad = '%s' where rowid = %d" % (prioridad, asigId ) )
self.con.execute( "update asignaciones set avance = '%s' where rowid = %d" % (avance, asigId ) )
self.con.execute( "update asignaciones set comentarios = '%s' where rowid = %d" % (comentarios, asigId ) )
self.con.commit() | {
"content_hash": "6fec4679876a77e062114c63bae3bf93",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 208,
"avg_line_length": 41.064516129032256,
"alnum_prop": 0.6784498559832417,
"repo_name": "rickardo10/distributed-db-app",
"id": "13882e8d1852bc96214c60dadfe312a9c57289e8",
"size": "3819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66859"
},
{
"name": "JavaScript",
"bytes": "48068"
},
{
"name": "Python",
"bytes": "27093"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class Hlayer0rule1(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule layer0rule1.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(Hlayer0rule1, self).__init__(name='Hlayer0rule1', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """layer0rule1"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule1')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class ClientServerInterface(layer0rule1class0) node
self.add_node()
self.vs[3]["mm__"] = """ClientServerInterface"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class ClientServerInterface(layer0rule1class0)
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class ImplementationModule(layer0rule1class1) node
self.add_node()
self.vs[5]["mm__"] = """ImplementationModule"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class ImplementationModule(layer0rule1class1)
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class StructDeclaration(layer0rule1class2) node
self.add_node()
self.vs[7]["mm__"] = """StructDeclaration"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class StructDeclaration(layer0rule1class2)
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# match association ImplementationModule--contents-->ClientServerInterface node
self.add_node()
self.vs[9]["attr1"] = """contents"""
self.vs[9]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class ClientServerInterface(layer0rule1class0)
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class ImplementationModule(layer0rule1class1)
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class StructDeclaration(layer0rule1class2)
(5,9), # match_class ImplementationModule(layer0rule1class1) -> association contents
(9,3), # association contents -> match_class ClientServerInterface(layer0rule1class0)
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((7,'name'),('concat',((5,'name'),('concat',(('constant','_'),('concat',((3,'name'),('concat',(('constant','__'),('constant','idata')))))))))), ((7,'__ApplyAttribute'),('constant','ClientServerStructIData')), ]
| {
"content_hash": "0f50c3a53c9f197de02d8bfa49ac3f2c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 239,
"avg_line_length": 37.329787234042556,
"alnum_prop": 0.5246508976916501,
"repo_name": "levilucio/SyVOLT",
"id": "a665f6616c0c280a7983d8820659ef0cefae9a4f",
"size": "3509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbeddr2C_MM/transformation_from_eclipse/Hlayer0rule1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# import jinja2
from jinja2.ext import Extension
from .templatetags.promotions_tags import promo_ballance
class PromotionsExtension(Extension):
def __init__(self, environment):
super(PromotionsExtension, self).__init__(environment)
environment.filters["promo_ballance"] = promo_ballance
# Nicer import name
core = PromotionsExtension
| {
"content_hash": "4f37636c5c3d5a5f1aa03b918261da98",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 26.533333333333335,
"alnum_prop": 0.7512562814070352,
"repo_name": "phani00/tovp",
"id": "222cde4c39ddd09b510bcd96dca0885f4a63bc55",
"size": "398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/promotions/jinja2tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190528"
},
{
"name": "HTML",
"bytes": "288449"
},
{
"name": "JavaScript",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "514452"
}
],
"symlink_target": ""
} |
from diffusion import *
from work import DiffusionTensorTest
from registration import BSplineDeformableRegistration, AffineRegistration
| {
"content_hash": "60f8e570eaff5be8620838633ee0edf2",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 74,
"avg_line_length": 45.333333333333336,
"alnum_prop": 0.8897058823529411,
"repo_name": "christianbrodbeck/nipype",
"id": "d73af3300a98d8b075cdff571d6036a537ff3b50",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/slicer/legacy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "282"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "2537426"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import sys

# Naive "disassembler": dump every byte of an input binary as NASM `db`
# directives so the file can be re-assembled verbatim.
if len(sys.argv) < 2:
    print("Usage: %s <binary>" % (sys.argv[0]))
    sys.exit(-1)

# The `with` statement closes the file itself; the original code also
# called close() explicitly inside each block (a redundant double close).
with open(sys.argv[1], "rb") as file_handle:
    binary_content = file_handle.read()

with open("%s.asm" % (sys.argv[1]), "w") as output_asm_file:
    output_asm_file.write("bits 32\n\n")
    # Iterating over a bytes object yields ints on Python 3; the `& 0xff`
    # mask defensively keeps each value in byte range.
    output_asm_file.writelines("db 0x%02X\n" % (c & 0xff) for c in binary_content)

print("Written %d bytes into output file!" % len(binary_content))
| {
"content_hash": "d7fbcb68662689ea2c2cda63f2fb04dc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 30.705882352941178,
"alnum_prop": 0.5842911877394636,
"repo_name": "Ge0/okita",
"id": "2b073f866e52b0791b6b5cc58a9813382c6e9922",
"size": "545",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "okita-poc/naive_disassembler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "30014"
},
{
"name": "Python",
"bytes": "14549"
}
],
"symlink_target": ""
} |
from jsonrpc import ServiceProxy

# Connect to the local wallet's JSON-RPC endpoint and unlock the wallet
# for 60 seconds using a passphrase read interactively from the console.
access = ServiceProxy("http://127.0.0.1:9939")
passphrase = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(passphrase, 60)
| {
"content_hash": "da5ae1649991abe44eca7358f5bcef99",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.5,
"alnum_prop": 0.759493670886076,
"repo_name": "Faucetcoin/faucetcoin",
"id": "ad38d06f0090dd9a0365aac0b65d097676c2d741",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2531940"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Erlang",
"bytes": "6752"
},
{
"name": "IDL",
"bytes": "14704"
},
{
"name": "JavaScript",
"bytes": "81"
},
{
"name": "Nu",
"bytes": "264"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "PHP",
"bytes": "2969"
},
{
"name": "Perl",
"bytes": "30068"
},
{
"name": "Python",
"bytes": "69724"
},
{
"name": "Shell",
"bytes": "13582"
},
{
"name": "TypeScript",
"bytes": "5244596"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
def page(request, num="1"):
    """Placeholder view for the URL-dispatcher demo app.

    :param request: the incoming HttpRequest (unused)
    :param num: page number captured from the URL, as a string (unused)
    :return: None — the view is not implemented yet.
    """
    return None
| {
"content_hash": "27d81ffcab76caf28d41523a4927642c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 20,
"alnum_prop": 0.72,
"repo_name": "domenicosolazzo/practice-django",
"id": "1a48f9fe1024ca5b68910751dee2743bc1abb0ff",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http/http_site/url_dispatcher_app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41781"
},
{
"name": "JavaScript",
"bytes": "101825"
},
{
"name": "Python",
"bytes": "7086244"
},
{
"name": "Shell",
"bytes": "7642"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import unittest
import sys
import time
from system_test import TestCase, Qdrouterd, main_module, Process, TIMEOUT, Logger, TestTimeout
from subprocess import PIPE, STDOUT
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
from test_broker import FakeBroker
# How many worker threads each router runs with.
W_THREADS = 2

# Define oversize denial condition.
# Connection-level close emitted when a message exceeds maxMessageSize.
OVERSIZE_CONDITION_NAME = "amqp:connection:forced"
OVERSIZE_CONDITION_DESC = "Message size exceeded"
# Link-level close condition for the same violation.
OVERSIZE_LINK_CONDITION_NAME = "amqp:link:message-size-exceeded"

# For the next test case define max sizes for each router.
# These are the configured maxMessageSize values.
EA1_MAX_SIZE = 50000
INTA_MAX_SIZE = 100000
INTB_MAX_SIZE = 150000
EB1_MAX_SIZE = 200000

# Interior routers check for max message size at message ingress over all connections
# except interrouter connections.
# Edge routers check for max message size at message ingress over all connections
# except interior router connections.
# Edge routers may check a max message size and allow the message to be delivered
# to destinations on that edge router. However, if the message is passed to an
# interior router then the message is subject to the interior router's max size
# before the message is forwarded by the interior router network.

# The bytes-over and bytes-under max that should trigger allow or deny.
# Messages with content this much over should be blocked while
# messages with content this much under should be allowed.
# * client overhead is typically 16 bytes or so
# * interrouter overhead is much larger with annotations
OVER_UNDER = 200

# Alert:
# This module has two large classes that are laid out about the same:
#   OversizeMessageTransferTest
#   OversizeMulticastTransferTest
# The MessageTransfer test does a single sender and single receiver while
# the MulticastTransfer test does a single sender and four receivers.
# Much of the logic between tests is duplicated. Remember to fix things in both tests.
class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver. Then it tries to send _count_
    number of messages of the given size through the router or router network.
    Messages are to pass through an edge router and get blocked by an interior
    or messages are to be blocked by both the edge and the interior.

    When 'blocked_by_both' is false then:
    * The ingress router should allow the sender's oversize message.
    * The message is blocked by the uplink router by rejecting the message
      and closing the connection between the interior and edge routers.
    * The receiver may receive aborted message indications but that is
      not guaranteed.
    * If any aborted messages are received then the count must be at most one.

    When 'blocked_by_both' is true then:
    * The ingress edge router will reject and close the connection on the first message
    * The second message may be aborted because the connection between the
      edge router and the interior router was closed
    * The remainder of the messages are going into a closed connection and
      will receive no settlement.
    """

    def __init__(self, test_class, sender_host, receiver_host, test_address,
                 message_size=100000, count=10, blocked_by_both=False, print_to_console=False):
        """
        Construct an instance of the unicast test
        :param test_class: test class - has wait-connection function
        :param sender_host: router for sender connection
        :param receiver_host: router for receiver connection or None for link route test
        :param test_address: sender/receiver AMQP address
        :param message_size: in bytes
        :param count: how many messages to send
        :param blocked_by_both: true if edge router messages are also blocked by interior
        :param print_to_console: print logs as they happen
        """
        super(OversizeMessageTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.blocked_by_both = blocked_by_both
        # Every message in this test is oversize, so a block is always expected.
        self.expect_block = True
        self.messages = []
        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None
        self.network_stable = False
        # Per-outcome counters, examined by _check_done().
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False
        self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        """Reactor timer fired before the test finished: record the error and stop."""
        current = ("check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d" %
                   (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error, self.n_send_settled))
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        """Arm the timeout, wait for the network, pre-generate messages, open links."""
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct message in identifiable chunks so a truncated or
            # aborted message can be traced back to its source offset
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        # receiver_host may be None for link-route style tests; then no
        # receiver link is created at all.
        if self.receiver_host is not None:
            self.logger.log("on_start: opening receiver connection to %s" % (self.receiver_host.addresses[0]))
            self.receiver_conn = event.container.connect(self.receiver_host.addresses[0])

            self.logger.log("on_start: Creating receiver")
            self.receiver = event.container.create_receiver(self.receiver_conn, self.test_address)

        self.logger.log("on_start: opening sender connection to %s" % (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn, self.test_address)

        self.logger.log("on_start: done")

    def send(self):
        """Send pre-generated messages while the sender link has credit."""
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log("send. address:%s message:%d of %s length=%d" % (
                            self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        # if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        """Receiving anything is a failure: all messages should be blocked."""
        self.logger.log("on_message: entry")
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        """Verify the sender's connection was closed with the oversize condition."""
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log("on_connection_remote_close: sender closed with correct condition")
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                                 OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                                 event.connection.remote_condition.name, event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        """Cancel the timer and close all links and connections exactly once."""
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None

    def _current(self):
        """One-line snapshot of all counters for log messages."""
        return ("net_stable=%s sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d" %
                (self.network_stable, self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        """Decide, after each event, whether the test has reached its end state."""
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if not self.blocked_by_both:
                # Blocked by interior only. Connection to edge stays up
                # and all messages must be accounted for.
                done = self.n_rejected == 1 and \
                    self.n_send_settled == self.count
            else:
                # Blocked by interior and edge. Expect edge connection to go down
                # and some of our messages arrive at edge after it has sent
                # AMQP close. Those messages are never settled. TODO: Is that OK?
                done = self.n_rejected == 1 and \
                    self.n_connection_error == 1
            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            # Rejection is the expected outcome for an oversize message.
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self.sender_conn.close()
        if self.receiver is not None:
            self.receiver_conn.close()
        self.timer.cancel()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))

    def run(self):
        """Run the proton container; on any escape exception, record and dump logs."""
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        # Give the reactor a moment to settle before the caller inspects state.
        time.sleep(0.2)
#
# DISPATCH-975 Detect that an oversize message is blocked.
# These tests check simple and compound blocking for multicast messages.
#
# Indexes into router arrays for receivers and receiver stats
IDX_INTA = 0  # interior router INT.A
IDX_INTB = 1  # interior router INT.B
IDX_EA1 = 2   # edge router EA1
IDX_EB1 = 3   # edge router EB1
class OversizeMulticastTransferTest(MessagingHandler):
    """
    This test connects a sender and four receivers. Then it tries to send _count_
    number of messages of the given size through the router or router network.

    Depending on blocked_by_ingress/blocked_by_interior the oversize message is
    expected to be rejected at the ingress (edge) router, at the interior
    router, or at both. The expect_receives array states, per router index
    (IDX_INTA..IDX_EB1), how many messages each receiver should see.
    """

    def __init__(self, test_class, sender_host, routers, test_address, expect_receives,
                 blocked_by_ingress, blocked_by_interior,
                 message_size=100000, count=10, print_to_console=False):
        """
        Construct an instance of the multicast test
        :param test_class: test class - has wait-connection function
        :param sender_host: router for the sender connection
        :param routers: a list of all the routers for receiver connections
        :param test_address: sender/receiver AMQP address
        :param expect_receives: array of expected receive counts
        :param blocked_by_ingress: true if ingress router blocks
        :param blocked_by_interior: true if edge router messages also blocked by interior
        :param message_size: in bytes
        :param count: how many messages to send
        :param print_to_console: print logs as they happen
        """
        super(OversizeMulticastTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.routers = routers
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_receives = expect_receives  # router array
        self.blocked_by_ingress = blocked_by_ingress
        self.blocked_by_interior = blocked_by_interior
        self.messages = []
        self.sender_conn = None
        self.receiver_conns = [None, None, None, None]  # router array
        self.error = None
        self.sender = None
        self.receivers = [None, None, None, None]  # router array
        self.proxy = None
        self.network_stable = False
        # Per-outcome counters, examined by _check_done().
        self.n_sent = 0
        self.n_rcvds = [0, 0, 0, 0]  # router array
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborteds = [0, 0, 0, 0]  # router array
        self.n_connection_error = 0
        self.shut_down = False
        self.logger = Logger(title=("OversizeMulticastTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        """Reactor timer fired before the test finished: record the error and stop."""
        current = self._current()
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        """Arm the timeout, wait for the network, open all links, generate messages."""
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        # One receiver per router in the network.
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: opening receiver connection to %s" % (self.routers[idx].addresses[0]))
            self.receiver_conns[idx] = event.container.connect(self.routers[idx].addresses[0])
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: Creating receiver %d" % idx)
            self.receivers[idx] = event.container.create_receiver(self.receiver_conns[idx], self.test_address)

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct message in identifiable chunks so a truncated or
            # aborted message can be traced back to its source offset
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        self.logger.log("on_start: opening sender connection to %s" % (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn, self.test_address)

        self.logger.log("on_start: done")

    def rcvr_idx_of(self, rcvr):
        """
        Given a receiver, as in event.receiver, return
        the router array index of that receiver's router
        :param rcvr:
        :return: integer index of receiver
        """
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if rcvr == self.receivers[idx]:
                return idx
        # An unknown receiver is a hard test-harness failure.
        self.error = "Receiver not found in receivers array."
        self.logger.log(self.error)
        self.logger.dump()
        self._shut_down_test()
        raise Exception(self.error)

    def send(self):
        """Send pre-generated messages while the sender link has credit."""
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log("send. address:%s message:%d of %s length=%d" % (
                            self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        # if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        """Count deliveries per receiver; receiving on a blocked path is an error."""
        self.logger.log("on_message")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        if self.expect_receives[idx] == 0:
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvds[idx] += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        """Verify the sender's connection was closed with the oversize condition."""
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log("on_connection_remote_close: sender closed with correct condition")
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                                 OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                                 event.connection.remote_condition.name, event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        """Cancel the timer and close all links and connections exactly once."""
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receivers[idx]:
                self.receivers[idx].close()
                self.receivers[idx] = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receiver_conns[idx]:
                self.receiver_conns[idx].close()
                self.receiver_conns[idx] = None

    def _current(self):
        """One-line snapshot of all counters for log messages."""
        return ("net_stable:%s sent=%d rcvd=%s rejected=%d aborted=%s connection_error:%d send_settled:%d" %
                (self.network_stable, self.n_sent, str(self.n_rcvds), self.n_rejected, str(self.n_aborteds), self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        """Decide, after each event, whether the test has reached its end state."""
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if self.blocked_by_interior:
                if self.blocked_by_ingress:
                    # Blocked by interior and edge. Expect edge connection to go down
                    # and some of our messages arrive at edge after it has sent
                    # AMQP close. Those messages are never settled. TODO: Is that OK?
                    done = self.n_rejected == 1 and \
                        self.n_connection_error == 1
                else:
                    # Blocked by interior only. Connection to edge stays up
                    # and all messages must be accounted for.
                    all_received = True
                    for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
                        if self.expect_receives[idx] > 0:
                            if not self.n_rcvds[idx] == self.expect_receives[idx]:
                                all_received = False
                    done = self.n_rejected <= 1 and \
                        self.n_send_settled == self.count and \
                        all_received
            else:
                # Blocked by edge should never deliver to interior
                done = self.n_rejected == 1 and \
                    self.n_connection_error == 1
            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        # A rejection is legitimate whenever some router (ingress edge and/or
        # interior) is expected to block the oversize message.
        # Fix: the original tested the nonexistent attribute `self.reject`,
        # which raised AttributeError on the first rejection event.
        if self.blocked_by_ingress or self.blocked_by_interior:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        self.n_aborteds[idx] += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))

    def run(self):
        """Run the proton container; on any escape exception, record and dump logs."""
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        # Give the reactor a moment to settle before the caller inspects state.
        time.sleep(0.2)
class MaxMessageSizeBlockOversize(TestCase):
"""
verify that maxMessageSize blocks oversize messages
"""
    @classmethod
    def setUpClass(cls):
        """Start the router"""
        super(MaxMessageSizeBlockOversize, cls).setUpClass()

        def router(name, mode, max_size, extra):
            # Build, start and register one router with the given mode and
            # per-vhost maxMessageSize policy.
            config = [
                ('router', {'mode': mode,
                            'id': name,
                            'allowUnsettledMulticast': 'yes',
                            'workerThreads': W_THREADS}),
                ('listener', {'role': 'normal',
                              'port': cls.tester.get_port()}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('policy', {'maxConnections': 100, 'enableVhostPolicy': 'true', 'maxMessageSize': max_size, 'defaultVhost': '$default'}),
                # NOTE(review): this 'address' entry duplicates the one above —
                # presumably a copy/paste leftover; confirm it is harmless.
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('vhost', {'hostname': '$default', 'allowUnknownUser': 'true',
                           'groups': {
                               '$default': {
                                   'users': '*',
                                   'maxConnections': 100,
                                   'remoteHosts': '*',
                                   'sources': '*',
                                   'targets': '*',
                                   'allowAnonymousSender': True,
                                   'allowWaypointLinks': True,
                                   'allowDynamicSource': True
                               }
                           }
                           })
            ]
            if extra:
                config.extend(extra)
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
            return cls.routers[-1]

        # configuration:
        # two edge routers connected via 2 interior routers with max sizes
        #
        #  +-------+    +---------+    +---------+    +-------+
        #  |  EA1  |<==>|  INT.A  |<==>|  INT.B  |<==>|  EB1  |
        #  | 50,000|    | 100,000 |    | 150,000 |    |200,000|
        #  +-------+    +---------+    +---------+    +-------+
        #
        # Note:
        #  * Messages whose senders connect to INT.A or INT.B are subject to max message size
        #    defined for the ingress router only.
        #  * Message whose senders connect to EA1 or EA2 are subject to max message size
        #    defined for the ingress router. If the message is forwarded through the
        #    connected interior router then the message is subject to another max message size
        #    defined by the interior router.

        cls.routers = []

        interrouter_port = cls.tester.get_port()
        cls.INTA_edge_port = cls.tester.get_port()
        cls.INTB_edge_port = cls.tester.get_port()

        router('INT.A', 'interior', INTA_MAX_SIZE,
               [('listener', {'role': 'inter-router',
                              'port': interrouter_port}),
                ('listener', {'role': 'edge', 'port': cls.INTA_edge_port})])
        cls.INT_A = cls.routers[0]
        cls.INT_A.listener = cls.INT_A.addresses[0]

        router('INT.B', 'interior', INTB_MAX_SIZE,
               [('connector', {'name': 'connectorToA',
                               'role': 'inter-router',
                               'port': interrouter_port}),
                ('listener', {'role': 'edge',
                              'port': cls.INTB_edge_port})])
        cls.INT_B = cls.routers[1]
        cls.INT_B.listener = cls.INT_B.addresses[0]

        router('EA1', 'edge', EA1_MAX_SIZE,
               [('listener', {'name': 'rc', 'role': 'route-container',
                              'port': cls.tester.get_port()}),
                ('connector', {'name': 'uplink', 'role': 'edge',
                               'port': cls.INTA_edge_port})])
        cls.EA1 = cls.routers[2]
        cls.EA1.listener = cls.EA1.addresses[0]

        router('EB1', 'edge', EB1_MAX_SIZE,
               [('connector', {'name': 'uplink',
                               'role': 'edge',
                               'port': cls.INTB_edge_port,
                               'maxFrameSize': 1024}),
                ('listener', {'name': 'rc', 'role': 'route-container',
                              'port': cls.tester.get_port()})])
        cls.EB1 = cls.routers[3]
        cls.EB1.listener = cls.EB1.addresses[0]

        cls.wait_router_network_connected()
@classmethod
def wait_router_network_connected(cls):
cls.INT_A.wait_router_connected('INT.B')
cls.INT_B.wait_router_connected('INT.A')
cls.EA1.wait_connectors()
cls.EB1.wait_connectors()
def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
p = self.popen(
['qdmanage'] +
cmd.split(' ') +
['--bus',
address or self.address(),
'--indent=-1', '--timeout', str(TIMEOUT)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def sense_n_closed_lines(self, routername, pattern=OVERSIZE_CONDITION_NAME):
"""
Read a router's log file and count how many size-exceeded lines are in it.
:param routername:
:return: (int, int) tuple with counts of lines in and lines out
"""
with open("../setUpClass/%s.log" % routername, 'r') as router_log:
log_lines = router_log.read().split("\n")
i_closed_lines = [s for s in log_lines if pattern in s and "<-" in s]
o_closed_lines = [s for s in log_lines if pattern in s and "->" in s]
return (len(i_closed_lines), len(o_closed_lines))
# verify that a message can go through an edge EB1 and get blocked by interior INT.B
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# | ^
# V |
# +--------+ +-------+
# |receiver| |sender |
# | | |199,800|
# +--------+ +-------+
#
    def test_60_block_oversize_EB1_INTB_at_INTB(self):
        """Oversize message enters at EB1, must be blocked by interior INT.B."""
        # Snapshot close-event counts so deltas can be computed after the run.
        ibefore, obefore = self.sense_n_closed_lines("EB1")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize,
                                           MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.INT_B,
                                           "e60",
                                           message_size=EB1_MAX_SIZE - OVER_UNDER,
                                           print_to_console=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_60 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)

        # Verify that interrouter link was shut down
        iafter, oafter = self.sense_n_closed_lines("EB1")
        idelta = iafter - ibefore
        odelta = oafter - obefore
        # EB1 should have *received* exactly one close (from INT.B) and sent none.
        success = odelta == 0 and idelta == 1
        if (not success):
            test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
                            (obefore, oafter, ibefore, iafter))
            test.logger.dump()
        self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")

        # Verify that a link was closed with the expected pattern(s)
        ilink1, olink1 = self.sense_n_closed_lines("EB1", pattern=OVERSIZE_LINK_CONDITION_NAME)
        success = olink1 > 0
        if (not success):
            test.logger.log("FAIL: Did not see link close in log file. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
                            (obefore, oafter, ibefore, iafter))
            test.logger.dump()
        self.assertTrue(success, "Expected router to generate link close with condition: amqp:link:message-size-exceeded")
# verify that a message can go through an edge EB1 and get blocked by interior INT.B
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# | ^
# V |
# +--------+ +-------+
# |receiver| |sender |
# | | |199,800|
# +--------+ +-------+
#
    def test_61_block_oversize_EB1_EA1_at_INTB(self):
        """Oversize message enters at EB1 toward EA1, must be blocked by interior INT.B."""
        # Snapshot close-event counts so deltas can be computed after the run.
        ibefore, obefore = self.sense_n_closed_lines("EB1")
        test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize,
                                           MaxMessageSizeBlockOversize.EB1,
                                           MaxMessageSizeBlockOversize.EA1,
                                           "e61",
                                           message_size=EB1_MAX_SIZE - OVER_UNDER,
                                           print_to_console=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_61 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)

        # Verify that interrouter link was shut down
        iafter, oafter = self.sense_n_closed_lines("EB1")
        idelta = iafter - ibefore
        odelta = oafter - obefore
        # EB1 should have *received* exactly one close (from INT.B) and sent none.
        success = odelta == 0 and idelta == 1
        if (not success):
            test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
                            (obefore, oafter, ibefore, iafter))
            test.logger.dump()
        self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
# see what happens when a message must be blocked by edge and also by interior
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# | ^
# V |
# +--------+ +-------+
# |receiver| |sender |
# | | |200,200|
# +--------+ +-------+
#
def test_70_block_oversize_EB1_INTB_at_both(self):
ibefore, obefore = self.sense_n_closed_lines("EB1")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize,
MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.INT_B,
"e70",
message_size=EB1_MAX_SIZE + OVER_UNDER,
blocked_by_both=True,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_70 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
# Verify that interrouter link was shut down
# EB1 must close connection to sender (odelta == 1) but
# INT.B may or may not close the edge-interior link. Sometimes EB1 senses the
# oversize condition before it has forwarded too many bytes of the first message
# to INT.B. Then EB1 aborts the first message to INT.B and INT.B never
# detects an oversize condition.
iafter, oafter = self.sense_n_closed_lines("EB1")
idelta = iafter - ibefore
odelta = oafter - obefore
success = odelta == 1 and (idelta == 0 or idelta == 1)
if (not success):
test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
(obefore, oafter, ibefore, iafter))
test.logger.dump()
self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
# Verify that a multicast can go through an edge EB1 and get blocked by interior INT.B
#
# +-------+ +---------+ +---------+ +-------+
# | rcvr | | rcvr | | rcvr | | rcvr |
# | no | | no | | no | | yes |
# +-------+ +---------+ +---------+ +-------+
# ^ ^ ^ ^
# | | | |
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# ^
# |
# +-------+
# |sender |
# |199,800|
# +-------+
#
def test_80_block_multicast_EB1_INTB_at_INTB(self):
ibefore, obefore = self.sense_n_closed_lines("EB1")
count = 10
test = OversizeMulticastTransferTest(MaxMessageSizeBlockOversize,
MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.routers,
"multicast/e80",
[0, 0, 0, count],
blocked_by_ingress=False,
blocked_by_interior=True,
message_size=EB1_MAX_SIZE - OVER_UNDER,
count=count,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_80 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
# Verify that interrouter link was shut down
iafter, oafter = self.sense_n_closed_lines("EB1")
idelta = iafter - ibefore
odelta = oafter - obefore
success = odelta == 0 and idelta == 1
if (not success):
test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
(obefore, oafter, ibefore, iafter))
test.logger.dump()
self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
# Verify that a multicast blocked by edge ingress goes to no receivers
#
# +-------+ +---------+ +---------+ +-------+
# | rcvr | | rcvr | | rcvr | | rcvr |
# | no | | no | | no | | no |
# +-------+ +---------+ +---------+ +-------+
# ^ ^ ^ ^
# | | | |
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# ^
# |
# +-------+
# |sender |
# | 50,200|
# +-------+
#
def test_81_block_multicast_EA1(self):
ibefore, obefore = self.sense_n_closed_lines("EA1")
count = 10
test = OversizeMulticastTransferTest(MaxMessageSizeBlockOversize,
MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.routers,
"multicast/e81",
[0, 0, 0, 0],
blocked_by_ingress=True,
blocked_by_interior=False,
message_size=EA1_MAX_SIZE + OVER_UNDER,
count=count,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_81 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
# Verify that interrouter link was shut down
iafter, oafter = self.sense_n_closed_lines("EA1")
idelta = iafter - ibefore
odelta = oafter - obefore
success = odelta == 1 and idelta == 0
if (not success):
test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
(obefore, oafter, ibefore, iafter))
test.logger.dump()
self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
# Verify that a multicast blocked by interior ingress goes to no receivers
#
# +-------+ +---------+ +---------+ +-------+
# | rcvr | | rcvr | | rcvr | | rcvr |
# | no | | no | | no | | no |
# +-------+ +---------+ +---------+ +-------+
# ^ ^ ^ ^
# | | | |
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
# ^
# |
# +-------+
# |sender |
# |100,200|
# +-------+
#
def test_82_block_multicast_INTA(self):
ibefore, obefore = self.sense_n_closed_lines("INT.A")
count = 10
test = OversizeMulticastTransferTest(MaxMessageSizeBlockOversize,
MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.routers,
"multicast/e82",
[0, 0, 0, 0],
blocked_by_ingress=True,
blocked_by_interior=False,
message_size=INTA_MAX_SIZE + OVER_UNDER,
count=count,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_82 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
# Verify that interrouter link was shut down
iafter, oafter = self.sense_n_closed_lines("INT.A")
idelta = iafter - ibefore
odelta = oafter - obefore
success = odelta == 1 and idelta == 0
if (not success):
test.logger.log(
"FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
(obefore, oafter, ibefore, iafter))
test.logger.dump()
self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
#
# Link route
#
class Dummy(FakeBroker):
    """
    A broker stand-in that opens its link and then idles.
    No traffic is expected to reach this broker.
    """

    def __init__(self, url, container_id):
        super(Dummy, self).__init__(url, container_id)

    def on_message(self, event):
        # Any delivery arriving here is a test failure; surface it immediately.
        sys.stdout.write("ERROR did not expect a message\n")
        sys.stdout.flush()
class MaxMessageSizeLinkRouteOversize(TestCase):
    """
    verify that maxMessageSize blocks oversize messages over link route
    """
    @classmethod
    def setUpClass(cls):
        """Start the router network (two interiors, two edges) plus a fake
        link-route broker port, each router with its own maxMessageSize policy."""
        super(MaxMessageSizeLinkRouteOversize, cls).setUpClass()
        # Port the fake broker will listen on; INT.B connects out to it.
        cls.fb_port = cls.tester.get_port()
        cls.logger = Logger(print_to_console=True)
        def router(name, mode, max_size, extra):
            # Build and launch one router with the given policy maxMessageSize.
            # 'extra' appends per-router listeners/connectors to the base config.
            config = [
                ('router', {'mode': mode,
                            'id': name,
                            'allowUnsettledMulticast': 'yes',
                            'workerThreads': W_THREADS}),
                ('listener', {'role': 'normal',
                              'port': cls.tester.get_port()}),
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('policy', {'maxConnections': 100, 'enableVhostPolicy': 'true', 'maxMessageSize': max_size, 'defaultVhost': '$default'}),
                # NOTE(review): this 'address' entry duplicates the one above
                # the policy entry — looks like a copy/paste leftover.
                ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
                ('linkRoute', {'prefix': 'oversize', 'containerId': 'FakeBroker', 'direction': 'in'}),
                ('linkRoute', {'prefix': 'oversize', 'containerId': 'FakeBroker', 'direction': 'out'}),
                ('vhost', {'hostname': '$default', 'allowUnknownUser': 'true',
                           'groups': {
                               '$default': {
                                   'users': '*',
                                   'maxConnections': 100,
                                   'remoteHosts': '*',
                                   'sources': '*',
                                   'targets': '*',
                                   'allowAnonymousSender': True,
                                   'allowWaypointLinks': True,
                                   'allowDynamicSource': True
                               }
                           }
                           })
            ]
            if extra:
                config.extend(extra)
            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))
            return cls.routers[-1]
        # configuration:
        # two edge routers connected via 2 interior routers with max sizes
        #
        #  +-------+    +---------+    +---------+    +-------+
        #  |  EA1  |<==>|  INT.A  |<==>|  INT.B  |<==>|  EB1  |
        #  | 50,000|    | 100,000 |    | 150,000 |    |200,000|
        #  +-------+    +---------+    +---------+    +-------+
        #                                   ^
        #                                   #
        #                                   v
        #                               +--------+
        #                               |  fake  |
        #                               | broker |
        #                               +--------+
        #
        # Note:
        #  * Messages whose senders connect to INT.A or INT.B are subject to max message size
        #    defined for the ingress router only.
        #  * Message whose senders connect to EA1 or EA2 are subject to max message size
        #    defined for the ingress router. If the message is forwarded through the
        #    connected interior router then the message is subject to another max message size
        #    defined by the interior router.
        cls.routers = []
        interrouter_port = cls.tester.get_port()
        cls.INTA_edge_port = cls.tester.get_port()
        cls.INTB_edge_port = cls.tester.get_port()
        # INT.A: interior hub with the inter-router listener and an edge listener.
        router('INT.A', 'interior', INTA_MAX_SIZE,
               [('listener', {'role': 'inter-router',
                              'port': interrouter_port}),
                ('listener', {'role': 'edge', 'port': cls.INTA_edge_port})])
        cls.INT_A = cls.routers[0]
        cls.INT_A.listener = cls.INT_A.addresses[0]
        # INT.B: connects to INT.A, exposes an edge listener, and dials out to
        # the fake broker over a route-container connector.
        router('INT.B', 'interior', INTB_MAX_SIZE,
               [('connector', {'name': 'connectorToA',
                               'role': 'inter-router',
                               'port': interrouter_port}),
                ('listener', {'role': 'edge',
                              'port': cls.INTB_edge_port}),
                ('connector', {'name': 'FakeBroker',
                               'role': 'route-container',
                               'host': '127.0.0.1',
                               'port': cls.fb_port,
                               'saslMechanisms': 'ANONYMOUS'}),
                ])
        cls.INT_B = cls.routers[1]
        cls.INT_B.listener = cls.INT_B.addresses[0]
        cls.INT_B.fb_port = cls.INT_B.connector_addresses[0]
        # EA1: edge router uplinked to INT.A.
        router('EA1', 'edge', EA1_MAX_SIZE,
               [('listener', {'name': 'rc', 'role': 'route-container',
                              'port': cls.tester.get_port()}),
                ('connector', {'name': 'uplink', 'role': 'edge',
                               'port': cls.INTA_edge_port})])
        cls.EA1 = cls.routers[2]
        cls.EA1.listener = cls.EA1.addresses[0]
        # EB1: edge router uplinked to INT.B; small maxFrameSize forces
        # multi-frame transfers of large messages.
        router('EB1', 'edge', EB1_MAX_SIZE,
               [('connector', {'name': 'uplink',
                               'role': 'edge',
                               'port': cls.INTB_edge_port,
                               'maxFrameSize': 1024}),
                ('listener', {'name': 'rc', 'role': 'route-container',
                              'port': cls.tester.get_port()})])
        cls.EB1 = cls.routers[3]
        cls.EB1.listener = cls.EB1.addresses[0]
        cls.wait_router_network_connected()
        # Stand up the fake broker endpoint so the 'oversize' link route
        # becomes active, then wait until INT.B reports the address.
        cls.fake_broker = Dummy("amqp://127.0.0.1:" + str(cls.fb_port),
                                container_id="FakeBroker")
        cls.INT_B.wait_address("oversize",
                               containers=1, count=2)
    @classmethod
    def tearDownClass(cls):
        """Stop the fake broker"""
        cls.fake_broker.join()
        # time.sleep(0.25)  # Sleeping a bit here lets INT_B clean up connectors and timers
        super(MaxMessageSizeLinkRouteOversize, cls).tearDownClass()
    @classmethod
    def wait_router_network_connected(cls):
        # Block until both interiors see each other and both edges have
        # established their uplink connectors.
        cls.INT_A.wait_router_connected('INT.B')
        cls.INT_B.wait_router_connected('INT.A')
        cls.EA1.wait_connectors()
        cls.EB1.wait_connectors()
    def sense_n_closed_lines(self, routername):
        """
        Read a router's log file and count how many size-exceeded lines are in it.
        :param routername:
        :return: (int, int) tuple with counts of lines in and lines out
        """
        # "<-" marks inbound frames, "->" outbound, in the router's frame trace.
        with open("../setUpClass/%s.log" % routername, 'r') as router_log:
            log_lines = router_log.read().split("\n")
        i_closed_lines = [s for s in log_lines if OVERSIZE_CONDITION_NAME in s and "<-" in s]
        o_closed_lines = [s for s in log_lines if OVERSIZE_CONDITION_NAME in s and "->" in s]
        return (len(i_closed_lines), len(o_closed_lines))
    # verify that a message can go through an edge EB1 and get blocked by interior INT.B
    #
    #  +-------+    +---------+    +---------+    +-------+
    #  |  EA1  |<==>|  INT.A  |<==>|  INT.B  |<==>|  EB1  |
    #  | 50,000|    | 100,000 |    | 150,000 |    |200,000|
    #  +-------+    +---------+    +---------+    +-------+
    #                    |                            ^
    #                    V                            |
    #               +--------+                    +-------+
    #               |  fake  |                    |sender |
    #               | broker |                    |200,200|
    #               +--------+                    +-------+
    #
    def test_90_block_link_route_EB1_INTB(self):
        # Sender payload exceeds EB1's limit; EB1 must close the sender
        # connection (odelta == 1). INT.B may or may not also close the
        # edge-interior link (idelta 0 or 1) — see test_70 for the race.
        ibefore, obefore = self.sense_n_closed_lines("EB1")
        test = OversizeMessageTransferTest(MaxMessageSizeLinkRouteOversize,
                                           MaxMessageSizeLinkRouteOversize.EB1,
                                           None,
                                           "oversize.e90",
                                           message_size=EB1_MAX_SIZE + OVER_UNDER,
                                           blocked_by_both=True,
                                           print_to_console=False)
        test.run()
        if test.error is not None:
            test.logger.log("test_90 test error: %s" % (test.error))
            test.logger.dump()
        self.assertTrue(test.error is None)
        # Verify that interrouter link was shut down
        iafter, oafter = self.sense_n_closed_lines("EB1")
        idelta = iafter - ibefore
        odelta = oafter - obefore
        success = odelta == 1 and (idelta == 0 or idelta == 1)
        if (not success):
            test.logger.log("FAIL: N closed events in log file did not increment by 1. oBefore: %d, oAfter: %d, iBefore:%d, iAfter:%d" %
                            (obefore, oafter, ibefore, iafter))
            test.logger.dump()
        self.assertTrue(success, "Expected router to generate close with condition: message size exceeded")
        # NOTE(review): this runs after the assert, so it is skipped when the
        # check above fails; presumably it re-verifies the network for any
        # later tests — confirm intent.
        MaxMessageSizeLinkRouteOversize.wait_router_network_connected()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(main_module())
| {
"content_hash": "76c7d58b052f10ddad6cbc4c6c1f7799",
"timestamp": "",
"source": "github",
"line_count": 1252,
"max_line_length": 155,
"avg_line_length": 44.685303514377,
"alnum_prop": 0.5079362242162084,
"repo_name": "ganeshmurthy/qpid-dispatch",
"id": "1ed68f1c3559887a1dd608f981c2f2c9f39eeede",
"size": "56736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/system_tests_policy_oversize_compound.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2695814"
},
{
"name": "C++",
"bytes": "359957"
},
{
"name": "CMake",
"bytes": "54018"
},
{
"name": "CSS",
"bytes": "49129"
},
{
"name": "Dockerfile",
"bytes": "3230"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "737682"
},
{
"name": "Objective-C",
"bytes": "1976"
},
{
"name": "Python",
"bytes": "2547017"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
} |
from mitmproxy.tools.console import palettes
class TestPalette:
    def test_helptext(self):
        """Every registered palette must build with both flag values."""
        for flag in (False, True):
            for pal in palettes.palettes.values():
                assert pal.palette(flag)
| {
"content_hash": "0f7eb06b9eff57c2699bb3f7e1c1c5a1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 28.444444444444443,
"alnum_prop": 0.65234375,
"repo_name": "mitmproxy/mitmproxy",
"id": "05d3c274d46fc478b5f470f28d05e5c8c92defaa",
"size": "256",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/mitmproxy/tools/console/test_palettes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2374404"
},
{
"name": "Shell",
"bytes": "3013"
},
{
"name": "TypeScript",
"bytes": "279000"
}
],
"symlink_target": ""
} |
import sys
import tkinter
import unittest
# Cached probe result: None = not probed yet, False = Tk usable,
# str = human-readable reason why Tk cannot be used.
_tk_unavailable = None
def check_tk_availability():
    """Check that Tk is installed and available.

    Probes at most once per process (result cached in _tk_unavailable) and
    raises unittest.SkipTest with the reason when Tk cannot be used.
    """
    global _tk_unavailable
    if _tk_unavailable is None:
        _tk_unavailable = False
        if sys.platform == 'darwin':
            # The Aqua Tk implementations on OS X can abort the process if
            # being called in an environment where a window server connection
            # cannot be made, for instance when invoked by a buildbot or ssh
            # process not running under the same user id as the current console
            # user.  To avoid that, raise an exception if the window manager
            # connection is not available.
            from ctypes import cdll, c_int, pointer, Structure
            from ctypes.util import find_library
            app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
            if app_services.CGMainDisplayID() == 0:
                # No display attached at all.
                _tk_unavailable = "cannot run without OS X window manager"
            else:
                class ProcessSerialNumber(Structure):
                    _fields_ = [("highLongOfPSN", c_int),
                                ("lowLongOfPSN", c_int)]
                psn = ProcessSerialNumber()
                psn_p = pointer(psn)
                if ( (app_services.GetCurrentProcess(psn_p) < 0) or
                     (app_services.SetFrontProcess(psn_p) < 0) ):
                    # Display exists but this process cannot become a GUI app.
                    _tk_unavailable = "cannot run without OS X gui process"
        else:       # not OS X
            import tkinter
            try:
                # Creating any widget exercises the Tcl/Tk initialization path.
                tkinter.Button()
            except tkinter.TclError as msg:
                # assuming tk is not available
                _tk_unavailable = "tk not available: %s" % msg
    if _tk_unavailable:
        raise unittest.SkipTest(_tk_unavailable)
    return
def get_tk_root():
    """Return the default Tk root window, creating one only if none exists."""
    check_tk_availability()     # raises SkipTest when Tk cannot be used
    # _default_root can be absent entirely when default-root support has been
    # disabled in Tkinter; treat that the same as "no root created yet".
    root = getattr(tkinter, '_default_root', None)
    if root is None:
        root = tkinter.Tk()
    return root
def root_deiconify():
    """Make the shared Tk root window visible."""
    get_tk_root().deiconify()
def root_withdraw():
    """Hide the shared Tk root window."""
    get_tk_root().withdraw()
def simulate_mouse_click(widget, x, y):
    """Generate proper events to click at the x, y position (tries to act
    like an X server)."""
    sequence = (('<Enter>', 0, 0),
                ('<Motion>', x, y),
                ('<ButtonPress-1>', x, y),
                ('<ButtonRelease-1>', x, y))
    for event_name, ex, ey in sequence:
        widget.event_generate(event_name, x=ex, y=ey)
# _tkinter exposes the Tcl/Tk C bindings; TCL_VERSION is a string such as
# "8.6" which we turn into a tuple of ints for easy comparison.
import _tkinter
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
def requires_tcl(*version):
    """Skip decorator: the test runs only when tcl_version >= *version*."""
    wanted = '.'.join(map(str, version))
    return unittest.skipUnless(tcl_version >= version,
                               'requires Tcl version >= ' + wanted)
_tk_patchlevel = None   # cached result of get_tk_patchlevel()


def get_tk_patchlevel():
    """Return Tk's 'info patchlevel' as a tuple of ints (cached).

    Non-numeric components (e.g. 'b1' in '8.6b1') are mapped to -1.
    """
    global _tk_patchlevel
    if _tk_patchlevel is None:
        tcl = tkinter.Tcl()

        def as_int(part):
            try:
                return int(part, 10)
            except ValueError:
                return -1

        raw = tcl.call('info', 'patchlevel')
        _tk_patchlevel = tuple(as_int(p) for p in raw.split('.'))
    return _tk_patchlevel
# Conversion factors from Tk distance units to points.
units = {
    'c': 72 / 2.54,     # centimeters
    'i': 72,            # inches
    'm': 72 / 25.4,     # millimeters
    'p': 1,             # points
}


def pixels_conv(value):
    """Convert a Tk distance string such as '2c' or '10p' to points."""
    magnitude, unit = value[:-1], value[-1:]
    return float(magnitude) * units[unit]
def tcl_obj_eq(actual, expected):
    """Compare a value obtained from Tcl against an expected Python value.

    A Tcl_Obj compares equal to a string with the same text; tuples are
    compared element-wise recursively.  Anything else must compare equal
    with ==, otherwise the values are considered different.
    """
    if actual == expected:
        return True
    if isinstance(actual, _tkinter.Tcl_Obj) and isinstance(expected, str):
        return str(actual) == expected
    if isinstance(actual, tuple) and isinstance(expected, tuple):
        return (len(actual) == len(expected) and
                all(tcl_obj_eq(a, e) for a, e in zip(actual, expected)))
    return False
def widget_eq(actual, expected):
    """Compare two widgets (or widget path strings) for equality.

    Widgets and strings are compared by their string form; otherwise plain
    == decides.
    """
    if actual == expected:
        return True
    comparable = (str, tkinter.Widget)
    if isinstance(actual, comparable) and isinstance(expected, comparable):
        return str(actual) == str(expected)
    return False
| {
"content_hash": "dd42a6e250894df46a20b571a888619c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 80,
"avg_line_length": 33.11278195488722,
"alnum_prop": 0.5792461398728429,
"repo_name": "paweljasinski/ironpython3",
"id": "fcd9ffc1ca469ea9794c505f2a19e925bc1229cc",
"size": "4404",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/tkinter/test/support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11099"
},
{
"name": "C#",
"bytes": "12284108"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "13117230"
},
{
"name": "Makefile",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "62360"
},
{
"name": "Python",
"bytes": "27267678"
},
{
"name": "R",
"bytes": "4949"
},
{
"name": "Ruby",
"bytes": "19"
},
{
"name": "Shell",
"bytes": "5147"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Presubmit script for changes affecting Source/platform.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import difflib
import os
import re
import sys
# Tell depot_tools to run this presubmit under Python 3.
USE_PYTHON3 = True
# Matches a feature entry line of the form:   name: "FeatureName"
RUNTIMEENABLED_NAME = re.compile(r'\s*name\s*:\s*"([^"]*)"')
# Keys used inside a feature's per-platform "status" dictionary.
CHROMEOS_STATUS = "ChromeOS"
LACROS_STATUS = "Lacros"
# The ignore list will be removed once existing features adopt parity across
# Lacros and ChromeOS.
LACROS_CHROMEOS_FEATURE_STATUS_PARITY_IGNORE_LIST = [
    'BarcodeDetector',  # crbug.com/1235855
    'DigitalGoods',  # crbug.com/1235859
    'NetInfoDownlinkMax',  # crbug.com/1235864
    'WebBluetooth',  # crbug.com/1235867
    'WebBluetoothManufacturerDataFilter',  # crbug.com/1235869
    'WebBluetoothRemoteCharacteristicNewWriteValue',  # crbug.com/235870
]
# pyright: reportMissingImports=false
def RuntimeEnabledFeatures(input_api, filename):
    """Returns the features present in the specified features JSON5 file.

    Args:
        input_api: presubmit input API (supplies os_path and the local path).
        filename: path to the JSON5 file to parse.
    """
    # We need to wait until we have an input_api object and use this
    # roundabout construct to import json5 because this file is
    # eval-ed and thus doesn't have __file__.
    #
    # Compute the path and extend sys.path *before* entering the try block,
    # so the finally clause never references an unassigned name.
    json5_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                        '..', '..', '..', 'pyjson5', 'src')
    sys.path.append(json5_path)
    try:
        import json5
        # Use a context manager so the file handle is always closed
        # (the previous code leaked the handle returned by open()).
        with open(filename, encoding='utf-8') as f:
            return json5.load(f)['data']
    finally:
        # Restore sys.path to what it was before.
        sys.path.remove(json5_path)
def RuntimeEnabledFeatureNames(filename):
    """Reads the 'name' of each feature in runtime_enabled_features.json5.

    Yields each feature name in file order.
    """
    # Note: We don't have a JSON5 parser available, so just use a regex.
    # Read as UTF-8 explicitly for consistency with RuntimeEnabledFeatures,
    # rather than relying on the platform default encoding.
    with open(filename, encoding='utf-8') as f:
        for line in f:
            match = RUNTIMEENABLED_NAME.match(line)
            if match:
                yield match.group(1)
def _CheckRuntimeEnabledFeaturesSorted(input_api, output_api):
    """Check: runtime_enabled_features.json5 feature list sorted alphabetically.
    """
    # Collect the feature names in file order, then compare against a
    # case-insensitive alphabetical ordering of the same names.
    filename = os.path.join(input_api.PresubmitLocalPath(),
                            'runtime_enabled_features.json5')
    names = list(RuntimeEnabledFeatureNames(filename))
    expected = sorted(names, key=str.lower)
    if names == expected:
        return []
    # Report the discrepancy as a diff of actual vs. sorted order.
    diff = difflib.Differ().compare(names, expected)
    return [
        output_api.PresubmitError(
            'runtime_enabled_features.json5 features must be sorted alphabetically. '
            'Diff of feature order follows:',
            long_text='\n'.join(diff))
    ]
def _CheckLacrosChromeOSFeatureStatusParity(input_api, output_api):
    """Check: runtime_enabled_features.json5 feature status parity across Lacros
    and ChromeOS.

    Returns a list with one PresubmitError for the first feature whose Lacros
    and ChromeOS statuses differ, or an empty list when all features agree.
    """
    filename = os.path.join(input_api.PresubmitLocalPath(),
                            'runtime_enabled_features.json5')
    try:
        features = RuntimeEnabledFeatures(input_api, filename)
        # Check that all features with a status specified for ChromeOS have the
        # same status specified for Lacros.
        for feature in features:
            if feature[
                    'name'] in LACROS_CHROMEOS_FEATURE_STATUS_PARITY_IGNORE_LIST:
                continue
            if 'status' in feature and type(feature['status']) is dict:
                status_dict = feature['status']
                if (CHROMEOS_STATUS in status_dict
                        or LACROS_STATUS in status_dict) and (
                            status_dict.get(LACROS_STATUS) !=
                            status_dict.get(CHROMEOS_STATUS)):
                    return [output_api.PresubmitError('Feature {} does not have status parity '\
                        'across Lacros and ChromeOS.'.format(feature['name']))]
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception so those still propagate.
        return [
            output_api.PresubmitError(
                'Failed to parse {} for checks'.format(filename))
        ]
    return []
def _CommonChecks(input_api, output_api):
    """Checks common to both upload and commit."""
    return (_CheckRuntimeEnabledFeaturesSorted(input_api, output_api)
            + _CheckLacrosChromeOSFeatureStatusParity(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point invoked when a change is uploaded for review."""
    return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point invoked when a change is committed."""
    return _CommonChecks(input_api, output_api)
| {
"content_hash": "ba0ac9f8058e396590a327b21505fe06",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 96,
"avg_line_length": 36.49612403100775,
"alnum_prop": 0.6497451146983857,
"repo_name": "ric2b/Vivaldi-browser",
"id": "018410a68206bfe6b731a83d16769aa93f3e474e",
"size": "4870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/renderer/platform/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_round as round
class account_invoice_line(osv.osv):
    """Anglo-saxon accounting extension of invoice lines.

    Adds a link to the originating stock move and generates the extra
    journal items (stock output/expense on sale, price difference on
    purchase) required by anglo-saxon accounting.
    """
    _inherit = "account.invoice.line"
    _columns = {
        'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."),
    }
    def move_line_get(self, cr, uid, invoice_id, context=None):
        """Extend the standard journal items with anglo-saxon entries.

        For customer invoices/refunds of real-time-valued products, append a
        balanced pair of lines moving the cost from the stock output account
        to the expense account.  For supplier invoices/refunds, append a
        price-difference line when the invoiced price differs from the
        valuation price.
        """
        res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        company_currency = inv.company_id.currency_id.id
        def get_price(cr, uid, inv, company_currency, i_line, price_unit):
            # Total line amount (unit price * quantity) converted into the
            # invoice currency when it differs from the company currency,
            # rounded to the 'Account' decimal precision.
            cur_obj = self.pool.get('res.currency')
            decimal_precision = self.pool.get('decimal.precision')
            if inv.currency_id.id != company_currency:
                price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})
            else:
                price = price_unit * i_line.quantity
            return round(price, decimal_precision.precision_get(cr, uid, 'Account'))
        if inv.type in ('out_invoice','out_refund'):
            for i_line in inv.invoice_line:
                if i_line.product_id and i_line.product_id.valuation == 'real_time':
                    # debit account dacc will be the output account
                    # first check the product, if empty check the category
                    dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
                    if not dacc:
                        dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
                    # in both cases the credit account cacc will be the expense account
                    # first check the product, if empty check the category
                    cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
                    if not cacc:
                        cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
                    if dacc and cacc:
                        # Cost comes from the stock move when available,
                        # otherwise from the product's standard price.
                        price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price
                        # Balanced pair: positive amount on the output
                        # account, the same amount negated on the expense
                        # account.
                        res.append({
                            'type':'src',
                            'name': i_line.name[:64],
                            'price_unit':price_unit,
                            'quantity':i_line.quantity,
                            'price':get_price(cr, uid, inv, company_currency, i_line, price_unit),
                            'account_id':dacc,
                            'product_id':i_line.product_id.id,
                            'uos_id':i_line.uos_id.id,
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                        })
                        res.append({
                            'type':'src',
                            'name': i_line.name[:64],
                            'price_unit':price_unit,
                            'quantity':i_line.quantity,
                            'price': -1 * get_price(cr, uid, inv, company_currency, i_line, price_unit),
                            'account_id':cacc,
                            'product_id':i_line.product_id.id,
                            'uos_id':i_line.uos_id.id,
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                        })
        elif inv.type in ('in_invoice','in_refund'):
            for i_line in inv.invoice_line:
                if i_line.product_id and i_line.product_id.valuation == 'real_time':
                    if i_line.product_id.type != 'service':
                        # get the price difference account at the product
                        acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
                        if not acc:
                            # if not found on the product get the price difference account at the category
                            acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
                        a = None
                        # oa will be the stock input account
                        # first check the product, if empty check the category
                        oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
                        if not oa:
                            oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
                        if oa:
                            # get the fiscal position
                            fpos = i_line.invoice_id.fiscal_position or False
                            a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
                        diff_res = []
                        decimal_precision = self.pool.get('decimal.precision')
                        account_prec = decimal_precision.precision_get(cr, uid, 'Account')
                        # calculate and write down the possible price difference between invoice price and product price
                        for line in res:
                            if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
                                uom = i_line.product_id.uos_id or i_line.product_id.uom_id
                                valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
                                if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                                    #for average/fifo/lifo costing method, fetch real cost price from incomming moves
                                    stock_move_obj = self.pool.get('stock.move')
                                    valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
                                    if valuation_stock_move:
                                        valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
                                if inv.currency_id.id != company_currency:
                                    # Convert the valuation price into the invoice currency before comparing.
                                    valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
                                if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
                                    # price with discount and without tax included
                                    price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'],
                                            i_line.price_unit * (1-(i_line.discount or 0.0)/100.0), line['quantity'])['total']
                                    price_line = round(valuation_price_unit * line['quantity'], account_prec)
                                    price_diff = round(price_unit - price_line, account_prec)
                                    # Rebase the original line on the valuation amount and
                                    # post the remainder to the price-difference account.
                                    line.update({'price': price_line})
                                    diff_res.append({
                                        'type': 'src',
                                        'name': i_line.name[:64],
                                        'price_unit': round(price_diff / line['quantity'], account_prec),
                                        'quantity': line['quantity'],
                                        'price': price_diff,
                                        'account_id': acc,
                                        'product_id': line['product_id'],
                                        'uos_id': line['uos_id'],
                                        'account_analytic_id': line['account_analytic_id'],
                                        'taxes': line.get('taxes', []),
                                    })
                        res += diff_res
        return res
    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
        """On product change, substitute the stock input/output account.

        For supplier invoices the stock input account is used, for supplier
        refunds the stock output account; either is mapped through the
        fiscal position before being written into the onchange result.
        """
        fiscal_pool = self.pool.get('account.fiscal.position')
        res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id, context)
        if not product:
            return res
        if type in ('in_invoice','in_refund'):
            product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
            if type == 'in_invoice':
                # Stock input account: product first, then its category.
                oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
                if not oa:
                    oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
            else:
                # Refund: stock output account, product first then category.
                oa = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id
                if not oa:
                    oa = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id
            if oa:
                fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
                a = fiscal_pool.map_account(cr, uid, fpos, oa)
                res['value'].update({'account_id':a})
        return res
class account_invoice(osv.osv):
    """Anglo-saxon accounting extension of invoices (refund preparation)."""
    _inherit = "account.invoice"
    def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
        """Prepare refund values, remapping supplier-invoice line accounts.

        When refunding a supplier invoice, each line with a product is
        repointed to the stock output account (product first, then its
        category), mapped through the invoice's fiscal position.
        """
        invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
                                                                    description, journal_id, context=context)
        if invoice.type == 'in_invoice':
            fiscal_position = self.pool.get('account.fiscal.position')
            # invoice_line holds (0, 0, values) one2many command triples;
            # only the values dict is needed here.
            for _, _, line_dict in invoice_data['invoice_line']:
                if line_dict.get('product_id'):
                    product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
                    counterpart_acct_id = product.property_stock_account_output and \
                            product.property_stock_account_output.id
                    if not counterpart_acct_id:
                        counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
                                product.categ_id.property_stock_account_output_categ.id
                    if counterpart_acct_id:
                        fpos = invoice.fiscal_position or False
                        line_dict['account_id'] = fiscal_position.map_account(cr, uid,
                                                                              fpos,
                                                                              counterpart_acct_id)
        return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "62dbc3c3082cd4c6aca2d93b5c439df9",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 205,
"avg_line_length": 71.43975903614458,
"alnum_prop": 0.5217977907074796,
"repo_name": "diogocs1/comps",
"id": "de085e32e30bda1aff27584e1f2168bb689f06c7",
"size": "12899",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "web/addons/account_anglo_saxon/invoice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
from hanlp.common.vocab_tf import VocabTF
from hanlp.metrics.chunking.chunking_f1_tf import ChunkingF1_TF
from hanlp.metrics.chunking.sequence_labeling import get_entities
class BMES_F1_TF(ChunkingF1_TF):
    """Entity-level F1 for BMES-style sequence labelling, accumulated across batches."""

    def __init__(self, tag_vocab: VocabTF, from_logits=True, suffix=False, name='f1', dtype=None, **kwargs):
        super().__init__(tag_vocab, from_logits, name, dtype, **kwargs)
        # Running totals of correct / predicted / gold entities.
        self.nb_correct = 0
        self.nb_pred = 0
        self.nb_true = 0
        self.suffix = suffix

    def update_tags(self, true_tags, pred_tags):
        """Decode each tag sequence pair into entities, accumulate the counts
        and return the current F1 score."""
        for gold_seq, pred_seq in zip(true_tags, pred_tags):
            gold_entities = get_entities(gold_seq, self.suffix)
            pred_entities = get_entities(pred_seq, self.suffix)
            self.update_entities(gold_entities, pred_entities)
        return self.result()

    def update_entities(self, true_entities, pred_entities):
        """Fold one sample's entity sets into the running totals."""
        gold, guess = set(true_entities), set(pred_entities)
        self.nb_correct += len(gold & guess)
        self.nb_pred += len(guess)
        self.nb_true += len(gold)

    def result(self):
        """Current micro-averaged F1 (0 when precision + recall is 0)."""
        precision = self.nb_correct / self.nb_pred if self.nb_pred > 0 else 0
        recall = self.nb_correct / self.nb_true if self.nb_true > 0 else 0
        if precision + recall > 0:
            return 2 * precision * recall / (precision + recall)
        return 0

    def reset_states(self):
        """Clear all accumulated counts."""
        self.nb_correct = 0
        self.nb_pred = 0
        self.nb_true = 0
| {
"content_hash": "8cb85170efe65ee39ebc2efefe69eb66",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 108,
"avg_line_length": 35.16279069767442,
"alnum_prop": 0.6064814814814815,
"repo_name": "hankcs/HanLP",
"id": "9b67b93d4f02b75397e36cdfb6fb85926c67f927",
"size": "1578",
"binary": false,
"copies": "1",
"ref": "refs/heads/doc-zh",
"path": "hanlp/metrics/chunking/bmes_tf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "40933"
},
{
"name": "Jupyter Notebook",
"bytes": "566269"
},
{
"name": "Python",
"bytes": "2196905"
}
],
"symlink_target": ""
} |
import os.path
from .. import *
@skip_if('objc' not in test_features, 'skipping objective c++ tests')
class TestObjCxx(IntegrationTest):
    """Build and run the sample Objective-C++ program."""

    def __init__(self, *args, **kwargs):
        srcdir = os.path.join('languages', 'objcxx')
        super().__init__(srcdir, *args, **kwargs)

    def test_build(self):
        self.build(executable('program'))
        expected = 'hello from objective c++!\n'
        self.assertOutput([executable('program')], expected)
| {
"content_hash": "a78d89bc069de6977522bae0abb7b477",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.5995423340961098,
"repo_name": "jimporter/bfg9000",
"id": "8f70c132bbec9cbbee9b9dcda2f7f8bd4991204f",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration/languages/test_objcxx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "783"
},
{
"name": "C++",
"bytes": "14009"
},
{
"name": "Fortran",
"bytes": "229"
},
{
"name": "Java",
"bytes": "621"
},
{
"name": "Lex",
"bytes": "579"
},
{
"name": "Objective-C",
"bytes": "148"
},
{
"name": "Objective-C++",
"bytes": "167"
},
{
"name": "Python",
"bytes": "1262127"
},
{
"name": "Roff",
"bytes": "155"
},
{
"name": "Scala",
"bytes": "62"
},
{
"name": "Yacc",
"bytes": "792"
}
],
"symlink_target": ""
} |
from .pylibmount import *
| {
"content_hash": "65056093efcbe6a73000a11907df99a2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 13.5,
"alnum_prop": 0.7407407407407407,
"repo_name": "Shihta/enter-docker",
"id": "09104e2e2133862e683eac65676aa788a02fc62c",
"size": "27",
"binary": false,
"copies": "45",
"ref": "refs/heads/master",
"path": "util-linux-2.27.1/libmount/python/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3925386"
},
{
"name": "C++",
"bytes": "96"
},
{
"name": "Groff",
"bytes": "637118"
},
{
"name": "Python",
"bytes": "8511"
},
{
"name": "Shell",
"bytes": "709722"
}
],
"symlink_target": ""
} |
"""Tests of parameters."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_compression.python.layers import parameters
class ParameterTest:
  """Checks shared by all parameter classes.

  Concrete subclasses must provide `cls` (the parameter class under test),
  `kwargs` (constructor keyword arguments) and `shape` (test tensor shape).
  """

  def test_initial_value_is_reproduced(self):
    init = tf.random.uniform(self.shape, dtype=tf.float32)
    param = self.cls(init, **self.kwargs)
    self.assertAllClose(init, param(), atol=1e-6, rtol=0)

  def test_name_and_value_are_reproduced_after_serialization(self):
    init = tf.random.uniform(self.shape, dtype=tf.float32)
    param = self.cls(init, **self.kwargs)
    expected_name = param.name
    expected_value = param()
    config = tf.keras.utils.serialize_keras_object(param)
    weights = param.get_weights()
    # Round-trip through Keras (de)serialization and restore the weights.
    param = tf.keras.utils.deserialize_keras_object(config)
    self.assertIsInstance(param, self.cls)
    self.assertEqual(expected_name, param.name)
    param.set_weights(weights)
    restored_value = param()
    self.assertAllEqual(expected_value, restored_value)
    self.assertEqual(expected_value.dtype.name, restored_value.dtype.name)

  def test_converts_to_tensor(self):
    init = tf.random.uniform(self.shape, dtype=tf.float32)
    param = self.cls(init, **self.kwargs)
    direct = param()
    converted = tf.convert_to_tensor(param)
    self.assertAllEqual(direct, converted)
    self.assertEqual(direct.dtype.name, converted.dtype.name)
class RDFTParameterTest(ParameterTest, tf.test.TestCase,
                        parameterized.TestCase):
  # Fixture configuration consumed by the ParameterTest mixin.
  cls = parameters.RDFTParameter
  kwargs = dict(name="rdft_kernel")
  shape = (3, 3, 1, 2)
  # TODO(jonycgn): Find out why 3D RFFT gradients are not implemented in TF.
  @parameterized.parameters((7, 3, 2), (5, 3, 1, 2))
  def test_gradients_propagate(self, *shape):
    """Checks gradients flow back into the parameter's two variables."""
    initial_value = tf.random.uniform(shape, dtype=tf.float32)
    parameter = self.cls(initial_value, **self.kwargs)
    rand = tf.random.uniform(shape)
    with tf.GradientTape() as tape:
      loss = tf.reduce_sum(rand * parameter())
    gradients = tape.gradient(loss, parameter.variables)
    # Expect exactly two underlying variables, each with a nonzero gradient.
    self.assertLen(gradients, 2)
    self.assertNotAllClose(
        tf.zeros_like(gradients[0]), gradients[0], atol=1e-1, rtol=0)
    self.assertNotAllClose(
        tf.zeros_like(gradients[1]), gradients[1], atol=1e-1, rtol=0)
class GDNParameterTest(ParameterTest, tf.test.TestCase):
  """Runs the shared parameter checks against GDNParameter."""

  cls = parameters.GDNParameter
  kwargs = {"name": "gdn_parameter"}
  shape = (2, 1, 3)

  def test_initial_value_is_reproduced_with_minimum(self):
    init = tf.random.uniform(self.shape, dtype=tf.float32)
    param = self.cls(init, minimum=.5)
    # Values below the minimum should come back clipped up to it.
    self.assertAllClose(tf.maximum(init, .5), param(), atol=1e-6, rtol=0)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  tf.test.main()
| {
"content_hash": "2aac8b632ce9e02f7cc5f2df3a52de9a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 37.5,
"alnum_prop": 0.7101754385964912,
"repo_name": "tensorflow/compression",
"id": "ef161657891d2e14f452528e7d39f4549bfd49b4",
"size": "3527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_compression/python/layers/parameters_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "251322"
},
{
"name": "Jupyter Notebook",
"bytes": "128509"
},
{
"name": "Python",
"bytes": "575673"
},
{
"name": "Starlark",
"bytes": "14557"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `hovertextsrc` attribute of `densitymapbox` traces."""

    def __init__(self, plotly_name="hovertextsrc", parent_name="densitymapbox", **kwargs):
        # Pull overridable defaults out of kwargs before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(HovertextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "3488c46db976f703cb69a24e0a7e93ee",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 33.785714285714285,
"alnum_prop": 0.6046511627906976,
"repo_name": "plotly/python-api",
"id": "41b8e5c6f2ea3a2b360b0ec557eb790b927fcb71",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/_hovertextsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from slackclient import SlackClient
from flask import Flask, request, Response
from bs4 import BeautifulSoup
import requests
import json
import os
import time
# Flask application instance for the Slack slash-command endpoint below.
app = Flask(__name__)
# Upstream API that serves the live cricket scores (fetched as JSON).
API_URL = "https://criclive-api.herokuapp.com/"
def getHTML(url):
    """Fetch *url* and return the decoded JSON payload.

    NOTE(review): despite the name, this returns parsed JSON, not HTML.
    """
    return requests.get(url).json()
def score(team_score):
    """Return a printable score string, or "Not started" when missing.

    Scores from the API may contain HTML-escaped ampersands (e.g.
    "120/4 &amp; 56/1" for two-innings matches); decode them here.
    """
    if not team_score:
        return "Not started"
    # BUG FIX: the previous code did team_score.replace("&", "&"), a no-op;
    # the intent (per the variable name "formatted_score") was to decode
    # the "&amp;" HTML entity back to "&".
    return str(team_score.replace("&amp;", "&"))
def display(matches):
    """Build a Slack message dict summarising every match, grouped by category."""
    attachments = []
    for category in matches["data"]:
        fields = []
        for match in category["matches"]:
            team1, team2 = match["team1"], match["team2"]
            field = {
                "title": team1["name"] + "-" + score(team1["score"]) + " | " + team2["name"] + "-" + score(team2["score"]),
                "value": match["status"],
            }
            # Skip placeholder rows where neither team is named.
            if team1["name"] or team2["name"]:
                fields.append(field)
        if fields:
            attachments.append({
                "pretext": category["category"],
                "fields": fields,
                "mrkdwn_in": ["text", "pretext", "fields"],
            })
    return {
        "text": "Live report of all matches",
        "attachments": attachments,
    }
@app.route('/', methods=['POST'])
def main():
    """Slack webhook endpoint: fetch live scores and reply with a report."""
    payload = display(getHTML(API_URL))
    body = json.dumps(payload)
    return Response(body, content_type="text/plain; charset=utf-8")
if __name__ == '__main__':
    # Bind to the platform-provided port (e.g. Heroku's $PORT), default 5000.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port, debug=True)
| {
"content_hash": "7a4472718a26f059252d354eb8aa57bc",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 161,
"avg_line_length": 29.50769230769231,
"alnum_prop": 0.5615224191866528,
"repo_name": "shubh1m/Slack-Criclive",
"id": "b262d807b0111e4b9e56edbb6d38cb08964ffe5f",
"size": "1918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1918"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask import render_template
from flask import request
from algorithm import *
import yaml
app = Flask(__name__)
import logging
# Send DEBUG-level application logs to a local file.
logging.basicConfig(filename='example.log',level=logging.DEBUG)
@app.route('/')
def hello_world():
    """Trivial index route, usable as a liveness check."""
    return 'Hello, World!'
@app.route('/compute', methods=['GET', 'POST'])
def compute():
if request.method == 'GET':
return render_template('compute.html')
else:
input1 = request.form['input1']
app.logger.debug(input1)
print 'input1: ' + input1
input2 = request.form['input2']
app.logger.debug(input2)
print 'input2: ' + input2
input3 = request.form['input3']
app.logger.debug(input3)
print 'input3: ' + input3
yamlInput1 = yaml.safe_load(input1)
app.logger.debug(yamlInput1)
print 'yamlInput1: ' + str(yamlInput1)
print yamlInput1
yamlInput2 = yaml.safe_load(input2)
app.logger.debug(yamlInput2)
print 'yamlInput2: ' + str(yamlInput2)
print yamlInput2
yamlInput3 = yaml.safe_load(input3)
app.logger.debug(yamlInput3)
print 'yamlInput3: ' + str(yamlInput3)
print yamlInput3
result = func(yamlInput1, yamlInput2, yamlInput3)
print result
return render_template('compute.html', result=result)
| {
"content_hash": "6e58f6cd0bd0bf154ecf56f470529a51",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 63,
"avg_line_length": 25.055555555555557,
"alnum_prop": 0.6400591278640059,
"repo_name": "CagataySurkultay/HW1",
"id": "66e8bcd59536633f0bd14237cf7eb0bb2502fd86",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "395"
},
{
"name": "Makefile",
"bytes": "32"
},
{
"name": "Python",
"bytes": "2240"
}
],
"symlink_target": ""
} |
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
    """Return line *lineno* (1-based) of *filename*, or '' when out of range."""
    lines = getlines(filename, module_globals)
    return lines[lineno - 1] if 1 <= lineno <= len(lines) else ''
# The cache
# Maps filename -> (size, mtime, lines, fullname); mtime is None for
# entries populated through a module __loader__ (see updatecache below).
cache = {} # The cache
def clearcache():
    """Drop every cached entry by rebinding the module-level cache."""
    global cache
    cache = {}
def getlines(filename, module_globals=None):
    """Get the lines for a file from the cache, filling the cache on a miss."""
    try:
        # Cached entry is (size, mtime, lines, fullname); return the lines.
        return cache[filename][2]
    except KeyError:
        return updatecache(filename, module_globals)
def checkcache(filename=None):
    """Discard cache entries that are out of date.
    (This is not checked upon each call!)"""
    # NOTE: this is Python 2 code -- cache.keys() returns a list snapshot,
    # so deleting entries inside the loop below is safe.
    if filename is None:
        filenames = cache.keys()
    else:
        if filename in cache:
            filenames = [filename]
        else:
            # Unknown file: nothing to validate.
            return
    for filename in filenames:
        size, mtime, lines, fullname = cache[filename]
        if mtime is None:
            continue   # no-op for files loaded via a __loader__
        try:
            stat = os.stat(fullname)
        except os.error:
            # File vanished or is unreadable: drop the stale entry.
            del cache[filename]
            continue
        # Evict when the on-disk size or modification time changed.
        if size != stat.st_size or mtime != stat.st_mtime:
            del cache[filename]
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""
    if filename in cache:
        del cache[filename]
    # Reject empty names and pseudo-filenames like '<stdin>'.
    if not filename or filename[0] + filename[-1] == '<>':
        return []
    fullname = filename
    try:
        stat = os.stat(fullname)
    except os.error, msg:
        basename = os.path.split(filename)[1]
        # Try for a __loader__, if available
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)
            if name and get_source:
                # Only use the loader when the basename matches the module's
                # last path component (e.g. "pkg.mod" -> "mod.*").
                if basename.startswith(name.split('.')[-1]+'.'):
                    try:
                        data = get_source(name)
                    except (ImportError, IOError):
                        pass
                    else:
                        # Cache with mtime None so checkcache() skips it.
                        cache[filename] = (
                            len(data), None,
                            [line+'\n' for line in data.splitlines()], fullname
                        )
                        return cache[filename][2]
        # Try looking through the module search path.
        for dirname in sys.path:
            # When using imputil, sys.path may contain things other than
            # strings; ignore them when it happens.
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                pass
            else:
                try:
                    stat = os.stat(fullname)
                    break
                except os.error:
                    pass
        else:
            # No luck
##          print '*** Cannot stat', filename, ':', msg
            return []
    try:
        # 'rU' (universal newlines) is Python 2 spelling; each cached line
        # keeps its trailing '\n'.
        fp = open(fullname, 'rU')
        lines = fp.readlines()
        fp.close()
    except IOError, msg:
##      print '*** Cannot open', fullname, ':', msg
        return []
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
| {
"content_hash": "ff6c2163326e1d2cac02e434cc759252",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.5427018633540373,
"repo_name": "jjuanda/mechanize",
"id": "f49695ac1cec88c87b428e9d18a7324ffc79ce9a",
"size": "3864",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "test-tools/linecache_copy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4291"
},
{
"name": "JavaScript",
"bytes": "348"
},
{
"name": "Python",
"bytes": "1342076"
}
],
"symlink_target": ""
} |
import abc
class EntityChangeProcessor(object):
    """Plugin interface: receives notifications whenever an entity changes
    so related actions can be performed. Delivery may happen out of band
    (queue or batch job), so changes are not necessarily applied immediately.

    NOTE(review): without an ABCMeta metaclass, @abc.abstractmethod is not
    enforced at instantiation time.
    """

    @abc.abstractmethod
    def entity_changed(self, entity_id, operation):
        """Notify the plugin that an entity has changed. Only the ID is
        delivered; the plugin must query for the object itself."""
class RelationChangeProcessor(object):
    """Plugin interface: receives notifications whenever a relation changes
    so related actions can be performed. Delivery may happen out of band
    (queue or batch job), so changes are not necessarily applied immediately.
    """

    @abc.abstractmethod
    def relation_changed(self, relation_id, operation):
        """Notify the plugin that a relation has changed. Only the ID is
        delivered; the plugin must query for the object itself."""
class ProjectChangeProcessor(object):
    """Plugin interface: receives notifications whenever a project's settings
    change so related actions can be performed. Delivery may happen out of
    band (queue or batch job), so changes are not necessarily applied
    immediately.
    """

    @abc.abstractmethod
    def project_changed(self, project_slug, operation):
        """Notify the plugin that a project has changed. Only the slug is
        delivered; the plugin must query for the object itself."""
class SchemaChangeProcessor(object):
    """Plugin interface: receives notifications whenever a schema definition
    changes so related actions can be performed. Delivery may happen out of
    band (queue or batch job), so changes are not necessarily applied
    immediately.
    """

    @abc.abstractmethod
    def schema_changed(self, project_slug, schema_name, operation):
        """Notify the plugin that a schema has changed. Only the project slug
        and schema name are delivered; the plugin must query for the object
        itself."""
class Startup(object):
    """Plugin interface invoked when grano starts; lets plugins register
    additional functionality such as flask views.
    """

    @abc.abstractmethod
    def configure(self, manager):
        """Run once at startup."""
class Periodic(object):
    """Plugin interface invoked periodically for housekeeping jobs; the
    exact period depends on the deployment.
    """

    @abc.abstractmethod
    def run(self):
        """Execute one housekeeping pass."""
| {
"content_hash": "9647cb7a6457673e9520f4daa34b6f32",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 74,
"avg_line_length": 34.1948051948052,
"alnum_prop": 0.6961640714014432,
"repo_name": "4bic/grano",
"id": "9963b3d4e02755765424c17fa31c9c2ddfd4381a",
"size": "2633",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "grano/interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "100"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "237445"
}
],
"symlink_target": ""
} |
import sys
import functools
from typing import Any, Callable, IO, Mapping, Sequence, TypeVar, Text
def get_mapping_type_str(x: Mapping[Any, Any]) -> str:
    """Render a mapping as a type sketch based on its first key/value pair."""
    container = type(x).__name__
    is_plain_dict = container == 'dict'
    if not x:
        return '{}' if is_plain_dict else container + '([])'
    first_key = next(iter(x))
    key_type = get_type_str(first_key)
    value_type = get_type_str(x[first_key])
    # An ellipsis marks that further items exist but are not shown.
    elide = len(x) > 1
    if is_plain_dict:
        template = '{%s: %s, ...}' if elide else '{%s: %s}'
        return template % (key_type, value_type)
    template = '%s([(%s, %s), ...])' if elide else '%s([(%s, %s)])'
    return template % (container, key_type, value_type)
def get_sequence_type_str(x: Sequence[Any]) -> str:
    """Render a sequence as a type sketch based on its first element."""
    container = type(x).__name__
    is_plain_list = container == 'list'
    if not x:
        return '[]' if is_plain_list else container + '([])'
    elem_type = get_type_str(x[0])
    # An ellipsis marks that further items exist but are not shown.
    elide = len(x) > 1
    if is_plain_list:
        return '[' + elem_type + ', ...]' if elide else '[' + elem_type + ']'
    template = '%s([%s, ...])' if elide else '%s([%s])'
    return template % (container, elem_type)
# Sequence types that must be reported atomically by get_type_str rather
# than expanded element-by-element.
expansion_blacklist = [Text, bytes]
def get_type_str(x: Any) -> str:
    """Return a concise, human-readable description of *x*'s type."""
    if x is None:
        return 'None'
    if isinstance(x, tuple):
        inner = ', '.join(get_type_str(item) for item in x)
        # A one-tuple needs the trailing comma to read as a tuple.
        if len(x) == 1:
            return '(' + inner + ',)'
        return '(' + inner + ')'
    if isinstance(x, Mapping):
        return get_mapping_type_str(x)
    if isinstance(x, Sequence) and not any(isinstance(x, t) for t in expansion_blacklist):
        return get_sequence_type_str(x)
    return type(x).__name__
# Type variable bound to any callable; lets the decorators below preserve
# the decorated function's type for static checkers.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
def print_types_to(file_obj: IO[str]) -> Callable[[FuncT], FuncT]:
    """Decorator factory: print each call's argument and return types to *file_obj*."""
    def decorator(func: FuncT) -> FuncT:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            positional = [get_type_str(arg) for arg in args]
            keyword = ['%s=%s' % (key, get_type_str(value)) for key, value in kwargs.items()]
            ret_val = func(*args, **kwargs)
            line = "%s(%s) -> %s" % (func.__name__,
                                     ", ".join(positional + keyword),
                                     get_type_str(ret_val))
            print(line, file=file_obj)
            return ret_val
        return wrapper  # type: ignore # https://github.com/python/mypy/issues/1927
    return decorator
def print_types(func: FuncT) -> FuncT:
    """Shorthand for decorating with print_types_to(sys.stdout)."""
    decorate = print_types_to(sys.stdout)
    return decorate(func)
| {
"content_hash": "83e7f0c2a54967c86fdd03b21e700440",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 92,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.508298755186722,
"repo_name": "mahim97/zulip",
"id": "36bce2194f29a79e465aa126a53d312e2c782279",
"size": "2893",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "zerver/lib/type_debug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
# URL routes; string view references are the pre-Django-1.10 style.
urlpatterns = [
    # Examples:
    url(r'^$', 'newsletter.views.home', name='home'),
    url(r'^contact/$', 'newsletter.views.contact', name='contact'),
    url(r'^about/$', 'trydjango18.views.about', name='about'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', admin.site.urls),
    # Third-party registration app's auth/account flows.
    url(r'^accounts/', include('registration.backends.default.urls')),
]
if settings.DEBUG:
    # Serve static and media files directly only in development.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"content_hash": "267ac8ef43c45f82c2e213c77834fc7b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 35.15,
"alnum_prop": 0.7041251778093883,
"repo_name": "arshinator/trydjango-website",
"id": "7a13d5b8916764697a1216503221d205f500b904",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/trydjango18/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "CSS",
"bytes": "43338"
},
{
"name": "HTML",
"bytes": "14099"
},
{
"name": "JavaScript",
"bytes": "79091"
},
{
"name": "Python",
"bytes": "21537"
}
],
"symlink_target": ""
} |
"""Tests for grr.parsers.ie_history."""
import datetime
import os
import StringIO
from grr.lib import flags
from grr.lib import test_lib
from grr.parsers import ie_history
class IEHistoryTest(test_lib.GRRBaseTest):
  """Test parsing of IE (index.dat) history files."""
  # NOTE(review): the original docstring said "chrome history files",
  # but this exercises ie_history.IEParser on an index.dat fixture.
  def testBasicParsing(self):
    """Test we can parse a standard file."""
    hist_file = os.path.join(self.base_path, "index.dat")
    c = ie_history.IEParser(open(hist_file, "rb"))
    entries = [x for x in c.Parse()]
    # Check that our results are properly time ordered
    time_results = [x["mtime"] for x in entries]
    self.assertEqual(time_results, sorted(time_results))
    self.assertEqual(entries[1]["url"],
                     "Visited: testing@http://www.google.com/chrome/chrome"
                     "/eula.html")
    # ctime values are microseconds since the epoch (hence the / 1e6).
    dt1 = datetime.datetime.utcfromtimestamp(entries[1]["ctime"] / 1e6)
    self.assertEqual(str(dt1), "2009-12-11 17:55:46.968000")
    dt2 = datetime.datetime.utcfromtimestamp(entries[-1]["ctime"] / 1e6)
    self.assertEqual(str(dt2), "2011-06-23 18:57:24.250000")
    self.assertEqual(entries[-1]["url"],
                     "Visited: testing@mshelp://windows/?id=d063548a-3fc9-"
                     "4723-99f3-b12a0c4354a8")
    self.assertEqual(len(entries), 18)
  def testErrors(self):
    """Test empty files don't raise errors."""
    c = ie_history.IEParser(StringIO.StringIO())
    entries = [x for x in c.Parse()]
    self.assertEqual(len(entries), 0)
def main(argv):
  # Hand off to the GRR test harness.
  test_lib.main(argv)
if __name__ == "__main__":
  flags.StartMain(main)  # parses flags, then invokes main(argv)
| {
"content_hash": "4ee79aa487cb87e8ed7476e864a14c0d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 30.627450980392158,
"alnum_prop": 0.647247119078105,
"repo_name": "destijl/grr",
"id": "a47aae6bc0b25fa5c58c95a9dc91930c6251ec66",
"size": "1584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/parsers/ie_history_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304794"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26524"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "173692"
},
{
"name": "JavaScript",
"bytes": "63181"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "307091"
},
{
"name": "Python",
"bytes": "6407750"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40334"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import os
import shutil
import sys
import unittest
# Make the package under test importable from this tests/ directory.
sys.path.append("..")
# Force English messages -- presumably so assertions on entrymgr output
# are locale-independent; confirm against entrymgr's translation setup.
os.environ['LANGUAGE'] = 'en'
import entrymgr
class EntryMgrFakeJournalTestCase(unittest.TestCase):
    # Base fixture: each test runs with the cwd inside a fresh
    # tests/fakejournal sandbox, which is deleted again on teardown.
    def setUp(self):
        entrymgr.ensure_directory_exists("tests/fakejournal")
        os.chdir("tests/fakejournal")
    def tearDown(self):
        # Remove the above directory.
        os.chdir("../..")
        shutil.rmtree("tests/fakejournal")
class EnsureDirectoryExists(unittest.TestCase):
    """entrymgr.ensure_directory_exists() creates nested date directories."""

    _date = "2013/09/23"
    _target_test_directory = "tests/fakejournal/"

    def runTest(self):
        target = self._target_test_directory + self._date
        entrymgr.ensure_directory_exists(target)
        self.assertTrue(os.path.isdir(target))

    def tearDown(self):
        # Clean up the whole sandbox tree.
        shutil.rmtree(self._target_test_directory)
class FormulateDateStructureTestCase(unittest.TestCase):
    """formulate_directory_structure() renders dates as YYYY/MM/DD."""

    def runTest(self):
        when = datetime.datetime(2004, 2, 12)
        self.assertEqual(entrymgr.formulate_directory_structure(when),
                         "2004/02/12")
class CheckLicenseTestCase(unittest.TestCase):
    """Sanity check on module metadata (and that unittest itself works)."""

    def runTest(self):
        self.assertEqual(entrymgr.__license__, "BSD")
class GenerateDatestampTestCase(unittest.TestCase):
    """generate_datestamp() parses YYYY/MM/DD strings and rejects bad dates."""

    def test_datetime_function_return_valid(self):
        expected = datetime.datetime(2018, 12, 25)
        self.assertEqual(entrymgr.generate_datestamp("2018/12/25"), expected)

    def test_invalid_date_input_caught(self):
        # Month 13 is invalid and must raise.
        with self.assertRaises(ValueError):
            entrymgr.generate_datestamp("2008/13/20")
class CheckEntryExistsTestCase(EntryMgrFakeJournalTestCase):
    """check_entry_exists() flips from False to True once the entry is created."""

    _entry_title = "Checking Entry Exists"
    _entry_date = entrymgr.generate_datestamp("2000/01/01")

    def runTest(self):
        path = "2000/01/01/checking-entry-exists.md"
        self.assertFalse(entrymgr.check_entry_exists(path))
        entrymgr.create_entry(self._entry_title, self._entry_date)
        self.assertTrue(entrymgr.check_entry_exists(path))
class EntryLifeCycleTestCase(EntryMgrFakeJournalTestCase):
    # Ensure that we can create an entry.
    _entry_title = "Testing Lifecycle"
    _entry_date = entrymgr.generate_datestamp("2013/05/18")
    # Entries are Setext-style markdown: title underlined with '='.
    _target_result = "Testing Lifecycle\n=================\n"
    def runTest(self):
        target_filepath = "2013/05/18/testing-lifecycle.md"
        # Ensure that we can create an entry.
        entrymgr.create_entry(self._entry_title, self._entry_date)
        # Make sure contents are sane.
        entry_file = open(target_filepath, 'r')
        entry_text = entry_file.read()
        entry_file.close()
        self.assertEqual(entry_text, self._target_result)
        self.assertTrue(os.path.isfile(target_filepath))
        # Can we create an entry with the same name and date as an existing one?
        with self.assertRaises(argparse.ArgumentTypeError) as test_exception:
            entrymgr.create_entry(self._entry_title, self._entry_date)
        self.assertEqual(test_exception.exception.message,
                         "Entry with filename '%s' exists." % target_filepath)
        # Can we delete the entry?
        entrymgr.delete_entry(self._entry_title, self._entry_date)
        # Is the file gone?
        self.assertFalse(os.path.isfile(target_filepath))
        # Is the entry's directory still around?
        self.assertFalse(os.path.isdir("2013/05/18"))
class ExpungeEmptyDirectoryTestCase(EntryMgrFakeJournalTestCase):
    """Empty date directories are pruned all the way up the YYYY/MM/DD tree."""

    _entry_titles = ["Expunging test", "Post number 2"]
    _date_as_string = "2019/03/25"
    _entry_date = entrymgr.generate_datestamp(_date_as_string)

    def test_upward_delete_as_children_go(self):
        """Deleting the last entry removes the day, month and year directories."""
        year, month, day = entrymgr.split_datestamp_string(self._date_as_string)
        # Create two entries for the same date.
        for title in self._entry_titles:
            entrymgr.create_entry(title, self._entry_date)
        for title in self._entry_titles:
            self.assertEqual(
                len(os.listdir(self._date_as_string)),
                len(self._entry_titles) - self._entry_titles.index(title))
            self.assertTrue(os.path.isdir(self._date_as_string))
            entrymgr.delete_entry(title, self._entry_date)
        self.assertFalse(os.path.isdir(self._date_as_string))
        self.assertFalse(os.path.isdir("%s/%s" % (year, month)))
        self.assertFalse(os.path.isdir("%s" % year))

    def test_directory_with_children_unremovable(self):
        """A date directory that still holds entries survives expunge attempts."""
        year, month, day = entrymgr.split_datestamp_string(self._date_as_string)
        # Create two entries for the same date.
        for title in self._entry_titles:
            entrymgr.create_entry(title, self._entry_date)
        for title in self._entry_titles:
            # BUG FIX: was `index(title) is 0` -- identity comparison with an
            # int only happens to work via CPython small-int caching; use ==.
            if self._entry_titles.index(title) == 0:
                self.assertEqual(len(os.listdir(self._date_as_string)), 2)
                self.assertEqual(len(os.listdir("%s/%s" % (year, month))), 1)
        entrymgr.expunge_directory_if_empty(self._date_as_string)
        # The non-empty directory must be left untouched.
        self.assertEqual(len(os.listdir(self._date_as_string)), 2)
        self.assertEqual(len(os.listdir("%s/%s" % (year, month))), 1)

    def test_different_months_removals(self):
        """Removing one entry only removes its own month directory."""
        entry_dates = [entrymgr.generate_datestamp("2013/05/16"),
                       entrymgr.generate_datestamp("2013/06/16"),
                       entrymgr.generate_datestamp("2013/07/16")]
        # BUG FIX: previously this appended to / removed from the shared class
        # attribute _entry_titles, leaking state into other tests if an
        # assertion failed mid-test; use a local list instead.
        titles = self._entry_titles + ["Third entry"]
        # Create three entries with the dates above.
        for index, title in enumerate(titles):
            entrymgr.create_entry(title, entry_dates[index])
        # We have three entries with three different months in one year.
        self.assertEqual(len(os.listdir("2013")), 3)
        # Remove an entry.
        entrymgr.delete_entry(titles[1], entry_dates[1])
        # We should have two directories under '2013'.
        self.assertEqual(len(os.listdir("2013")), 2)
class SplitDatestampStringTestCase(unittest.TestCase):
    """Check that a YYYY/MM/DD datestamp splits into its three components."""

    def runTest(self):
        expected_parts = ["2019", "03", "30"]
        actual_parts = entrymgr.split_datestamp_string("2019/03/30")
        # NOTE(review): assertItemsEqual is Python 2 only (renamed
        # assertCountEqual in Python 3); kept for consistency with the
        # rest of this suite.
        self.assertItemsEqual(list(actual_parts), expected_parts)
class FormulateEntryFilenameTestCase(unittest.TestCase):
    """Tests for entrymgr.formulate_entry_filename's title-to-filename rules."""

    def test_filename_always_lowercase(self):
        test_entry_name = "Title"
        result = entrymgr.formulate_entry_filename(test_entry_name)
        self.assertEqual(test_entry_name.lower() + ".md", result)

    def test_filename_spaces_are_dashes(self):
        test_entry_name = "Jackie Cane"
        result = entrymgr.formulate_entry_filename(test_entry_name)
        self.assertEqual("jackie-cane.md", result)

    def test_filename_extension_exists(self):
        test_entry_name = "Dirty Lenses"
        result = entrymgr.formulate_entry_filename(test_entry_name)
        self.assertEqual(result[-3:], '.md')

    def test_filename_entry_name_not_none(self):
        # An empty title must be rejected.  The original bound the return
        # value and the exception context to names it never used; the
        # bare assertRaises form says exactly what is being checked.
        with self.assertRaises(ValueError):
            entrymgr.formulate_entry_filename("")
| {
"content_hash": "df00a3e4f2223439a59175baaa23c903",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 80,
"avg_line_length": 37.72463768115942,
"alnum_prop": 0.6542451018056089,
"repo_name": "jpds/entrymgr",
"id": "de7384801e1020ad078c7de87dcd7de1dfe1170a",
"size": "9265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_entrymgr.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17149"
},
{
"name": "Shell",
"bytes": "5188"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
# urlparse lived in the top-level `urlparse` module on Python 2 and moved
# to urllib.parse in Python 3; try the modern location first.
try:
    from urllib.parse import urlparse
except ImportError:
    # Narrowed from a bare `except:`, which would also have masked
    # unrelated failures (SyntaxError, KeyboardInterrupt, ...).
    from urlparse import urlparse
def populate_domain(apps, schema_editor):
    """Backfill Thread.domain with the hostname parsed from Thread.link.

    Runs as a RunPython data migration; threads without a link are left
    untouched.
    """
    Thread = apps.get_model('askapp', 'Thread')
    for record in Thread.objects.all():
        if not record.link:
            continue
        record.domain = urlparse(record.link).netloc
        record.save()
class Migration(migrations.Migration):
    """Data migration that runs populate_domain over existing Thread rows."""

    dependencies = [
        # Presumably the migration that added the `domain` field — verify
        # against 0015_thread_domain in this app.
        ('askapp', '0015_thread_domain'),
    ]
    operations = [
        # Reverse operation is a no-op: rolling back leaves the populated
        # domains in place rather than clearing them.
        migrations.RunPython(populate_domain, migrations.RunPython.noop)
    ]
| {
"content_hash": "05c5f1d8c75b26ee88e93a6138d069c7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 24.074074074074073,
"alnum_prop": 0.6584615384615384,
"repo_name": "BanzaiTokyo/akihabara-tokyo",
"id": "ba5c048c2ae8f70d9caceb84a295d82dccfad554",
"size": "674",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "askapp/migrations/0016_populate_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1344"
},
{
"name": "HTML",
"bytes": "38652"
},
{
"name": "Python",
"bytes": "40658"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.