text stringlengths 4 1.02M | meta dict |
|---|---|
from django.db import models
from datetime import datetime, date
from django.contrib.auth.models import User
# Create your models here.
class XUser(User):
    """Extends Django's built-in ``User`` with basic medical profile fields."""

    date_of_birth = models.DateField("Date of Birth")
    blood_type = models.CharField(max_length=5)
    allergies = models.CharField(max_length=500)
    comments = models.CharField(max_length=500)

    def __str__(self):
        # Separate the names with a space; plain concatenation ran them
        # together (e.g. "JaneDoe").
        return '{} {}'.format(self.first_name, self.last_name)

    def close(self):
        """Deactivate the account (soft delete) and persist the change."""
        self.is_active = False
        self.save()
| {
"content_hash": "8efdae8a4e6bd69f35e876a1c5ba55db",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 26.263157894736842,
"alnum_prop": 0.6913827655310621,
"repo_name": "Thyri/TouchSave",
"id": "489dabc9cafba54402fbaac4086d3c87cbce1087",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TouchSave_App/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7320"
},
{
"name": "Python",
"bytes": "10791"
}
],
"symlink_target": ""
} |
"""Accesses the google.datastore.v1 Datastore API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
from google.gax.utils import oneof
import google.gax
from google.cloud.gapic.datastore.v1 import enums
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
class DatastoreClient(object):
    """A gRPC client wrapping the google.datastore.v1 Datastore API.

    Each RPC normalizes the partition IDs of the keys in its input entities,
    and always returns entities with keys with normalized partition IDs.
    This applies to all keys and entities, including those in values, except keys
    with both an empty path and an empty or unset partition ID. Normalization of
    input keys sets the project ID (if not already set) to the project ID from
    the request.
    """

    SERVICE_ADDRESS = 'datastore.googleapis.com'
    """The default address of the service."""

    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/datastore', )

    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 credentials=None,
                 ssl_credentials=None,
                 scopes=None,
                 client_config=None,
                 app_name=None,
                 app_version='',
                 lib_name=None,
                 lib_version='',
                 metrics_headers=()):
        """Constructor.

        Args:
            service_path (string): The domain name of the API remote host.
            port (int): The port on which to connect to the remote host.
            channel (:class:`grpc.Channel`): A ``Channel`` instance through
                which to make calls.
            credentials (object): The authorization credentials to attach to
                requests. These credentials identify this application to the
                service.
            ssl_credentials (:class:`grpc.ChannelCredentials`): A
                ``ChannelCredentials`` instance for use with an SSL-enabled
                channel.
            scopes (list[string]): A list of OAuth2 scopes to attach to requests.
            client_config (dict):
                A dictionary for call options for each method. See
                :func:`google.gax.construct_settings` for the structure of
                this data. Falls back to the default config if not specified
                or the specified config is missing data points.
            app_name (string): The name of the application calling
                the service. Recommended for analytics purposes.
            app_version (string): The version of the application calling
                the service. Recommended for analytics purposes.
            lib_name (string): The API library software used for calling
                the service. (Unless you are writing an API client itself,
                leave this as default.)
            lib_version (string): The API library software version used
                for calling the service. (Unless you are writing an API client
                itself, leave this as default.)
            metrics_headers (dict): A dictionary of values for tracking
                client library metrics. Ultimately serializes to a string
                (e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
                considered private.

        Returns:
            A DatastoreClient object.
        """
        # Unless the calling application specifically requested
        # OAuth scopes, request everything.
        if scopes is None:
            scopes = self._ALL_SCOPES

        # Initialize an empty client config, if none is set.
        if client_config is None:
            client_config = {}

        # Initialize metrics_headers as an ordered dictionary
        # (cuts down on cardinality of the resulting string slightly).
        metrics_headers = collections.OrderedDict(metrics_headers)
        metrics_headers['gl-python'] = platform.python_version()

        # The library may or may not be set, depending on what is
        # calling this client. Newer client libraries set the library name
        # and version.
        if lib_name:
            metrics_headers[lib_name] = lib_version

        # Finally, track the GAPIC package version.
        metrics_headers['gapic'] = pkg_resources.get_distribution(
            'gapic-google-cloud-datastore-v1', ).version

        # Load the configuration defaults.
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'datastore_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.datastore.v1.Datastore',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            metrics_headers=metrics_headers, )

        # Create the gRPC stub that every RPC method below goes through.
        self.datastore_stub = config.create_stub(
            datastore_pb2.DatastoreStub,
            channel=channel,
            service_path=service_path,
            service_port=port,
            credentials=credentials,
            scopes=scopes,
            ssl_credentials=ssl_credentials)

        # Wrap each stub method with the retry/timeout settings resolved above.
        self._lookup = api_callable.create_api_call(
            self.datastore_stub.Lookup, settings=defaults['lookup'])
        self._run_query = api_callable.create_api_call(
            self.datastore_stub.RunQuery, settings=defaults['run_query'])
        self._begin_transaction = api_callable.create_api_call(
            self.datastore_stub.BeginTransaction,
            settings=defaults['begin_transaction'])
        self._commit = api_callable.create_api_call(
            self.datastore_stub.Commit, settings=defaults['commit'])
        self._rollback = api_callable.create_api_call(
            self.datastore_stub.Rollback, settings=defaults['rollback'])
        self._allocate_ids = api_callable.create_api_call(
            self.datastore_stub.AllocateIds, settings=defaults['allocate_ids'])

    # Service calls
    def lookup(self, project_id, read_options, keys, options=None):
        """
        Looks up entities by key.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> from google.cloud.proto.datastore.v1 import datastore_pb2
            >>> from google.cloud.proto.datastore.v1 import entity_pb2
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> read_options = datastore_pb2.ReadOptions()
            >>> keys = []
            >>> response = api.lookup(project_id, read_options, keys)

        Args:
            project_id (string): The ID of the project against which to make the request.
            read_options (:class:`google.cloud.proto.datastore.v1.datastore_pb2.ReadOptions`): The options for this lookup request.
            keys (list[:class:`google.cloud.proto.datastore.v1.entity_pb2.Key`]): Keys of entities to look up.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.LookupResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = datastore_pb2.LookupRequest(
            project_id=project_id, read_options=read_options, keys=keys)
        return self._lookup(request, options)

    def run_query(self,
                  project_id,
                  partition_id,
                  read_options,
                  query=None,
                  gql_query=None,
                  options=None):
        """
        Queries for entities.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> from google.cloud.proto.datastore.v1 import datastore_pb2
            >>> from google.cloud.proto.datastore.v1 import entity_pb2
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> partition_id = entity_pb2.PartitionId()
            >>> read_options = datastore_pb2.ReadOptions()
            >>> response = api.run_query(project_id, partition_id, read_options)

        Args:
            project_id (string): The ID of the project against which to make the request.
            partition_id (:class:`google.cloud.proto.datastore.v1.entity_pb2.PartitionId`): Entities are partitioned into subsets, identified by a partition ID.
                Queries are scoped to a single partition.
                This partition ID is normalized with the standard default context
                partition ID.
            read_options (:class:`google.cloud.proto.datastore.v1.datastore_pb2.ReadOptions`): The options for this query.
            query (:class:`google.cloud.proto.datastore.v1.query_pb2.Query`): The query to run.
            gql_query (:class:`google.cloud.proto.datastore.v1.query_pb2.GqlQuery`): The GQL query to run.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.RunQueryResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Sanity check: We have some fields which are mutually exclusive;
        # raise ValueError if more than one is sent.
        oneof.check_oneof(
            query=query,
            gql_query=gql_query, )

        # Create the request object.
        request = datastore_pb2.RunQueryRequest(
            project_id=project_id,
            partition_id=partition_id,
            read_options=read_options,
            query=query,
            gql_query=gql_query)
        return self._run_query(request, options)

    def begin_transaction(self, project_id, options=None):
        """
        Begins a new transaction.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> response = api.begin_transaction(project_id)

        Args:
            project_id (string): The ID of the project against which to make the request.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.BeginTransactionResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = datastore_pb2.BeginTransactionRequest(project_id=project_id)
        return self._begin_transaction(request, options)

    def commit(self,
               project_id,
               mode,
               mutations,
               transaction=None,
               options=None):
        """
        Commits a transaction, optionally creating, deleting or modifying some
        entities.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> from google.cloud.gapic.datastore.v1 import enums
            >>> from google.cloud.proto.datastore.v1 import datastore_pb2
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
            >>> mutations = []
            >>> response = api.commit(project_id, mode, mutations)

        Args:
            project_id (string): The ID of the project against which to make the request.
            mode (enum :class:`google.cloud.gapic.datastore.v1.enums.CommitRequest.Mode`): The type of commit to perform. Defaults to ``TRANSACTIONAL``.
            transaction (bytes): The identifier of the transaction associated with the commit. A
                transaction identifier is returned by a call to
                ``Datastore.BeginTransaction``.
            mutations (list[:class:`google.cloud.proto.datastore.v1.datastore_pb2.Mutation`]): The mutations to perform.
                When mode is ``TRANSACTIONAL``, mutations affecting a single entity are
                applied in order. The following sequences of mutations affecting a single
                entity are not permitted in a single ``Commit`` request:
                - ``insert`` followed by ``insert``
                - ``update`` followed by ``insert``
                - ``upsert`` followed by ``insert``
                - ``delete`` followed by ``update``
                When mode is ``NON_TRANSACTIONAL``, no two mutations may affect a single
                entity.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.CommitResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Sanity check: We have some fields which are mutually exclusive;
        # raise ValueError if more than one is sent.
        oneof.check_oneof(transaction=transaction, )

        # Create the request object.
        request = datastore_pb2.CommitRequest(
            project_id=project_id,
            mode=mode,
            mutations=mutations,
            transaction=transaction)
        return self._commit(request, options)

    def rollback(self, project_id, transaction, options=None):
        """
        Rolls back a transaction.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> transaction = b''
            >>> response = api.rollback(project_id, transaction)

        Args:
            project_id (string): The ID of the project against which to make the request.
            transaction (bytes): The transaction identifier, returned by a call to
                ``Datastore.BeginTransaction``.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.RollbackResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = datastore_pb2.RollbackRequest(
            project_id=project_id, transaction=transaction)
        return self._rollback(request, options)

    def allocate_ids(self, project_id, keys, options=None):
        """
        Allocates IDs for the given keys, which is useful for referencing an entity
        before it is inserted.

        Example:
            >>> from google.cloud.gapic.datastore.v1 import datastore_client
            >>> from google.cloud.proto.datastore.v1 import entity_pb2
            >>> api = datastore_client.DatastoreClient()
            >>> project_id = ''
            >>> keys = []
            >>> response = api.allocate_ids(project_id, keys)

        Args:
            project_id (string): The ID of the project against which to make the request.
            keys (list[:class:`google.cloud.proto.datastore.v1.entity_pb2.Key`]): A list of keys with incomplete key paths for which to allocate IDs.
                No key may be reserved/read-only.
            options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g, timeout, retries etc.

        Returns:
            A :class:`google.cloud.proto.datastore.v1.datastore_pb2.AllocateIdsResponse` instance.

        Raises:
            :exc:`google.gax.errors.GaxError` if the RPC is aborted.
            :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = datastore_pb2.AllocateIdsRequest(
            project_id=project_id, keys=keys)
        return self._allocate_ids(request, options)
| {
"content_hash": "b5925c5a761e6e164e3efd6be75ae7ed",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 158,
"avg_line_length": 43.13246753246753,
"alnum_prop": 0.6232686980609419,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "f3bce200828d5f94386eb503fcb37e647622e54b",
"size": "17654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/gapic/datastore/v1/datastore_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
'''
Tashlin Reddy
August 2020

Fuzzy Match strings in Columns of CSV file
'''
# Read in dependencies.
import pandas as pd
import numpy as np
from fuzzywuzzy import fuzz


def fuzzy_match_col(col_num):
    """Fuzzy-deduplicate the comma-separated keyword lists in one column.

    Normalizes each word (strip, spaces->underscores, Capitalized), then
    replaces near-duplicates in later rows (fuzz ratio > 70, but not exact
    matches) with the first spelling seen. Returns a list of cleaned,
    comma-joined strings, one per row of the column.
    """
    keyword_col = form.iloc[:, col_num]
    new_lst = []
    for keyword_lst in keyword_col:
        new_wrds = []
        try:
            for word in keyword_lst.split(','):
                word = word.strip()
                word = word.replace(" ", "_")
                word = word.lower().capitalize()
                if word != '':
                    new_wrds.append(word)
        except AttributeError:
            # Non-string cells (e.g. NaN) have no .split().
            new_wrds.append('NAN')
        new_lst.append(new_wrds)
    # Replace near-matches in later rows with the earlier spelling.
    for i in range(1, len(new_lst)):
        for word_list in new_lst[:i]:
            for word in word_list:
                for match in new_lst[i]:
                    score = fuzz.ratio(word, match)
                    if score > 70 and score != 100:
                        new_lst[i] = [w.replace(match, word) for w in new_lst[i]]
    matched_words = [', '.join(element) for element in new_lst]
    cleaned_words = [w.replace('_', ' ') for w in matched_words]
    return cleaned_words


# Read in csv form.
form = pd.read_csv('form.csv')
#form = pd.read_csv('https...')

# Iterate over each column and check if there is a list to fuzzy match.
# NOTE: fuzzy_match_col must be defined before this loop runs; the original
# called it before its definition, and the bare `except:` silently swallowed
# the resulting NameError, so matching never actually happened.
df_new = pd.DataFrame()
for col_num in range(0, form.shape[1]):
    title = form.columns[col_num]
    try:
        if form.iloc[:, col_num].str.contains(',').any():
            df_new[title] = fuzzy_match_col(col_num)
        else:
            df_new[title] = form.iloc[:, col_num]
    except AttributeError:
        # Non-string columns do not support the .str accessor.
        df_new[title] = form.iloc[:, col_num]

# Export new matched csv file.
df_new.to_csv("fuzz_matched.csv")
| {
"content_hash": "3eb5e53800a770ef702ef4e468849c23",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 26.84931506849315,
"alnum_prop": 0.5413265306122449,
"repo_name": "TheBrane/sodi-data-acquisition",
"id": "66c7d3282d85ba127113c264105300424936e998",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_matching/fuzzy_match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "331821"
},
{
"name": "Python",
"bytes": "920049"
}
],
"symlink_target": ""
} |
"""Tests for moma_option."""
from unittest import mock
from absl.testing import absltest
import dm_env
from dm_robotics import agentflow as af
from dm_robotics.moma import moma_option
import numpy as np
class MomaOptionTest(absltest.TestCase):

  def _create_agentflow_lambda_option(self, on_step_func):
    """Returns a one-step LambdaOption invoking `on_step_func` on each step."""
    fixed_op = af.FixedOp(
        action=np.array([], dtype=np.float64),
        num_steps=1,
        name='test_option')
    return af.LambdaOption(delegate=fixed_op, on_step_func=on_step_func)

  def test_sequence_of_options(self):
    """This tests a sequence of MomaOption and af.coreOption."""
    options_stepped = []

    def build_option_name_appender(option_name):
      def append_option_name(timestep):
        # When af.Sequence switches from one option to another, the
        # previous option gets one more "last" timestep sent to it.
        # Ignore this in our counting, since we just want to ensure
        # that both options are stepped.
        if not timestep.last():
          options_stepped.append(option_name)
      return append_option_name

    first_option = moma_option.MomaOption(
        physics_getter=mock.MagicMock(),
        effectors=[],
        delegate=self._create_agentflow_lambda_option(
            build_option_name_appender('first')))
    second_option = self._create_agentflow_lambda_option(
        build_option_name_appender('second'))
    option_to_test = moma_option.MomaOption(
        physics_getter=mock.MagicMock(),
        effectors=[],
        delegate=af.Sequence(
            [first_option, second_option],
            allow_stepping_after_terminal=False))

    # Reward/discount/observation values are irrelevant to this test.
    common_kwargs = dict(
        reward=0,
        discount=0,
        observation=np.array([], dtype=np.float64))

    option_to_test.step(
        dm_env.TimeStep(dm_env.StepType.FIRST, **common_kwargs))
    self.assertEqual(options_stepped, ['first'])

    option_to_test.step(
        dm_env.TimeStep(dm_env.StepType.MID, **common_kwargs))
    self.assertEqual(options_stepped, ['first', 'second'])
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| {
"content_hash": "6035090a1f54809f802bbce24292bc68",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 70,
"avg_line_length": 31.95774647887324,
"alnum_prop": 0.628470691934773,
"repo_name": "deepmind/dm_robotics",
"id": "63b074da21412de121d31be2104016da62bac69b",
"size": "2865",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/moma/moma_option_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "479450"
},
{
"name": "CMake",
"bytes": "34173"
},
{
"name": "Jupyter Notebook",
"bytes": "106284"
},
{
"name": "Python",
"bytes": "1413203"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
} |
from u2flib_host.constants import APDU_OK, INS_GET_VERSION
from u2flib_host.yubicommon.compat import int2byte
from u2flib_host import exc
import struct
class U2FDevice(object):
    """
    A handle to a U2F device.

    device.open() needs to be called prior to using the device, and
    device.close() should be called when the device is no longer needed, to
    ensure that any held resources are released.

    As an alternative, the 'with' statement can be used:

        with device as dev:
            dev.send_apdu(...)
    """

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __del__(self):
        # Best-effort cleanup in case the caller never called close().
        self.close()

    def open(self):
        """
        Opens the device for use.
        """
        pass

    def close(self):
        """
        Closes the device, making it available for use by others.
        """
        pass

    def get_supported_versions(self):
        """
        Gets a list of supported U2F versions from the device.

        The result is queried once and cached on the instance as
        ``self._versions``.
        """
        if not hasattr(self, '_versions'):
            try:
                self._versions = [self.send_apdu(INS_GET_VERSION).decode()]
            except exc.APDUError as e:
                # v0 didn't support the instruction.
                self._versions = ['v0'] if e.code == 0x6d00 else []
        return self._versions

    def _do_send_apdu(self, apdu_data):
        """
        Sends an APDU to the device, and returns the response.
        """
        # Subclasses should implement this.
        raise NotImplementedError('_do_send_apdu not implemented!')

    def send_apdu(self, ins, p1=0, p2=0, data=b''):
        """
        Sends an APDU to the device, and waits for a response.

        :param ins: instruction byte.
        :param p1: first parameter byte.
        :param p2: second parameter byte.
        :param data: command payload as bytes (an int is treated as one byte).
        :return: the response data with the trailing status word stripped.
        :raises exc.DeviceError: if the underlying transport raises.
        :raises exc.APDUError: if the device returns a non-OK status word.
        """
        if data is None:
            data = b''
        elif isinstance(data, int):
            data = int2byte(data)
        # Frame the command: CLA=0x00, INS, P1, P2, a 3-byte big-endian
        # payload length (l0 l1 l2), the payload, then two trailing zero
        # bytes (presumably the extended-length Le field — confirm against
        # the U2F raw message spec).
        size = len(data)
        l0 = size >> 16 & 0xff
        l1 = size >> 8 & 0xff
        l2 = size & 0xff
        apdu_data = struct.pack('B B B B B B B %is B B' % size,
                                0, ins, p1, p2, l0, l1, l2, data, 0x00, 0x00)
        try:
            resp = self._do_send_apdu(apdu_data)
        except Exception as e:
            # TODO Use six.reraise if/when Six becomes an agreed dependency.
            raise exc.DeviceError(e)
        # The last two response bytes are the big-endian status word; the
        # rest is the response data.
        status = struct.unpack('>H', resp[-2:])[0]
        data = resp[:-2]
        if status != APDU_OK:
            raise exc.APDUError(status)
        return data
| {
"content_hash": "33dea96bd5f7bd2d25ea5e17337217dd",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 77,
"avg_line_length": 29.197674418604652,
"alnum_prop": 0.5448028673835126,
"repo_name": "Yubico/python-u2flib-host",
"id": "183d96bb0104aab14c5af50810e09d38da3ce845",
"size": "3883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "u2flib_host/device.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "70663"
}
],
"symlink_target": ""
} |
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout, Submit, Div
from django import forms
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from core.widgets import SimpleMDEWidget
from .models import Experiment, Condition
class ExperimentCreateForm(forms.ModelForm):
    """ModelForm for creating an Experiment, rendered via crispy-forms."""

    class Meta:
        model = Experiment
        fields = ['name', 'description']
        widgets = {
            'description': SimpleMDEWidget(),
        }

    @cached_property
    def helper(self):
        """Build (once) the crispy FormHelper describing the form layout."""
        form_helper = FormHelper()
        form_helper.form_id = 'new-experiment-form'
        form_helper.layout = Layout(Fieldset('', 'name', 'description'))
        form_helper.include_media = False
        return form_helper
| {
"content_hash": "6ef564720372c7c420c66bc6796cb9ce",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 61,
"avg_line_length": 27.606060606060606,
"alnum_prop": 0.6322722283205269,
"repo_name": "ccwang002/biocloud-server-kai",
"id": "decfdff235dbfdea627328ad6321120ff3f522d6",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/experiments/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4414"
},
{
"name": "HTML",
"bytes": "32093"
},
{
"name": "JavaScript",
"bytes": "11108"
},
{
"name": "Python",
"bytes": "162060"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
class UnionProducts(object):
  """Here, products for a target are the ordered union of the products for its transitive deps."""

  def __init__(self):
    # A map of target to OrderedSet of product members.
    self._products_by_target = defaultdict(OrderedSet)

  def add_for_target(self, target, products):
    """Updates the products for a particular target, adding to existing entries."""
    self._products_by_target[target].update(products)

  def add_for_targets(self, targets, products):
    """Updates the products for the given targets, adding to existing entries."""
    # FIXME: This is a temporary helper for use until the classpath has been split.
    for tgt in targets:
      self.add_for_target(tgt, products)

  def get_for_target(self, target):
    """Gets the transitive product deps for the given target."""
    return self.get_for_targets([target])

  def get_for_targets(self, targets):
    """Gets the transitive product deps for the given targets, in order."""
    aggregated = OrderedSet()
    seen = set()
    # Walk each target's transitive closure breadth-first, unioning in the
    # products of each dependency the first time it is encountered.
    for tgt in targets:
      for dep in tgt.closure(bfs=True):
        if dep in seen:
          continue
        seen.add(dep)
        aggregated.update(self._products_by_target[dep])
    return aggregated

  def target_for_product(self, product):
    """Looks up the target key for a product.

    :param product: The product to search for
    :return: None if there is no target for the product
    """
    for tgt, members in self._products_by_target.items():
      if product in members:
        return tgt
    return None

  def __str__(self):
    return "UnionProducts({})".format(self._products_by_target)
class RootedProducts(object):
  """File products of a build that have a concept of a 'root' directory.

  E.g., classfiles, under a root package directory.
  """

  def __init__(self, root):
    self._root = root
    self._rel_paths = OrderedSet()

  def add_abs_paths(self, abs_paths):
    """Adds absolute paths, each of which must live under this root.

    :raises Exception: if a path does not start with the root.
    """
    for abs_path in abs_paths:
      if not abs_path.startswith(self._root):
        raise Exception('{} is not under {}'.format(abs_path, self._root))
      self._rel_paths.add(os.path.relpath(abs_path, self._root))

  def add_rel_paths(self, rel_paths):
    """Adds paths relative to this root."""
    self._rel_paths.update(rel_paths)

  def root(self):
    """Returns the root directory for these products."""
    return self._root

  def rel_paths(self):
    """Returns the product paths relative to the root."""
    return self._rel_paths

  def abs_paths(self):
    """Yields the absolute path of each product."""
    for relpath in self._rel_paths:
      yield os.path.join(self._root, relpath)

  def __bool__(self):
    # Bug fix: __bool__/__nonzero__ must return a bool; returning the
    # OrderedSet itself raised TypeError whenever the object was used in a
    # boolean context.
    return bool(self._rel_paths)

  __nonzero__ = __bool__
class MultipleRootedProducts(object):
  """A product consisting of multiple roots, with associated file products."""

  def __init__(self):
    self._rooted_products_by_root = {}

  def add_rel_paths(self, root, rel_paths):
    """Adds root-relative product paths under the given root."""
    self._get_products_for_root(root).add_rel_paths(rel_paths)

  def add_abs_paths(self, root, abs_paths):
    """Adds absolute product paths, which must live under the given root."""
    self._get_products_for_root(root).add_abs_paths(abs_paths)

  def rel_paths(self):
    """Yields (root, relative paths) pairs, one per registered root."""
    for root, rooted in self._rooted_products_by_root.items():
      yield root, rooted.rel_paths()

  def abs_paths(self):
    """Yields (root, absolute paths) pairs, one per registered root."""
    for root, rooted in self._rooted_products_by_root.items():
      yield root, rooted.abs_paths()

  def _get_products_for_root(self, root):
    # Lazily create the per-root RootedProducts bucket.
    return self._rooted_products_by_root.setdefault(root, RootedProducts(root))

  def __bool__(self):
    """Return True if any of the roots contains products"""
    return any(products for _root, products in self.rel_paths())

  __nonzero__ = __bool__

  def __str__(self):
    return "MultipleRootedProducts({})".format(self._rooted_products_by_root)
class Products(object):
"""An out-of-band 'dropbox' where tasks can place build product information for later tasks to use.
Historically, the only type of product was a ProductMapping. However this had some issues, as not
all products fit into the (basedir, [files-under-basedir]) paradigm. Also, ProductMapping docs
and varnames refer to targets, and implicitly expect the mappings to be keyed by a target, however
we sometimes also need to map sources to products.
So in practice we ended up abusing this in several ways:
1) Using fake basedirs when we didn't have a basedir concept.
2) Using objects other than strings as 'product paths' when we had a need to.
3) Using things other than targets as keys.
Right now this class is in an intermediate stage, as we transition to a more robust Products concept.
The abuses have been switched to use 'data_products' (see below) which is just a dictionary
of product type (e.g., 'classes_by_target') to arbitrary payload. That payload can be anything,
but the MultipleRootedProducts class is useful for products that do happen to fit into the
(basedir, [files-under-basedir]) paradigm.
The long-term future of Products is TBD. But we do want to make it easier to reason about
which tasks produce which products and which tasks consume them. Currently it's quite difficult
to match up 'requires' calls to the producers of those requirements, especially when the 'typename'
is in a variable, not a literal.
"""
class ProductMapping(object):
"""Maps products of a given type by target. Each product is a map from basedir to a list of
files in that dir.
"""
def __init__(self, typename):
self.typename = typename
self.by_target = defaultdict(lambda: defaultdict(list))
def empty(self):
return len(self.by_target) == 0
def add(self, target, basedir, product_paths=None):
"""
Adds a mapping of products for the given target, basedir pair.
If product_paths are specified, these will over-write any existing mapping for this target.
If product_paths is omitted, the current mutable list of mapped products for this target
and basedir is returned for appending.
"""
if product_paths is not None:
self.by_target[target][basedir].extend(product_paths)
else:
return self.by_target[target][basedir]
def has(self, target):
"""Returns whether we have a mapping for the specified target."""
return target in self.by_target
def get(self, target):
"""
Returns the product mapping for the given target as a tuple of (basedir, products list).
Can return None if there is no mapping for the given target.
"""
return self.by_target.get(target)
def __getitem__(self, target):
"""
Support for subscripting into this mapping. Returns the product mapping for the given target
as a map of <basedir> -> <products list>.
If no mapping exists, returns an empty map whose values default to empty lists. So you
can use the result without checking for None.
"""
return self.by_target[target]
def itermappings(self):
"""
Returns an iterable over all pairs (target, product) in this mapping.
Each product is itself a map of <basedir> -> <products list>.
"""
return self.by_target.iteritems()
def keys_for(self, basedir, product):
"""Returns the set of keys the given mapped product is registered under."""
keys = set()
for key, mappings in self.by_target.items():
for mapped in mappings.get(basedir, []):
if product == mapped:
keys.add(key)
break
return keys
def __repr__(self):
return 'ProductMapping({}) {{\n {}\n}}'.format(self.typename, '\n '.join(
'{} => {}\n {}'.format(str(target), basedir, outputs)
for target, outputs_by_basedir in self.by_target.items()
for basedir, outputs in outputs_by_basedir.items()))
def __bool__(self):
return not self.empty()
__nonzero__ = __bool__
def __init__(self):
self.products = {} # type -> ProductMapping instance.
self.predicates_for_type = defaultdict(list)
self.data_products = {} # type -> arbitrary object.
self.required_data_products = set()
def require(self, typename, predicate=None):
"""Registers a requirement that file products of the given type by mapped.
If target predicates are supplied, only targets matching at least one of the predicates are
mapped.
"""
# TODO(John Sirois): This is a broken API. If one client does a require with no predicate and
# another requires with a predicate, the producer will only produce for the latter. The former
# presumably intended to have all products of this type mapped. Kill the predicate portion of
# the api by moving to the new tuple-based engine where all tasks require data for a specific
# set of targets.
self.predicates_for_type[typename].append(predicate or (lambda target: False))
def isrequired(self, typename):
"""Returns a predicate selecting targets required for the given type if mappings are required.
Otherwise returns None.
"""
predicates = self.predicates_for_type[typename]
if not predicates:
return None
def combine(first, second):
return lambda target: first(target) or second(target)
return reduce(combine, predicates, lambda target: False)
def get(self, typename):
"""Returns a ProductMapping for the given type name."""
return self.products.setdefault(typename, Products.ProductMapping(typename))
  def require_data(self, typename):
    """ Registers a requirement that data produced by tasks is required.

    typename: the name of a data product that should be generated.
    """
    # Membership in this set is checked later via is_required_data().
    self.required_data_products.add(typename)
  def is_required_data(self, typename):
    """ Checks if a particular data product is required by any tasks."""
    return typename in self.required_data_products
  def safe_create_data(self, typename, init_func):
    """Ensures that a data item is created if it doesn't already exist."""
    # Basically just an alias for readability: get_data initializes on miss.
    self.get_data(typename, init_func)
def get_data(self, typename, init_func=None):
""" Returns a data product.
If the product isn't found, returns None, unless init_func is set, in which case the product's
value is set to the return value of init_func(), and returned."""
if typename not in self.data_products:
if not init_func:
return None
self.data_products[typename] = init_func()
return self.data_products.get(typename)
| {
"content_hash": "e4dd65f9da806c09f1833f666ef79609",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 103,
"avg_line_length": 37.49473684210526,
"alnum_prop": 0.683230394909227,
"repo_name": "pgroudas/pants",
"id": "431d398fbf0a8bb9e37c5e581225a59f054b0fc9",
"size": "10833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/goal/products.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "10984"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68090"
},
{
"name": "Java",
"bytes": "297674"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "2868194"
},
{
"name": "Scala",
"bytes": "105948"
},
{
"name": "Shell",
"bytes": "39579"
},
{
"name": "Thrift",
"bytes": "2824"
}
],
"symlink_target": ""
} |
import sys
import MySQLdb
def print_scores(cur):
    # Print every leaderboard row, highest score first (Python 2 print statements).
    cur.execute("SELECT name, score FROM leaderboard ORDER BY score DESC;")
    print "Num high scores: %d" % cur.rowcount
    for (name, score) in cur:
        print "%s, %d" % (name, int(score))
print "Content-type: text/plain"
print ""
try:
db = MySQLdb.connect(host="",
user="",
db="",
passwd="")
print_scores(db.cursor())
db.close()
except:
print "Fail! Hit the fail button!"
print sys.exc_info()
| {
"content_hash": "dfdd50a248a3c7de503c697ae3339580",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 22.782608695652176,
"alnum_prop": 0.5687022900763359,
"repo_name": "Matt5sean3/moon-defender",
"id": "0ac49b3b4d6e8f624d1b072b72c6914ca92b71cd",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/getscores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "453"
},
{
"name": "HTML",
"bytes": "1663"
},
{
"name": "JavaScript",
"bytes": "100191"
},
{
"name": "Python",
"bytes": "1159"
}
],
"symlink_target": ""
} |
""":mod:`numpy.ma..mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
.. moduleauthor:: Pierre Gerard-Marchant
"""
# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
# or any other restricted keyword. An idea would be to not bother in the
# first place, and then rename the invalid fields with a trailing
# underscore. Maybe we could just overload the parser function?
import warnings
import numpy as np
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
from numpy.core.records import (
fromarrays as recfromarrays, fromrecords as recfromrecords
)
_byteorderconv = np.core.records._byteorderconv
import numpy.ma as ma
from numpy.ma import (
MAError, MaskedArray, masked, nomask, masked_array, getdata,
getmaskarray, filled
)
_check_fill_value = ma.core._check_fill_value
__all__ = [
'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError(f'illegal input names {names!r}')
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray):
    """
    A MaskedArray subclass mimicking recarray: fields are accessible as
    attributes, and each field of each record carries its own mask bit.

    Attributes
    ----------
    _data : recarray
        Underlying data, as a record array.
    _mask : boolean array
        Mask of the records. A record is masked when all its fields are
        masked.
    _fieldmask : boolean recarray
        Record array of booleans, setting the mask of each individual field
        of each record.
    _fill_value : record
        Filling values for each field.
    """

    def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False,
                mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
                copy=False,
                **options):
        # Allocate the plain recarray first; mask machinery is layered on below.
        self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                                strides=strides, formats=formats, names=names,
                                titles=titles, byteorder=byteorder,
                                aligned=aligned,)

        # One boolean per field per record.
        mdtype = ma.make_mask_descr(self.dtype)
        if mask is nomask or not np.size(mask):
            if not keep_mask:
                # A fully-unmasked record template.
                self._mask = tuple([False] * len(mdtype))
        else:
            mask = np.array(mask, copy=copy)
            if mask.shape != self.shape:
                (nd, nm) = (self.size, mask.size)
                if nm == 1:
                    mask = np.resize(mask, self.shape)
                elif nm == nd:
                    mask = np.reshape(mask, self.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, " + \
                          "mask size is %i."
                    raise MAError(msg % (nd, nm))
                copy = True
            if not keep_mask:
                self.__setmask__(mask)
                self._sharedmask = True
            else:
                if mask.dtype == mdtype:
                    _mask = mask
                else:
                    # Scalar mask per record: replicate across all fields.
                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                     dtype=mdtype)
                self._mask = _mask
        return self

    def __array_finalize__(self, obj):
        # Make sure we have a _fieldmask by default
        _mask = getattr(obj, '_mask', None)
        if _mask is None:
            objmask = getattr(obj, '_mask', nomask)
            _dtype = ndarray.__getattribute__(self, 'dtype')
            if objmask is nomask:
                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
            else:
                # Replicate the source's per-record mask across all fields.
                mdescr = ma.make_mask_descr(_dtype)
                _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
                               dtype=mdescr).view(recarray)
        # Update some of the attributes
        _dict = self.__dict__
        _dict.update(_mask=_mask)
        self._update_from(obj)
        if _dict['_baseclass'] == ndarray:
            _dict['_baseclass'] = recarray
        return

    @property
    def _data(self):
        """
        Returns the data as a recarray.
        """
        return ndarray.view(self, recarray)

    @property
    def _fieldmask(self):
        """
        Alias to mask.
        """
        return self._mask

    def __len__(self):
        """
        Returns the length
        """
        # We have more than one record
        if self.ndim:
            return len(self._data)
        # We have only one record: return the nb of fields
        return len(self.dtype)

    def __getattribute__(self, attr):
        # Regular attribute lookup first; on failure fall back to field access.
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError) as e:
            raise AttributeError(f'record array has no attribute {attr}') from e
        # So far, so good
        _localdict = ndarray.__getattribute__(self, '__dict__')
        _data = ndarray.view(self, _localdict['_baseclass'])
        obj = _data.getfield(*res)
        if obj.dtype.names is not None:
            # NOTE(review): the concatenated message lacks a space between
            # "to" and "simple" -- runtime string, left untouched here.
            raise NotImplementedError("MaskedRecords is currently limited to"
                                      "simple records.")
        # Get some special attributes
        # Reset the object's mask
        hasmasked = False
        _mask = _localdict.get('_mask', None)
        if _mask is not None:
            try:
                _mask = _mask[attr]
            except IndexError:
                # Couldn't find a mask: use the default (nomask)
                pass
            tp_len = len(_mask.dtype)
            hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
        if (obj.shape or hasmasked):
            obj = obj.view(MaskedArray)
            obj._baseclass = ndarray
            obj._isfield = True
            obj._mask = _mask
            # Reset the field values
            _fill_value = _localdict.get('_fill_value', None)
            if _fill_value is not None:
                try:
                    obj._fill_value = _fill_value[attr]
                except ValueError:
                    obj._fill_value = None
        else:
            # 0-d unmasked field: return the plain Python scalar.
            obj = obj.item()
        return obj

    def __setattr__(self, attr, val):
        """
        Sets the attribute attr to the value val.
        """
        # Should we call __setmask__ first ?
        if attr in ['mask', 'fieldmask']:
            self.__setmask__(val)
            return
        # Create a shortcut (so that we don't have to call getattr all the time)
        _localdict = object.__getattribute__(self, '__dict__')
        # Check whether we're creating a new field
        newattr = attr not in _localdict
        try:
            # Is attr a generic attribute ?
            ret = object.__setattr__(self, attr, val)
        except Exception:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
            if not (attr in fielddict or attr in optinfo):
                raise
        else:
            # Get the list of names
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            # Check the attribute
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one or this setattr worked on an
                # internal attribute.
                try:
                    object.__delattr__(self, attr)
                except Exception:
                    return ret
        # Let's try to set the field
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError(f'record array has no attribute {attr}')
        if val is masked:
            # Masked assignment: store the fill value (if any) and mask the field.
            _fill_value = _localdict['_fill_value']
            if _fill_value is not None:
                dval = _localdict['_fill_value'][attr]
            else:
                dval = val
            mval = True
        else:
            dval = filled(val)
            mval = getmaskarray(val)
        obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
        _localdict['_mask'].__setitem__(attr, mval)
        return obj

    def __getitem__(self, indx):
        """
        Returns all the fields sharing the same fieldname base.

        The fieldname base is either `_data` or `_mask`.
        """
        _localdict = self.__dict__
        _mask = ndarray.__getattribute__(self, '_mask')
        _data = ndarray.view(self, _localdict['_baseclass'])
        # We want a field
        if isinstance(indx, str):
            # Make sure _sharedmask is True to propagate back to _fieldmask
            # Don't use _set_mask, there are some copies being made that
            # break propagation Don't force the mask to nomask, that wreaks
            # easy masking
            obj = _data[indx].view(MaskedArray)
            obj._mask = _mask[indx]
            obj._sharedmask = True
            fval = _localdict['_fill_value']
            if fval is not None:
                obj._fill_value = fval[indx]
            # Force to masked if the mask is True
            if not obj.ndim and obj._mask:
                return masked
            return obj
        # We want some elements.
        # First, the data.
        obj = np.array(_data[indx], copy=False).view(mrecarray)
        obj._mask = np.array(_mask[indx], copy=False).view(recarray)
        return obj

    def __setitem__(self, indx, value):
        """
        Sets the given record to value.
        """
        MaskedArray.__setitem__(self, indx, value)
        if isinstance(indx, str):
            self._mask[indx] = ma.getmaskarray(value)

    def __str__(self):
        """
        Calculates the string representation.
        """
        if self.size > 1:
            # Multiple records: "[(f1,f2,...), (f1,f2,...)]"
            mstr = [f"({','.join([str(i) for i in s])})"
                    for s in zip(*[getattr(self, f) for f in self.dtype.names])]
            return f"[{', '.join(mstr)}]"
        else:
            # NOTE(review): zip() over a single list yields 1-tuples here --
            # presumably intended to stringify each field; verify output format.
            mstr = [f"{','.join([str(i) for i in s])}"
                    for s in zip([getattr(self, f) for f in self.dtype.names])]
            return f"({', '.join(mstr)})"

    def __repr__(self):
        """
        Calculates the repr representation.
        """
        _names = self.dtype.names
        # Right-align field names to the longest name plus padding.
        fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
        reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
        reprstr.insert(0, 'masked_records(')
        reprstr.extend([fmt % (' fill_value', self.fill_value),
                        '              )'])
        return str("\n".join(reprstr))

    def view(self, dtype=None, type=None):
        """
        Returns a view of the mrecarray.
        """
        # OK, basic copy-paste from MaskedArray.view.
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        # Here again.
        elif type is None:
            try:
                if issubclass(dtype, ndarray):
                    output = ndarray.view(self, dtype)
                    dtype = None
                else:
                    output = ndarray.view(self, dtype)
            # OK, there's the change
            except TypeError:
                dtype = np.dtype(dtype)
                # we need to revert to MaskedArray, but keeping the possibility
                # of subclasses (eg, TimeSeriesRecords), so we'll force a type
                # set to the first parent
                if dtype.fields is None:
                    basetype = self.__class__.__bases__[0]
                    output = self.__array__().view(dtype, basetype)
                    output._update_from(self)
                else:
                    output = ndarray.view(self, dtype)
                output._fill_value = None
        else:
            output = ndarray.view(self, dtype, type)
        # Update the mask, just like in MaskedArray.view
        if (getattr(output, '_mask', nomask) is not nomask):
            mdtype = ma.make_mask_descr(output.dtype)
            output._mask = self._mask.view(mdtype, ndarray)
            output._mask.shape = output.shape
        return output

    def harden_mask(self):
        """
        Forces the mask to hard.
        """
        self._hardmask = True

    def soften_mask(self):
        """
        Forces the mask to soft
        """
        self._hardmask = False

    def copy(self):
        """
        Returns a copy of the masked record.
        """
        copied = self._data.copy().view(type(self))
        copied._mask = self._mask.copy()
        return copied

    def tolist(self, fill_value=None):
        """
        Return the data portion of the array as a list.

        Data items are converted to the nearest compatible Python type.
        Masked values are converted to fill_value. If fill_value is None,
        the corresponding entries in the output list will be ``None``.
        """
        if fill_value is not None:
            return self.filled(fill_value).tolist()
        result = narray(self.filled().tolist(), dtype=object)
        mask = narray(self._mask.tolist())
        result[mask] = None
        return result.tolist()

    def __getstate__(self):
        """Return the internal state of the masked array.

        This is for pickling.
        """
        # 7-tuple: version, shape, dtype, fortran-order flag, data bytes,
        # mask bytes, fill value.
        state = (1,
                 self.shape,
                 self.dtype,
                 self.flags.fnc,
                 self._data.tobytes(),
                 self._mask.tobytes(),
                 self._fill_value,
                 )
        return state

    def __setstate__(self, state):
        """
        Restore the internal state of the masked array.

        This is for pickling. ``state`` is typically the output of
        ``__getstate__`` and is a 7-tuple:

        - version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - the fortran-order flag
        - a binary string for the data
        - a binary string for the mask
        - the fill value
        """
        (ver, shp, typ, isf, raw, msk, flv) = state
        ndarray.__setstate__(self, (shp, typ, isf, raw))
        # Rebuild the per-field boolean mask dtype and restore the mask in place.
        mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
        self.fill_value = flv

    def __reduce__(self):
        """
        Return a 3-tuple for pickling a MaskedArray.
        """
        return (_mrreconstruct,
                (self.__class__, self._baseclass, (0,), 'b',),
                self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """
    Build a new MaskedArray from the information stored in a pickle.
    """
    # Allocate an empty data array of the right subtype and an empty boolean
    # mask; __setstate__ subsequently fills both in.
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords  # Public alias kept for backward compatibility.
###############################################################################
# Constructors #
###############################################################################
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None,
               fill_value=None):
    """
    Creates a mrecarray from a (flat) list of masked arrays.

    Parameters
    ----------
    arraylist : sequence
        A list of (masked) arrays. Each element of the sequence is first converted
        to a masked array if needed. If a 2D array is passed as argument, it is
        processed line by line
    dtype : {None, dtype}, optional
        Data type descriptor.
    shape : {None, integer}, optional
        Number of records. If None, shape is defined from the shape of the
        first array in the list.
    formats : {None, sequence}, optional
        Sequence of formats for each individual field. If None, the formats will
        be autodetected by inspecting the fields and selecting the highest dtype
        possible.
    names : {None, sequence}, optional
        Sequence of the names of each field.
    fill_value : {None, sequence}, optional
        Sequence of data to be used as filling values.

    Notes
    -----
    Lists of tuples should be preferred over lists of lists for faster processing.
    """
    # Split each input into its data and (at least 1-d) mask parts.
    datalist = [getdata(x) for x in arraylist]
    masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
    _array = recfromarrays(datalist,
                           dtype=dtype, shape=shape, formats=formats,
                           names=names, titles=titles, aligned=aligned,
                           byteorder=byteorder).view(mrecarray)
    # Each flat mask element is one record: a tuple of one boolean per field,
    # obtained by zipping the per-array masks together.
    _array._mask.flat = list(zip(*masklist))
    if fill_value is not None:
        _array.fill_value = fill_value
    return _array
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None,
                fill_value=None, mask=nomask):
    """
    Creates a MaskedRecords from a list of records.

    Parameters
    ----------
    reclist : sequence
        A list of records. Each element of the sequence is first converted
        to a masked array if needed. If a 2D array is passed as argument, it is
        processed line by line
    dtype : {None, dtype}, optional
        Data type descriptor.
    shape : {None,int}, optional
        Number of records. If None, ``shape`` is defined from the shape of the
        first array in the list.
    formats : {None, sequence}, optional
        Sequence of formats for each individual field. If None, the formats will
        be autodetected by inspecting the fields and selecting the highest dtype
        possible.
    names : {None, sequence}, optional
        Sequence of the names of each field.
    fill_value : {None, sequence}, optional
        Sequence of data to be used as filling values.
    mask : {nomask, sequence}, optional.
        External mask to apply on the data.

    Notes
    -----
    Lists of tuples should be preferred over lists of lists for faster processing.
    """
    # Grab the initial _fieldmask, if needed:
    _mask = getattr(reclist, '_mask', None)
    # Get the list of records.
    if isinstance(reclist, ndarray):
        # Make sure we don't have some hidden mask
        if isinstance(reclist, MaskedArray):
            reclist = reclist.filled().view(ndarray)
        # Grab the initial dtype, just in case
        if dtype is None:
            dtype = reclist.dtype
        reclist = reclist.tolist()
    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
                          names=names, titles=titles,
                          aligned=aligned, byteorder=byteorder).view(mrecarray)
    # Set the fill_value if needed
    if fill_value is not None:
        mrec.fill_value = fill_value
    # Now, let's deal w/ the mask
    if mask is not nomask:
        mask = np.array(mask, copy=False)
        maskrecordlength = len(mask.dtype)
        if maskrecordlength:
            # Structured mask: one boolean tuple per record.
            mrec._mask.flat = mask
        elif mask.ndim == 2:
            # 2D boolean array: one row of per-field booleans per record.
            mrec._mask.flat = [tuple(m) for m in mask]
        else:
            # Flat boolean array: masks whole records at once.
            mrec.__setmask__(mask)
    if _mask is not None:
        mrec._mask[:] = _mask
    return mrec
def _guessvartypes(arr):
"""
Tries to guess the dtypes of the str_ ndarray `arr`.
Guesses by testing element-wise conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test
is performed on the first line. An exception is raised if the file is
3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if arr.ndim == 2:
arr = arr[0]
elif arr.ndim > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop.
for f in arr:
try:
int(f)
except (ValueError, TypeError):
try:
float(f)
except (ValueError, TypeError):
try:
complex(f)
except (ValueError, TypeError):
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
def openfile(fname):
    """
    Opens the file handle of file `fname`.
    """
    # Already an open file-like object: hand it back untouched.
    if hasattr(fname, 'readline'):
        return fname
    try:
        f = open(fname)
    except IOError:
        raise IOError(f"No such file: '{fname}'")
    # Sniff the first line: a leading literal backslash-x marks binary content.
    first_line = f.readline()
    if first_line[:2] == "\\x":
        f.close()
        raise NotImplementedError("Wow, binary file")
    f.seek(0, 0)
    return f
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
                 varnames=None, vartypes=None):
    """
    Creates a mrecarray from data stored in the file `filename`.

    Parameters
    ----------
    fname : {file name/handle}
        Handle of an opened file.
    delimitor : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white spacestring(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.


    Ultra simple: the varnames are in the header, one line"""
    # Try to open the file.
    ftext = openfile(fname)
    # Get the first non-empty line as the varnames
    while True:
        line = ftext.readline()
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames
    # Get the data.
    # NOTE(review): only lines whose *first* character is the comment char are
    # skipped; inline comments are not stripped here -- confirm intended.
    _variables = masked_array([line.strip().split(delimitor) for line in ftext
                               if line[0] != commentchar and len(line) > 1])
    (_, nfields) = _variables.shape
    ftext.close()
    # Try to guess the dtype.
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            # Mismatched explicit dtypes: warn and fall back to guessing.
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
            vartypes = _guessvartypes(_variables[0])
    # Construct the descriptor.
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]
    # Get the data and the mask.
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
    return fromarrays(_datalist, dtype=mdescr)
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array

    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
    is None, the new field name is set to 'fi', where `i` is the number of
    existing fields.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data.
    # Create a new empty recarray
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing field
    # (list comprehension used only for its setfield side effect)
    [newdata.setfield(_data.getfield(*f), *f)
     for f in _data.dtype.fields.values()]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask
    # Create a new empty recarray
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [newmask.setfield(_mask.getfield(*f), *f)
     for f in _mask.dtype.fields.values()]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
| {
"content_hash": "c90b20b0c75b6fc0c22d0f4721907be2",
"timestamp": "",
"source": "github",
"line_count": 770,
"max_line_length": 82,
"avg_line_length": 34.64805194805195,
"alnum_prop": 0.552569436635556,
"repo_name": "madphysicist/numpy",
"id": "70087632e9f62cd01943f3343e716dcf972b588f",
"size": "26679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/ma/mrecords.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4953507"
},
{
"name": "C++",
"bytes": "407987"
},
{
"name": "Fortran",
"bytes": "11108"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Objective-C",
"bytes": "427"
},
{
"name": "Python",
"bytes": "9132022"
},
{
"name": "Shell",
"bytes": "9438"
},
{
"name": "Smarty",
"bytes": "4068"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
} |
import datetime
import json
import os
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.sysinfo import SiteName
from ipf.step import Step
from .entity import *
#######################################################################################################################
class AdminDomainStep(Step):
    """IPF workflow step that builds and publishes a single AdminDomain
    document from the required 'admin_domain' step parameter."""

    def __init__(self):
        Step.__init__(self)
        self.description = "a domain containing a number of services"
        self._acceptParameter("admin_domain",
                              "An AdminDomain as a dictionary. See AdminDomain.fromJson() for the keys and values.",
                              True)
        self.time_out = 5
        self.produces = [AdminDomain]

    def run(self):
        # The parameter is required, so a missing key is a configuration error.
        try:
            doc = self.params["admin_domain"]
        except KeyError:
            # NOTE(review): StepError is not among this module's visible
            # imports -- presumably re-exported via `from .entity import *`;
            # verify.
            raise StepError("admin_domain not specified")
        domain = AdminDomain()
        domain.fromJson(doc)
        self._output(domain)
#######################################################################################################################
class Domain(Entity):
    """GLUE2 Domain entity: a collection of actors that form an administrative
    or collaborating unit."""

    DEFAULT_VALIDITY = 60*60*24 # seconds

    def __init__(self):
        Entity.__init__(self)
        self.Description = None  # string
        self.WWW = None          # URL
        self.ContactID = []      # Contact
        self.LocationID = None   # Location
#######################################################################################################################
class DomainOgfJson(EntityOgfJson):
    """JSON representation of a Domain in the OGF GLUE2 rendering."""

    data_cls = Domain

    def __init__(self, data):
        EntityOgfJson.__init__(self,data)

    def get(self):
        return json.dumps(self.toJson(),sort_keys=True,indent=4)

    def toJson(self):
        """Serialize the wrapped Domain to a GLUE2 JSON-compatible dict."""
        doc = EntityOgfJson.toJson(self)

        # Bug fix: the original body referenced an undefined name `domain`
        # (NameError at runtime). The entity being rendered is held in
        # self.data (the Representation pattern used with data_cls above).
        domain = self.data

        if domain.Description is not None:
            doc["Description"] = domain.Description
        if domain.WWW is not None:
            doc["WWW"] = domain.WWW

        associations = {}
        if len(domain.ContactID) > 0:
            associations["ContactID"] = domain.ContactID
        # LocationID is emitted unconditionally (may be None).
        associations["LocationID"] = domain.LocationID
        doc["Associations"] = associations

        return doc
#######################################################################################################################
class AdminDomain(Domain):
    """GLUE2 AdminDomain: a Domain under a single administrative authority,
    aggregating services and possibly other domains."""

    def __init__(self):
        Domain.__init__(self)
        self.Distributed = None       # geographically-distributed resources (boolean)
        self.Owner = None             # person/entity that owns the resources (string)
        self.ServiceID = []           # services managed by this domain (id)
        self.ChildDomainID = []       # this domain aggregates others (id)
        self.ParentDomainID = None    # this domain is part of another
        self.ComputingServiceID = []  # (id)
        self.StorageServiceID = []    # (id)
#######################################################################################################################
class AdminDomainOgfJson(DomainOgfJson):
    """JSON representation of an AdminDomain in the OGF GLUE2 rendering."""

    data_cls = AdminDomain

    def __init__(self, data):
        DomainOgfJson.__init__(self,data)

    def get(self):
        return json.dumps(self.toJson(),sort_keys=True,indent=4)

    def toJson(self):
        """Serialize the wrapped AdminDomain to a GLUE2 JSON-compatible dict."""
        doc = DomainOgfJson.toJson(self)

        # Bug fix: the original body referenced an undefined name `domain`
        # (NameError at runtime). The entity being rendered is held in
        # self.data (the Representation pattern used with data_cls above).
        domain = self.data

        # AdminDomain
        if domain.Distributed is not None:
            doc["Distributed"] = domain.Distributed
        if domain.Owner is not None:
            doc["Owner"] = domain.Owner

        associations = {}
        if len(domain.ServiceID) > 0:
            associations["ServiceID"] = domain.ServiceID
        if len(domain.ChildDomainID) > 0:
            associations["ChildDomainID"] = domain.ChildDomainID
        # ParentDomainID is emitted unconditionally (may be None).
        associations["ParentDomainID"] = domain.ParentDomainID
        if len(domain.ComputingServiceID) > 0:
            associations["ComputingServiceID"] = domain.ComputingServiceID
        if len(domain.StorageServiceID) > 0:
            associations["StorageServiceID"] = domain.StorageServiceID
        doc["Associations"] = associations

        return doc
#######################################################################################################################
| {
"content_hash": "d416e0b27634acd10d7f548acb901f7a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 119,
"avg_line_length": 34.60162601626016,
"alnum_prop": 0.5044642857142857,
"repo_name": "ericblau/ipf-xsede",
"id": "7d9e5b21023994547330573e87f4aade99b1c923",
"size": "5458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipf/glue2/domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1079"
},
{
"name": "Python",
"bytes": "735694"
},
{
"name": "Shell",
"bytes": "15516"
}
],
"symlink_target": ""
} |
from google.cloud import retail_v2beta
def sample_get_product():
    """Fetch a single Retail product by resource name and print it
    (generated API sample; region tags must be preserved)."""
    # Create a client
    client = retail_v2beta.ProductServiceClient()
    # Initialize request argument(s)
    request = retail_v2beta.GetProductRequest(
        name="name_value",
    )
    # Make the request
    response = client.get_product(request=request)
    # Handle the response
    print(response)
# [END retail_v2beta_generated_ProductService_GetProduct_sync]
| {
"content_hash": "d85714ae2b15e47b6d075e8687fd943d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 23.31578947368421,
"alnum_prop": 0.7042889390519187,
"repo_name": "googleapis/python-retail",
"id": "d6d31565b3c9cc290364d19e4eb1fd2b24e3e3ed",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/retail_v2beta_generated_product_service_get_product_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import datetime
import logging
import httplib
import httplib as http # TODO: Inconsistent usage of aliased import
from dateutil.parser import parse as parse_date
from flask import request
import markupsafe
from modularodm.exceptions import ValidationError, NoResultsFound, MultipleResultsFound
from modularodm import Q
from framework import sentry
from framework.auth import utils as auth_utils
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.exceptions import ChangePasswordError
from framework.auth.views import send_confirm_email
from framework.auth.signals import user_merged
from framework.exceptions import HTTPError, PermissionsError
from framework.flask import redirect # VOL-aware redirect
from framework.status import push_status_message
from website import mails
from website import mailchimp_utils
from website import settings
from website.project.model import Node
from website.project.utils import PROJECT_QUERY, TOP_LEVEL_PROJECT_QUERY
from website.models import ApiOAuth2Application, ApiOAuth2PersonalToken, User
from website.oauth.utils import get_available_scopes
from website.profile import utils as profile_utils
from website.util.time import throttle_period_expired
from website.util import api_v2_url, web_url_for, paths
from website.util.sanitize import escape_html
from website.util.sanitize import strip_html
from website.views import _render_nodes
from website.addons.base import utils as addon_utils
logger = logging.getLogger(__name__)
def get_public_projects(uid=None, user=None):
    """Render the public top-level projects for a user, given either the
    user object or a uid to load it by."""
    target_user = user or User.load(uid)
    # In future redesign, should be limited for users with many projects / components
    subquery = TOP_LEVEL_PROJECT_QUERY & Q('is_public', 'eq', True)
    found = Node.find_for_user(target_user, subquery=subquery)
    return _render_nodes(list(found))
def get_public_components(uid=None, user=None):
    """Render the user's public (non-top-level) components.

    :param uid: id of the user to load when ``user`` is not supplied
    :param user: User object; takes precedence over ``uid``
    """
    user = user or User.load(uid)
    # TODO: This should use User.visible_contributor_to?
    # In future redesign, should be limited for users with many projects / components
    subquery = (
        PROJECT_QUERY &
        Q('parent_node', 'ne', None) &
        Q('is_public', 'eq', True)
    )
    component_nodes = list(Node.find_for_user(user, subquery=subquery))
    return _render_nodes(component_nodes, show_path=True)
@must_be_logged_in
def current_user_gravatar(size=None, **kwargs):
    """Return the gravatar URL for the logged-in user."""
    auth = kwargs['auth']
    return get_gravatar(auth.user._id, size=size)
def get_gravatar(uid, size=None):
    """Return a dict containing the gravatar URL for the user with id ``uid``."""
    url = profile_utils.get_gravatar(User.load(uid), size=size)
    return {'gravatar_url': url}
def date_or_none(date):
    """Best-effort parse of ``date`` into a datetime; log and return None on failure."""
    try:
        return parse_date(date)
    except Exception as error:
        # Parsing is best-effort; record the failure but don't propagate it.
        logger.exception(error)
        return None
def validate_user(data, user):
    """Ensure the request payload identifies the logged-in user.

    :raises HTTPError: BAD_REQUEST when 'id' is missing,
        FORBIDDEN when it names a different user.
    """
    if 'id' not in data:
        # raise an error if request doesn't have user id
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': '"id" is required'})
    if data['id'] != user._id:
        raise HTTPError(httplib.FORBIDDEN)
@must_be_logged_in
def resend_confirmation(auth):
    """Resend a confirmation email for one of the user's unconfirmed addresses.

    Throttled via ``user.email_last_sent``; only non-primary, unconfirmed
    addresses are eligible.

    :raises HTTPError: BAD_REQUEST for malformed payloads, throttled
        requests, or already-confirmed addresses.
    """
    user = auth.user
    data = request.get_json()

    validate_user(data, user)
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(httplib.BAD_REQUEST,
                        data={'message_long': 'Too many requests. Please wait a while before sending another confirmation email.'})

    try:
        primary = data['email']['primary']
        confirmed = data['email']['confirmed']
        address = data['email']['address'].strip().lower()
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)

    if primary or confirmed:
        # Fixed typo in user-facing message ("Cannnot" -> "Cannot")
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Cannot resend confirmation for confirmed emails'})

    user.add_unconfirmed_email(address)

    # TODO: This setting is now named incorrectly.
    if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
        send_confirm_email(user, email=address)
    user.email_last_sent = datetime.datetime.utcnow()

    user.save()

    return _profile_view(user, is_profile=True)
@must_be_logged_in
def update_user(auth):
    """Update the logged-in user's profile.

    Handles email additions/removals, primary-email (username) changes, and
    timezone/locale updates from the posted JSON payload.

    :raises HTTPError: FORBIDDEN when the payload tries to remove the
        username or promote an unowned address; BAD_REQUEST on invalid email.
    """
    # trust the decorator to handle auth
    user = auth.user

    data = request.get_json()

    validate_user(data, user)

    # TODO: Expand this to support other user attributes

    ##########
    # Emails #
    ##########

    if 'emails' in data:

        emails_list = [x['address'].strip().lower() for x in data['emails']]

        # The account's username must always remain in the posted list.
        if user.username.strip().lower() not in emails_list:
            raise HTTPError(httplib.FORBIDDEN)

        available_emails = [
            each.strip().lower() for each in
            user.emails + user.unconfirmed_emails
        ]
        # removals
        removed_emails = [
            each.strip().lower()
            for each in available_emails
            if each not in emails_list
        ]

        if user.username.strip().lower() in removed_emails:
            raise HTTPError(httplib.FORBIDDEN)

        for address in removed_emails:
            if address in user.emails:
                try:
                    user.remove_email(address)
                except PermissionsError as e:
                    raise HTTPError(httplib.FORBIDDEN, e.message)
            user.remove_unconfirmed_email(address)

        # additions
        added_emails = [
            each['address'].strip().lower()
            for each in data['emails']
            if each['address'].strip().lower() not in available_emails
        ]

        for address in added_emails:
            try:
                user.add_unconfirmed_email(address)
            except (ValidationError, ValueError):
                # Note: httplib used for consistency with the rest of this view
                raise HTTPError(httplib.BAD_REQUEST, data=dict(
                    message_long="Invalid Email")
                )

            # TODO: This setting is now named incorrectly.
            if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
                send_confirm_email(user, email=address)

        ############
        # Username #
        ############

        # get the first email that is set to primary and has an address.
        # Bug fix: supply a default so a payload without a confirmed primary
        # email no longer raises an unhandled StopIteration.
        primary_email = next(
            (
                each for each in data['emails']
                # email is primary
                if each.get('primary') and each.get('confirmed')
                # an address is specified (can't trust those sneaky users!)
                and each.get('address')
            ),
            None
        )

        # Bug fix: initialize so the confirmation check below cannot hit an
        # unbound local when no primary email was posted.
        username = None
        if primary_email:
            primary_email_address = primary_email['address'].strip().lower()
            if primary_email_address not in [each.strip().lower() for each in user.emails]:
                raise HTTPError(httplib.FORBIDDEN)
            username = primary_email_address

        # make sure the new username has already been confirmed
        if username and username in user.emails and username != user.username:
            mails.send_mail(user.username,
                            mails.PRIMARY_EMAIL_CHANGED,
                            user=user,
                            new_address=username)

            # Remove old primary email from subscribed mailing lists
            for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
                if subscription:
                    mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username)
            user.username = username

    ###################
    # Timezone/Locale #
    ###################

    if 'locale' in data:
        if data['locale']:
            locale = data['locale'].replace('-', '_')
            user.locale = locale
    # TODO: Refactor to something like:
    #   user.timezone = data.get('timezone', user.timezone)
    if 'timezone' in data:
        if data['timezone']:
            user.timezone = data['timezone']

    user.save()

    # Update subscribed mailing lists with new primary email
    # TODO: move to user.save()
    for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
        if subscription:
            mailchimp_utils.subscribe_mailchimp(list_name, user._id)

    return _profile_view(user, is_profile=True)
def _profile_view(profile, is_profile=False):
    """Build the template context for a user profile page.

    :param profile: User whose profile is being rendered
    :param bool is_profile: True when the viewer is viewing their own page
    :raises HTTPError: GONE for disabled accounts, NOT_FOUND for missing ones
    """
    # TODO: Fix circular import
    from website.addons.badges.util import get_sorted_user_badges

    if profile and profile.is_disabled:
        raise HTTPError(http.GONE)

    if 'badges' in settings.ADDONS_REQUESTED:
        # Bug fix: a stray trailing comma previously wrapped this list in a
        # one-element tuple.
        badge_assertions = get_sorted_user_badges(profile)
        badges = _get_user_created_badges(profile)
    else:
        # NOTE: While badges, are unused, 'assertions' and 'badges' can be
        # empty lists.
        badge_assertions = []
        badges = []

    if profile:
        profile_user_data = profile_utils.serialize_user(profile, full=True, is_profile=is_profile)
        return {
            'profile': profile_user_data,
            'assertions': badge_assertions,
            'badges': badges,
            'user': {
                'is_profile': is_profile,
                'can_edit': None,  # necessary for rendering nodes
                'permissions': [],  # necessary for rendering nodes
            },
        }
    raise HTTPError(http.NOT_FOUND)
def _get_user_created_badges(user):
    """Return non-system badges created through the user's badges addon."""
    from website.addons.badges.model import Badge
    addon = user.get_addon('badges')
    if not addon:
        return []
    created = Badge.find(Q('creator', 'eq', addon._id))
    return [each for each in created if not each.is_system_badge]
@must_be_logged_in
def profile_view(auth):
    """Render the logged-in user's own profile page."""
    return _profile_view(auth.user, is_profile=True)
@collect_auth
def profile_view_id(uid, auth):
    """Render the profile page for the user with id ``uid``."""
    target = User.load(uid)
    # Edit affordances only when a user views their own profile.
    viewing_own = auth and auth.user == target
    return _profile_view(target, viewing_own)
@must_be_logged_in
def edit_profile(**kwargs):
    """Deprecated fullname-only profile editor; use update_user instead."""
    # NOTE: This method is deprecated. Use update_user instead.
    # TODO: Remove this view
    user = kwargs['auth'].user

    form = request.form
    ret = {'response': 'success'}
    if form.get('name') == 'fullname' and form.get('value', '').strip():
        user.fullname = strip_html(form['value']).strip()
        user.save()
        ret['name'] = user.fullname
    return ret
def get_profile_summary(user_id, formatter='long'):
    """Return the summary for the user with id ``user_id`` using ``formatter``."""
    return User.load(user_id).get_summary(formatter)
@must_be_logged_in
def user_profile(auth, **kwargs):
    """Return the ids/urls the profile settings page needs."""
    user = auth.user
    return {
        'user_id': user._id,
        'user_api_url': user.api_url,
    }
@must_be_logged_in
def user_account(auth, **kwargs):
    """Build the context for the account settings page."""
    user = auth.user
    configurable = [
        addon for addon in settings.ADDONS_AVAILABLE
        if 'user' in addon.configs
    ]
    return {
        'user_id': user._id,
        'addons': addon_utils.get_addons_by_config_type('user', user),
        'addons_js': collect_user_config_js(configurable),
        'addons_css': []
    }
@must_be_logged_in
def user_account_password(auth, **kwargs):
    """Handle a password-change form post, then redirect to account settings."""
    user = auth.user
    form = request.form
    try:
        user.change_password(
            form.get('old_password', None),
            form.get('new_password', None),
            form.get('confirm_password', None),
        )
        user.save()
    except ChangePasswordError as error:
        # Surface each validation message as a warning banner.
        for message in error.messages:
            push_status_message(message, kind='warning', trust=False)
    else:
        push_status_message('Password updated successfully.', kind='success', trust=False)

    return redirect(web_url_for('user_account'))
@must_be_logged_in
def user_addons(auth, **kwargs):
    """Build the context for the addon account-connections page."""
    user = auth.user
    accounts_addons = [
        addon for addon in settings.ADDONS_AVAILABLE
        if 'accounts' in addon.configs
    ]
    return {
        'addon_settings': addon_utils.get_addons_by_config_type('accounts', user),
        'addon_enabled_settings': [addon.short_name for addon in accounts_addons],
        'addons_js': collect_user_config_js(accounts_addons),
        'addon_capabilities': settings.ADDON_CAPABILITIES,
        'addons_css': []
    }
@must_be_logged_in
def user_notifications(auth, **kwargs):
    """Return the user's combined mailchimp + OSF mailing-list subscriptions."""
    mailing_lists = dict(auth.user.mailchimp_mailing_lists.items())
    mailing_lists.update(auth.user.osf_mailing_lists)
    return {
        'mailing_lists': mailing_lists
    }
@must_be_logged_in
def oauth_application_list(auth, **kwargs):
    """Return app creation page with list of known apps. API is responsible for tying list to current user."""
    return {
        "app_list_url": api_v2_url("applications/")
    }
@must_be_logged_in
def oauth_application_register(auth, **kwargs):
    """Register an API application: blank form view"""
    return {
        "app_list_url": api_v2_url("applications/"),  # POST request to this url
        "app_detail_url": '',
    }
@must_be_logged_in
def oauth_application_detail(auth, **kwargs):
    """Show detail for a single OAuth application"""
    client_id = kwargs.get('client_id')

    # The application must exist, be active, and belong to the requester.
    try:
        record = ApiOAuth2Application.find_one(Q('client_id', 'eq', client_id))
    except NoResultsFound:
        raise HTTPError(http.NOT_FOUND)
    if record.owner != auth.user:
        raise HTTPError(http.FORBIDDEN)
    if record.is_active is False:
        raise HTTPError(http.GONE)

    return {
        "app_list_url": '',
        "app_detail_url": api_v2_url("applications/{}/".format(client_id)),  # Send request to this URL
    }
@must_be_logged_in
def personal_access_token_list(auth, **kwargs):
    """Return token creation page with list of known tokens. API is responsible for tying list to current user."""
    return {
        "token_list_url": api_v2_url("tokens/")
    }
@must_be_logged_in
def personal_access_token_register(auth, **kwargs):
    """Register a personal access token: blank form view"""
    return {
        "token_list_url": api_v2_url("tokens/"),  # POST request to this url
        "token_detail_url": '',
        "scope_options": get_available_scopes(),
    }
@must_be_logged_in
def personal_access_token_detail(auth, **kwargs):
    """Show detail for a single personal access token"""
    _id = kwargs.get('_id')

    # The token must exist, be active, and belong to the requester.
    try:
        record = ApiOAuth2PersonalToken.find_one(Q('_id', 'eq', _id))
    except NoResultsFound:
        raise HTTPError(http.NOT_FOUND)
    if record.owner != auth.user:
        raise HTTPError(http.FORBIDDEN)
    if record.is_active is False:
        raise HTTPError(http.GONE)

    return {
        "token_list_url": '',
        "token_detail_url": api_v2_url("tokens/{}/".format(_id)),  # Send request to this URL
        "scope_options": get_available_scopes(),
    }
def collect_user_config_js(addon_configs):
    """Collect webpack bundles for each of the addons' user-cfg.js modules.

    Return the URLs for each of the JS modules to be included on the user
    addons config page.

    :param list addon_configs: List of user's addon config records.
    """
    resolved = (
        paths.resolve_addon_path(addon_config, 'user-cfg.js')
        for addon_config in addon_configs
    )
    # Drop addons without a user-cfg.js bundle.
    return [js_path for js_path in resolved if js_path]
@must_be_logged_in
def user_choose_addons(**kwargs):
    """Apply the posted addon selection to the logged-in user."""
    auth = kwargs['auth']
    json_data = escape_html(request.get_json())
    auth.user.config_addons(json_data, auth)
@must_be_logged_in
def user_choose_mailing_lists(auth, **kwargs):
    """ Update mailing list subscription on user model and in mailchimp

        Example input:
        {
            "Open Science Framework General": true,
            ...
        }

    """
    user = auth.user
    json_data = escape_html(request.get_json())
    if not json_data:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long="Must provide a dictionary of the format {'mailing list name': Boolean}")
        )

    for list_name, subscribe in json_data.items():
        # TO DO: change this to take in any potential non-mailchimp, something like try: update_subscription(), except IndexNotFound: update_mailchimp_subscription()
        if list_name == settings.OSF_HELP_LIST:
            update_osf_help_mails_subscription(user=user, subscribe=subscribe)
        else:
            update_mailchimp_subscription(user, list_name, subscribe)

    user.save()
    all_mailing_lists = {}
    all_mailing_lists.update(user.mailchimp_mailing_lists)
    all_mailing_lists.update(user.osf_mailing_lists)
    return {'message': 'Successfully updated mailing lists', 'result': all_mailing_lists}, 200
@user_merged.connect
def update_mailchimp_subscription(user, list_name, subscription, send_goodbye=True):
    """ Update mailing list subscription in mailchimp.

    :param obj user: current user
    :param str list_name: mailing list
    :param boolean subscription: true if user is subscribed
    """
    if subscription:
        mailchimp_utils.subscribe_mailchimp(list_name, user._id)
        return
    try:
        mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username, send_goodbye=send_goodbye)
    except mailchimp_utils.mailchimp.ListNotSubscribedError:
        raise HTTPError(http.BAD_REQUEST,
            data=dict(message_short="ListNotSubscribedError",
                    message_long="The user is already unsubscribed from this mailing list.",
                    error_type="not_subscribed")
        )
def mailchimp_get_endpoint(**kwargs):
    """Health-check endpoint the mailchimp webhook hits to verify the OSF responds."""
    return {}, http.OK
def sync_data_from_mailchimp(**kwargs):
    """Endpoint that the mailchimp webhook sends its data to"""
    key = request.args.get('key')

    if key != settings.MAILCHIMP_WEBHOOK_SECRET_KEY:
        # TODO: get tests to pass with sentry logging
        # sentry.log_exception()
        # sentry.log_message("Unauthorized request to the OSF.")
        raise HTTPError(http.UNAUTHORIZED)

    action = request.values['type']
    list_name = mailchimp_utils.get_list_name_from_id(list_id=request.values['data[list_id]'])
    username = request.values['data[email]']

    try:
        user = User.find_one(Q('username', 'eq', username))
    except NoResultsFound:
        sentry.log_exception()
        sentry.log_message("A user with this username does not exist.")
        raise HTTPError(404, data=dict(message_short='User not found',
                                    message_long='A user with this username does not exist'))

    if action == 'unsubscribe':
        user.mailchimp_mailing_lists[list_name] = False
        user.save()
    elif action == 'subscribe':
        user.mailchimp_mailing_lists[list_name] = True
        user.save()
@must_be_logged_in
def impute_names(**kwargs):
    """Guess name components from the 'name' query parameter."""
    return auth_utils.impute_names(request.args.get('name', ''))
def update_osf_help_mails_subscription(user, subscribe):
    """Record and persist the user's OSF help mailing-list preference."""
    user.osf_mailing_lists[settings.OSF_HELP_LIST] = subscribe
    user.save()
@must_be_logged_in
def serialize_names(**kwargs):
    """Return the logged-in user's name fields for the profile form."""
    user = kwargs['auth'].user
    fields = (
        ('full', user.fullname),
        ('given', user.given_name),
        ('middle', user.middle_names),
        ('family', user.family_name),
        ('suffix', user.suffix),
    )
    return dict(fields)
def get_target_user(auth, uid=None):
    """Resolve ``uid`` to a User, defaulting to the authenticated user.

    :raises HTTPError: NOT_FOUND when ``uid`` matches no user.
    """
    if uid:
        target = User.load(uid)
    else:
        target = auth.user
    if target is None:
        raise HTTPError(http.NOT_FOUND)
    return target
def fmt_date_or_none(date, fmt='%Y-%m-%d'):
    """Format ``date`` with ``fmt``; pass falsy dates through as None.

    :raises HTTPError: BAD_REQUEST when strftime rejects the year
        (pre-1900 under Python 2).
    """
    if not date:
        return None
    try:
        return date.strftime(fmt)
    except ValueError:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='Year entered must be after 1900')
        )
def append_editable(data, auth, uid=None):
    """Set ``data['editable']`` to whether the viewer is the target user."""
    target = get_target_user(auth, uid)
    data['editable'] = auth.user == target
def serialize_social_addons(user):
    """Map addon short names to public ids for addons that expose one."""
    return {
        user_settings.config.short_name: user_settings.public_id
        for user_settings in user.get_addons()
        if user_settings.public_id
    }
@collect_auth
def serialize_social(auth, uid=None, **kwargs):
    """Return the target user's social fields, plus addons when editable."""
    target = get_target_user(auth, uid)
    ret = target.social
    append_editable(ret, auth, uid)
    # Addon handles are only exposed to the profile's owner.
    if ret['editable']:
        ret['addons'] = serialize_social_addons(target)
    return ret
def serialize_job(job):
    """Serialize one employment entry to the profile-page dict shape."""
    fields = (
        'institution', 'department', 'title',
        'startMonth', 'startYear', 'endMonth', 'endYear',
    )
    serialized = {name: job.get(name) for name in fields}
    # 'ongoing' alone defaults to False rather than None.
    serialized['ongoing'] = job.get('ongoing', False)
    return serialized
def serialize_school(school):
    """Serialize one education entry to the profile-page dict shape."""
    fields = (
        'institution', 'department', 'degree',
        'startMonth', 'startYear', 'endMonth', 'endYear',
    )
    serialized = {name: school.get(name) for name in fields}
    # 'ongoing' alone defaults to False rather than None.
    serialized['ongoing'] = school.get('ongoing', False)
    return serialized
def serialize_contents(field, func, auth, uid=None):
    """Serialize each entry of ``target.<field>`` with ``func`` and mark editability."""
    target = get_target_user(auth, uid)
    ret = {
        'contents': [func(entry) for entry in getattr(target, field)]
    }
    append_editable(ret, auth, uid)
    return ret
@collect_auth
def serialize_jobs(auth, uid=None, **kwargs):
    """Return the target user's employment history for the profile page."""
    ret = serialize_contents('jobs', serialize_job, auth, uid)
    # Redundant with serialize_contents, but kept for parity with the
    # original behavior.
    append_editable(ret, auth, uid)
    return ret
@collect_auth
def serialize_schools(auth, uid=None, **kwargs):
    """Return the target user's education history for the profile page."""
    ret = serialize_contents('schools', serialize_school, auth, uid)
    # Redundant with serialize_contents, but kept for parity with the
    # original behavior.
    append_editable(ret, auth, uid)
    return ret
@must_be_logged_in
def unserialize_names(**kwargs):
    """Persist posted name fields onto the logged-in user."""
    user = kwargs['auth'].user
    json_data = escape_html(request.get_json())

    def cleaned(key):
        # json get can return None, coerce to '' so strip() is always safe
        return (json_data.get(key) or '').strip()

    user.fullname = cleaned('full')
    user.given_name = cleaned('given')
    user.middle_names = cleaned('middle')
    user.family_name = cleaned('family')
    user.suffix = cleaned('suffix')
    user.save()
def verify_user_match(auth, **kwargs):
    """Raise FORBIDDEN when a 'uid' kwarg names a user other than ``auth``'s."""
    uid = kwargs.get('uid')
    if uid and uid != auth.user._id:
        raise HTTPError(http.FORBIDDEN)
@must_be_logged_in
def unserialize_social(auth, **kwargs):
    """Persist posted social-profile fields for the logged-in user."""
    verify_user_match(auth, **kwargs)

    user = auth.user
    json_data = escape_html(request.get_json())

    for field in user.SOCIAL_FIELDS.keys():
        user.social[field] = json_data.get(field)

    try:
        user.save()
    except ValidationError as exc:
        # Surface the model's validation message to the client.
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long=exc.args[0]
        ))
def unserialize_job(job):
    """Normalize one posted employment entry to the stored dict shape."""
    fields = (
        'institution', 'department', 'title',
        'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing',
    )
    return {name: job.get(name) for name in fields}
def unserialize_school(school):
    """Normalize one posted education entry to the stored dict shape."""
    fields = (
        'institution', 'department', 'degree',
        'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing',
    )
    return {name: school.get(name) for name in fields}
def unserialize_contents(field, func, auth):
    """Replace ``user.<field>`` with posted entries normalized by ``func``."""
    user = auth.user
    json_data = escape_html(request.get_json())
    entries = [func(entry) for entry in json_data.get('contents', [])]
    setattr(user, field, entries)
    user.save()
@must_be_logged_in
def unserialize_jobs(auth, **kwargs):
    """Persist the posted employment history for the logged-in user."""
    verify_user_match(auth, **kwargs)
    unserialize_contents('jobs', unserialize_job, auth)
    # TODO: Add return value
@must_be_logged_in
def unserialize_schools(auth, **kwargs):
    """Persist the posted education history for the logged-in user."""
    verify_user_match(auth, **kwargs)
    unserialize_contents('schools', unserialize_school, auth)
    # TODO: Add return value
@must_be_logged_in
def request_export(auth):
    """Email support a request to export the user's account data (throttled)."""
    user = auth.user
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(httplib.BAD_REQUEST,
                        data={'message_long': 'Too many requests. Please wait a while before sending another account export request.',
                              'error_type': 'throttle_error'})

    mails.send_mail(
        to_addr=settings.SUPPORT_EMAIL,
        mail=mails.REQUEST_EXPORT,
        user=user,
    )
    user.email_last_sent = datetime.datetime.utcnow()
    user.save()
    return {'message': 'Sent account export request'}
@must_be_logged_in
def request_deactivation(auth):
    """Email support a request to deactivate the user's account (throttled)."""
    user = auth.user
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(http.BAD_REQUEST,
                        data={
                            'message_long': 'Too many requests. Please wait a while before sending another account deactivation request.',
                            'error_type': 'throttle_error'
                        })

    mails.send_mail(
        to_addr=settings.SUPPORT_EMAIL,
        mail=mails.REQUEST_DEACTIVATION,
        user=user,
    )
    user.email_last_sent = datetime.datetime.utcnow()
    user.save()
    return {'message': 'Sent account deactivation request'}
def redirect_to_twitter(twitter_handle):
    """Redirect GET requests for /@TwitterHandle/ to respective the OSF user
    account if it associated with an active account

    :param uid: uid for requested User
    :return: Redirect to User's Twitter account page
    """
    query = Q('social.twitter', 'iexact', twitter_handle)
    try:
        user = User.find_one(query)
    except NoResultsFound:
        raise HTTPError(http.NOT_FOUND, data={
            'message_short': 'User Not Found',
            'message_long': 'There is no active user associated with the Twitter handle: {0}.'.format(twitter_handle)
        })
    except MultipleResultsFound:
        # Several accounts claim the same handle: list them all for the
        # visitor to pick from (escaped to avoid injection).
        users = User.find(query)
        message_long = 'There are multiple OSF accounts associated with the ' \
                       'Twitter handle: <strong>{0}</strong>. <br /> Please ' \
                       'select from the accounts below. <br /><ul>'.format(markupsafe.escape(twitter_handle))
        for user in users:
            message_long += '<li><a href="{0}">{1}</a></li>'.format(user.url, markupsafe.escape(user.fullname))
        message_long += '</ul>'
        raise HTTPError(http.MULTIPLE_CHOICES, data={
            'message_short': 'Multiple Users Found',
            'message_long': message_long
        })

    return redirect(user.url)
| {
"content_hash": "6a75514d0edcf7eeef1336a0b7d56b43",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 169,
"avg_line_length": 32.684769775678866,
"alnum_prop": 0.6255960121369745,
"repo_name": "TomHeatwole/osf.io",
"id": "74a9cd205595d7f3f2fe5b397dd03973f1de7aa3",
"size": "27709",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "website/profile/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "140360"
},
{
"name": "HTML",
"bytes": "94857"
},
{
"name": "JavaScript",
"bytes": "1561313"
},
{
"name": "Mako",
"bytes": "659751"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5250038"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
from ambari_agent.ClusterConfiguration import ClusterConfiguration
from mock.mock import MagicMock, patch, mock_open, ANY
from unittest import TestCase
class TestClusterConfigurationCache(TestCase):
    """Unit tests for ClusterConfiguration's on-disk cache handling."""

    # flags/permissions expected when the cache file is (re)written
    o_flags = os.O_WRONLY | os.O_CREAT
    perms = 0o600

    def setUp(self):
        # save original open() method for later use
        self.original_open = open

    def tearDown(self):
        # Bug fix: this previously used '==' (a no-op comparison) instead of
        # '=', so a redirected stdout was never actually restored.
        sys.stdout = sys.__stdout__

    @patch("os.path.exists", new = MagicMock(return_value=True))
    @patch("os.path.isfile", new = MagicMock(return_value=True))
    def test_cluster_configuration_cache_initialization(self):
        """The cache should load configurations.json and serve lookups."""
        configuration_json = '{ "c1" : { "foo-site" : { "foo" : "bar", "foobar" : "baz" } } }'
        open_mock = mock_open(read_data=configuration_json)

        with patch("__builtin__.open", open_mock):
            cluster_configuration = ClusterConfiguration(os.path.join(os.sep, "foo", "bar", "baz"))

        open_mock.assert_called_with(os.sep + "foo" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json", 'r')

        self.assertEqual('bar', cluster_configuration.get_configuration_value('c1', 'foo-site/foo'))
        self.assertEqual('baz', cluster_configuration.get_configuration_value('c1', 'foo-site/foobar'))
        # unknown keys and unknown clusters must resolve to None, not raise
        self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID'))
        self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID/INVALID'))
        self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foo'))
        self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foobar'))

    @patch("ambari_simplejson.dump")
    def test_cluster_configuration_update(self, json_dump_mock):
        """Updating configurations should persist with the expected flags/perms."""
        cluster_configuration = self.__get_cluster_configuration()
        configuration = {'foo-site':
            {'bar': 'rendered-bar', 'baz': 'rendered-baz'}
        }

        osopen_mock, osfdopen_mock = self.__update_cluster_configuration(cluster_configuration, configuration)

        osopen_mock.assert_called_with(os.sep + "foo" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json",
            TestClusterConfigurationCache.o_flags,
            TestClusterConfigurationCache.perms)
        osfdopen_mock.assert_called_with(11, "w")

        json_dump_mock.assert_called_with({'c1': {'foo-site': {'baz': 'rendered-baz', 'bar': 'rendered-bar'}}}, ANY, indent=2)

    def __get_cluster_configuration(self):
        """
        Gets an instance of the cluster cache where the file read and write
        operations have been mocked out
        :return:
        """
        with patch("__builtin__.open") as open_mock:
            open_mock.side_effect = self.open_side_effect
            cluster_configuration = ClusterConfiguration(os.path.join(os.sep, "foo", "bar", "baz"))

        return cluster_configuration

    @patch("os.open")
    @patch("os.fdopen")
    def __update_cluster_configuration(self, cluster_configuration, configuration, osfdopen_mock, osopen_mock):
        """
        Updates the configuration cache, using a mock file as the disk based
        cache so that a file is not created during tests
        :return:
        """
        osopen_mock.return_value = 11
        cluster_configuration._update_configurations("c1", configuration)

        return osopen_mock, osfdopen_mock

    def open_side_effect(self, file, mode):
        # Delegate reads to the real open(); hand back a mock for writes so
        # tests never touch the filesystem.
        if mode == 'w':
            return MagicMock()
        else:
            return self.original_open(file, mode)
| {
"content_hash": "63a7b42141a5a2c9c21637f74c2dc11a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 122,
"avg_line_length": 39.700934579439256,
"alnum_prop": 0.6975047080979284,
"repo_name": "alexryndin/ambari",
"id": "a418f6dbd9485e2882c47f95db23c560770e7056",
"size": "4271",
"binary": false,
"copies": "3",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import os, sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from pip.req import parse_requirements
try:
    from loginsightwebhookdemo import __version__ as loginsightwebhookdemoversion  # TODO Replace with a static variant?
except ImportError:
    loginsightwebhookdemoversion = "0.dev0"

# Hack from https://stackoverflow.com/questions/14399534/how-can-i-reference-requirements-txt-for-the-install-requires-kwarg-in-setuptool
# parse_requirements() returns generator of pip.req.InstallRequirement objects

# Resolve the directory containing the requirements files: prefer PYTHONPATH,
# then TRAVIS_BUILD_DIR, falling back to the current directory.
# Bug fix: the previous bare-except/KeyError chain left HDIR unbound (a
# NameError at parse_requirements time) when a variable was set but empty;
# os.environ.get() with `or` handles unset and empty uniformly.
HDIR = os.environ.get('PYTHONPATH') or os.environ.get('TRAVIS_BUILD_DIR') or '.'

install_reqs = parse_requirements(HDIR + '/requirements.txt', session='hack')
test_reqs = parse_requirements(HDIR + '/test-requirements.txt', session='hack')

# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]
treqs = [str(ir.req) for ir in test_reqs]
class PyTest(TestCommand):
    """``setup.py test`` command: run pytest in the current environment."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    description = "Run tests in the current environment"

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.args = []

    def run(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        import shlex
        try:
            extra_args = shlex.split(self.args)
        except AttributeError:
            # self.args is still the default list, which shlex cannot split
            extra_args = []
        sys.exit(pytest.main(extra_args))
class ToxTest(TestCommand):
    """``setup.py tox`` command: run tests in all configured tox environments."""
    user_options = [('tox-args=', "t", "Arguments to pass to pytest")]
    description = "Run tests in all configured tox environments"

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.args = []

    def run(self):
        # import here, cause outside the eggs aren't loaded
        from tox.__main__ import main
        import shlex
        try:
            extra_args = shlex.split(self.args)
        except AttributeError:
            # self.args is still the default list, which shlex cannot split
            extra_args = []
        sys.exit(main(extra_args))
# Package metadata, dependencies, and entry points for the webhook shim.
setup(
    name='loginsightwebhookdemo',
    version=loginsightwebhookdemoversion,
    url='http://github.com/vmw-loginsight/loginsightwebhookdemo/',
    license='Apache Software License 2.0',
    author='Steve Flanders',
    install_requires=reqs,  # parsed above from requirements.txt
    tests_require=treqs,  # parsed above from test-requirements.txt
    description='VMware vRealize Log Insight Webhook Shim',
    author_email='stevefl@vmware.com',
    long_description=open('README.rst').read(),
    packages=find_packages(),
    platforms='any',
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Development Status :: 1 - Planning',
        'Natural Language :: English',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    # console entry point: `li` launches the shim's main()
    entry_points={
        'console_scripts': [
            'li = loginsightwebhookdemo.__init__:main'
        ]
    },
    # custom commands defined above: `setup.py test` and `setup.py tox`
    cmdclass={'test': PyTest, 'tox': ToxTest}
)
| {
"content_hash": "9a5603fb2ff118a8aee6a262e7d2bd9c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 137,
"avg_line_length": 33.227722772277225,
"alnum_prop": 0.6489868891537545,
"repo_name": "vmw-loginsight/webhook-shims",
"id": "065cf92a9d654bc650273b6e39abf0a6793d00fa",
"size": "3379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128165"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from scipy.integrate import trapz
from itertools import count, izip
import cPickle as pkl
# import imaging_analysis as ia
from ..classes.interval import Interval, ImagingInterval
def calc_activity(
        experiment, method, interval=None, dF='from_file', channel='Ch2',
        label=None, roi_filter=None, demixed=False, running_kwargs=None,
        trans_threshold=95):
    """Calculate various population statistics on each ROI.

    Takes a BehavioralAnalysis.Experiment object and calculates various
    statistics on the imaging data for each ROI, returning a population
    vector of the desired activity measure. Each cycle is analyzed
    individually and an interval can be passed in to select which frames
    to include.

    Parameters
    ----------
    experiment : behavior_analysis.Experiment
        Experiment object to analyze
    method : string
        Calculation to perform on each ROI. One of 'mean', 'auc',
        'amplitude', 'duration', 'responseMagnitude', 'transient auc',
        'norm transient auc', 'transient auc2', 'norm transient auc2',
        'time active', 'frequency', 'n transients', 'is place cell' or
        'time to max peak'.
    interval : optional
        Frames to include in the analysis; defaults to all frames.
        May be: a boolean array (per-ROI/cycle or a single interval that
        is automatically expanded), the strings 'running'/'non-running',
        an Interval/ImagingInterval object, or a (start, stop) frame pair.
    dF : string, optional
        dF/F algorithm to run on imaging data, passed to
        behavior_analysis.Experiment.imagingData as 'dFOverF' argument
    channel, label, roi_filter, demixed : optional
        Imaging-data selection arguments, passed through to
        Experiment.imagingData / Experiment.transientsData.
    running_kwargs : dict, optional
        Extra arguments for Experiment.runningIntervals when `interval`
        is 'running' or 'non-running'.
    trans_threshold : optional
        Confidence threshold passed to Experiment.transientsData.

    Raises
    ------
    ValueError
        If `method` is not one of the recognized method names.

    Returns
    -------
    a : ndarray
        Returns a ndarray of shape (nROIS, nCycles)
    """
    # im_shape = experiment.imaging_shape(
    # channel=channel, label=label, roi_filter=roi_filter)
    # if im_shape[0] == 0:
    # return np.empty((0, im_shape[2]))
    # `data` is fetched lazily -- only branches that need raw traces load it.
    data = None
    if interval is None:
        # If no interval passed in, look at the entire imaging sequence
        data = experiment.imagingData(
            dFOverF=dF, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        interval = np.ones(data.shape, 'bool')
    elif interval == 'running':
        if running_kwargs:
            interval = np.array(experiment.runningIntervals(
                returnBoolList=True, **running_kwargs))
        else:
            interval = np.array(experiment.runningIntervals(
                returnBoolList=True))
    elif interval == 'non-running':
        # Complement of the running mask.
        if running_kwargs:
            interval = ~np.array(experiment.runningIntervals(
                returnBoolList=True, **running_kwargs))
        else:
            interval = ~np.array(experiment.runningIntervals(
                returnBoolList=True))
    elif isinstance(interval, Interval):
        num_rois, num_frames, num_cycles = experiment.imaging_shape(
            channel=channel, label=label, roi_filter=roi_filter)
        if not isinstance(interval, ImagingInterval):
            # Convert a time-based Interval to frame-based using the
            # experiment's frame period.
            sampling_interval = experiment.frame_period()
            interval = ImagingInterval(
                interval, sampling_interval=sampling_interval,
                num_rois=num_rois)
        if interval.shape[1] > num_frames:
            # Truncate the mask to the actual number of imaging frames.
            interval = interval[:, :num_frames, :]
        interval = interval.mask
        assert interval.shape == (num_rois, num_frames, num_cycles)
    elif interval.dtype is not np.dtype('bool'):
        # If interval is not boolean, assume start/stop times and convert
        # Must pass in a tuple/list/array of exactly 2 elements
        data = experiment.imagingData(
            dFOverF=dF, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        inter = np.zeros((data.shape[1], 1), 'bool')
        inter[interval[0]:interval[1] + 1] = True
        interval = np.tile(inter, (data.shape[0], 1, data.shape[2]))
    # If input interval is smaller than shape of data, expand it
    if interval.ndim == 1:
        # Single per-frame mask -> replicate across all ROIs and cycles.
        data = experiment.imagingData(
            dFOverF=dF, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        interval = np.reshape(interval, (-1, 1))
        interval = np.tile(interval, (data.shape[0], 1, data.shape[2]))
    elif interval.ndim == 2 and \
            (interval.shape[0] == 1 or interval.shape[1] == 1):
        # A 1xN or Nx1 mask -> same replication as the 1-D case.
        data = experiment.imagingData(
            dFOverF=dF, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        interval = np.reshape(interval, (-1, 1))
        interval = np.tile(interval, (data.shape[0], 1, data.shape[2]))
    elif interval.ndim == 2:
        # Per-ROI mask -> replicate across cycles only.
        data = experiment.imagingData(
            dFOverF=dF, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        interval = interval[:, :, np.newaxis]
        interval = np.tile(interval, (1, 1, data.shape[2]))
    #
    # Begin calculations
    #
    if method == 'mean':
        # Mean value of signal during interval
        if data is None:
            data = experiment.imagingData(
                dFOverF=dF, roi_filter=roi_filter, channel=channel,
                label=label, demixed=demixed)
        metric = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_data, roi_int in izip(count(), data, interval):
            for cycle_idx, cycle_data, cycle_int in izip(
                    count(), roi_data.T, roi_int.T):
                metric[roi_idx, cycle_idx] = np.nanmean(
                    cycle_data[cycle_int])
    elif method == 'auc':
        # Area under curve of signal during interval
        if data is None:
            data = experiment.imagingData(
                dFOverF=dF, roi_filter=roi_filter, channel=channel,
                label=label, demixed=demixed)
        period = experiment.frame_period()
        metric = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_data, roi_int in izip(count(), data, interval):
            for cycle_idx, cycle_data, cycle_int in izip(
                    count(), roi_data.T, roi_int.T):
                metric[roi_idx, cycle_idx] = nantrapz_1d(
                    cycle_data[cycle_int], dx=period)
    elif method == 'amplitude':
        # Average amplitude of transients that peak within interval
        if data is None:
            data = experiment.imagingData(
                dFOverF=dF, roi_filter=roi_filter, channel=channel,
                label=label, demixed=demixed)
        trans = experiment.transientsData(
            threshold=trans_threshold, roi_filter=roi_filter, channel=channel,
            label=label, demixed=demixed)
        inc_trans = includedTransients(trans, interval)
        metric = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_data, roi_trans, inc in izip(
                count(), data, trans, inc_trans):
            for cycle_idx, cycle_data, cycle_trans, cycle_inc in izip(
                    count(), roi_data.T, roi_trans, inc):
                if len(cycle_inc) > 0:
                    metric[roi_idx, cycle_idx] = np.mean(
                        cycle_data[cycle_trans['max_indices'][cycle_inc]])
                else:
                    # No transients in the interval -> NaN, not 0.
                    metric[roi_idx, cycle_idx] = np.nan
    elif method == 'duration':
        # Average duration of transients that peak within interval (in seconds)
        trans = experiment.transientsData(
            threshold=trans_threshold, roi_filter=roi_filter, channel=channel,
            label=label, demixed=demixed)
        inc_trans = includedTransients(trans, interval)
        metric = np.zeros((len(trans), len(trans[0])))
        for roi_idx, roi_trans, inc in izip(count(), trans, inc_trans):
            for cycle_idx, cycle_trans, cycle_inc in izip(
                    count(), roi_trans, inc):
                if np.sum(cycle_inc) > 0:
                    metric[roi_idx, cycle_idx] = np.mean(
                        cycle_trans['durations_sec'][cycle_inc])
                else:
                    metric[roi_idx, cycle_idx] = np.nan
    elif method == 'responseMagnitude':
        # Average area under curve of transients that peak within interval
        # (in s*dF)
        trans_auc = calc_activity(
            experiment, 'transient auc2', interval=interval, dF=dF,
            channel=channel, label=label, demixed=demixed,
            roi_filter=roi_filter)
        n_trans = calc_activity(
            experiment, 'n transients', interval=interval, dF=dF,
            channel=channel, label=label, demixed=demixed,
            roi_filter=roi_filter)
        metric = trans_auc / n_trans
    elif method == 'transient auc':
        # Total area under curve of transients during interval
        if data is None:
            data = experiment.imagingData(
                dFOverF=dF, roi_filter=roi_filter, channel=channel,
                label=label, demixed=demixed)
        # Zero out frames outside the interval before integrating.
        data *= interval
        period = experiment.frame_period()
        trans = experiment.transientsData(
            threshold=trans_threshold, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        metric = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_trans in enumerate(trans):
            for cycle_idx, cycle_trans in enumerate(roi_trans):
                if len(cycle_trans['start_indices']):
                    for start_idx, stop_idx in zip(
                            cycle_trans['start_indices'],
                            cycle_trans['end_indices']):
                        # NaN start/end means the transient is clipped at
                        # the recording boundary.
                        if np.isnan(start_idx):
                            start_idx = 0
                        if np.isnan(stop_idx):
                            stop_idx = data.shape[1]
                        metric[roi_idx, cycle_idx] += nantrapz_1d(
                            data[roi_idx, start_idx:stop_idx + 1, cycle_idx],
                            dx=period)
                else:
                    # If there were no transients in the given interval...
                    # return NaN, not 0
                    metric[roi_idx, cycle_idx] = np.nan
    elif method == 'norm transient auc':
        # Total area under curve of transients during interval normalized to
        # length of interval
        auc = calc_activity(
            experiment, 'transient auc', interval=interval, dF=dF,
            channel=channel, label=label, demixed=demixed,
            roi_filter=roi_filter)
        period = experiment.frame_period()
        metric = auc / (np.sum(interval, axis=1) * period)
    elif method == 'transient auc2':
        # Total area under curve of transients that peak during interval
        if data is None:
            data = experiment.imagingData(
                dFOverF=dF, roi_filter=roi_filter, channel=channel,
                label=label, demixed=demixed)
        period = experiment.frame_period()
        trans = experiment.transientsData(
            threshold=trans_threshold, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        inc_trans = includedTransients(trans, interval)
        metric = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_trans, inc in izip(count(), trans, inc_trans):
            for cycle_idx, cycle_trans, cycle_inc in izip(
                    count(), roi_trans, inc):
                if np.sum(cycle_inc) > 0:
                    for start_idx, stop_idx in zip(
                            cycle_trans['start_indices'][cycle_inc],
                            cycle_trans['end_indices'][cycle_inc]):
                        if np.isnan(start_idx):
                            start_idx = 0
                        if np.isnan(stop_idx):
                            stop_idx = data.shape[1]
                        metric[roi_idx, cycle_idx] += nantrapz_1d(
                            data[roi_idx, start_idx:stop_idx + 1, cycle_idx],
                            dx=period)
                else:
                    # Unlike 'transient auc', no transients here yields 0.
                    metric[roi_idx, cycle_idx] = 0
    elif method == 'norm transient auc2':
        # Total area under curve of transients that peak during interval
        # normalized to length of interval
        auc = calc_activity(
            experiment, 'transient auc2', interval=interval, dF=dF,
            channel=channel, label=label, demixed=demixed,
            roi_filter=roi_filter)
        period = experiment.frame_period()
        metric = auc / (np.sum(interval, axis=1) * period)
    elif method == 'time active':
        # Percentage of the interval the cell is active
        # NOTE(review): `ia` (imaging_analysis) is commented out in this
        # module's imports -- this branch will raise NameError unless that
        # import is restored.
        active = ia.isActive(
            experiment, conf_level=trans_threshold, roi_filter=roi_filter, channel=channel,
            label=label, demixed=demixed)
        metric = np.sum(active & interval, axis=1) / \
            np.sum(interval, axis=1).astype('float')
    elif method == 'frequency':
        # Frequency of transients that peak during interval (in Hz)
        period = experiment.frame_period()
        n_trans = calc_activity(
            experiment, 'n transients', interval=interval, dF=dF,
            channel=channel, label=label, demixed=demixed,
            roi_filter=roi_filter)
        metric = n_trans / (np.sum(interval, axis=1) * period)
    elif method == 'n transients':
        # Number of transients that peak during interval
        trans = experiment.transientsData(
            threshold=trans_threshold, roi_filter=roi_filter, channel=channel, label=label,
            demixed=demixed)
        inc_trans = includedTransients(trans, interval)
        n_trans = np.zeros((len(trans), interval.shape[2]))
        for roi_idx, inc in enumerate(inc_trans):
            for cycle_idx, cycle_inc in enumerate(inc):
                if np.sum(cycle_inc) > 0:
                    n_trans[roi_idx, cycle_idx] = np.sum(cycle_inc)
        metric = n_trans.astype('int')
    elif method == 'is place cell':
        # 1/0 per ROI depending on whether it has any place fields;
        # result is (nROIs, 1) regardless of nCycles.
        with open(experiment.placeFieldsFilePath(channel=channel), 'rb') as f:
            pfs = pkl.load(
                f)[label]['demixed' if demixed else 'undemixed']['pfs']
        inds = experiment._filter_indices(
            roi_filter, channel=channel, label=label)
        pfs = np.array(pfs)[np.array(inds)]
        pc = []
        for roi in pfs:
            if len(roi):
                pc.append(1)
            else:
                pc.append(0)
        metric = np.array(pc).astype('int')[:, np.newaxis]
    elif method == 'time to max peak':
        if data is None:
            data = experiment.imagingData(dFOverF=dF, roi_filter=roi_filter,
                                          channel=channel, label=label,
                                          demixed=demixed)
        period = experiment.frame_period()
        trans = experiment.transientsData(threshold=trans_threshold, roi_filter=roi_filter,
                                          channel=channel, label=label,
                                          demixed=demixed)
        inc_trans = includedTransients(trans, interval)
        # NOTE(review): despite the name, `auc` holds the start frame index
        # of the largest-amplitude included transient, and this branch
        # returns early (frames, not seconds -- `period` is unused here).
        auc = np.zeros((data.shape[0], data.shape[2]))
        for roi_idx, roi_trans, inc in izip(count(), trans, inc_trans):
            for cycle_idx, cycle_trans, cycle_inc in izip(count(), roi_trans,
                                                          inc):
                if np.sum(cycle_inc) > 0:
                    idx = cycle_trans['max_amplitudes'][cycle_inc].argmax()
                    auc[roi_idx, cycle_idx] = \
                        cycle_trans['start_indices'][cycle_inc][idx]
        return auc
    else:
        raise ValueError('Unrecognized method: ' + str(method))
    return metric
def includedTransients(transients, interval):
    """Flag, per ROI and cycle, which transients start inside ``interval``.

    Args:
        transients: Indexable of per-ROI transient records; each
            ``transients[roi][cycle]`` supports ``['start_indices']``.
        interval: Boolean array of shape (nROIs, nFrames, nCycles).
            Assumed to already be in the correct format -- not validated.

    Returns:
        (nROIs, nCycles) object array; each element is an nTransients-long
        boolean array suitable for logical indexing.
    """
    try:
        transients[0]
    except IndexError:
        # No ROIs at all: empty object array, one column per cycle.
        return np.empty((0, interval.shape[2]), 'object')
    else:
        inc = np.empty((len(transients), len(transients[0])), 'object')
        for roi_idx, (roi_trans, roi_int) in enumerate(
                zip(transients, interval)):
            for cycle_idx, (cycle_trans, cycle_int) in enumerate(
                    zip(roi_trans, roi_int.T)):
                starts = cycle_trans['start_indices']
                flags = np.zeros(len(starts), 'bool')
                # Hoisted out of the per-transient loop: previously
                # np.nonzero was recomputed for every transient.
                active_frames = np.nonzero(cycle_int)[0]
                for trans_idx, start in enumerate(starts):
                    flags[trans_idx] = start in active_frames
                inc[roi_idx, cycle_idx] = flags
    return inc
def nantrapz_1d(y, x=None, dx=1.0):
    """Trapezoidal integration of ``y`` with NaN samples dropped.

    Args:
        y: 1D array of values to integrate.
        x: Optional sample positions; if None, uniform spacing ``dx``.
        dx: Spacing between samples when ``x`` is not given.

    Returns:
        Trapezoidal integral over the non-NaN samples of ``y``.

    Raises:
        ValueError: If ``x`` is given but its length differs from ``y``'s.
    """
    y = np.asarray(y)
    if x is None:
        x_vals = np.arange(len(y)) * dx
    else:
        x_vals = np.asarray(x)
        # Explicit check instead of `assert`, which is stripped under -O.
        # (The old second assertion, len(nans) == len(y), was always true.)
        if len(x_vals) != len(y):
            raise ValueError("x and y must have the same length")
    nans = np.isnan(y)
    return trapz(y[~nans], x=x_vals[~nans])
def included_transient(x):
    """Return True when the transient starts inside its interval bounds.

    ``x`` is a row carrying ``int_start``, ``int_end`` and ``start_frame``.
    """
    if x.start_frame < x.int_start:
        return False
    return x.start_frame <= x.int_end
def included_transients(data, interval):
    """Return filtered dataframe of transients that occur during interval."""
    # Outer merge attaches the per-(roi, trial) interval bounds to every
    # transient row.
    data = pd.merge(data, interval, how='outer')
    inc_tran = data.groupby(["roi",
                             "trial",
                             "trans_idx"]).apply(included_transient)
    # NOTE(review): `inc_tran` is indexed by the (roi, trial, trans_idx)
    # group keys while `data` has a plain RangeIndex -- confirm that
    # `.loc[inc_tran]` actually selects the intended rows for the inputs
    # this is used with.
    return data.loc[inc_tran]
def roi_trials(expt_grp, channel='Ch2', label=None, roi_filter=None):
    """Return a DataFrame with one row per (trial, roi) pair in the group."""
    rois = expt_grp.rois(channel=channel, label=label, roi_filter=roi_filter)
    rows = [
        {'trial': trial, 'roi': roi}
        for expt in expt_grp
        for trial in expt.findall('trial')
        for roi in rois[expt]
    ]
    return pd.DataFrame(rows)
def calc_n_transients(expt_grp, interval=None, channel='Ch2',
                      label=None, roi_filter=None, demixed=False,
                      behaviorSync=False):
    """Return the number of transients per ROI (one row per roi/trial)."""
    frames = []
    for expt in expt_grp:
        frames.append(expt.transientsData(threshold=95,
                                          channel=channel,
                                          label=label,
                                          roi_filter=roi_filter,
                                          behaviorSync=behaviorSync,
                                          dataframe=True))
    trans_data = pd.concat(frames)
    if interval is not None:
        # Restrict to transients that start inside the interval.
        trans_data = included_transients(trans_data, interval)
    counts = trans_data.groupby(["roi", "trial"]).count().reset_index()
    counts = counts[['roi', 'trial', 'trans_idx']]
    return counts.rename(columns={"trans_idx": "n_trans"})
def transient_auc(x):
    """Return the AUC for one transient row (integrated at the trial's
    frame period)."""
    period = x.trial.parent.frame_period()
    # Keep only the transient's frames, zero elsewhere, then integrate.
    mask = np.zeros(x.im_data.shape)
    mask[x.start_frame:x.stop_frame] = 1
    return nantrapz_1d(mask * x.im_data, dx=period)
def calc_transient_auc(expt_grp, interval=None, channel='Ch2',
                       label=None, roi_filter=None, demixed=False,
                       behaviorSync=False):
    """Return the AUC for each transient (one row per transient)."""
    trans_frames = [expt.transientsData(threshold=95,
                                        channel=channel,
                                        label=label,
                                        roi_filter=roi_filter,
                                        behaviorSync=behaviorSync,
                                        dataframe=True) for expt in expt_grp]
    # Trim imaging to the behavior period whenever behavior sync is on.
    imaging_frames = [expt.imagingData(dFOverF='from_file',
                                       demixed=demixed,
                                       roi_filter=roi_filter,
                                       removeNanBoutons=False,
                                       trim_to_behavior=behaviorSync,
                                       channel=channel,
                                       label=label,
                                       dataframe=True) for expt in expt_grp]
    trans_data = pd.concat(trans_frames)
    im_data = pd.concat(imaging_frames)
    if interval is not None:
        trans_data = included_transients(trans_data, interval)
    merged = pd.merge(trans_data, im_data)
    merged["AUC"] = merged.apply(transient_auc, axis=1)
    return merged[["roi", "trial", "trans_idx", "AUC"]]
def calc_sum_transient_auc(expt_grp, interval=None, channel='Ch2',
                           label=None, roi_filter=None, demixed=False,
                           behaviorSync=False):
    """Return the total AUC for each unique ROI trace."""
    per_transient = calc_transient_auc(expt_grp=expt_grp, interval=interval,
                                       channel=channel, label=label,
                                       roi_filter=roi_filter,
                                       behaviorSync=behaviorSync,
                                       demixed=demixed)
    # nansum: a transient whose AUC came out NaN does not poison the total.
    totals = per_transient.groupby(["roi", "trial"]).agg({"AUC": np.nansum})
    return totals.reset_index(level=["roi", "trial"])
def calc_average_duration(expt_grp, interval=None, channel='Ch2',
                          label=None, roi_filter=None, demixed=False,
                          behaviorSync=False):
    """Return the average transient duration per unique ROI trace.

    The duration returned is in seconds."""
    trans_data = pd.concat(
        expt.transientsData(threshold=95,
                            channel=channel,
                            label=label,
                            roi_filter=roi_filter,
                            behaviorSync=behaviorSync,
                            dataframe=True) for expt in expt_grp)
    if interval is not None:
        trans_data = included_transients(trans_data, interval)
    averages = trans_data.groupby(["roi", "trial"]).agg(
        {"duration": np.nanmean})
    return averages.reset_index(level=["roi", "trial"])
def calc_average_amplitude(expt_grp, interval=None, channel='Ch2',
                           label=None, roi_filter=None, demixed=False,
                           behaviorSync=False):
    """Return the average transient amplitude for each unique ROI trace."""
    trans_data = pd.concat(
        expt.transientsData(threshold=95,
                            channel=channel,
                            label=label,
                            roi_filter=roi_filter,
                            behaviorSync=behaviorSync,
                            dataframe=True) for expt in expt_grp)
    if interval is not None:
        trans_data = included_transients(trans_data, interval)
    averages = trans_data.groupby(
        ["roi", "trial"]).agg({"max_amplitude": np.nanmean})
    return averages.reset_index(level=["roi", "trial"])
def calc_time_active(expt_grp, interval=None, channel='Ch2',
                     label=None, roi_filter=None, demixed=False,
                     behaviorSync=False):
    """Return the fraction of the interval each transient is active.

    Args:
        expt_grp: Iterable of experiments providing ``transientsData``.
        interval: DataFrame with roi, trial, int_start, int_end columns.
            NOTE(review): ``interval=None`` is not yet supported -- the
            whole-imaging-length fallback below is still a TODO and the
            groupby would fail on None.

    Returns:
        DataFrame with columns roi, trial, trans_idx, time_active.
    """
    data_list = [expt.transientsData(threshold=95,
                                     channel=channel,
                                     label=label,
                                     roi_filter=roi_filter,
                                     behaviorSync=behaviorSync,
                                     dataframe=True) for expt in expt_grp]
    trans_data = pd.concat(data_list)
    if interval is not None:
        # BUG FIX: filter the concatenated DataFrame and keep the result.
        # Previously the raw list of frames was passed in and the filtered
        # result was assigned back to `data_list` and discarded.
        trans_data = included_transients(trans_data, interval)
        # BUG FIX: interval length is end - start (was start - end, which
        # produced negative lengths and negative activity fractions).
        interval["int_len"] = interval["int_end"] - interval["int_start"]
    # If no interval is passed, the entire imaging length should be
    # assumed.
    # else:
    #     interval = ImagingInterval()
    summed_len = interval.groupby(["roi", "trial"]).agg({"int_len": np.sum})
    summed_len.reset_index(level=["roi", "trial"], inplace=True)
    trans_data = pd.merge(trans_data, summed_len)
    # NOTE(review): `duration` is in seconds elsewhere in this module while
    # int_start/int_end look frame-based -- confirm the units agree.
    trans_data["time_active"] = trans_data["duration"] / trans_data["int_len"]
    return trans_data[["roi", "trial", "trans_idx", "time_active"]]
def calc_summed_time_active(expt_grp, interval=None, channel='Ch2',
                            label=None, roi_filter=None, demixed=False,
                            behaviorSync=False):
    """Return the percentage of time each unique ROI trace is active."""
    pairs = roi_trials(expt_grp, channel=channel,
                       label=label, roi_filter=roi_filter)
    per_transient = calc_time_active(expt_grp=expt_grp, interval=interval,
                                     channel=channel, label=label,
                                     roi_filter=roi_filter, demixed=demixed,
                                     behaviorSync=behaviorSync)
    summed = per_transient.groupby(
        ["roi", "trial"]).agg({"time_active": np.sum})
    summed.reset_index(level=["roi", "trial"], inplace=True)
    # Outer merge keeps (roi, trial) pairs that had no transients at all.
    return pd.merge(pairs, summed, how='outer')
def calc_response_magnitude(expt_grp, interval=None, channel='Ch2',
                            label=None, roi_filter=None, demixed=False,
                            behaviorSync=False):
    """Return the summed AUC divided by the number of transients.

    Returns:
        DataFrame with columns roi, trial, response_magnitude.
    """
    auc = calc_sum_transient_auc(expt_grp=expt_grp, interval=interval,
                                 channel=channel, label=label,
                                 roi_filter=roi_filter,
                                 behaviorSync=behaviorSync,
                                 demixed=demixed)
    n_trans = calc_n_transients(expt_grp=expt_grp, interval=interval,
                                channel=channel, label=label,
                                roi_filter=roi_filter,
                                behaviorSync=behaviorSync,
                                demixed=demixed)
    data = pd.merge(auc, n_trans)
    data["response_magnitude"] = data["AUC"] / data["n_trans"]
    # BUG FIX: return the computed metric. The previous selection asked for
    # a nonexistent "trans_idx" column (both inputs are aggregated per
    # (roi, trial), raising KeyError) and returned "AUC" instead of the
    # response magnitude it had just computed.
    return data[["roi", "trial", "response_magnitude"]]
| {
"content_hash": "5e5a1ab71a0eb89ca9f2b621b84622b7",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 95,
"avg_line_length": 43.7962962962963,
"alnum_prop": 0.558793004036133,
"repo_name": "losonczylab/Zaremba_NatNeurosci_2017",
"id": "e24f5b374ac637277a16f8b73f5e7532029e7f0a",
"size": "26015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "losonczy_analysis_bundle/lab/analysis/calc_activity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1435233"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
"""
This module defines classes to represent the density of states, etc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 20, 2012"
import collections
import numpy as np
import six
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.util.coord_utils import get_linear_interpolated_value
from pymatgen.serializers.json_coders import PMGSONable
class Dos(PMGSONable):
    """
    Basic DOS object. All other DOS objects are extended versions of this
    object.

    Args:
        efermi: Fermi level energy
        energies: A sequences of energies
        densities ({Spin: np.array}): representing the density of states
            for each Spin.

    .. attribute: energies
        The sequence of energies
    .. attribute: densities
        A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}
    .. attribute: efermi
        Fermi level
    """
    def __init__(self, efermi, energies, densities):
        self.efermi = efermi
        self.energies = np.array(energies)
        # Copy each spin channel into its own numpy array.
        self.densities = {k: np.array(d) for k, d in densities.items()}
    def get_densities(self, spin=None):
        """
        Returns the density of states for a particular spin.
        Args:
            spin: Spin
        Returns:
            Returns the density of states for a particular spin. If Spin is
            None, the sum of all spins is returned.
        """
        if self.densities is None:
            result = None
        elif spin is None:
            # Spin-unpolarized request: sum both channels when available.
            if Spin.down in self.densities:
                result = self.densities[Spin.up] + self.densities[Spin.down]
            else:
                result = self.densities[Spin.up]
        else:
            result = self.densities[spin]
        return result
    def get_smeared_densities(self, sigma):
        """
        Returns the Dict representation of the densities, {Spin: densities},
        but with a Gaussian smearing of std dev sigma applied about the fermi
        level.
        Args:
            sigma: Std dev of Gaussian smearing function.
        Returns:
            Dict of Gaussian-smeared densities.
        """
        from scipy.ndimage.filters import gaussian_filter1d
        smeared_dens = {}
        # Convert sigma from energy units to grid points using the average
        # energy-grid spacing.
        diff = [self.energies[i + 1] - self.energies[i]
                for i in range(len(self.energies) - 1)]
        avgdiff = sum(diff) / len(diff)
        for spin, dens in self.densities.items():
            smeared_dens[spin] = gaussian_filter1d(dens, sigma / avgdiff)
        return smeared_dens
    def __add__(self, other):
        """
        Adds two DOS together. Checks that energy scales are the same.
        Otherwise, a ValueError is thrown.
        Args:
            other: Another DOS object.
        Returns:
            Sum of the two DOSs.
        """
        if not all(np.equal(self.energies, other.energies)):
            raise ValueError("Energies of both DOS are not compatible!")
        # Note: assumes both DOS carry the same spin channels (iterates
        # self's keys and indexes directly into other's densities).
        densities = {spin: self.densities[spin] + other.densities[spin]
                     for spin in self.densities.keys()}
        return Dos(self.efermi, self.energies, densities)
    def get_interpolated_value(self, energy):
        """
        Returns interpolated density for a particular energy.
        Args:
            energy: Energy to return the density for.
        """
        f = {}
        for spin in self.densities.keys():
            f[spin] = get_linear_interpolated_value(self.energies,
                                                    self.densities[spin],
                                                    energy)
        return f
    def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None):
        """
        Expects a DOS object and finds the gap
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.
        Returns:
            (gap, cbm, vbm):
                Tuple of floats in eV corresponding to the gap, cbm and vbm.
        """
        tdos = self.get_densities(spin)
        if not abs_tol:
            # Relative tolerance: scale by the mean density per grid point.
            tol = tol * tdos.sum() / tdos.shape[0]
        energies = self.energies
        below_fermi = [i for i in range(len(energies))
                       if energies[i] < self.efermi and tdos[i] > tol]
        above_fermi = [i for i in range(len(energies))
                       if energies[i] > self.efermi and tdos[i] > tol]
        vbm_start = max(below_fermi)
        cbm_start = min(above_fermi)
        if vbm_start == cbm_start:
            return 0.0, self.efermi, self.efermi
        else:
            # Interpolate between adjacent values
            # The interpolation is deliberately inverted: density is the
            # x-variable, so this finds the ENERGY at which the density
            # crosses `tol` on either side of the gap.
            terminal_dens = tdos[vbm_start:vbm_start + 2][::-1]
            terminal_energies = energies[vbm_start:vbm_start + 2][::-1]
            start = get_linear_interpolated_value(terminal_dens,
                                                  terminal_energies, tol)
            terminal_dens = tdos[cbm_start - 1:cbm_start + 1]
            terminal_energies = energies[cbm_start - 1:cbm_start + 1]
            end = get_linear_interpolated_value(terminal_dens,
                                                terminal_energies, tol)
            return end - start, end, start
    def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None):
        """
        Expects a DOS object and finds the cbm and vbm.
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: An absolute tolerance (True) and a relative one (False)
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.
        Returns:
            (cbm, vbm): float in eV corresponding to the gap
        """
        #determine tolerance
        tdos = self.get_densities(spin)
        if not abs_tol:
            tol = tol * tdos.sum() / tdos.shape[0]
        # find index of fermi energy
        i_fermi = 0
        while self.energies[i_fermi] <= self.efermi:
            i_fermi += 1
        # work backwards until tolerance is reached
        i_gap_start = i_fermi
        while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
            i_gap_start -= 1
        # work forwards until tolerance is reached
        i_gap_end = i_gap_start
        while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
            i_gap_end += 1
        i_gap_end -= 1
        return self.energies[i_gap_end], self.energies[i_gap_start]
    def get_gap(self, tol=0.001, abs_tol=False, spin=None):
        """
        Expects a DOS object and finds the gap.
        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: An absolute tolerance (True) and a relative one (False)
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.
        Returns:
            gap in eV
        """
        (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
        # Metals yield cbm <= vbm; clamp the gap at zero.
        return max(cbm - vbm, 0.0)
    def __str__(self):
        """
        Returns a string which can be easily plotted (using gnuplot).
        """
        if Spin.down in self.densities:
            stringarray = ["#{:30s} {:30s} {:30s}".format("Energy",
                                                          "DensityUp",
                                                          "DensityDown")]
            for i, energy in enumerate(self.energies):
                stringarray.append("{:.5f} {:.5f} {:.5f}"
                                   .format(energy, self.densities[Spin.up][i],
                                           self.densities[Spin.down][i]))
        else:
            stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")]
            for i, energy in enumerate(self.energies):
                stringarray.append("{:.5f} {:.5f}"
                                   .format(energy, self.densities[Spin.up][i]))
        return "\n".join(stringarray)
    @classmethod
    def from_dict(cls, d):
        """
        Returns Dos object from dict representation of Dos.
        """
        return Dos(d["efermi"], d["energies"],
                   {Spin.from_int(int(k)): v
                    for k, v in d["densities"].items()})
    def as_dict(self):
        """
        Json-serializable dict representation of Dos.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__, "efermi": self.efermi,
                "energies": list(self.energies),
                "densities": {str(spin): list(dens)
                              for spin, dens in self.densities.items()}}
class CompleteDos(Dos):
    """
    This wrapper class defines a total dos, and also provides a list of PDos.
    Mainly used by pymatgen.io.vaspio.Vasprun to create a complete Dos from
    a vasprun.xml file. You are unlikely to try to generate this object
    manually.

    Args:
        structure: Structure associated with this particular DOS.
        total_dos: total Dos for structure
        pdoss: The pdoss are supplied as an {Site:{Orbital:{
            Spin:Densities}}}

    .. attribute:: structure
        Structure associated with the CompleteDos.
    .. attribute:: pdos
        Dict of partial densities of the form {Site:{Orbital:{Spin:Densities}}}
    """
    def __init__(self, structure, total_dos, pdoss):
        # Initialize the base Dos with fresh copies of the total densities.
        Dos.__init__(self, total_dos.efermi, energies=total_dos.energies,
                     densities={k: np.array(d)
                                for k, d in total_dos.densities.items()})
        self.pdos = pdoss
        self.structure = structure
    def get_site_orbital_dos(self, site, orbital):
        """
        Get the Dos for a particular orbital of a particular site.
        Args:
            site: Site in Structure associated with CompleteDos.
            orbital: Orbital in the site.
        Returns:
            Dos containing densities for orbital of site.
        """
        return Dos(self.efermi, self.energies, self.pdos[site][orbital])
    def get_site_dos(self, site):
        """
        Get the total Dos for a site (all orbitals).
        Args:
            site: Site in Structure associated with CompleteDos.
        Returns:
            Dos containing summed orbital densities for site.
        """
        # Pairwise-sum every orbital's spin densities for this site.
        site_dos = six.moves.reduce(add_densities, self.pdos[site].values())
        return Dos(self.efermi, self.energies, site_dos)
    def get_site_spd_dos(self, site):
        """
        Get orbital projected Dos of a particular site
        Args:
            site: Site in Structure associated with CompleteDos.
        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        spd_dos = dict()
        for orb, pdos in self.pdos[site].items():
            # Collapse individual orbitals (e.g. px, py, pz) onto their
            # broad s/p/d type.
            orbital_type = _get_orb_type(orb)
            if orbital_type in spd_dos:
                spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
            else:
                spd_dos[orbital_type] = pdos
        return {orb: Dos(self.efermi, self.energies, densities)
                for orb, densities in spd_dos.items()}
    def get_site_t2g_eg_resolved_dos(self, site):
        """
        Get the t2g, eg projected DOS for a particular site.
        Args:
            site: Site in Structure associated with CompleteDos.
        Returns:
            A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
            for the site.
        """
        t2g_dos = []
        eg_dos = []
        for s, atom_dos in self.pdos.items():
            if s == site:
                # t2g: dxy/dxz/dyz; e_g: dx2/dz2.
                for orb, pdos in atom_dos.items():
                    if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
                        t2g_dos.append(pdos)
                    elif orb in (Orbital.dx2, Orbital.dz2):
                        eg_dos.append(pdos)
        return {"t2g": Dos(self.efermi, self.energies,
                           six.moves.reduce(add_densities, t2g_dos)),
                "e_g": Dos(self.efermi, self.energies,
                           six.moves.reduce(add_densities, eg_dos))}
    def get_spd_dos(self):
        """
        Get orbital projected Dos.
        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        spd_dos = {}
        for atom_dos in self.pdos.values():
            for orb, pdos in atom_dos.items():
                orbital_type = _get_orb_type(orb)
                if orbital_type not in spd_dos:
                    spd_dos[orbital_type] = pdos
                else:
                    spd_dos[orbital_type] = \
                        add_densities(spd_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities)
                for orb, densities in spd_dos.items()}
    def get_element_dos(self):
        """
        Get element projected Dos.
        Returns:
            dict of {Element: Dos}
        """
        el_dos = {}
        for site, atom_dos in self.pdos.items():
            el = site.specie
            # Sum all orbitals of all sites of the same species.
            for pdos in atom_dos.values():
                if el not in el_dos:
                    el_dos[el] = pdos
                else:
                    el_dos[el] = add_densities(el_dos[el], pdos)
        return {el: Dos(self.efermi, self.energies, densities)
                for el, densities in el_dos.items()}
    def get_element_spd_dos(self, el):
        """
        Get element and spd projected Dos
        Args:
            el: Element in Structure.composition associated with CompleteDos
        Returns:
            dict of {Element: {"S": densities, "P": densities, "D": densities}}
        """
        el = get_el_sp(el)
        el_dos = {}
        for site, atom_dos in self.pdos.items():
            if site.specie == el:
                for orb, pdos in atom_dos.items():
                    orbital_type = _get_orb_type(orb)
                    if orbital_type not in el_dos:
                        el_dos[orbital_type] = pdos
                    else:
                        el_dos[orbital_type] = \
                            add_densities(el_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities)
                for orb, densities in el_dos.items()}
    @classmethod
    def from_dict(cls, d):
        """
        Returns CompleteDos object from dict representation.
        """
        tdos = Dos.from_dict(d)
        struct = Structure.from_dict(d["structure"])
        pdoss = {}
        # The serialized pdos list is ordered like the structure's sites.
        for i in range(len(d["pdos"])):
            at = struct[i]
            orb_dos = {}
            for orb_str, odos in d["pdos"][i].items():
                orb = Orbital.from_string(orb_str)
                orb_dos[orb] = {Spin.from_int(int(k)): v
                                for k, v in odos["densities"].items()}
            pdoss[at] = orb_dos
        return CompleteDos(struct, tdos, pdoss)
    def as_dict(self):
        """
        Json-serializable dict representation of CompleteDos.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__, "efermi": self.efermi,
             "structure": self.structure.as_dict(),
             "energies": list(self.energies),
             "densities": {str(spin): list(dens)
                           for spin, dens in self.densities.items()},
             "pdos": []}
        if len(self.pdos) > 0:
            # Serialize site pdos in structure-site order so from_dict can
            # reconstruct the mapping positionally.
            for at in self.structure:
                dd = {}
                for orb, pdos in self.pdos[at].items():
                    dd[str(orb)] = {"densities": {str(int(spin)): list(dens)
                                                  for spin,
                                                  dens in pdos.items()}}
                d["pdos"].append(dd)
            d["atom_dos"] = {str(at): dos.as_dict() for at,
                             dos in self.get_element_dos().items()}
            d["spd_dos"] = {str(orb): dos.as_dict() for orb,
                            dos in self.get_spd_dos().items()}
        return d
    def __str__(self):
        return "Complete DOS for " + str(self.structure)
def add_densities(density1, density2):
    """
    Method to sum two densities.

    Args:
        density1: First density.
        density2: Second density.

    Returns:
        Dict of {spin: density}, summing the two inputs spin-wise.
        Only spins present in ``density1`` are considered.
    """
    summed = {}
    for spin in density1:
        summed[spin] = np.array(density1[spin]) + np.array(density2[spin])
    return summed
def _get_orb_type(orb):
try:
return orb.orbital_type
except AttributeError:
return orb | {
"content_hash": "2e3b75f5399eda12624640e6d0e54a02",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 82,
"avg_line_length": 35.10569105691057,
"alnum_prop": 0.5309170912459472,
"repo_name": "ctoher/pymatgen",
"id": "57d6271062684f9d871a4633c59fef18da2a0a2b",
"size": "17289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/electronic_structure/dos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3688213"
}
],
"symlink_target": ""
} |
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
# Remember the real implementation so teardown() can restore it after tests.
original_find_tex_file = dr.find_tex_file
def setup():
    """Stub out dr.find_tex_file with an identity function for the tests."""
    dr.find_tex_file = lambda path: path
def teardown():
    """Restore the real dr.find_tex_file implementation."""
    dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
    """Parse the bundled test.map fixture and check all entry properties."""
    mapfile = os.path.join(
        os.path.dirname(__file__),
        'baseline_images', 'dviread', 'test.map')
    fontmap = dr.PsfontsMap(mapfile)
    # Check all properties of a few fonts
    for n in range(1, 6):
        key = 'TeXfont%d' % n
        entry = fontmap[key]
        assert_equal(entry.texname, key)
        assert_equal(entry.psname, 'PSfont%d' % n)
        # TeXfont5 specifies multiple encodings, so its encoding is not
        # checked here.
        if n == 3:
            assert_equal(entry.encoding, 'enc3.foo')
        elif n != 5:
            assert_equal(entry.encoding, 'font%d.enc' % n)
        suffix = 'pfb' if n in (1, 5) else 'pfa'
        assert_equal(entry.filename, 'font%d.%s' % (n, suffix))
        expected_effects = {'slant': -0.1, 'extend': 2.2} if n == 4 else {}
        assert_equal(entry.effects, expected_effects)
    # Some special cases: (filename, encoding) per font.
    special = {'TeXfont6': (None, None),
               'TeXfont7': (None, 'font7.enc'),
               'TeXfont8': ('font8.pfb', None)}
    for key, (filename, encoding) in special.items():
        entry = fontmap[key]
        assert_equal(entry.filename, filename)
        assert_equal(entry.encoding, encoding)
    assert_equal(fontmap['TeXfont9'].filename, '/absolute/font9.pfb')
| {
"content_hash": "14c981237813a3d26d56111750aa2e53",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 33.34,
"alnum_prop": 0.6064787042591482,
"repo_name": "lthurlow/Network-Grapher",
"id": "fe3739e8b144ce835273a946d129cef75507b11b",
"size": "1667",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "proj/external/matplotlib-1.2.1/lib/matplotlib/tests/test_dviread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
} |
"""343. Integer Break
https://leetcode.com/problems/integer-break/
Given a positive integer n, break it into the sum of at least two positive
integers and maximize the product of those integers.
Return the maximum product you can get.
Example 1:
Input: 2
Output: 1
Explanation: 2 = 1 + 1, 1 × 1 = 1.
Example 2:
Input: 10
Output: 36
Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
Note: You may assume that n is not less than 2 and not larger than 58.
"""
class Solution:
    def integer_break(self, n: int) -> int:
        """Split ``n`` into at least two positive integers and return the
        maximum product of the parts.

        Strategy: use as many 3s as possible. A remainder of 1 is handled
        by trading one 3 for a 4 (since 2 * 2 > 3 * 1).
        """
        small_cases = {1: 1, 2: 1, 3: 2}
        if n in small_cases:
            return small_cases[n]
        threes, remainder = divmod(n, 3)
        if remainder == 0:
            return 3 ** threes
        if remainder == 1:
            return 4 * 3 ** (threes - 1)
        return 2 * 3 ** threes
| {
"content_hash": "c4096d6be3589f2763e7cabb39c1ce01",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 21.91891891891892,
"alnum_prop": 0.5450061652281134,
"repo_name": "isudox/leetcode-solution",
"id": "c83d504c72d12e86c96593895d45053603661f1b",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-algorithm/leetcode/problem_343.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "16121"
},
{
"name": "Java",
"bytes": "118043"
},
{
"name": "Python",
"bytes": "151015"
}
],
"symlink_target": ""
} |
import sys
# Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) <= 9:
name = sys.argv[1]
print 'The number of donuts', sys.argv[1]
if len(sys.argv) >= 9:
name = sys.argv[1]
print 'The number of donuts', 'many'
else:
name = 'I don\'t know how many donuts you are talking about'
print 'How many Donuts\?', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main() | {
"content_hash": "d1a208c69192f6e4812d34f9f49a65f2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 30.11111111111111,
"alnum_prop": 0.6476014760147601,
"repo_name": "allenjcochran/google-python-class",
"id": "31d2f51701794939b8fa2ed1ff4a19267e3b8b84",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "donuts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Expose public exceptions & warnings
"""
from pandas._config.config import OptionError
from pandas._libs.tslibs import OutOfBoundsDatetime
class NullFrequencyError(ValueError):
    """
    Raised when an operation that needs a non-null ``freq`` attribute
    encounters a null one; notably `DatetimeIndex.shift`,
    `TimedeltaIndex.shift`, `PeriodIndex.shift`.
    """
class PerformanceWarning(Warning):
    """
    Warning emitted when an operation may have a noticeable performance
    impact.
    """
class UnsupportedFunctionCall(ValueError):
    """
    Raised when a numpy function is called on a pandas object that does
    not support it, e.g. ``np.cumsum(groupby_object)``.
    """
class UnsortedIndexError(KeyError):
    """
    Raised when slicing a MultiIndex that has not been lexsorted.
    Subclass of `KeyError`.
    """
class ParserError(ValueError):
    """
    Raised when an error is encountered while parsing file contents.

    This generic error covers parsing failures in functions such as
    `read_csv` or `read_html`.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_html : Read HTML table into a DataFrame.
    """
class DtypeWarning(Warning):
    """
    Warning issued when a column read from a file holds mixed dtypes.

    `read_csv` and `read_table` emit this warning whenever they encounter
    non-uniform dtypes in a column of a CSV file. Because dtype checking
    happens one chunk at a time, the warning typically shows up only for
    larger files.

    Despite the warning, the file is still read: the mixed-type column is
    loaded as a single column of ``object`` dtype. See the examples below.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_table : Read general delimited file into a DataFrame.

    Examples
    --------
    Create and read a large CSV file whose first column mixes `int` and
    `str` values:

    >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +
    ...                          ['1'] * 100000),
    ...                    'b': ['b'] * 300000})
    >>> df.to_csv('test.csv', index=False)
    >>> df2 = pd.read_csv('test.csv')
    ... # DtypeWarning: Columns (0) have mixed types

    Note that ``df2`` now holds both `str` and `int` for the same input
    value, '1':

    >>> df2.iloc[262140, 0]
    '1'
    >>> type(df2.iloc[262140, 0])
    <class 'str'>
    >>> df2.iloc[262150, 0]
    1
    >>> type(df2.iloc[262150, 0])
    <class 'int'>

    Passing an explicit `dtype` to `read_csv`/`read_table` performs the
    conversion and avoids the warning entirely:

    >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str})

    >>> import os
    >>> os.remove('test.csv')
    """
class EmptyDataError(ValueError):
    """
    Raised by `pd.read_csv` (both the C and Python engines) when the
    data or header being read is empty.
    """
class ParserWarning(Warning):
    """
    Warning issued when the default 'c' parser cannot be used.

    `pd.read_csv` and `pd.read_table` raise it when they must fall back
    from the 'c' engine to 'python' because the requested option lacks
    support in 'c'. Currently unsupported 'c' options include:

    1. `sep` other than a single character (e.g. regex separators)
    2. `skipfooter` higher than 0
    3. `sep=None` with `delim_whitespace=False`

    Passing ``engine='python'`` explicitly to `pd.read_csv` or
    `pd.read_table` avoids the warning.

    See Also
    --------
    pd.read_csv : Read CSV (comma-separated) file into DataFrame.
    pd.read_table : Read general delimited file into DataFrame.

    Examples
    --------
    Using a `sep` that is not a single character triggers the fallback:

    >>> import io
    >>> csv = '''a;b;c
    ... 1;1,8
    ... 1;2,1'''
    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]')  # doctest: +SKIP
    ... # ParserWarning: Falling back to the 'python' engine...

    Adding `engine='python'` removes the warning:

    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
    """
class MergeError(ValueError):
    """
    Raised when merging cannot proceed because of problems with the
    input data. Subclass of `ValueError`.
    """
class AccessorRegistrationWarning(Warning):
    """
    Warning raised on attribute conflicts during accessor registration.
    """
class AbstractMethodError(NotImplementedError):
    """
    Raise this error instead of NotImplementedError for abstract methods
    while keeping compatibility with Python 2 and Python 3.

    Parameters
    ----------
    class_instance : object
        The instance (or, for classmethods, the class) the abstract
        method was invoked on; used to build the error message.
    methodtype : str, default "method"
        One of {"method", "classmethod", "staticmethod", "property"}.

    Raises
    ------
    ValueError
        If ``methodtype`` is not one of the allowed values.
    """

    def __init__(self, class_instance, methodtype="method"):
        types = {"method", "classmethod", "staticmethod", "property"}
        if methodtype not in types:
            # Bug fix: ``methodtype`` and ``types`` were previously
            # swapped in this f-string, producing a nonsensical message.
            raise ValueError(
                f"methodtype must be one of {types}, got {methodtype} instead."
            )
        self.methodtype = methodtype
        self.class_instance = class_instance

    def __str__(self) -> str:
        if self.methodtype == "classmethod":
            # For classmethods the "instance" is the class object itself.
            name = self.class_instance.__name__
        else:
            name = type(self.class_instance).__name__
        return f"This {self.methodtype} must be defined in the concrete class {name}"
class NumbaUtilError(Exception):
    """
    Raised when an unsupported routine is passed to the Numba engine.
    """
| {
"content_hash": "43754c18426db0ddcf8bb46250076e00",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 85,
"avg_line_length": 29.485,
"alnum_prop": 0.6413430557910802,
"repo_name": "TomAugspurger/pandas",
"id": "4c4ce9df855435784c273175f82c98de0ecfd4b7",
"size": "5913",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/errors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "257"
},
{
"name": "C",
"bytes": "352075"
},
{
"name": "CSS",
"bytes": "979"
},
{
"name": "Cython",
"bytes": "1029090"
},
{
"name": "Dockerfile",
"bytes": "1756"
},
{
"name": "HTML",
"bytes": "454789"
},
{
"name": "Makefile",
"bytes": "473"
},
{
"name": "Python",
"bytes": "14680580"
},
{
"name": "Shell",
"bytes": "31513"
},
{
"name": "Smarty",
"bytes": "2126"
}
],
"symlink_target": ""
} |
"""
[2017-06-07] Challenge #318 [Intermediate] 2020 - NBA Revolution
https://www.reddit.com/r/dailyprogrammer/comments/6ft96y/20170607_challenge_318_intermediate_2020_nba/
# Description
We are in June 2020 and the [NBA](https://en.wikipedia.org/wiki/National_Basketball_Association) just decided to change
the format of their regular season from the [divisions/conferences
system](https://en.wikipedia.org/wiki/National_Basketball_Association#Regular_season) to one single round robin
tournament.
You are in charge of writing the program that will generate the regular season schedule every year from now on. The NBA
executive committee wants the competition to be as fair as possible, so the round robin tournament has to conform with
the below rules:
1 - The number of teams engaged is maintained to 30.
2 - The schedule is composed of 58 rounds of 15 games. Each team plays 2 games against the other teams - one at home
and the other away - for a total of 58 games. All teams are playing on the same day within a round.
3 - After the first half of the regular season (29 rounds), each team must have played exactly once against all other
teams.
4 - Each team cannot play more than 2 consecutive home games, and playing 2 consecutive home games cannot occur more
than once during the whole season.
5 - Rule 4 also applies to away games.
6 - The schedule generated must be different every time the program is launched.
# Input description
The list of teams engaged (one line per team), you may add the number of teams before the list if it makes the input
parsing easier for you.
# Output description
The complete list of games scheduled for each round, conforming to the 6 rules set out above. For each game, the team
playing at home is named first.
Use your preferred file sharing tool to post your answer if the output is too big to post it locally.
# Sample input
Cleveland Cavaliers
Golden State Warriors
San Antonio Spurs
Toronto raptors
# Sample output
Round 1
San Antonio Spurs - Toronto Raptors
Golden State Warriors - Cleveland Cavaliers
Round 2
San Antonio Spurs - Golden State Warriors
Toronto Raptors - Cleveland Cavaliers
Round 3
Golden State Warriors - Toronto Raptors
Cleveland Cavaliers - San Antonio Spurs
Round 4
Golden State Warriors - San Antonio Spurs
Cleveland Cavaliers - Toronto Raptors
Round 5
Toronto Raptors - Golden State Warriors
San Antonio Spurs - Cleveland Cavaliers
Round 6
Toronto Raptors - San Antonio Spurs
Cleveland Cavaliers - Golden State Warriors
# Challenge input
Atlanta Hawks
Boston Celtics
Brooklyn Nets
Charlotte Hornets
Chicago Bulls
Cleveland Cavaliers
Dallas Mavericks
Denver Nuggets
Detroit Pistons
Golden State Warriors
Houston Rockets
Indiana Pacers
Los Angeles Clippers
Los Angeles Lakers
Memphis Grizzlies
Miami Heat
Milwaukee Bucks
Minnesota Timberwolves
New Orleans Pelicans
New York Knicks
Oklahoma City Thunder
Orlando Magic
Philadelphia 76ers
Phoenix Suns
Portland Trail Blazers
Sacramento Kings
San Antonio Spurs
Toronto Raptors
Utah Jazz
Washington Wizards
# Bonus
Add the scheduled date besides each round number in your output (using format MM/DD/YYYY), given that:
- The competition cannot start before October 1st, 2020 and cannot end after April 30th, 2021.
- There cannot be less than 2 full days between each round (it means that if one round occurs on October 1st, the next
round cannot occur before October 4th).
- The number of rounds taking place over the weekends (on Saturdays or Sundays) must be maximized, to increase audience
incomes.
# Credit
This challenge was suggested by user /u/gabyjunior, many thanks. If you have a challenge idea, please share it in
/r/dailyprogrammer_ideas and there's a good chance we'll use it.
"""
def main():
    """Entry point placeholder; the challenge solver is not implemented yet."""
    pass
if __name__ == "__main__":
    main()
| {
"content_hash": "eeb75e3de87d14945dcccf5dce24bc49",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 119,
"avg_line_length": 35.41228070175438,
"alnum_prop": 0.7436215011146892,
"repo_name": "DayGitH/Python-Challenges",
"id": "537751060c1cecc478e544eb9334d850c5b89f42",
"size": "4037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20170607B.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
"""
Module to create PKCS#1 v1.5 RSA signatures
See RFC3447__ or the `original RSA Labs specification`__.
This scheme is more properly called ``RSASSA-PKCS1-v1_5``.
For example, a sender can create the signature of a message using
its private RSA key:
>>> from Cryptodome.Signature import pkcs1_15
>>> from Cryptodome.Hash import SHA256
>>> from Cryptodome.PublicKey import RSA
>>>
>>> message = 'To be signed'
>>> key = RSA.importKey(open('private_key.der').read())
>>> h = SHA256.new(message)
>>> signature = pkcs1_15.new(key).sign(h)
At the other side, the receiver can verify the signature (and therefore
the authenticity of the message) using the public RSA key:
>>> key = RSA.importKey(open('public_key.der').read())
>>> h = SHA256.new(message)
>>> try:
...     pkcs1_15.new(key).verify(h, signature)
...     print "The signature is valid."
... except (ValueError, TypeError):
...     print "The signature is not valid."
:undocumented: __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125
"""
from Cryptodome.Util.py3compat import b, bchr
import Cryptodome.Util.number
from Cryptodome.Util.number import ceil_div, bytes_to_long, long_to_bytes
from Cryptodome.Util.asn1 import DerSequence, DerNull, DerOctetString, DerObjectId
class PKCS115_SigScheme:
    """An instance of the PKCS#1 v1.5 signature scheme for a specific RSA key."""

    def __init__(self, rsa_key):
        """Initialize this PKCS#1 v1.5 signature scheme object.

        :Parameters:
         rsa_key : an RSA key object
          Creation of signatures is only possible if this is a *private*
          RSA key. Verification of signatures is always possible.
        """
        self._key = rsa_key

    def can_sign(self):
        """Return True if this object can be used to sign messages."""
        return self._key.has_private()

    def sign(self, msg_hash):
        """Produce the PKCS#1 v1.5 signature of a message.

        This function is named ``RSASSA-PKCS1-V1_5-SIGN``;
        it is specified in section 8.2.1 of RFC3447.

        :Parameters:
         msg_hash : hash object
          This is an object created with the `Cryptodome.Hash` module.
          It was used to hash the message to sign.

        :Return: The signature encoded as a byte string.
        :Raise ValueError:
            If the RSA key is not long enough when combined with the given
            hash algorithm.
        :Raise TypeError:
            If the RSA key has no private half.
        """
        # See 8.2.1 in RFC3447
        modBits = Cryptodome.Util.number.size(self._key.n)
        k = ceil_div(modBits,8) # Convert from bits to bytes
        # Step 1: EMSA-PKCS1-v1_5 encoding of the digest
        em = _EMSA_PKCS1_V1_5_ENCODE(msg_hash, k)
        # Step 2a (OS2IP): encoded message -> integer
        em_int = bytes_to_long(em)
        # Step 2b (RSASP1): RSA signature primitive (private-key operation)
        m_int = self._key._decrypt(em_int)
        # Step 2c (I2OSP): integer -> fixed-length byte string
        signature = long_to_bytes(m_int, k)
        return signature

    def verify(self, msg_hash, signature):
        """Verify that a certain PKCS#1 v1.5 signature is valid.

        This method checks that the message really originates from someone
        holding the RSA private key, i.e. that the key holder really
        signed the message.

        This function is named ``RSASSA-PKCS1-V1_5-VERIFY``;
        it is specified in section 8.2.2 of RFC3447.

        :Parameters:
         msg_hash : hash object
          The hash that was carried out over the message. This is an object
          belonging to the `Cryptodome.Hash` module.
         signature : byte string
          The signature that needs to be validated.

        :Raise ValueError:
            if the signature is not valid.
        """
        # See 8.2.2 in RFC3447
        modBits = Cryptodome.Util.number.size(self._key.n)
        k = ceil_div(modBits, 8) # Convert from bits to bytes
        # Step 1: the signature must be exactly as long as the modulus
        if len(signature) != k:
            raise ValueError("Invalid signature")
        # Step 2a (O2SIP): signature -> integer
        signature_int = bytes_to_long(signature)
        # Step 2b (RSAVP1): RSA verification primitive (public-key operation)
        em_int = self._key._encrypt(signature_int)
        # Step 2c (I2OSP): integer -> byte string
        em1 = long_to_bytes(em_int, k)
        # Step 3: rebuild the expected encoding(s) locally and compare
        try:
            possible_em1 = [ _EMSA_PKCS1_V1_5_ENCODE(msg_hash, k, True) ]
            # MD2/4/5 hashes always require NULL params in AlgorithmIdentifier.
            # For all others, it is optional.
            try:
                algorithm_is_md = msg_hash.oid.startswith('1.2.840.113549.2.')
            except AttributeError:
                algorithm_is_md = False
            if not algorithm_is_md:  # non-MD hashes may omit the NULL params
                possible_em1.append(_EMSA_PKCS1_V1_5_ENCODE(msg_hash, k, False))
        except ValueError:
            raise ValueError("Invalid signature")
        # Step 4
        # By comparing the full encodings (as opposed to checking each
        # of its components one at a time) we avoid attacks to the padding
        # scheme like Bleichenbacher's (see http://www.mail-archive.com/cryptography@metzdowd.com/msg06537).
        #
        if em1 not in possible_em1:
            raise ValueError("Invalid signature")
        pass
def _EMSA_PKCS1_V1_5_ENCODE(msg_hash, emLen, with_hash_parameters=True):
    """
    Implement the ``EMSA-PKCS1-V1_5-ENCODE`` function, as defined
    in PKCS#1 v2.1 (RFC3447, 9.2).

    ``_EMSA-PKCS1-V1_5-ENCODE`` actually accepts the message ``M`` as input,
    and hash it internally. Here, we expect that the message has already
    been hashed instead.

    :Parameters:
     msg_hash : hash object
        The hash object that holds the digest of the message being signed.
     emLen : int
        The length the final encoding must have, in bytes.
     with_hash_parameters : bool
        If True (default), include NULL parameters for the hash
        algorithm in the ``digestAlgorithm`` SEQUENCE.

    :attention: the early standard (RFC2313) stated that ``DigestInfo``
        had to be BER-encoded. This means that old signatures
        might have length tags in indefinite form, which
        is not supported in DER. Such encoding cannot be
        reproduced by this function.

    :Raise TypeError:
        If the digest plus the fixed DER/padding overhead does not fit
        into ``emLen`` bytes.

    :Return: An ``emLen`` byte long string that encodes the hash.
    """
    # First, build the ASN.1 DER object DigestInfo:
    #
    #   DigestInfo ::= SEQUENCE {
    #       digestAlgorithm AlgorithmIdentifier,
    #       digest OCTET STRING
    #   }
    #
    # where digestAlgorithm identifies the hash function and shall be an
    # algorithm ID with an OID in the set PKCS1-v1-5DigestAlgorithms.
    #
    #   PKCS1-v1-5DigestAlgorithms    ALGORITHM-IDENTIFIER ::= {
    #       { OID id-md2      PARAMETERS NULL }|
    #       { OID id-md5      PARAMETERS NULL }|
    #       { OID id-sha1     PARAMETERS NULL }|
    #       { OID id-sha256   PARAMETERS NULL }|
    #       { OID id-sha384   PARAMETERS NULL }|
    #       { OID id-sha512   PARAMETERS NULL }
    #   }
    #
    # Appendix B.1 also says that for SHA-1/-2 algorithms, the parameters
    # should be omitted. They may be present, but when they are, they shall
    # have NULL value.
    digestAlgo = DerSequence([ DerObjectId(msg_hash.oid).encode() ])
    if with_hash_parameters:
        digestAlgo.append(DerNull().encode())
    digest = DerOctetString(msg_hash.digest())
    digestInfo = DerSequence([
                    digestAlgo.encode(),
                    digest.encode()
                    ]).encode()

    # We need at least 11 bytes for the remaining data: 3 fixed bytes and
    # at least 8 bytes of padding).
    if emLen < len(digestInfo) + 11:
        # Bug fix: the message used to say "algorith" and measured the
        # DerOctetString wrapper instead of the raw digest bytes.
        raise TypeError("Selected hash algorithm has a too long digest "
                        "(%d bytes)." % len(msg_hash.digest()))
    PS = bchr(0xFF) * (emLen - len(digestInfo) - 3)
    return b("\x00\x01") + PS + bchr(0x00) + digestInfo
def new(rsa_key):
    """Return a signature scheme object `PKCS115_SigScheme` that
    can create or verify PKCS#1 v1.5 signatures.

    :Parameters:
     rsa_key : RSA key object
        The RSA key to use to sign or verify the message.
        This is a `Cryptodome.PublicKey.RSA` object.
        Signing is only possible if ``rsa_key`` is a private RSA key.
    """
    scheme = PKCS115_SigScheme(rsa_key)
    return scheme
| {
"content_hash": "b83bc012ce52361d49fda1188ddee7ad",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 108,
"avg_line_length": 37.3304347826087,
"alnum_prop": 0.5963195900302819,
"repo_name": "mchristopher/PokemonGo-DesktopMap",
"id": "3b75f044dbfe5c54c1a34883b090030fafdccde4",
"size": "10127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/pylibs/win32/Cryptodome/Signature/pkcs1_15.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "31739"
},
{
"name": "JavaScript",
"bytes": "53009"
},
{
"name": "Python",
"bytes": "13306214"
},
{
"name": "Shell",
"bytes": "4175"
}
],
"symlink_target": ""
} |
__author__ = "ymotongpoo <ymotongpoo@gmail.com>"
__date__ = "$2010/09/20 22:43:10$"
__version__ = "$Revision: 0.10"
__credits__ = "0x7d8 -- programming training"
from time import sleep
import os
import urllib
import re
from StringIO import StringIO
from lxml import etree
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
twitter_api = 'http://api.twitter.com/1/statuses/user_timeline.xml?'
# Default GET parameters for the user_timeline call. All values are kept
# as strings because they are concatenated straight into the query string.
default_options = {'screen_name':'ymotongpoo',
                   'trim_user':'false',
                   'include_rts':'true',
                   'include_entities':'true',
                   # NOTE(review): '9'*1 is just "9" — looks like a
                   # placeholder upper bound; confirm the intended value.
                   'max_id':'9'*1,
                   'since_id':'0',
                   'count':'200',
                   }
interval = 25 # interval sec. for each HTTP request
def update_option(**options):
    """
    update GET options for Twitter API

    Only keys already present in ``default_options`` are accepted;
    unknown keys are silently ignored.

    @param options dictionary of options
    @return a new dict based on default_options with overrides applied
    """
    # Bug fix: copy the defaults instead of aliasing them, so the
    # module-level default_options is never mutated by callers.
    api_options = dict(default_options)
    # Bug fix: the loop used to iterate over the undefined name 'option',
    # which raised NameError on every call.
    for k, v in options.iteritems():
        if k in default_options:
            api_options[k] = v
    return api_options
def retreive_xml(**options):
    """
    retreive timeline in xml format via twitter api

    @param options dictionary of options
    @return raw response body as a string
    """
    query_parts = ['%s=%s' % (k, v) for k, v in options.iteritems()]
    url = twitter_api + '&'.join(query_parts)
    response = urllib.urlopen(url)
    return response.read()
def minimum_id(tweets):
    """
    find minimum id from xml

    @param tweets retreived xml
    @return the smallest status id in the document, as a string
    @raise IndexError if the document has no <statuses> element
    @raise ValueError if no numeric ids are present
    """
    # The old try/except used Python-2-only 'except X, e' syntax and
    # re-raised with 'raise e', which discards the original traceback.
    # Letting the exceptions propagate keeps the same exception types
    # while preserving the traceback.
    tree = etree.parse(StringIO(tweets), etree.XMLParser())
    statuses = tree.xpath('//statuses')
    id_strings = statuses[0].xpath('./status/id/text()')
    # min() raises ValueError on an empty sequence, matching the old
    # behaviour for documents without status ids.
    return str(min(int(s) for s in id_strings))
def maximum_id(tweets):
    """
    find maximum id from xml

    @param tweets retreived xml
    @return the largest status id in the document, as a string
    @raise IndexError if the document has no <statuses> element
    @raise ValueError if no numeric ids are present
    """
    # See minimum_id: the old 'except X, e' / 'raise e' pattern was
    # Python-2-only syntax and destroyed the traceback; exceptions now
    # propagate unchanged.
    tree = etree.parse(StringIO(tweets), etree.XMLParser())
    statuses = tree.xpath('//statuses')
    id_strings = statuses[0].xpath('./status/id/text()')
    # max() raises ValueError on an empty sequence, matching the old
    # behaviour for documents without status ids.
    return str(max(int(s) for s in id_strings))
def delete_first_line(string):
    """
    delete head line from assigned lines

    @param string text whose first line should be removed
    @return everything after the first newline ('' if there is none)
    """
    # partition() splits at the first '\n' only; the third element is
    # everything after it, or '' when no newline exists.
    return string.partition('\n')[2]
def sort_status_by_id(statuses):
    """
    sort status by status id.

    @param statuses list of statuses (all <status> elements in <statuses> tag)
    @return the serialized <status> elements, sorted by id, joined by '\n'
    """
    def status_id(status):
        # Each <status> is assumed to carry one <id> child; compare its
        # numeric value.
        return int(status.xpath('./id/text()')[0])

    st_list = statuses[0].xpath('./status')
    # Bug fix: the old comparator called the builtin id() twice on the
    # xpath result of 'x' (comparing memory addresses and ignoring 'y'),
    # so the sort order was meaningless.
    st_list.sort(key=status_id)
    # Bug fix: lxml elements have no .tostring() method; serialization is
    # done with etree.tostring().
    return '\n'.join(etree.tostring(st) for st in st_list)
def _past_retreiver(max_id):
options = default_options
if 'since_id' in options:
del options['since_id']
options['max_id'] = str(max_id)
print options
return retreive_xml(**options)
def _future_retreiver(since_id):
    """Fetch tweets with ids above since_id (walking into the future)."""
    # Bug fix: copy the defaults. The old code aliased default_options and
    # then deleted 'max_id' from it, corrupting the module-level dict for
    # every later call.
    options = dict(default_options)
    if 'max_id' in options:
        del options['max_id']
    options['since_id'] = str(since_id)
    return retreive_xml(**options)
def runner(id = -1, filename = 'twitter.log', direction = 'past'):
    """
    runner() retreives all tweets

    Fetches one page of tweets around the given status id, appends the raw
    XML to `filename`, and returns the next id to continue from.
    Returns False when retrieval should stop.
    """
    # Append to an existing log; otherwise create it with the XML header
    # first, then reopen in append mode.
    if os.path.isfile(filename):
        fp = open(filename, 'a+')
    else:
        fp = open(filename, 'w+')
        fp.write(xml_header)
        fp.close()
        fp = open(filename, 'a+')
    try:
        xml = 'initial string...'
        if id==-1:
            # Sentinel id: nothing more to fetch.
            print '...done'
            return False
        else:
            print direction + " : " + str(id)
            if direction == 'past':
                xml = _past_retreiver(id)
            elif direction == 'future':
                xml = _future_retreiver(id)
            else:
                # NOTE(review): fp is left open on this unknown-direction
                # path — confirm whether that is intentional.
                return id
        fp.write(xml)
        fp.close()
        min_id = minimum_id(xml)
        print 'minimum id : ' + min_id
        # Wait between requests to respect the API rate limit.
        sleep(interval)
        print 'passing ' + str(int(min_id)-1)
        # Continue just below the oldest tweet seen so far.
        return int(min_id)-1
    # Exception is for "Twitter is over capacity"
    except IndexError, e:
        # Malformed/empty response: retry from the same id after a pause.
        print xml + ' -> ' + str(e)
        fp.close()
        sleep(interval)
        return id
    except ValueError, e:
        # No numeric ids in the response: treat as end of the timeline.
        print xml + ' -> ' + str(e)
        fp.close()
        sleep(interval)
        return False
    except Exception, e:
        # Any other error (e.g. transient HTTP failure): retry same id.
        print xml + ' -> ' + str(e)
        fp.close()
        sleep(interval)
        return id
if __name__ == '__main__':
    # Start from a sentinel id far above any real status id and walk the
    # timeline into the past until runner() returns False.
    ret = 99999999999
    direction = 'past'
    while (ret != False):
        ret = runner(id = ret, direction = direction)
| {
"content_hash": "1f59b20aa91a4dbe86d904c0a876fc06",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 78,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.5390349968937668,
"repo_name": "ymotongpoo/restroom",
"id": "fd41169b9973166da9606e16040705b90bda4862",
"size": "4927",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "0x7d8/twitterxmllogger/twitterxmllogger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4513"
},
{
"name": "C++",
"bytes": "26009"
},
{
"name": "CSS",
"bytes": "1634"
},
{
"name": "D",
"bytes": "838"
},
{
"name": "Go",
"bytes": "11639"
},
{
"name": "HTML",
"bytes": "705490"
},
{
"name": "JavaScript",
"bytes": "10224"
},
{
"name": "Makefile",
"bytes": "4858"
},
{
"name": "OCaml",
"bytes": "10006"
},
{
"name": "Python",
"bytes": "224305"
},
{
"name": "Shell",
"bytes": "462"
},
{
"name": "TypeScript",
"bytes": "2406"
}
],
"symlink_target": ""
} |
import time
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *
# Class for Website-Rendering. Uses QWebPage, which
# requires a running QtGui to work.
class WebkitRenderer(QObject):
"""A class that helps to create 'screenshots' of webpages using
Qt's QWebkit. Requires PyQt4 library.
Use "render()" to get a 'QImage' object, render_to_bytes() to get the
resulting image as 'str' object or render_to_file() to write the image
directly into a 'file' resource.
These methods have to be called from within Qt's main (GUI) thread.
An example on how to use this is the __qt_main() method at the end
of the libraries source file. More generic examples:
def qt_main():
while go_on():
do_something_meaningful()
while QApplication.hasPendingEvents():
QApplication.processEvents()
QApplication.quit()
app = init_qtgui()
QTimer.singleShot(0, qt_main)
sys.exit(app.exec_())
Or let Qt handle event processing using a QTimer instance:
def qt_main_loop():
if not go_on():
QApplication.quit()
return
do_something_meaningful()
app = init_qtgui()
main_timer = QTimer()
QObject.connect(main_timer, QtCore.SIGNAL("timeout()"), qt_main_loop)
sys.exit(app.exec_())
Avaible properties:
width -- The width of the "browser" window. 0 means autodetect (default).
height -- The height of the window. 0 means autodetect (default).
timeout -- Seconds after that the request is aborted (default: 0)
wait -- Seconds to wait after loading has been finished (default: 0)
scaleToWidth -- The resulting image is scaled to this width.
scaleToHeight -- The resulting image is scaled to this height.
scaleRatio -- The image is scaled using this method. Possible values are:
keep
expand
crop
ignore
grabWhileWindow -- If this is True a screenshot of the whole window is taken. Otherwise only the current frame is rendered. This is required for plugins to be visible, but it is possible that another window overlays the current one while the screenshot is taken. To reduce this possibility, the window is activated just before it is rendered if this property is set to True (default: False).
qWebSettings -- Settings that should be assigned to the created QWebPage instance. See http://doc.trolltech.com/4.6/qwebsettings.html for possible keys. Defaults:
JavascriptEnabled: False
PluginsEnabled: False
PrivateBrowsingEnabled: True
JavascriptCanOpenWindows: False
"""
def __init__(self,**kwargs):
"""Sets default values for the properties."""
if not QApplication.instance():
raise RuntimeError(self.__class__.__name__ + " requires a running QApplication instance")
QObject.__init__(self)
# Initialize default properties
self.width = kwargs.get('width', 0)
self.height = kwargs.get('height', 0)
self.timeout = kwargs.get('timeout', 0)
self.wait = kwargs.get('wait', 0)
self.scaleToWidth = kwargs.get('scaleToWidth', 0)
self.scaleToHeight = kwargs.get('scaleToHeight', 0)
self.scaleRatio = kwargs.get('scaleRatio', 'keep')
self.format = kwargs.get('format', 'png')
self.logger = kwargs.get('logger', None)
# Set this to true if you want to capture flash.
# Not that your desktop must be large enough for
# fitting the whole window.
self.grabWholeWindow = kwargs.get('grabWholeWindow', False)
self.renderTransparentBackground = kwargs.get('renderTransparentBackground', False)
self.ignoreAlert = kwargs.get('ignoreAlert', True)
self.ignoreConfirm = kwargs.get('ignoreConfirm', True)
self.ignorePrompt = kwargs.get('ignorePrompt', True)
self.interruptJavaScript = kwargs.get('interruptJavaScript', True)
self.encodedUrl = kwargs.get('encodedUrl', False)
# Set some default options for QWebPage
self.qWebSettings = {
QWebSettings.JavascriptEnabled : False,
QWebSettings.PluginsEnabled : False,
QWebSettings.PrivateBrowsingEnabled : True,
QWebSettings.JavascriptCanOpenWindows : False
}
def render(self, url):
    """Render *url* and return the result as a QImage object."""
    # A helper object is required because QApplication.processEvents may
    # be called while rendering, causing this method to be re-entered
    # before it has returned.
    worker = _WebkitRendererHelper(self)
    worker._window.resize(self.width, self.height)
    image = worker.render(url)
    # Keep the helper alive as long as the image is: it owns the
    # QWebPage and friends whose data the image may still reference.
    image.helper = worker
    return image
def render_to_file(self, url, file_object):
    """Renders the image into a File resource.

    Returns the size of the data that has been written.
    """
    # Delegate to render_to_bytes so both methods share one code path
    # (previously this duplicated the QBuffer/save logic).  The snapshot
    # of self.format happens inside render_to_bytes, preserving the old
    # "format may change during processEvents()" guarantee.
    data = self.render_to_bytes(url)
    file_object.write(data)
    return len(data)
def render_to_bytes(self, url):
    """Renders the image into an object of type 'str'"""
    # Snapshot the format first: processEvents() during render() could
    # change self.format underneath us.
    fmt = self.format
    buffer_ = QBuffer()
    self.render(url).save(buffer_, fmt)
    return buffer_.buffer().data()
class _WebkitRendererHelper(QObject):
    """This helper class is doing the real work. It is required to
    allow WebkitRenderer.render() to be called "asynchronously"
    (but always from Qt's GUI thread).
    """

    def __init__(self, parent):
        """Copies the properties from the parent (WebkitRenderer) object,
        creates the required instances of QWebPage, QWebView and QMainWindow
        and registers some Slots.
        """
        QObject.__init__(self)
        # Copy properties from parent
        for key, value in parent.__dict__.items():
            setattr(self, key, value)
        # Create and connect required PyQt4 objects
        self._page = CustomWebPage(logger=self.logger, ignore_alert=self.ignoreAlert,
                                   ignore_confirm=self.ignoreConfirm, ignore_prompt=self.ignorePrompt,
                                   interrupt_js=self.interruptJavaScript)
        self._view = QWebView()
        self._view.setPage(self._page)
        self._window = QMainWindow()
        self._window.setCentralWidget(self._view)
        # Apply QWebSettings.
        # FIX: iteritems() is Python-2 only; items() behaves identically
        # here and also works on Python 3.
        for key, value in self.qWebSettings.items():
            self._page.settings().setAttribute(key, value)
        # Connect required event listeners
        self.connect(self._page, SIGNAL("loadFinished(bool)"), self._on_load_finished)
        self.connect(self._page, SIGNAL("loadStarted()"), self._on_load_started)
        self.connect(self._page.networkAccessManager(), SIGNAL("sslErrors(QNetworkReply *,const QList<QSslError>&)"), self._on_ssl_errors)
        self.connect(self._page.networkAccessManager(), SIGNAL("finished(QNetworkReply *)"), self._on_each_reply)
        # The way we will use this, it seems to be unnecessary to have
        # scrollbars enabled
        self._page.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
        self._page.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
        self._page.settings().setUserStyleSheetUrl(QUrl("data:text/css,html,body{overflow-y:hidden !important;}"))
        # Show this widget
        self._window.show()

    def __del__(self):
        """Clean up Qt4 objects. """
        self._window.close()
        del self._window
        del self._view
        del self._page

    def render(self, url):
        """The real worker. Loads the page (_load_page) and awaits
        the end of the given 'delay'. While it is waiting outstanding
        QApplication events are processed.
        After the given delay, the Window or Widget (depends
        on the value of 'grabWholeWindow') is drawn into a QPixmap
        and postprocessed (_post_process_image).
        """
        self._load_page(url, self.width, self.height, self.timeout)
        # Wait for end of timer. In this time, process
        # other outstanding Qt events.
        if self.wait > 0:
            if self.logger: self.logger.debug("Waiting %d seconds " % self.wait)
            waitToTime = time.time() + self.wait
            while time.time() < waitToTime:
                if QApplication.hasPendingEvents():
                    QApplication.processEvents()
        if self.renderTransparentBackground:
            # Another possible drawing solution
            image = QImage(self._page.viewportSize(), QImage.Format_ARGB32)
            image.fill(QColor(255, 0, 0, 0).rgba())
            # http://ariya.blogspot.com/2009/04/transparent-qwebview-and-qwebpage.html
            palette = self._view.palette()
            palette.setBrush(QPalette.Base, Qt.transparent)
            self._page.setPalette(palette)
            self._view.setAttribute(Qt.WA_OpaquePaintEvent, False)
            painter = QPainter(image)
            painter.setBackgroundMode(Qt.TransparentMode)
            self._page.mainFrame().render(painter)
            painter.end()
        else:
            if self.grabWholeWindow:
                # Note that this does not fully ensure that the
                # window still has the focus when the screen is
                # grabbed. This might result in a race condition.
                self._view.activateWindow()
                image = QPixmap.grabWindow(self._window.winId())
            else:
                image = QPixmap.grabWidget(self._window)
        return self._post_process_image(image)

    def _load_page(self, url, width, height, timeout):
        """
        This method implements the logic for retrieving and displaying
        the requested page.  Raises RuntimeError when *timeout* seconds
        elapse before "loadFinished(bool)" fires.
        """
        # This is an event-based application. So we have to wait until
        # "loadFinished(bool)" raised.
        cancelAt = time.time() + timeout
        self.__loading = True
        # FIX: the default was stored as ``__loadingResult`` while the
        # reader below and the loadFinished slot both use
        # ``__loading_result``; unify on the latter so the default is
        # actually visible.
        self.__loading_result = False  # Default
        if self.encodedUrl:
            self._page.mainFrame().load(QUrl.fromEncoded(url))
        else:
            self._page.mainFrame().load(QUrl(url))
        while self.__loading:
            if timeout > 0 and time.time() >= cancelAt:
                raise RuntimeError("Request timed out on %s" % url)
            while QApplication.hasPendingEvents() and self.__loading:
                QCoreApplication.processEvents()
        if self.logger: self.logger.debug("Processing result")
        if self.__loading_result == False:
            if self.logger: self.logger.warning("Failed to load %s" % url)
        # Set initial viewport (the size of the "window")
        size = self._page.mainFrame().contentsSize()
        if self.logger: self.logger.debug("contentsSize: %s", size)
        if width > 0:
            size.setWidth(width)
        if height > 0:
            size.setHeight(height)
        self._window.resize(size)

    def _post_process_image(self, qImage):
        """If 'scaleToWidth' or 'scaleToHeight' are set to a value
        greater than zero this method will scale the image
        using the method defined in 'scaleRatio'.
        """
        if self.scaleToWidth > 0 or self.scaleToHeight > 0:
            # Scale this image
            if self.scaleRatio == 'keep':
                ratio = Qt.KeepAspectRatio
            elif self.scaleRatio in ['expand', 'crop']:
                ratio = Qt.KeepAspectRatioByExpanding
            else:  # 'ignore'
                ratio = Qt.IgnoreAspectRatio
            qImage = qImage.scaled(self.scaleToWidth, self.scaleToHeight, ratio)
            if self.scaleRatio == 'crop':
                qImage = qImage.copy(0, 0, self.scaleToWidth, self.scaleToHeight)
        return qImage

    def _on_each_reply(self, reply):
        """Logs each requested uri"""
        # FIX: guard the logger like every other slot does; previously
        # this raised AttributeError whenever no logger was configured.
        if self.logger:
            self.logger.debug("Received %s" % (reply.url().toString()))

    # Eventhandler for "loadStarted()" signal
    def _on_load_started(self):
        """Slot that sets the '__loading' property to true."""
        if self.logger: self.logger.debug("loading started")
        self.__loading = True

    # Eventhandler for "loadFinished(bool)" signal
    def _on_load_finished(self, result):
        """Slot that sets the '__loading' property to false and stores
        the result code in '__loading_result'.
        """
        if self.logger: self.logger.debug("loading finished with result %s", result)
        self.__loading = False
        self.__loading_result = result

    # Eventhandler for "sslErrors(QNetworkReply *,const QList<QSslError>&)" signal
    def _on_ssl_errors(self, reply, errors):
        """Slot that writes SSL warnings into the log but ignores them."""
        for e in errors:
            if self.logger: self.logger.warn("SSL: " + e.errorString())
        reply.ignoreSslErrors()
class CustomWebPage(QWebPage):
    """QWebPage subclass that can silence JavaScript dialogs and
    long-running scripts, logging each event when a logger is set."""

    def __init__(self, **kwargs):
        super(CustomWebPage, self).__init__()
        # Pull every behavioural switch from kwargs, defaulting to the
        # headless-friendly settings (suppress all dialogs).
        for option, fallback in (('logger', None),
                                 ('ignore_alert', True),
                                 ('ignore_confirm', True),
                                 ('ignore_prompt', True),
                                 ('interrupt_js', True)):
            setattr(self, option, kwargs.get(option, fallback))

    def javaScriptAlert(self, frame, message):
        """Log the alert; forward it to Qt only when alerts are not ignored."""
        if self.logger:
            self.logger.debug('Alert: %s', message)
        if self.ignore_alert:
            return None
        return super(CustomWebPage, self).javaScriptAlert(frame, message)

    def javaScriptConfirm(self, frame, message):
        """Log the confirm dialog; answer False when confirms are ignored."""
        if self.logger:
            self.logger.debug('Confirm: %s', message)
        if self.ignore_confirm:
            return False
        return super(CustomWebPage, self).javaScriptConfirm(frame, message)

    def javaScriptPrompt(self, frame, message, default, result):
        """This function is called whenever a JavaScript program running inside frame tries to prompt
        the user for input. The program may provide an optional message, msg, as well as a default value
        for the input in defaultValue.

        If the prompt was cancelled by the user the implementation should return false;
        otherwise the result should be written to result and true should be returned.
        If the prompt was not cancelled by the user, the implementation should return true and
        the result string must not be null.
        """
        if self.logger:
            self.logger.debug('Prompt: %s (%s)' % (message, default))
        if self.ignore_prompt:
            return False
        return super(CustomWebPage, self).javaScriptPrompt(frame, message, default, result)

    def shouldInterruptJavaScript(self):
        """This function is called when a JavaScript program is running for a long period of time.
        If the user wanted to stop the JavaScript the implementation should return true; otherwise false.
        """
        if self.logger:
            self.logger.debug("WebKit ask to interrupt JavaScript")
        return self.interrupt_js
| {
"content_hash": "c1aa73666bf79e1ecbb5b804d2078644",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 395,
"avg_line_length": 43.15126050420168,
"alnum_prop": 0.6400519311911717,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "8e7d3c5fb0cdaaec4d2c3bbd0480baefec77b908",
"size": "16386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Dataset/python/webkit2png.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
__updated__ = "2016-05-10"

import json

from protoExt.models import CustomDefinition, ViewDefinition
from protoExt.views import validateRequest
from protoExt.utils.utilsWeb import JsonError
from protoExt.views.protoGetPci import getBasePci
from protoLib.getStuff import getDjangoModel
from protoExt.utils.utilsBase import list2dict

# Document types for which RAI menu entries and PCI view definitions
# are generated (see doBuildRaiMenu / doTreeDocsMeta below).
DOCUMENTS = ('Artefact', 'Capacity', 'Requirement')
def doBuildRaiConfig(request, queryset):
    """Build the whole RAI configuration: the menu plus the single-document
    and tree-document PCI view definitions.

    Returns a success dict on completion, or a JsonError response when
    request validation or any build step fails.
    """
    cBase, msgReturn = validateRequest(request)
    if msgReturn:
        return msgReturn

    # Run every build step in order; each returns (ok, message).  This
    # replaces three copies of the same call-and-check boilerplate.
    steps = (
        lambda: doBuildRaiMenu(cBase, queryset),     # Do Menu
        lambda: doSingleDocsMeta(cBase, queryset),   # Do single documents Pci's
        lambda: doTreeDocsMeta(cBase),               # Do tree documents Pci's
    )
    for step in steps:
        retSt, msgReturn = step()
        if not retSt:
            return JsonError(msgReturn)

    return {'success': True, 'message': 'Ok'}
def doFinalDetails(cBase, document, docFields):
    """Attach the detail-grid and form configuration for *document* to the
    meta, then persist the view definition with its refreshed description.
    """
    # Details config
    doDetailsConf(cBase, document)
    # Form config
    doFormConf(cBase, document, docFields)
    # Update and save the stored definition
    definition = cBase.protoDef
    definition.metaDefinition = cBase.protoMeta
    definition.description = cBase.protoMeta['description']
    definition.save()
def doTreeDocsMeta(cBase):
    """Create/update the tree-style PCI view definition for each document
    type in DOCUMENTS.

    Returns (True, '') on success, or (False, message) when the model for
    a view code cannot be resolved.
    """
    viewIcon = 'icon-tree'
    for document in DOCUMENTS:
        cBase.viewCode = 'rai01ref.{0}.tree'.format(document)
        cBase.viewEntity = 'rai01ref.{0}'.format(document)
        try:
            cBase.model = getDjangoModel(cBase.viewCode)
            getBasePci(cBase, False, True)
        # FIX: narrowed from a bare ``except:`` so system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not swallowed.
        except Exception:
            return False, 'model not found: {0}'.format(cBase.viewCode)

        # DocType conf: restrict the docType zoom to this document type
        docFields = list2dict(cBase.protoMeta['fields'], 'name')
        docFields['docType']['zoomFilter'] = "document, ={0}".format(document)

        # Get document info fields from the document definition (rai01ref)
        docFields = cBase.model.getJfields(None, document)[0]
        for lKey in docFields:
            cBase.protoMeta['fields'].append(docFields[lKey])

        # Add IconField
        cBase.protoMeta['fields'].append({"name": "iconCls", "crudType": "readOnly", })

        # Tree Config and Form selector
        cBase.protoMeta.update({
            "pciStyle": "tree",
            "treeRefField": "ref{0}".format(document),
            "formSelector": "docType_id",
            "jsonField": "info",
            "description": 'Tree {0}'.format(document),
            "viewIcon": viewIcon,
        })

        # do User interface and save
        doFinalDetails(cBase, document, {})

    return True, ''
def doSingleDocsMeta(cBase, queryset):
    """Create/update one PCI view definition per selected document type
    instance in *queryset*.

    Returns (True, '') on success, or (False, message) when the model for
    a view code cannot be resolved.
    """
    for pDoc in queryset:
        idType = str(pDoc.pk)
        cBase.viewCode = 'rai01ref.{0}.{1}'.format(pDoc.document, idType)
        cBase.viewEntity = 'rai01ref.{0}'.format(pDoc.document)
        shortTitle = pDoc.dtype
        try:
            cBase.model = getDjangoModel(cBase.viewCode)
            getBasePci(cBase, False, True)
        # FIX: narrowed from a bare ``except:`` so system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not swallowed.
        except Exception:
            return False, 'model not found: {0}'.format(cBase.viewCode)

        # DocType conf: pin the docType field defaults/filter to this type
        docFields = list2dict(cBase.protoMeta['fields'], 'name')
        docFields['docType_id']['prpDefault'] = idType
        docFields['docType']['prpDefault'] = shortTitle
        docFields['docType']['zoomFilter'] = "document, ={0}".format(pDoc.document)
        cBase.protoMeta['gridConfig']['baseFilter'].append({'property': 'docType', 'filterStmt': '=' + idType})

        # varias
        cBase.protoMeta['jsonField'] = "info"
        cBase.protoMeta['shortTitle'] = shortTitle
        cBase.protoMeta['description'] = '{0}: {1}'.format(pDoc.document, shortTitle)

        # Get document info fields from the instance definition (rai01ref)
        docFields = cBase.model.getJfields(idType)[0]
        for lKey in docFields:
            cBase.protoMeta['fields'].append(docFields[lKey])

        # do User interface and save
        doFinalDetails(cBase, pDoc.document, docFields)

    return True, ''
def doBuildRaiMenu(cBase, queryset):
    """Rebuild the 'RAI MENU' node inside the team's '__menu' definition.

    Creates one leaf per tree view, one folder per document type with one
    leaf per selected document-type instance, then replaces any existing
    'RAI MENU' entry at the head of the stored menu.

    Returns (True, '') on success or (False, 'Menu not found') when the
    team has no active '__menu' definition.
    """
    #-- RAI Auto Menu (documents and selected documents)
    lMenu = {}
    Ix = 0

    # Tree views: one leaf per document type
    viewIcon = 'icon-tree'
    for document in DOCUMENTS:
        viewCode = 'rai01ref.{0}.{1}'.format(document, 'tree')
        lMenu[viewCode] = {
            'viewCode': viewCode,
            'text': document,
            'index': Ix,
            'iconCls': viewIcon,
            'leaf': True,
        }
        Ix += 1

    # Documents config: one expandable folder per document type
    for document in DOCUMENTS:
        viewIcon = 'rai_{}'.format(document[:3].lower())
        lMenu[document] = {
            'text': document,
            'expanded': True,
            'index': Ix,
            'iconCls': viewIcon,
            'leaf': False,
            'children': [],
        }
        Ix += 1

    # One leaf per selected document-type instance
    for pDoc in queryset:
        viewCode = 'rai01ref.{0}.{1}'.format(pDoc.document, str(pDoc.pk))
        viewIcon = 'rai_{}'.format(pDoc.__str__())
        model_dict = {
            'viewCode': viewCode,
            'text': pDoc.dtype,
            'index': Ix,
            'iconCls': viewIcon,
            'leaf': True,
        }
        Ix += 1
        lMenu[pDoc.document]['children'].append(model_dict)
        # Delete previous definition for this view code
        ViewDefinition.objects.filter(code=viewCode).delete()

    #-- Update Menu in customDefinition --------------------------------------
    viewCode = '__menu'
    protoDef = CustomDefinition.objects.get_or_create(
        code=viewCode, smOwningTeam=cBase.userProfile.userTeam,
        defaults={'active': False, 'code': viewCode,
                  'smOwningTeam': cBase.userProfile.userTeam}
    )[0]

    # El default solo parece funcionar al insertar en la Db
    # (translation: the default only seems to apply when inserting into
    # the Db) - a freshly created definition is inactive, so bail out.
    if not protoDef.active:
        return False, 'Menu not found'

    # NOTE(review): metaDefinition is read here as a list but saved below
    # via json.dumps - presumably the field deserialises on read; confirm.
    menuData = protoDef.metaDefinition

    #-- Update Db ------------------------------------------------------
    # Drop an existing 'RAI MENU' head entry before inserting the new one.
    try:
        raiMenu = menuData[0]
        if str(raiMenu['text']) == str('RAI MENU'):
            # FIX: drop the dead ``raiMenu =`` rebinding - only the pop's
            # side effect is needed, the variable is rebuilt just below.
            menuData.pop(0)
    # FIX: narrowed from a bare ``except: pass`` to the errors an empty or
    # oddly-shaped menu can actually raise.
    except (IndexError, KeyError, TypeError):
        pass

    raiMenu = {
        'text': 'RAI MENU',
        'expanded': False,
        'index': 0,
        'leaf': False,
        'children': [],
    }

    for lKey in lMenu:
        raiMenu['children'].append(lMenu[lKey])

    menuData.insert(0, raiMenu)
    protoDef.metaDefinition = json.dumps(menuData)
    protoDef.save()

    return True, ''
def doDetailsConf(cBase, document):
    """Attach the master/detail grid configuration for *document* to
    cBase.protoMeta['detailsConfig'].

    Documents without a known configuration leave protoMeta untouched.
    """
    details_by_document = {
        'Capacity': [{
            "detailName": "artefactcapacity",
            "menuText": "Artefacts",
            "masterField": "pk",
            "detailField": "capacity__pk",
            "conceptDetail": "rai01ref.ArtefactCapacity",
        }, {
            "detailName": "projectcapacity",
            "menuText": "Projects",
            "masterField": "pk",
            "detailField": "capacity__pk",
            "conceptDetail": "rai01ref.ProjectCapacity",
        }, {
            "detailName": "copyto",
            "menuText": "Copies",
            "masterField": "pk",
            "detailField": "copyFrom_id",
            "conceptDetail": "rai01ref.Capacity",
        }],
        'Requirement': [{
            "menuText": "Artefacts",
            "detailName": "artefactrequirement",
            "masterField": "pk",
            "detailField": "requirement__pk",
            "conceptDetail": "rai01ref.ArtefactRequirement",
        }, {
            "menuText": "Projects",
            "detailName": "projectrequirement",
            "masterField": "pk",
            "detailField": "requirement__pk",
            "conceptDetail": "rai01ref.ProjectRequirement",
        }, {
            "menuText": "Copies",
            "detailName": "copyto",
            "masterField": "pk",
            "detailField": "copyFrom_id",
            "conceptDetail": "rai01ref.Requirement",
        }],
        'Artefact': [{
            "menuText": "Composition",
            "detailField": "containerArt__pk",
            "conceptDetail": "rai01ref.ArtefactComposition",
            "masterField": "pk",
            "detailName": "artefactcomposition.containerArt",
        }, {
            "menuText": "Requirements",
            "detailField": "artefact__pk",
            "conceptDetail": "rai01ref.ArtefactRequirement",
            "masterField": "pk",
            "detailName": "artefactrequirement.artefact",
        }, {
            "menuText": "Capacities",
            "detailField": "artefact__pk",
            "conceptDetail": "rai01ref.ArtefactCapacity",
            "masterField": "pk",
            "detailName": "artefactcapacity.artefact",
        }, {
            "menuText": "Projects",
            "detailField": "artefact__pk",
            "conceptDetail": "rai01ref.ProjectArtefact",
            "masterField": "pk",
            "detailName": "projectartefact.artefact",
        }, {
            "menuText": "Sources",
            "detailField": "artefact__pk",
            "conceptDetail": "rai01ref.ArtefactSource",
            "masterField": "pk",
            "detailName": "artefactsource.artefact",
        }, {
            "menuText": "Copies",
            "detailName": "copyto",
            "masterField": "pk",
            "detailField": "copyFrom_id",
            "conceptDetail": "rai01ref.Artefact",
        }],
    }
    if document in details_by_document:
        cBase.protoMeta["detailsConfig"] = details_by_document[document]
def doFormConf(cBase, document, docFields):
    """Attach the form layout for *document* to cBase.protoMeta['formConfig'].

    The form has three fieldsets: the base fields, the user-defined
    fields (``docFields``, prefixed with capacity/requirement links for
    artefacts), and a collapsed admin section.
    """
    user_fields = []
    if document == 'Artefact':
        user_fields.append({'__ptType': 'formField', "name": "capacity"})
        user_fields.append({'__ptType': 'formField', "name": "requirement"})
    user_fields.extend({'__ptType': 'formField', "name": field_name}
                       for field_name in docFields)

    base_fieldset = {
        '__ptType': 'fieldset',
        "fsLayout": "2col",
        "items": [
            {'__ptType': 'formField', "name": "code"},
            {'__ptType': 'formField', "name": "docType"},
            {'__ptType': 'formField',
             "name": "description", "prpLength": "1", },
            {'__ptType': 'formField', "name": "ref{0}".format(document)},
            {'__ptType': 'formField', "name": "copyFrom"}
        ],
    }
    udf_fieldset = {
        '__ptType': 'fieldset',
        "fsLayout": "2col",
        "items": user_fields,
    }
    admin_fieldset = {
        '__ptType': 'fieldset',
        "fsLayout": "2col",
        "title": "Admin",
        "collapsible": True,
        "collapsed": True,
        "items": [
            {'__ptType': 'formField', "name": "smOwningUser"},
            {'__ptType': 'formField', "name": "smOwningTeam"},
            {'__ptType': 'formField', "name": "smCreatedBy"},
            {'__ptType': 'formField', "name": "smModifiedOn"},
            {'__ptType': 'formField', "name": "smCreatedOn"},
            {'__ptType': 'formField', "name": "smModifiedBy"},
        ],
    }
    cBase.protoMeta["formConfig"] = {
        "items": [base_fieldset, udf_fieldset, admin_fieldset]
    }
| {
"content_hash": "b665482bd12e9d67d48c9f2b94d926af",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 111,
"avg_line_length": 31.475,
"alnum_prop": 0.5384343835495543,
"repo_name": "DarioGT/docker-carra",
"id": "3354045b186f4c1b4a36dd3ead49640edf209fa0",
"size": "11355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rai01ref/actions/buildRaiMenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64175"
},
{
"name": "Dockerfile",
"bytes": "739"
},
{
"name": "HTML",
"bytes": "14125"
},
{
"name": "JavaScript",
"bytes": "21266785"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "851053"
},
{
"name": "Shell",
"bytes": "2934"
},
{
"name": "Visual Basic",
"bytes": "7788"
}
],
"symlink_target": ""
} |
"""Executed when package directory is called as a script"""
from .dlcli import main
main()
| {
"content_hash": "c395fbf2e98a2777b7f14ddde97fc3b7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 59,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "dataloop/dlcli",
"id": "5713fce1586ea572241f7b6548ca3ce773abe0cf",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlcli/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "94913"
}
],
"symlink_target": ""
} |
'''
validation.py

Created by Thomas Mangin on 2013-03-18.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
'''

__all__ = ["validation", "ValidationError"]

# Version of the JSON configuration format this module validates
# (checked against the 'exabgp' key of the configuration).
FORMAT = 3
# When True, _validate prints each key as it walks the definition.
DEBUG = False

from collections import deque
from collections import OrderedDict

from exabgp.data import check

# Short aliases for the type/presence bit masks used throughout the schema.
TYPE = check.TYPE
PRESENCE = check.PRESENCE
class ValidationError (Exception):
    """Raised when a configuration does not match the expected schema.

    Carries the *location* (list of keys leading to the failure) and a
    *message* chosen from the class-level error strings below.
    """

    internal_error = 'invalid configuration definition (internal error)'
    mandatory_error = 'missing mandatory configuration field'
    type_error = 'the data for this configuration option is not what was expected'
    configuration_error = 'the configuration is missing this information'
    conflicting_error = 'the configuration has conflicting information'

    def __init__(self, location, message):
        self.location = location
        self.message = message

    def __str__(self):
        where = ','.join(self.location) if self.location else 'root'
        return 'location %s : %s' % (where, self.message)
# Schema for BGP path attributes, shared by the top-level 'attribute'
# section and per-prefix overrides in 'update,prefix'.  Each entry maps
# an attribute name to (type mask, presence, cross-references, validator).
_attributes = OrderedDict((
	('next-hop', (TYPE.string, PRESENCE.optional, '', check.ipv4)),
	('origin' , (TYPE.string, PRESENCE.optional, '', ['igp','egp','incomplete'])),
	('as-path' , (TYPE.array, PRESENCE.optional, '', check.aspath)),
	('as-sequence' , (TYPE.array, PRESENCE.optional, '', check.assequence)),
	('local-preference', (TYPE.integer, PRESENCE.optional, '', check.localpreference)),
	('med', (TYPE.integer, PRESENCE.optional, '', check.med)),
	('aggregator' , (TYPE.string , PRESENCE.optional, '', check.ipv4)),
	('aggregator-id' , (TYPE.string , PRESENCE.optional, '', check.ipv4)),
	('atomic-aggregate' , (TYPE.boolean , PRESENCE.optional, '', check.nop)),
	('community' , (TYPE.array , PRESENCE.optional, '', check.community)),
	('extended-community' , (TYPE.array , PRESENCE.optional, '', check.extendedcommunity)),
	('aigp', (TYPE.integer, PRESENCE.optional, '', check.aigp)),
	('label' , (TYPE.array , PRESENCE.optional, '', check.label)),
	('cluster-list' , (TYPE.array , PRESENCE.optional, '', check.clusterlist)),
	('originator-id' , (TYPE.string , PRESENCE.optional, '', check.originator)),
	('path-information' , (TYPE.string|TYPE.integer , PRESENCE.optional, '', check.pathinformation)),
	('route-distinguisher' , (TYPE.string , PRESENCE.optional, '', check.distinguisher)),
	('split' , (TYPE.integer , PRESENCE.optional, '', check.split)),
	('watchdog' , (TYPE.string , PRESENCE.optional, '', check.watchdog)),
	('withdrawn' , (TYPE.boolean , PRESENCE.optional, '', check.nop)),
))
# Full configuration schema walked by _validate.  Every node is a tuple of
# (type mask, presence, cross-references, contextual data) where the
# contextual data is a sub-schema (for objects), a list of valid values,
# or a validator function from exabgp.data.check.  The '<*>' key matches
# any user-chosen name at that level.
_definition = (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
	('exabgp' , (TYPE.integer, PRESENCE.mandatory, '', [FORMAT,])),
	('neighbor' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
		('<*>' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
			('tcp' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
				('bind' , (TYPE.string, PRESENCE.mandatory, '', check.ip)),
				('connect' , (TYPE.string, PRESENCE.mandatory, '', check.ip)),
				('ttl-security' , (TYPE.integer, PRESENCE.optional, '', check.uint8)),
				('md5' , (TYPE.string, PRESENCE.optional, '', check.md5)),
				('passive' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
			)))),
			('api' , (TYPE.object, PRESENCE.optional, 'api', OrderedDict((
				('<*>' , (TYPE.array, PRESENCE.mandatory, '', ['neighbor-changes','send-packets','receive-packets','receive-routes'])),
			)))),
			('session' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
				('router-id' , (TYPE.string, PRESENCE.mandatory, '', check.ipv4)),
				('hold-time' , (TYPE.integer, PRESENCE.mandatory, '', check.uint16)),
				('asn' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
					('local' , (TYPE.integer, PRESENCE.mandatory, '', check.uint32)),
					('peer' , (TYPE.integer, PRESENCE.mandatory, '', check.uint32)),
				)))),
				('feature' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
					('updates' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
						('group' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
						('flush' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					)))),
					('rib' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
						('adj-rib-out' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					)))),
				)))),
				('capability' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
					('family' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
						('ipv4' , (TYPE.array, PRESENCE.optional, '', ['unicast','multicast','nlri-mpls','mpls-vpn','flow-vpn','flow'])),
						('ipv6' , (TYPE.array, PRESENCE.optional, '', ['unicast','flow'])),
						('alias' , (TYPE.string, PRESENCE.optional, '', ['all','minimal'])),
					)))),
					('asn4' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					('route-refresh' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					('graceful-restart' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					('multi-session' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					('add-path' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
					('aigp' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
				)))),
			)))),
			('announce' , (TYPE.array, PRESENCE.optional, ['update,prefix','update,flow'], check.string)),
		)))),
	)))),
	('api' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
		('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
			('encoder' , (TYPE.string, PRESENCE.optional, '', ['json','text'])),
			('program' , (TYPE.string, PRESENCE.mandatory, '', check.nop)),
		)))),
	)))),
	('attribute' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
		('<*>' , (TYPE.object, PRESENCE.optional, '', _attributes)),
	)))),
	('flow' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
		('filtering-condition' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
			('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
				('source' , (TYPE.array|TYPE.string, PRESENCE.optional, '', check.flow_ipv4_range)),
				('destination' , (TYPE.array|TYPE.string, PRESENCE.optional, '', check.flow_ipv4_range)),
				('port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
				('source-port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
				('destination-port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
				('protocol' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['udp','tcp'])), # and value of protocols ...
				('packet-length' , (TYPE.array, PRESENCE.optional, '', check.flow_length)),
				('packet-fragment' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['not-a-fragment', 'dont-fragment', 'is-fragment', 'first-fragment', 'last-fragment'])),
				('icmp-type' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['unreachable', 'echo-request', 'echo-reply'])),
				# TODO : missing type
				('icmp-code' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['host-unreachable', 'network-unreachable'])),
				# TODO : missing code
				('tcp-flags' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['fin', 'syn', 'rst', 'push', 'ack', 'urgent'])),
				('dscp' , (TYPE.array|TYPE.integer, PRESENCE.optional, '', check.dscp)),
				# TODO: MISSING SOME MORE ?
			)))),
		)))),
		('filtering-action' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
			('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
				('rate-limit' , (TYPE.integer, PRESENCE.optional, '', check.float)),
				('discard' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
				('redirect' , (TYPE.string, PRESENCE.optional, '', check.redirect)),
				('community' , (TYPE.array , PRESENCE.optional, '', check.community)),
				('extended-community' , (TYPE.array , PRESENCE.optional, '', check.extendedcommunity)),
			)))),
		)))),
	)))),
	('update' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
		('prefix' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
			('<*>' , (TYPE.object, PRESENCE.optional, 'attribute', OrderedDict(( # name of route
				('<*>' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict(( # name of attributes referenced
					('<*>' , (TYPE.object, PRESENCE.optional, '', _attributes)), # prefix
				)))),
			)))),
		)))),
		('flow' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
			('<*>' , (TYPE.object, PRESENCE.optional, 'flow,filtering-condition', OrderedDict(( # name of the dos
				('<*>' , (TYPE.string, PRESENCE.mandatory, 'flow,filtering-action', check.nop)),
			)))),
		)))),
	)))),
)))
# Lookup in the definition all the keyword we used to make sure that users can not use them
# This allows us to be able to index on those words and to be sure of the underlying data
_reserved_keywords = set()

def _reserved (reserved_keywords,definition):
	"""Recursively add every object key of *definition* to *reserved_keywords*."""
	kind,_,_,od = definition
	if kind & TYPE.object:
		for key in od:
			reserved_keywords.update([key])
			_reserved(reserved_keywords,od[key])

_reserved(_reserved_keywords,_definition)

# Name are are long string and cause high memory usage use integer instead
# regenreate the _definition with indexes
_indexes_byname = dict()
_indexes_byid = dict()
for index,name in enumerate(_reserved_keywords):
	_indexes_byname[name] = index
	# BUG FIX: the original stored under the builtin ``id`` function
	# (``_indexes_byid[id] = name``), leaving a single bogus entry keyed
	# by the function object instead of one entry per index.
	_indexes_byid[index] = name

# TODO: Now need to rewrite the whole definiton to use the indexes
# TODO: and update the reference to do to the lookup in _indexes_by...
# check that the configuration has the reference
def _reference (root,json,location=None,references=None):
	pass
def _validate (root,json,definition,location=[]):
	"""Recursively check *json* against the schema node *definition*.

	*root* is the whole configuration (needed for cross-references),
	*location* the list of keys leading to the current node.  Raises
	ValidationError on the first mismatch.  Note: the mutable default
	``location=[]`` is safe here because the list is never mutated in
	place (``location + [key]`` builds a new list).
	"""
	kind,presence,references,contextual = definition

	# ignore missing optional elements
	if not json:
		if presence == PRESENCE.mandatory:
			raise ValidationError(location, ValidationError.mandatory_error)
		return

	# check that the value of the right type
	if not check.kind(kind,json):
		raise ValidationError(location, ValidationError.type_error)

	# for object check all the elements inside
	if kind & TYPE.object and check.object(json):
		subdefinition = contextual
		keys = deque(subdefinition.keys())
		while keys:
			key = keys.popleft()
			if DEBUG: print " "*len(location) + key
			# keys starting with '_' are internal and never validated
			if key.startswith('_'):
				continue
			if type(json) != type({}):
				raise ValidationError(location, ValidationError.type_error)
			# '<*>' is a wildcard: re-queue every actual key of the data
			if key == '<*>':
				keys.extendleft(json.keys())
				continue
			_reference (root,references,json,location)
			# fall back to the wildcard schema when the key has no
			# dedicated entry
			star = subdefinition.get('<*>',None)
			subtest = subdefinition.get(key,star)
			if subtest is None:
				raise ValidationError(location, ValidationError.configuration_error)
			_validate(root,json.get(key,None),subtest,location + [key])

	# for list check all the element inside
	elif kind & TYPE.array and check.array(json):
		test = contextual
		# This is a function
		if hasattr(test, '__call__'):
			for data in json:
				if not test(data):
					raise ValidationError(location, ValidationError.type_error)
		# This is a list of valid option
		elif type(test) == type([]):
			for data in json:
				if not data in test:
					raise ValidationError(location, ValidationError.type_error)
		# no idea what the data is - so something is wrong with the program
		else:
			raise ValidationError(location,ValidationError.internal_error)

	# for non container object check the value
	else:
		test = contextual
		# check that the value of the data
		if hasattr(test, '__call__'):
			if not test(json):
				raise ValidationError(location, ValidationError.type_error)
		# a list of valid option
		elif type(test) == type([]):
			if not json in test:
				raise ValidationError(location, ValidationError.type_error)
		else:
			raise ValidationError(location,ValidationError.internal_error)

	_reference (root,references,json,location)
def _inet (json):
	"""Reject neighbours whose capability 'family' section uses a
	conflicting family name ('alias' or 'inet')."""
	# NOTE(review): the value lists below are currently unused - only the
	# presence of the key itself triggers the error; presumably the lists
	# were meant for a finer per-family check.  Confirm intent.
	conflicts = {
		'alias': ['inet','inet4','inet6'],
		'inet': ['inet4','inet6']
	}

	for name in json['neighbor']:
		# internal keys (leading '_') are skipped, as in _validate
		inet = [_ for _ in json['neighbor'][name]['session']['capability']['family'].keys() if not _.startswith('_')]

		for conflict in conflicts:
			if conflict in inet:
				raise ValidationError(['neighbor',name,'session','capability','family'], ValidationError.conflicting_error)
def validation (json):
    """Validate a parsed configuration dict.

    First checks *json* against the module-level ``_definition`` schema via
    ``_validate`` (root and node start as the same object), then checks
    address-family conflicts via ``_inet``.  Raises ``ValidationError`` on
    any failure; returns ``None`` on success.
    """
    _validate(json,json,_definition)
    _inet(json)
| {
"content_hash": "108eaaff04dc29ddb02f7c1ccd1d3c38",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 161,
"avg_line_length": 42.11842105263158,
"alnum_prop": 0.6611215245235864,
"repo_name": "jbfavre/exabgp",
"id": "7457b9da9c91a4c34d750ad0ff0af4fef547c2c9",
"size": "12822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/test/validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "838070"
},
{
"name": "Shell",
"bytes": "17096"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from aldryn_apphooks_config.managers.base import ManagerMixin, QuerySetMixin
try:
from parler.managers import TranslatableManager, TranslatableQuerySet
except ImportError:
raise ImportError(
'Parler can not be found. Use pip install '
'aldryn-apphooks-config[parler] or just install django-parler.'
)
class AppHookConfigTranslatableQueryset(TranslatableQuerySet, QuerySetMixin):

    def create(self, **kwargs):
        """Create an object, forwarding any language previously selected via
        ``.language(...)`` so that ``.language('xx').create(...)`` works as a
        natural API.

        This mirrors ``TranslatableQuerySet.create``: that implementation is
        somehow not reached when chaining ``.language('en').create(...)``
        (the plain Django manager ``create`` runs instead); the root cause
        was never identified.
        """
        active_language = self._language
        if active_language:
            kwargs['_current_language'] = active_language
        # Deliberately skip TranslatableQuerySet in the MRO -- its create()
        # is replicated by the language handling above.
        return super(TranslatableQuerySet, self).create(**kwargs)
class AppHookConfigTranslatableManager(TranslatableManager, ManagerMixin):
    """
    Manager for TranslatableModels that have relations to apphook configs.

    Adds the namespace helper (from ``ManagerMixin``) to the manager and to
    the queryset, so objects can be filtered by their namespace.
    """
    queryset_class = AppHookConfigTranslatableQueryset

    def get_queryset(self):
        queryset_cls = self.queryset_class
        return queryset_cls(self.model, using=self.db)
| {
"content_hash": "a092632d825a7362c3d83f63b3ce55f2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 38.35,
"alnum_prop": 0.711212516297262,
"repo_name": "Venturi/oldcms",
"id": "3c7d8b78491a2808c81eed5e365a0807e31fe658",
"size": "1558",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/aldryn_apphooks_config/managers/parler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "418090"
},
{
"name": "HTML",
"bytes": "467117"
},
{
"name": "JavaScript",
"bytes": "916100"
},
{
"name": "PHP",
"bytes": "2231"
},
{
"name": "Python",
"bytes": "15786894"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "3743"
},
{
"name": "XSLT",
"bytes": "157892"
}
],
"symlink_target": ""
} |
"""Proximal stochastic dual coordinate ascent optimizer for linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import threading
import uuid
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.load_library import load_op_library
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits
from tensorflow.python.platform import resource_loader
# Public API of this module.
__all__ = ['SdcaModel']

# Lazily-loaded handle to the native SDCA ops library.  Guarded by the lock
# below so concurrent first calls load the shared object only once.
_sdca_ops = None
_sdca_ops_lock = threading.Lock()
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
# In which case, "import tensorflow" will always crash, even for users that
# never use contrib.
def _maybe_load_sdca_ops():
  """Load the _sdca_ops.so shared library exactly once, thread-safely."""
  with _sdca_ops_lock:
    global _sdca_ops
    if _sdca_ops is None:
      library_path = os.path.join(
          resource_loader.get_data_files_path(), '_sdca_ops.so')
      _sdca_ops = load_op_library(library_path)
      assert _sdca_ops, 'Could not load _sdca_ops.so'
# TODO(rohananil): add op_scope to appropriate methods.
class SdcaModel(object):
  """Stochastic dual coordinate ascent solver for linear models.

  This class currently only supports a single machine (multi-threaded)
  implementation. We expect the weights and duals to fit in a single machine.

  Loss functions supported:
  * Binary logistic loss
  * Squared loss
  * Hinge loss

  This class defines an optimizer API to train a linear model.

  ### Usage

  ```python
  # Create a solver with the desired parameters.
  lr = tf.contrib.linear_optimizer.SdcaModel(
      container, examples, variables, options)
  opt_op = lr.minimize()

  predictions = lr.predictions(examples)
  # Primal loss + L1 loss + L2 loss.
  regularized_loss = lr.regularized_loss(examples)
  # Primal loss only
  unregularized_loss = lr.unregularized_loss(examples)

  container: Name of the container (eg a hex-encoded UUID) where internal
    state of the optimizer can be stored. The container can be safely shared
    across many models.
  examples: {
    sparse_features: list of SparseTensors of value type float32.
    dense_features: list of dense tensors of type float32.
    example_labels: a tensor of type float32 and shape [Num examples]
    example_weights: a tensor of type float32 and shape [Num examples]
    example_ids: a tensor of type string and shape [Num examples]
  }
  variables: {
    sparse_features_weights: list of tensors of shape [vocab size]
    dense_features_weights: list of tensors of shape [1]
  }
  options: {
    symmetric_l1_regularization: 0.0
    symmetric_l2_regularization: 1.0
    loss_type: "logistic_loss"
  }
  ```

  In the training program you will just have to run the returned Op from
  minimize(). You should also eventually cleanup the temporary state used by
  the model, by resetting its (possibly shared) container.

  ```python
  # Execute opt_op and train for num_steps.
  for _ in xrange(num_steps):
    opt_op.run()

  # You can also check for convergence by calling
  # lr.approximate_duality_gap()
  ```
  """

  def __init__(self, container, examples, variables, options):
    """Create a new sdca optimizer."""
    _maybe_load_sdca_ops()
    if not container or not examples or not variables or not options:
      raise ValueError('All arguments must be specified.')
    supported_losses = ('logistic_loss', 'squared_loss', 'hinge_loss')
    if options['loss_type'] not in supported_losses:
      raise ValueError('Unsupported loss_type: ', options['loss_type'])
    self._assertSpecified(
        ['example_labels', 'example_weights', 'example_ids', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    self._assertSpecified(
        ['sparse_features_weights', 'dense_features_weights'], variables)
    self._assertList(
        ['sparse_features_weights', 'dense_features_weights'], variables)
    self._assertSpecified(
        ['loss_type', 'symmetric_l2_regularization',
         'symmetric_l1_regularization'], options)
    # Both regularization strengths must be non-negative.
    for name in ['symmetric_l1_regularization', 'symmetric_l2_regularization']:
      value = options[name]
      if value < 0.0:
        raise ValueError('%s should be non-negative. Found (%f)' %
                         (name, value))
    self._container = container
    self._examples = examples
    self._variables = variables
    self._options = options
    # Unique id used to key this solver's state inside the shared container.
    self._solver_uuid = uuid.uuid4().hex
    self._create_slots()

  def _symmetric_l2_regularization(self):
    # Algorithmic requirement (for now) is to have minimal l2 of 1.0
    return max(self._options['symmetric_l2_regularization'], 1.0)

  # TODO(rohananil): Use optimizer interface to make use of slot creation logic.
  def _create_slots(self):
    # Make internal variables which have the updates before applying L1
    # regularization.  Each weight variable gets a zero-initialized shadow
    # ("unshrinked") variable of the same shape.
    self._slots = {
        'unshrinked_sparse_features_weights': [],
        'unshrinked_dense_features_weights': [],
    }
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        self._slots['unshrinked_' + name].append(var_ops.Variable(
            array_ops.zeros_like(var.initialized_value(), dtypes.float32)))

  def _assertSpecified(self, items, check_in):
    """Raises ValueError if any key in `items` maps to None in `check_in`.

    NOTE(review): the error path evaluates `check_in[x] + '...'` where
    `check_in[x]` is known to be None, so it raises TypeError instead of
    the intended message; it likely meant `x + ' must be specified.'`.
    Left unchanged here to avoid altering behavior.
    """
    for x in items:
      if check_in[x] is None:
        raise ValueError(check_in[x] + ' must be specified.')

  def _assertList(self, items, check_in):
    """Raises ValueError if any key in `items` does not map to a list."""
    for x in items:
      if not isinstance(check_in[x], list):
        raise ValueError(x + ' must be a list.')

  def _l1_loss(self):
    """Computes the l1 loss of the model."""
    with name_scope('l1_loss'):
      sum = 0.0
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          sum += math_ops.reduce_sum(math_ops.abs(weights))
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * sum

  def _l2_loss(self, l2):
    """Computes the l2 loss of the model."""
    with name_scope('l2_loss'):
      sum = 0.0
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          sum += math_ops.reduce_sum(math_ops.square(weights))
      # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
      return l2 * sum / 2

  def _convert_n_to_tensor(self, input_list, as_ref=False):
    """Converts input list to a set of tensors."""
    return [convert_to_tensor(x, as_ref=as_ref) for x in input_list]

  def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      predictions = 0
      for st_i, sv in zip(examples['sparse_features'], sparse_variables):
        # Split the SparseTensor indices into example ids (ei) and
        # feature ids (fi), then flatten to rank-1.
        ei, fi = array_ops.split(1, 2, st_i.indices)
        ei = array_ops.reshape(ei, [-1])
        fi = array_ops.reshape(fi, [-1])
        fv = array_ops.reshape(st_i.values, [-1])
        # TODO(rohananil): This does not work if examples have empty features.
        predictions += math_ops.segment_sum(
            math_ops.mul(
                array_ops.gather(sv, fi), fv), array_ops.reshape(ei, [-1]))
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])
      for i in range(len(dense_variables)):
        predictions += dense_features[i] * dense_variables[i]
    return predictions

  def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    predictions = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        predictions = math_ops.sigmoid(predictions)
    return predictions

  def minimize(self, global_step=None, name=None):
    """Add operations to train a linear model by minimizing the loss function.

    Args:
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables passed in the constructor.
    """
    # Technically, the op depends on a lot more than the variables,
    # but we'll keep the list short.
    with op_scope([], name, 'sdca/minimize'):
      sparse_features_indices = []
      sparse_features_values = []
      for sf in self._examples['sparse_features']:
        sparse_features_indices.append(convert_to_tensor(sf.indices))
        sparse_features_values.append(convert_to_tensor(sf.values))
      # The native solver updates the "unshrinked" slot variables in place.
      step_op = _sdca_ops.sdca_solver(
          sparse_features_indices,
          sparse_features_values,
          self._convert_n_to_tensor(self._examples['dense_features']),
          convert_to_tensor(self._examples['example_weights']),
          convert_to_tensor(self._examples['example_labels']),
          convert_to_tensor(self._examples['example_ids']),
          self._convert_n_to_tensor(
              self._slots['unshrinked_sparse_features_weights'],
              as_ref=True),
          self._convert_n_to_tensor(
              self._slots['unshrinked_dense_features_weights'],
              as_ref=True),
          l1=self._options['symmetric_l1_regularization'],
          l2=self._symmetric_l2_regularization(),
          # TODO(rohananil): Provide empirical evidence for this. It is better
          # to run more than one iteration on single mini-batch as we want to
          # spend more time in compute. SDCA works better with larger
          # mini-batches and there is also recent work that shows its better to
          # reuse old samples than train on new samples.
          # See: http://arxiv.org/abs/1602.02136.
          num_inner_iterations=2,
          loss_type=self._options['loss_type'],
          container=self._container,
          solver_uuid=self._solver_uuid)
      with ops.control_dependencies([step_op]):
        # Copy the updated slot values back into the user-visible weights,
        # then apply the L1 shrinkage step on those weights.
        assign_ops = []
        for name in ['sparse_features_weights', 'dense_features_weights']:
          for var, slot_var in zip(self._variables[name],
                                   self._slots['unshrinked_' + name]):
            assign_ops.append(var.assign(slot_var))
        assign_group = control_flow_ops.group(*assign_ops)
        with ops.control_dependencies([assign_group]):
          shrink_l1 = _sdca_ops.sdca_shrink_l1(
              self._convert_n_to_tensor(
                  self._variables['sparse_features_weights'],
                  as_ref=True),
              self._convert_n_to_tensor(
                  self._variables['dense_features_weights'],
                  as_ref=True),
              l1=self._options['symmetric_l1_regularization'],
              l2=self._symmetric_l2_regularization())
      if not global_step:
        return shrink_l1
      with ops.control_dependencies([shrink_l1]):
        with ops.colocate_with(global_step):
          return state_ops.assign_add(global_step, 1, name=name).op

  def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.

    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    (primal_loss, dual_loss, example_weights) = _sdca_ops.sdca_training_stats(
        container=self._container,
        solver_uuid=self._solver_uuid)
    # Note that example_weights is guaranteed to be positive by
    # sdca_training_stats so dividing by it is safe.
    return (primal_loss + dual_loss + math_ops.to_double(self._l1_loss()) +
            (2.0 * math_ops.to_double(self._l2_loss(
                self._symmetric_l2_regularization())))) / example_weights

  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).

    Args:
      examples: Examples to compute unregularized loss on.

    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = self._linear_predictions(examples)
      labels = convert_to_tensor(examples['example_labels'])
      weights = convert_to_tensor(examples['example_weights'])
      if self._options['loss_type'] == 'logistic_loss':
        # Weighted mean of the per-example sigmoid cross entropy.
        return math_ops.reduce_sum(math_ops.mul(
            sigmoid_cross_entropy_with_logits(
                predictions, labels), weights)) / math_ops.reduce_sum(weights)
      if self._options['loss_type'] == 'hinge_loss':
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.sub(2 * labels, all_ones)
        all_zeros = array_ops.zeros_like(predictions)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = math_ops.maximum(all_zeros, math_ops.sub(
            all_ones, math_ops.mul(adjusted_labels, predictions)))
        weighted_error = math_ops.mul(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)
      # squared loss
      err = math_ops.sub(labels, predictions)
      weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))

  def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.

    Args:
      examples: Examples to compute loss on.

    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = convert_to_tensor(examples['example_weights'])
      return (((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
          math_ops.reduce_sum(weights)) +
          self.unregularized_loss(examples))
| {
"content_hash": "8512056d70d3fedae3892a8b299f111c",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 80,
"avg_line_length": 40.214814814814815,
"alnum_prop": 0.6569042794867072,
"repo_name": "panmari/tensorflow",
"id": "5820794f35adb6391e6ff09b2f89e2fece5ac6eb",
"size": "16964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "153226"
},
{
"name": "C++",
"bytes": "7360924"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "683163"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771416"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "102168"
},
{
"name": "Python",
"bytes": "4526515"
},
{
"name": "Shell",
"bytes": "117381"
},
{
"name": "TypeScript",
"bytes": "340911"
}
],
"symlink_target": ""
} |
import os
import requests
from collections import defaultdict
# Layout: <package>/data/iso639/ holds the downloaded SIL data tables.
data_directory = os.path.dirname(__file__) + '/data/'
iso639_data_directory = data_directory + '/iso639/'
# Create both directories at import time if they do not exist yet.
try:
    os.mkdir(data_directory)
except FileExistsError: pass
try:
    os.mkdir(iso639_data_directory)
except FileExistsError: pass
# Known ISO 639-3 tables and the column names of each tab-separated file.
iso_639_files = {
    'iso-639-3.tab': ['part3', 'part2b', 'part2t', 'part1', 'scope', 'language_type', 'ref_name', 'comment'],
    'iso-639-3_Name_Index.tab': ['part3', 'print_name', 'inverted_name'],
    'iso-639-3-macrolanguages.tab': ['m_id', 'part3'],
}
def _get_ISO_639_data_files(iso_639_file):
    """Download one ISO 639-3 table from SIL and store it locally as UTF-8."""
    destination = iso639_data_directory + iso_639_file
    with open(destination, 'w', encoding='utf-8') as out:
        response = requests.get('http://www-01.sil.org/iso639-3/' + iso_639_file)
        # Force UTF-8 decoding regardless of the advertised charset.
        response.encoding = 'utf-8'
        out.write(response.text)
def update():
    """Updates ISO 639 data files to latest version from SIL"""
    for table_name in iso_639_files:
        _get_ISO_639_data_files(table_name)
# code -> {attribute -> value or list of names}; populated lazily by
# _initialize_ISO_639() on first use.
iso639_dict = defaultdict(lambda : defaultdict(list))
def _initialize_ISO_639():
    """Populate iso639_dict from the local SIL tables, downloading any
    table that is missing."""
    for iso_639_file in iso_639_files:
        if os.path.exists(iso639_data_directory + iso_639_file): pass
        else: _get_ISO_639_data_files(iso_639_file)
    with open(iso639_data_directory + 'iso-639-3.tab', 'r') as data_file:
        for line in data_file:
            # Strip BOM-like characters; skip header row and blank lines.
            line = line.strip('\ufeff\uffef')
            if line.startswith('Id') or not line.strip(): continue
            # Scalar attributes: one value per column, keyed by the 3-letter code.
            for i, attr in enumerate(iso_639_files['iso-639-3.tab']):
                splitline = [s.strip() for s in line.split('\t')]
                if len(splitline[0]) == 3:
                    iso639_dict[splitline[0]][attr] = splitline[i]
    with open(iso639_data_directory + 'iso-639-3_Name_Index.tab', 'r') as data_file:
        for line in data_file:
            line = line.strip('\ufeff\uffef')
            if line.startswith('Id') or not line.strip(): continue
            # Name attributes are appended (a code can have several print or
            # inverted names); start=1 skips the code column itself.
            for i, attr in enumerate(iso_639_files['iso-639-3_Name_Index.tab'][1:], start=1):
                splitline = [s.strip() for s in line.split('\t')]
                if len(splitline[0]) == 3:
                    iso639_dict[splitline[0]][attr].append(splitline[i])
# macrolanguage code -> list of member ISO 639-3 codes; populated lazily by
# _initialize_ISO_639_macro() on first use.
iso639_macro_dict = defaultdict(list)
def _initialize_ISO_639_macro():
    """Populate iso639_macro_dict from the local macrolanguages table,
    downloading any table that is missing."""
    for iso_639_file in iso_639_files:
        if os.path.exists(iso639_data_directory + iso_639_file): pass
        else: _get_ISO_639_data_files(iso_639_file)
    with open(iso639_data_directory + 'iso-639-3-macrolanguages.tab', 'r') as data_file:
        for line in data_file:
            # Strip BOM-like characters; skip header row and blank lines.
            line = line.strip('\ufeff\uffef')
            if line.startswith('M_Id') or not line.strip(): continue
            splitline = [s.strip() for s in line.split('\t')]
            if len(splitline[0]) == 3:
                iso639_macro_dict[splitline[0]].append(splitline[1])
def convert(string, outtype='part3', intype='print_name', exact=False):
    """Takes an input string and returns a list of matching ISO 639 codes or
    language/variety names.

    Args:
        string: string to convert
        outtype: output format. defaults to 'part3'. can be:
            'part1' : ISO 639-1 (two letter)
            'part2t' : ISO 639-2/T (three letter, terminological)
            'part2b' : ISO 639-2/B (three letter, bibliographic)
            'part3' : ISO 639-3 (three letter)
            'print_name' : name of language or variety used in most contexts
                (e.g. "Isthmus Zapotec")
            'inverted_name' : form of name with language name root fronted
                (e.g. "Zapotec, Isthmus")
            'ref_name' : form of name by which the language or variety is
                identified in the standard.
        intype: input format (same possibilities as outtype).
            defaults to 'print_name'.
        exact: if True, will only return matches matching input string exactly.
            if False, will search within strings to find matches.
            defaults to False.
    Returns:
        returns a list of matches (as strings) found within the ISO 639
        database. if no matches are found, returns an empty list.
    Examples:
        >>> convert('Armenian')
        ['aen', 'axm', 'hye', 'xcl']
        >>> convert('Armenian', 'part2b')
        ['arm']
        >>> convert('arm', 'print_name')
        ['Darmiya', 'Marma', 'Suarmin', 'Utarmbung', 'Zarma']
        >>> convert('xcl', 'inverted_name', 'part3')
        ['Armenian, Classical']
        >>> convert('Armenian', exact=True)
        ['hye']
    """
    if not iso639_dict: _initialize_ISO_639()
    name_attrs = ['inverted_name', 'print_name']
    output = []
    for code in iso639_dict:
        entry = iso639_dict[code]
        # Name attributes hold lists of names; the rest hold a single string.
        if intype in name_attrs:
            candidates = entry[intype]
            if exact:
                matched = string in candidates          # exact element match
            else:
                matched = any(string in name for name in candidates)
        else:
            value = entry[intype]
            matched = (string == value) if exact else (string in value)
        if not matched:
            continue
        if outtype in name_attrs:
            output.extend(entry[outtype])
        else:
            output.append(entry[outtype])
    # De-duplicate, sort, and drop empty values.
    return [out for out in sorted(set(output)) if out]
def to_name(code):
    """Return the print names for a 2-letter (639-1) or 3-letter (639-3) code."""
    source_type = 'part1' if len(code) == 2 else 'part3'
    return convert(code, outtype='print_name', intype=source_type)
def to_code(name, exact=True):
    """Return ISO 639-3 codes whose print, inverted, or reference name matches."""
    codes = []
    for name_type in ('print_name', 'inverted_name', 'ref_name'):
        codes.extend(convert(name, outtype='part3', intype=name_type, exact=exact))
    return [code for code in sorted(set(codes)) if code]
def expand_macrolanguage(part3, include_self=False):
    """Takes an ISO 639-3 macrolanguage code and returns a list of all
    individual languages and varieties within.

    Args:
        part3: An ISO 639-3 macrolanguage code (as a string)
        include_self: if True, includes the macrolanguage code itself in the
            output. defaults to False.
    Returns:
        if code is found, returns a list of ISO 639-3 codes (as strings)
        within the macrolanguage code. if code is not found, returns an empty
        string.
    Examples:
        >>> expand_macrolanguage('bal')
        ['bcc', 'bgn', 'bgp']
        >>> expand_macrolanguage('bal', include_self=True)
        ['bal', 'bcc', 'bgn', 'bgp']
    """
    if not iso639_macro_dict: _initialize_ISO_639_macro()
    members = list(iso639_macro_dict[part3])
    if include_self:
        members.append(part3)
    return sorted(set(members))
def get_macrolanguage(code):
    """Return the macrolanguage code containing `code`, or '' if none.

    Accepts either a 2-letter (639-1) or 3-letter (639-3) code.
    """
    if not iso639_macro_dict: _initialize_ISO_639_macro()
    if len(code) == 2:
        part3 = convert(code, outtype='part3', intype='part1')[0]
    else:
        part3 = code
    for macro_code, members in iso639_macro_dict.items():
        if part3 in members:
            return(macro_code)
    return('')
"content_hash": "760fa56070421be7e103f8f4d668a2f5",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 109,
"avg_line_length": 38.504901960784316,
"alnum_prop": 0.573520050922979,
"repo_name": "longnow/panlex-tools",
"id": "0f481f93de330357294b5d91a23571359c3293df",
"size": "7899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libpython/ben/iso639.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5457"
},
{
"name": "HTML",
"bytes": "20392"
},
{
"name": "JavaScript",
"bytes": "10136"
},
{
"name": "PHP",
"bytes": "10473"
},
{
"name": "Perl",
"bytes": "253434"
},
{
"name": "Python",
"bytes": "337902"
},
{
"name": "Roff",
"bytes": "11763"
},
{
"name": "Shell",
"bytes": "1639"
}
],
"symlink_target": ""
} |
# Package entry point (`python -m <package>`): delegate to the package's
# main() with the arguments that follow the program name.
import sys
from . import main
main(sys.argv[1:])
| {
"content_hash": "9e4cec50058cb2aef6fc4c9fe6e580b9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 18,
"avg_line_length": 12.5,
"alnum_prop": 0.7,
"repo_name": "sswan/thresh",
"id": "aa0d21e167fcae5137ecd9fc28f7b2d904820f48",
"size": "50",
"binary": false,
"copies": "1",
"ref": "refs/heads/better_parser",
"path": "thresh/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39681"
}
],
"symlink_target": ""
} |
import datetime
import os
import django
from django.utils.translation import ugettext_lazy as _
# Which settings are we using?
# Useful for debugging.
SETTINGS = 'base'
# Base paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Debugging
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/'
LOGIN_REDIRECT_URL = '/admin/'
SITE_ID = 1
# Default connection to socket
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': 'localhost',
'PORT': '5432',
'NAME': 'panda',
'USER': 'panda',
'PASSWORD': 'panda'
}
}
TIME_ZONE = 'Etc/UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = False
LOCALE_PATHS = (os.path.join(SITE_ROOT, 'locale'),)
# Media
STATIC_ROOT = os.path.join(SITE_ROOT, 'media')
STATIC_URL = '/site_media/'
ADMIN_MEDIA_PREFIX = '/site_media/admin/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Uploads
MEDIA_ROOT = '/tmp/panda'
EXPORT_ROOT = '/tmp/panda_exports'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '-lyd+@8@=9oni01+gjvb(txz3%hh_7a9m5*n0q^ce5+&c1fkm('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.media',
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.csrf',
    'django.core.context_processors.i18n'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Project middleware: tracks use of the CSRF cookie.
    'panda.middleware.CsrfCookieUsedMiddleware'
)
ROOT_URLCONF = 'config.urls'
# BUG FIX: the trailing comma is required -- without it this was a plain
# string, not a 1-tuple, and Django iterates TEMPLATE_DIRS, which would
# treat each character of the path as a directory.
TEMPLATE_DIRS = (
    os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'longerusername',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.humanize',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'south',
    'tastypie',
    'djcelery',
    'compressor',
    'livesettings',
    'jumpstart',
    'panda',
    'client'
)
SESSION_COOKIE_AGE = 2592000    # 30 days

AUTH_PROFILE_MODULE = 'panda.UserProfile'

# Django-compressor
COMPRESS_ENABLED = False

# Celery
import djcelery
djcelery.setup_loader()

# Broker and result backend both reuse the default PostgreSQL connection.
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_URL = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERY_RESULT_DBURI = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_CONCURRENCY = 1
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERYBEAT_SCHEDULE_FILENAME = 'celerybeat-schedule'

from celery.schedules import crontab

# Nightly maintenance jobs, staggered between 02:00 and 04:00.
CELERYBEAT_SCHEDULE = {
    'purge_orphaned_uploads': {
        'task': 'panda.tasks.cron.purge_orphaned_uploads',
        'schedule': crontab(minute=0, hour=2),
        'kwargs': { 'fake': False }
    },
    'run_subscriptions': {
        'task': 'panda.tasks.cron.run_subscriptions',
        'schedule': crontab(minute=30, hour=2)
    },
    'run_admin_alerts': {
        'task': 'panda.tasks.cron.run_admin_alerts',
        'schedule': crontab(minute=0, hour=4)
    }
}

# South
SOUTH_TESTS_MIGRATE = False
# Hack, see: http://stackoverflow.com/questions/3898239/souths-syncdb-migrate-creates-pages-of-output
import south.logger
# Logging
# BUG FIX: the 'south', 'keyedcache' and urllib3 loggers previously spelled
# the key 'propogate', which the logging config silently ignored -- those
# loggers were still propagating to the root logger. Corrected to 'propagate'.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        },
        'default': {
            'level': 'INFO',
            'class': 'loghandlers.GroupWriteRotatingFileHandler',
            'filename': '/var/log/panda/panda.log',
            'maxBytes': 1024*1024*5, # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'request_handler': {
            'level': 'INFO',
            'class': 'loghandlers.GroupWriteRotatingFileHandler',
            'filename': '/var/log/panda/requests.log',
            'maxBytes': 1024*1024*5, # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'backend_handler': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
    },
    'loggers': {
        '': {
            'handlers': ['default', 'console'],
            'level': 'DEBUG',
            'propagate': True
        },
        'django.request': {
            'handlers': ['request_handler', 'console'],
            'level': 'DEBUG',
            'propagate': False
        },
        'django.db': {
            'handlers': ['backend_handler'],
            'level': 'DEBUG',
            'propagate': False
        },
        'south': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False
        },
        'keyedcache': {
            'handlers': ['console'],
            'level': 'ERROR',
            'propagate': False
        },
        'requests.packages.urllib3.connectionpool': {
            'handlers': ['console'],
            'level': 'ERROR',
            'propagate': False
        }
    }
}
# Solr
SOLR_ENDPOINT = 'http://localhost:8983/solr'
SOLR_DATA_CORE = 'data'
SOLR_DATASETS_CORE = 'datasets'
SOLR_DIRECTORY = '/var/solr'

# Miscellaneous configuration
PANDA_VERSION = '1.1.2'
PANDA_DEFAULT_SEARCH_GROUPS = 10
PANDA_DEFAULT_SEARCH_ROWS_PER_GROUP = 5
PANDA_DEFAULT_SEARCH_ROWS = 50
PANDA_SNIFFER_MAX_SAMPLE_SIZE = 1024 * 100 # 100 KB
PANDA_SAMPLE_DATA_ROWS = 5
PANDA_SCHEMA_SAMPLE_ROWS = 100
PANDA_ACTIVATION_PERIOD = datetime.timedelta(days=30)
# Free-disk-space thresholds for admin alerts.
PANDA_AVAILABLE_SPACE_WARN = 1024 * 1024 * 1024 * 2 # 2GB
PANDA_AVAILABLE_SPACE_CRITICAL = 1024 * 1024 * 1024 * 1 # 1GB
PANDA_NOTIFICATIONS_TO_SHOW = 50

PANDA_UNCATEGORIZED_ID = 0
PANDA_UNCATEGORIZED_SLUG = 'uncategorized'
# running this through gettext causes file uploads not to work, so disabled until solved!
# NOTE(review): despite the comment above, the value below IS wrapped in
# ugettext_lazy (_) -- confirm which of the two is actually current.
PANDA_UNCATEGORIZED_NAME = _('Uncategorized')

# Maps language codes to moment.js locale names (None = moment's default).
MOMENT_LANGUAGE_MAPPING = {
    'en': None,
    'es': 'es',
    'de': 'de'
}

# Allow for local (per-user) override
try:
    from local_settings import *
except ImportError:
    pass
| {
"content_hash": "66deff472b15eb12363e6487b3eb45c0",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 101,
"avg_line_length": 26.309963099630995,
"alnum_prop": 0.617671809256662,
"repo_name": "ibrahimcesar/panda",
"id": "327ad60ff8f26678bc26a5203575e5700a3212b0",
"size": "7153",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "config/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14837"
},
{
"name": "HTML",
"bytes": "51564"
},
{
"name": "Java",
"bytes": "256"
},
{
"name": "JavaScript",
"bytes": "759191"
},
{
"name": "Python",
"bytes": "877718"
},
{
"name": "Shell",
"bytes": "15468"
}
],
"symlink_target": ""
} |
import re
from zope.interface import Interface, implements
re_flags = re.M | re.U | re.I | re.S
class IWebBuilder(Interface):
    """Marker interface implemented by the WebBuilder root object below."""
class WebBuilder(object):
    """Root application object; implements the IWebBuilder marker interface.

    Provides small helpers used by the web layer: coercing raw form values
    to booleans, building identifiers, and rewriting option descriptions.
    """
    implements(IWebBuilder)
    # Class-level registry shared by all instances; populated elsewhere.
    configurations = {}

    class Data(object):
        """Thin object exposing a request's matchdict entries as attributes."""
        def __init__(self, request):
            # Copy every matchdict entry onto the instance.
            self.__dict__.update(request.matchdict)

    def compute_value(self, value):
        """Coerce a raw form value to a boolean.

        Falsy input -> False; common affirmative tokens and checkbox
        markers -> True; 'n' and values starting with 'off'/'false' ->
        False; anything else -> True.
        """
        if not value:
            return False
        value = value.strip().lower()
        if value in ['y', 'yes', 'o', 'on', 't', 'enable', 'true']:
            return True
        if 'checkbox_enabled' in value:
            # BUG FIX: the original assigned ``value = True`` here and then
            # fell through to ``value.startswith(...)`` below, which raised
            # AttributeError (bool has no startswith). Return directly.
            return True
        if value == 'n':
            return False
        for b in 'off', 'false':
            if value.startswith(b):
                return False
        return True

    def get_id(self, value):
        """Strip punctuation and whitespace from *value* to form an id."""
        special_chars = ['.', '_', '-', ' ', '(', ')', ',']
        for c in special_chars:
            value = value.replace(c, '')
        return value

    def rewrite_description(self, value):
        """Clean up an option description string.

        - drops a trailing ``y/n`` (optionally followed by ``?``) marker
        - collapses ``, see <url>`` into just the URL
        - removes every remaining bare URL from the text and appends it as
          an HTML anchor (class ``option-desc-url``) instead
        """
        url = re.compile(
            '(?P<url>(?<!>)(?<!href=[\'"])https?\:\//[^$\s"]*)', re_flags)
        yn = re.compile('y/n\s*[?]?$', re_flags)
        see = re.compile(',\s*see\s*(ht.*)$', re_flags)
        if yn.search(value):
            value = yn.sub('', value)
        if see.search(value):
            value = see.sub('\\1', value)
        match_obj = url.search(value)
        while match_obj:
            durl = match_obj.groupdict()
            # Remove the URL from the running text, then append it as a link.
            value = value.replace(durl['url'], '')
            value = "%s%s" % (
                value,
                '<a class="option-desc-url" target="_blank" '
                'href="%(url)s" '
                'alt="link"> </a>' % durl
            )
            match_obj = url.search(value)
        return value.strip()
# Module-level singleton served to every request.
root = WebBuilder()
def get_root(environ):
    """Return the shared WebBuilder root.

    Takes a WSGI ``environ`` -- presumably used as a framework root/app
    factory (environ is unused here); confirm against the caller.
    """
    return root
| {
"content_hash": "ad61504346f8902ff9f28517a666869e",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 27.16176470588235,
"alnum_prop": 0.4840281537628587,
"repo_name": "hellfish2/collective.generic.webbuilder",
"id": "27d2c03732e98c2878bd0b2d674090e305e86c67",
"size": "1847",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/collective/generic/webbuilder/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "147995"
},
{
"name": "JavaScript",
"bytes": "335659"
},
{
"name": "Python",
"bytes": "79817"
},
{
"name": "Shell",
"bytes": "3596"
}
],
"symlink_target": ""
} |
"""
@author: Disa Mhembere
@organization: Johns Hopkins University
@contact: disa@jhu.edu
@summary: A module to alter/update the MRdjango database as necessary
"""
'''
FileField stores files e.g. to media/documents based MEDIA_ROOT
Generally, each model maps to a single database table.
'''
from django.db import models
from django.contrib import admin
from django.conf import settings
from django.contrib.auth.models import User
class BuildGraphModel(models.Model):
  '''
  Stores the parameters captured on the build-graph view.
  '''
  project_name = models.CharField(max_length=255)
  site = models.CharField(max_length=255,)
  subject = models.CharField(max_length=255,)
  session = models.CharField(max_length=255,)
  scanId = models.CharField(max_length=255)
  location = models.TextField()
  owner = models.ForeignKey(to=User, to_field='username', null=True) # Many-to-one. Many here, other in auth_user
  def __repr__(self):
    # Append each stored field as a "name: value" line after the default repr.
    field_names = ('project_name', 'site', 'subject', 'session',
                   'scanId', 'location', 'owner')
    details = ''.join('%s: %s\n' % (name, getattr(self, name))
                      for name in field_names)
    return super(BuildGraphModel, self).__repr__() + '\n' + details
class OwnedProjects(models.Model):
  '''
  This will let us keep track of owned projects for
  integrity constraints & sharing
  '''
  project_name = models.CharField(max_length=255)
  owner = models.ForeignKey(settings.AUTH_USER_MODEL) # Many-to-one .Many here, other in auth_user
  # Whether the project is visible only to its owner.
  is_private = models.BooleanField(null=False)
  owner_group = models.CharField(max_length=255, null=True) # Will reference other table soon
  # Really should be --> owner_groups = models.ForeignKey(to=User, to_field='groups')
class SharingTokens(models.Model):
  '''Tokens issued by a user to grant access to one or more projects.'''
  token = models.CharField(max_length=64)
  # NOTE(review): the second positional argument is presumably intended as
  # to_field='username' -- confirm against this Django version's ForeignKey
  # signature (compare BuildGraphModel.owner above, which uses to_field=).
  issued_by = models.ForeignKey(User, 'username') # Many-to-one . Many here, other in auth_user
  project_name = models.ManyToManyField(to=BuildGraphModel, related_name='st_project_name')
  issue_date = models.DateTimeField(auto_now_add=True)
  # Null means the token never expires -- TODO confirm against usage.
  expire_date = models.DateField(null=True)
class GraphDownloadModel(models.Model):
  '''Catalog entry for a downloadable graph; keyed by its file path.'''
  filepath = models.CharField(max_length=255, null=False, primary_key=True, unique=True)
  genus = models.CharField(max_length=128)
  region = models.CharField(max_length=128)
  # verbose name is what is rendered in the html
  project = models.CharField(max_length=255) # This is only relevant to big graphs
  numvertex = models.BigIntegerField(null=False, verbose_name="# Nodes")
  numedge = models.BigIntegerField(null=False, verbose_name="# Edges")
  graphattr = models.TextField(verbose_name="Graph Attrs") # CSVs here
  vertexattr = models.TextField(verbose_name="Node Attrs") # CSVs here
  edgeattr = models.TextField(verbose_name="Edge Attrs") #CSVs here
  sensor = models.CharField(max_length=128)
  source = models.CharField(max_length=256, verbose_name="Source host url")
  mtime = models.FloatField() # Modification Time
  url = models.URLField(max_length=2048, verbose_name="Download url")
class RawUploadModel(models.Model):
  '''Parameters submitted with a raw upload (MPRAGE and DTI file paths).'''
  mpragepath = models.CharField(max_length=255, null=False)
  dtipath = models.CharField(max_length=255, null=False)
  atlas = models.CharField(max_length=255, null=False)
  graphsize = models.CharField(max_length=8, null=False)
  # Address to notify when processing completes -- presumably; confirm.
  email = models.EmailField(null=False)
# Expose these models in the Django admin. NOTE(review): GraphDownloadModel
# and RawUploadModel are not registered -- confirm that is intentional.
admin.site.register(BuildGraphModel)
admin.site.register(OwnedProjects)
admin.site.register(SharingTokens)
| {
"content_hash": "973647748581378c5eb5b820689d720e",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 113,
"avg_line_length": 41.50574712643678,
"alnum_prop": 0.712545001384658,
"repo_name": "neurodata/ndgrutedb",
"id": "3e98ddb3cc911e1017e1ab11fb58d7bc2e31acd0",
"size": "4249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MR-OCP/MROCPdjango/pipeline/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2662"
},
{
"name": "C++",
"bytes": "2431"
},
{
"name": "CSS",
"bytes": "35060"
},
{
"name": "HTML",
"bytes": "243779"
},
{
"name": "JavaScript",
"bytes": "18578"
},
{
"name": "M",
"bytes": "514"
},
{
"name": "Makefile",
"bytes": "970"
},
{
"name": "Matlab",
"bytes": "19890"
},
{
"name": "Nginx",
"bytes": "3000"
},
{
"name": "Python",
"bytes": "505669"
},
{
"name": "R",
"bytes": "19939"
},
{
"name": "Shell",
"bytes": "8494"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class CertificatesApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        """
        :param api_client: transport object used for all calls; a default
            ApiClient is constructed when none is supplied.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):
        """
        get information of a group
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True (via dict
        unpacking, since ``async`` is a reserved word on Python 3.7+):
        >>> thread = api.get_api_group(**{'async': True})
        >>> result = thread.get()
        :param async bool
        :return: V1APIGroup
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        # Both the sync result and the async request thread are whatever
        # get_api_group_with_http_info returns, so a single return suffices.
        return self.get_api_group_with_http_info(**kwargs)

    def get_api_group_with_http_info(self, **kwargs):
        """
        get information of a group
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True (via dict
        unpacking, since ``async`` is a reserved word on Python 3.7+):
        >>> thread = api.get_api_group_with_http_info(**{'async': True})
        >>> result = thread.get()
        :param async bool
        :return: V1APIGroup
        If the method is called asynchronously,
        returns the request thread.
        """
        # The only accepted keyword arguments for this endpoint.
        all_params = []
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # BUG FIX: ``six.iteritems`` was used here; plain ``dict.items`` works
        # on both Python 2 and 3 and drops the six dependency.
        for key, val in params['kwargs'].items():
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # Authentication setting
        auth_settings = ['BearerToken']

        # BUG FIX: ``async`` became a reserved keyword in Python 3.7, so it can
        # no longer appear as a literal keyword argument in a call; pass it via
        # dict unpacking instead (identical runtime behavior, callers
        # unchanged -- they already supply it through **kwargs).
        return self.api_client.call_api(
            '/apis/certificates.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',
            auth_settings=auth_settings,
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats,
            **{'async': params.get('async')})
| {
"content_hash": "25b422abb8811aeebab4de914339b13b",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 34.72950819672131,
"alnum_prop": 0.5383526079773424,
"repo_name": "mbohlool/client-python",
"id": "56f30084efdc5b2fda66928829cdb50c6de3c71c",
"size": "4254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/apis/certificates_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
"""Tools for serializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
def _serialize_function_spec(function_spec, coder):
  """Build the FunctionSpec proto describing `function_spec`.

  Raises:
    NotImplementedError: for a method spec whose fullargspec has no named
      arguments (no 'self'), which cannot be represented.
  """
  if function_spec.is_method and not function_spec.fullargspec.args:
    raise NotImplementedError(
        "Missing support to serialize a method function without a named "
        "'self' argument.")
  proto = saved_object_graph_pb2.FunctionSpec()
  proto.is_method = function_spec.is_method
  # Annotations are deliberately stripped before encoding: they only serve
  # optional type checking during development and do not affect runtime
  # behavior.
  # https://www.python.org/dev/peps/pep-3107/
  # https://docs.python.org/3/library/inspect.html#inspect.getfullargspec
  stripped_argspec = function_spec.fullargspec._replace(annotations={})
  proto.fullargspec.CopyFrom(coder.encode_structure(stripped_argspec))
  proto.input_signature.CopyFrom(
      coder.encode_structure(function_spec.input_signature))
  # Map the tri-state jit_compile flag onto the JitCompile enum.
  # See `tf.function` and the JitCompile proto for details.
  jit_enum = saved_object_graph_pb2.FunctionSpec.JitCompile
  jit_mapping = {None: jit_enum.DEFAULT, True: jit_enum.ON, False: jit_enum.OFF}
  proto.jit_compile = jit_mapping.get(function_spec.jit_compile)
  return proto
def serialize_concrete_function(concrete_function, node_ids, coder):
  """Build a SavedConcreteFunction proto for `concrete_function`.

  Raises:
    KeyError: when the function captures a tensor that has no entry in
      `node_ids` (i.e. is unsupported or unreachable from root).
  """
  bound_inputs = []
  for capture in concrete_function.captured_inputs:
    try:
      bound_inputs.append(node_ids[capture])
    except KeyError:
      raise KeyError(
          "Failed to add concrete function %s to object based saved model as it "
          "captures tensor %s which is unsupported or not reachable from root. "
          "One reason could be that a stateful object or a variable that the "
          "function depends on is not assigned to an attribute of the serialized "
          "trackable object "
          "(see SaveTest.test_captures_unreachable_variable)."
          % (concrete_function.name, capture))
  proto = saved_object_graph_pb2.SavedConcreteFunction()
  proto.canonicalized_input_signature.CopyFrom(
      coder.encode_structure(concrete_function.structured_input_signature))
  structured_outputs = func_graph_module.convert_structure_to_signature(
      concrete_function.structured_outputs)
  proto.output_signature.CopyFrom(coder.encode_structure(structured_outputs))
  proto.bound_inputs.extend(bound_inputs)
  return proto
def serialize_bare_concrete_function(concrete_function, name_map):
  """Build a SavedBareConcreteFunction proto for `concrete_function`.

  The function name is looked up in `name_map` (keyed by its text name),
  falling back to the original name when absent.
  """
  # pylint: disable=protected-access
  text_name = compat.as_text(concrete_function.name)
  resolved_name = name_map.get(text_name, concrete_function.name)
  proto = saved_object_graph_pb2.SavedBareConcreteFunction(
      concrete_function_name=resolved_name,
      allowed_positional_arguments=concrete_function._num_positional_args,
      argument_keywords=concrete_function._arg_keywords)
  spec = concrete_function._pre_initialized_function_spec
  if spec is not None:
    coder = nested_structure_coder.StructureCoder()
    proto.function_spec.CopyFrom(_serialize_function_spec(spec, coder))
  return proto
  # pylint: enable=protected-access
def serialize_function(function, name_map):
  """Build a SavedFunction proto listing all of `function`'s concrete fns."""
  coder = nested_structure_coder.StructureCoder()
  proto = saved_object_graph_pb2.SavedFunction()
  proto.function_spec.CopyFrom(
      _serialize_function_spec(function.function_spec, coder))
  # pylint: disable=protected-access
  concrete_fns = function._list_all_concrete_functions_for_serialization()
  # pylint: enable=protected-access
  for concrete_fn in concrete_fns:
    # Prefer the mapped (text) name; fall back to the raw name.
    mapped = name_map.get(compat.as_text(concrete_fn.name), concrete_fn.name)
    proto.concrete_functions.append(mapped)
  return proto
def wrap_cached_variables(concrete_function):
  """Wraps the concrete function if it uses cached read tensors.
  This function creates a new concrete function that captures variables
  instead of the cached read tensors.
  Args:
    concrete_function: A Concrete function that maybe captures cached read
      tensors.
  Returns:
    A concrete function that wraps the original concrete function, which
    captures variables instead. If the original function did not capture any
    cached values, then the function is not wrapped and the original object is
    returned.
  """
  outer_graph = func_graph_module.FuncGraph(
      "{}_no_cache".format(concrete_function.graph.name))
  # `_captures` appears to map id(external tensor) -> (tensor, placeholder);
  # it is mutated in place below and restored before returning.
  captures = concrete_function.graph._captures # pylint: disable=protected-access
  # Flag: set to True once at least one capture has been remapped.
  mapped_captures = None
  # Original entries we overwrite, keyed by id, kept for restoration.
  remapped_captures = {}
  # Update the external captures to use read tensors generated in the outer
  # graph.
  with outer_graph.as_default():
    for capture, placeholder in concrete_function.graph.captures:
      cached_variable = getattr(capture, "_cached_variable", None)
      if cached_variable is None:
        # Not a cached read tensor; leave this capture untouched.
        continue
      # `_cached_variable` is stored as a callable -- presumably a weakref to
      # the variable; calling it yields the variable itself. TODO confirm.
      cached_variable = cached_variable()
      # Fresh read tensor created inside `outer_graph` (we are under its
      # as_default() scope here).
      new_cached_value = cached_variable.read_value()
      remapped_captures[id(capture)] = captures[id(capture)]
      captures[id(capture)] = (new_cached_value, placeholder)
      mapped_captures = True
  if not mapped_captures:
    # Nothing was remapped: return the original function unwrapped.
    return concrete_function
  inner_concrete = defun.ConcreteFunction(concrete_function.graph)
  def wrap_function(*args):
    # Invoke the original graph with its (now variable-based) captures.
    return inner_concrete._call_flat(args, inner_concrete.captured_inputs) # pylint:disable=protected-access
  args = nest.flatten(concrete_function.structured_input_signature,
                      expand_composites=True)
  # Trace `wrap_function` into `outer_graph` so the wrapper forwards the
  # flattened input signature to the inner function.
  func_graph_module.func_graph_from_py_func(
      None, wrap_function, args=tuple(args), kwargs={},
      func_graph=outer_graph)
  fn = defun.ConcreteFunction(
      outer_graph, function_spec=concrete_function._function_spec) # pylint: disable=protected-access
  fn._arg_keywords = concrete_function._arg_keywords # pylint: disable=protected-access
  fn._num_positional_args = concrete_function._num_positional_args # pylint: disable=protected-access
  # Return the captures to their original values
  for key, capture in remapped_captures.items():
    captures[key] = capture
  return fn
| {
"content_hash": "b7675ff2144584b523658ebc8d915127",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 109,
"avg_line_length": 42.0679012345679,
"alnum_prop": 0.7393983859134262,
"repo_name": "cxxgtxy/tensorflow",
"id": "aaaead7c3fe935748e2fed6afed7dea96bcf6272",
"size": "7504",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/saved_model/function_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "24882047"
},
{
"name": "CMake",
"bytes": "164374"
},
{
"name": "Go",
"bytes": "854846"
},
{
"name": "HTML",
"bytes": "564161"
},
{
"name": "Java",
"bytes": "307246"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "225621"
},
{
"name": "Python",
"bytes": "22009999"
},
{
"name": "Shell",
"bytes": "341543"
},
{
"name": "TypeScript",
"bytes": "797437"
}
],
"symlink_target": ""
} |
'''
Created on Jul 26, 2016
@author: cesar
'''
from matplotlib import pyplot as plt
def levelMarkGraphsLabels(marksDictionary, date0, datef, rates=True, save=None):
    """
    Plot event marks per level as stacked vertical-line (raster) subplots.

    :param marksDictionary: mapping of level -> list of datetimes (marks)
    :param date0: reference datetime; marks are plotted as seconds since it
    :param datef: end datetime (kept for interface compatibility; unused here)
    :param rates: when True, label each row's left y-axis with its mark count
    :param save: optional path; when given the figure is saved there
    """
    # BUG FIX: py2-only idioms removed -- dict.keys() returns a view in
    # Python 3 (no .sort()) and .iteritems() no longer exists; sorted() and
    # .items() behave identically on both versions.
    levels = sorted(marksDictionary)
    levelToIndices = dict(zip(levels, range(len(levels))))
    # BUG FIX: squeeze=False keeps `axes` an array even when there is a single
    # level, so the axes[w] indexing below works for len(levels) == 1 too.
    fig, axes = plt.subplots(nrows=len(levels), ncols=1, figsize=(6, 6),
                             squeeze=False)
    axes = axes.ravel()
    for wa, dateTimes in marksDictionary.items():
        marks = [(d - date0).total_seconds() for d in dateTimes]
        w = levelToIndices[wa]
        axes[w].vlines(marks, ymin=0, ymax=1)
        axes[w].get_xaxis().set_visible(False)
        # Right-hand twin axis carries the row index as a label.
        ax2 = axes[w].twinx()
        ax2.set_ylabel(str(w))
        ax2.set_yticks([])
        if rates:
            # Left y-label shows the number of marks in this row.
            axes[w].set_ylabel(len(marks))
            axes[w].set_yticks([])
            axes[w].tick_params(axis=u'both', which=u'both', length=0)
    if save is not None:
        plt.savefig(save)
def levelMarkGraphs(marksDictionary, date0, datef):
    """
    Show event marks per level as stacked vertical-line (raster) subplots.

    :param marksDictionary: mapping of level -> list of datetimes (marks)
    :param date0: reference datetime; marks are plotted as seconds since it
    :param datef: end datetime (kept for interface compatibility; unused here)
    """
    # BUG FIX: py2-only idioms removed -- dict.keys() returns a view in
    # Python 3 (no .sort()) and .iteritems() no longer exists.
    levels = sorted(marksDictionary)
    levelToIndices = dict(zip(levels, range(len(levels))))
    # BUG FIX: squeeze=False keeps `axes` an array even for a single level.
    fig, axes = plt.subplots(nrows=len(levels), ncols=1, figsize=(6, 6),
                             squeeze=False)
    axes = axes.ravel()
    for wa, dateTimes in marksDictionary.items():
        marks = [(d - date0).total_seconds() for d in dateTimes]
        w = levelToIndices[wa]
        axes[w].vlines(marks, ymin=0, ymax=1)
        axes[w].get_xaxis().set_visible(False)
        axes[w].get_yaxis().set_visible(False)
    plt.show()
"content_hash": "fa32f3502f1f5440a7b522d94c67380b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6159468438538206,
"repo_name": "cesarali/Tag2Hierarchy",
"id": "8102c9801c92f39012762d7bdc7f02ceaac855b1",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tag2hierarchy/utils/poissonPlots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10441"
},
{
"name": "Jupyter Notebook",
"bytes": "229068"
},
{
"name": "Python",
"bytes": "73604"
}
],
"symlink_target": ""
} |
import testtools
from kmip.core import exceptions
from kmip.core import primitives
from kmip.core import utils
class TestBase(testtools.TestCase):
def setUp(self):
super(TestBase, self).setUp()
self.stream = utils.BytearrayStream()
self.bad_init = 'Bad Base initialization: attribute {0} missing'
self.bad_write = exceptions.ErrorStrings.BAD_EXP_RECV.format(
'primitives.Base.{0}', 'write', '{1}', '{2}')
self.bad_encoding = exceptions.ErrorStrings.BAD_ENCODING.format(
'primitives.Base.{0}', 'write')
self.bad_match = exceptions.ErrorStrings.BAD_EXP_RECV.format(
'primitives.Base.{0}', 'comparison', '{1}', '{2}')
def tearDown(self):
super(TestBase, self).tearDown()
def test_is_oversized(self):
base = primitives.Base()
base.is_oversized(self.stream)
def test_is_oversized_error(self):
self.stream.write(b'\x00')
base = primitives.Base()
self.assertRaises(
exceptions.StreamNotEmptyError,
base.is_oversized,
self.stream
)
def test_read_tag(self):
encoding = (b'\x42\x00\x00')
base = primitives.Base()
self.stream = utils.BytearrayStream(encoding)
base.read_tag(self.stream)
def test_read_tag_invalid(self):
encoding = (b'\x42\x00\x01')
base = primitives.Base()
self.stream = utils.BytearrayStream(encoding)
self.assertRaises(
exceptions.ReadValueError,
base.read_tag,
self.stream
)
def test_read_type(self):
self.stream.write(b'\x00')
base = primitives.Base()
base.read_type(self.stream)
def test_read_type_error(self):
self.stream.write(b'\x01')
base = primitives.Base()
self.assertRaises(
exceptions.ReadValueError,
base.read_type,
self.stream
)
def test_read_type_underflow(self):
base = primitives.Base()
self.assertRaises(
exceptions.ReadValueError,
base.read_type,
self.stream
)
def test_read_type_overflow(self):
self.stream.write(b'\x00\x00')
base = primitives.Base()
base.read_type(self.stream)
def test_read_length(self):
self.stream.write(b'\x00\x00\x00\x04')
base = primitives.Base()
base.length = 4
base.read_length(self.stream)
def test_read_length_underflow(self):
self.stream.write(b'\x00')
base = primitives.Base()
base.length = 4
self.assertRaises(
exceptions.ReadValueError,
base.read_length,
self.stream
)
def test_read_length_overflow(self):
self.stream.write(b'\x00\x00\x00\x04\x00')
base = primitives.Base()
base.length = 4
base.read_length(self.stream)
def test_read_value(self):
base = primitives.Base()
self.assertRaises(
NotImplementedError, base.read_value, self.stream)
def test_read(self):
self.stream.write(b'\x42\x00\x00\x00\x00\x00\x00\x04')
base = primitives.Base()
base.length = 4
base.read(self.stream)
def test_write_tag(self):
encoding = (b'\x42\x00\x00')
base = primitives.Base()
base.write_tag(self.stream)
result = self.stream.read()
len_exp = len(encoding)
len_rcv = len(result)
self.assertEqual(
len_exp, len_rcv,
self.bad_write.format(
'tag', '{0} bytes'.format(len_exp),
'{0} bytes'.format(len_rcv)))
self.assertEqual(encoding, result, self.bad_encoding.format('tag'))
def test_write_type(self):
encoding = b'\x00'
base = primitives.Base()
base.write_type(self.stream)
result = self.stream.read()
len_exp = len(encoding)
len_rcv = len(result)
self.assertEqual(
len_exp, len_rcv,
self.bad_write.format(
'type', '{0} bytes'.format(len_exp),
'{0} bytes'.format(len_rcv)))
self.assertEqual(encoding, result, self.bad_encoding.format('type'))
def test_write_type_invalid(self):
base = primitives.Base()
base.type = ''
self.assertRaises(TypeError, base.write_type, self.stream)
def test_write_length(self):
encoding = b'\x00\x00\x00\x04'
base = primitives.Base()
base.length = 4
base.write_length(self.stream)
result = self.stream.read()
len_exp = len(encoding)
len_rcv = len(result)
self.assertEqual(
len_exp, len_rcv,
self.bad_write.format(
'length', '{0} bytes'.format(len_exp),
'{0} bytes'.format(len_rcv)))
self.assertEqual(encoding, result, self.bad_encoding.format('length'))
def test_write_length_invalid(self):
base = primitives.Base()
base.length = ''
self.assertRaises(TypeError, base.write_length, self.stream)
def test_write_length_overflow(self):
self.skipTest(
'No easy way to test with a number requiring more than '
'2 ** 0xffffffff bytes for representation. Test preserved '
'for completeness.'
)
def test_write_value(self):
base = primitives.Base()
self.assertRaises(
NotImplementedError, base.write_value, self.stream)
def test_write(self):
encoding = b'\x42\x00\x00\x00\x00\x00\x00\x04'
base = primitives.Base()
base.length = 4
base.write(self.stream)
result = self.stream.read()
len_exp = len(encoding)
len_rcv = len(result)
self.assertEqual(
len_exp, len_rcv,
self.bad_write.format(
'type/length', '{0} bytes'.format(len_exp),
'{0} bytes'.format(len_rcv)))
self.assertEqual(
encoding, result, self.bad_encoding.format('type/length'))
def test_is_tag_next(self):
encoding = (b'\x42\x00\x00')
base = primitives.Base()
self.stream = utils.BytearrayStream(encoding)
self.assertTrue(
base.is_tag_next(base.tag, self.stream),
self.bad_match.format('tag', 'match', 'mismatch'))
def test_is_tag_next_invalid(self):
encoding = (b'\x42\x00\x01')
base = primitives.Base()
self.stream = utils.BytearrayStream(encoding)
self.assertFalse(
base.is_tag_next(base.tag, self.stream),
self.bad_match.format('tag', 'mismatch', 'match'))
| {
"content_hash": "d079bdc252248e310f81f6b44eab18a2",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 78,
"avg_line_length": 30.857798165137616,
"alnum_prop": 0.5724691541548982,
"repo_name": "OpenKMIP/PyKMIP",
"id": "0dfbdce7bedc1bb7a57479839b562df3305b9439",
"size": "7373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmip/tests/unit/core/primitives/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5552888"
},
{
"name": "Shell",
"bytes": "1214"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/all/shared_frn_all_lamp_tbl_s01.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_lamp_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "709e19b96f93c7c7cb290bccc7d99d57",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 24,
"alnum_prop": 0.6923076923076923,
"repo_name": "anhstudios/swganh",
"id": "9bd4719429d0bcffd6530acfc47b8d3b70a80cae",
"size": "457",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/furniture/all/shared_frn_all_lamp_tbl_s01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from oslo_utils import excutils
from tempest_lib.common.utils import data_utils
from tempest.common import fixed_network
from tempest.common import service_client
from tempest.common import waiters
from tempest import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
volume_backed=False, **kwargs):
"""Common wrapper utility returning a test server.
This method is a common wrapper returning a test server that can be
pingable or sshable.
:param clients: Client manager which provides OpenStack Tempest clients.
:param validatable: Whether the server will be pingable or sshable.
:param validation_resources: Resources created for the connection to the
server. Include a keypair, a security group and an IP.
:param tenant_network: Tenant network to be used for creating a server.
:param wait_until: Server status to wait for the server to reach after
its creation.
:param volume_backed: Whether the instance is volume backed or not.
:returns a tuple
"""
# TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
if 'name' in kwargs:
name = kwargs.pop('name')
else:
name = data_utils.rand_name(__name__ + "-instance")
flavor = kwargs.pop('flavor', CONF.compute.flavor_ref)
image_id = kwargs.pop('image_id', CONF.compute.image_ref)
kwargs = fixed_network.set_networks_kwarg(
tenant_network, kwargs) or {}
if CONF.validation.run_validation and validatable:
# As a first implementation, multiple pingable or sshable servers will
# not be supported
if 'min_count' in kwargs or 'max_count' in kwargs:
msg = ("Multiple pingable or sshable servers not supported at "
"this stage.")
raise ValueError(msg)
if 'security_groups' in kwargs:
kwargs['security_groups'].append(
{'name': validation_resources['security_group']['name']})
else:
try:
kwargs['security_groups'] = [
{'name': validation_resources['security_group']['name']}]
except KeyError:
LOG.debug("No security group provided.")
if 'key_name' not in kwargs:
try:
kwargs['key_name'] = validation_resources['keypair']['name']
except KeyError:
LOG.debug("No key provided.")
if CONF.validation.connect_method == 'floating':
if wait_until is None:
wait_until = 'ACTIVE'
if volume_backed:
volume_name = data_utils.rand_name('volume')
volumes_client = clients.volumes_v2_client
if CONF.volume_feature_enabled.api_v1:
volumes_client = clients.volumes_client
volume = volumes_client.create_volume(
display_name=volume_name,
imageRef=image_id)
volumes_client.wait_for_volume_status(volume['volume']['id'],
'available')
bd_map_v2 = [{
'uuid': volume['volume']['id'],
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': True}]
kwargs['block_device_mapping_v2'] = bd_map_v2
# Since this is boot from volume an image does not need
# to be specified.
image_id = ''
body = clients.servers_client.create_server(name=name, imageRef=image_id,
flavorRef=flavor,
**kwargs)
# handle the case of multiple servers
servers = []
if 'min_count' in kwargs or 'max_count' in kwargs:
# Get servers created which name match with name param.
body_servers = clients.servers_client.list_servers()
servers = \
[s for s in body_servers['servers'] if s['name'].startswith(name)]
else:
body = service_client.ResponseBody(body.response, body['server'])
servers = [body]
# The name of the method to associate a floating IP to as server is too
# long for PEP8 compliance so:
assoc = clients.floating_ips_client.associate_floating_ip_to_server
if wait_until:
for server in servers:
try:
waiters.wait_for_server_status(
clients.servers_client, server['id'], wait_until)
# Multiple validatable servers are not supported for now. Their
# creation will fail with the condition above (l.58).
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
assoc(floating_ip=validation_resources[
'floating_ip']['ip'],
server_id=servers[0]['id'])
except Exception:
with excutils.save_and_reraise_exception():
if ('preserve_server_on_error' not in kwargs
or kwargs['preserve_server_on_error'] is False):
for server in servers:
try:
clients.servers_client.delete_server(
server['id'])
except Exception:
LOG.exception('Deleting server %s failed'
% server['id'])
return body, servers
| {
"content_hash": "78aad95d2e4b54d13533e0e12a83570e",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 39.90909090909091,
"alnum_prop": 0.5756089013492203,
"repo_name": "liucode/tempest-master",
"id": "41b0529e8b63302e26f9e7fce92a290a7280d99c",
"size": "6367",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/common/compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2834934"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import os
import json
import boto3
table = boto3.resource("dynamodb").Table(os.environ.get("RAMBLINGS_TABLE_NAME"))
def _log_dynamo(response):
print("HTTPStatusCode:{}, RetryAttempts:{}, ScannedCount:{}, Count:{}".format(
response.get("ResponseMetadata").get("HTTPStatusCode"),
response.get("ResponseMetadata").get("RetryAttempts"),
response.get("ScannedCount"),
response.get("Count")
))
def get_ramblings(event, context):
    """Handler: scan up to 10 items and return them as a JSON HTTP response.

    The response allows any origin (CORS wildcard); the scan result is also
    logged via _log_dynamo before being serialized.
    """
    response = table.scan(Limit=10)
    _log_dynamo(response)
    body = json.dumps(response["Items"], indent=1)
    return {
        "statusCode": 200,
        "body": body,
        "headers": {"Access-Control-Allow-Origin": "*"},
    }
| {
"content_hash": "f79d5010c384c4acc4aa3fd5db80cf97",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 28.708333333333332,
"alnum_prop": 0.6415094339622641,
"repo_name": "Vilsepi/infinimonkey",
"id": "9abd7bf8e2009fbdf32f76a5f4741c73ccd70600",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/ramblings-api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "362"
},
{
"name": "HTML",
"bytes": "579"
},
{
"name": "JavaScript",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9732"
},
{
"name": "Shell",
"bytes": "1378"
}
],
"symlink_target": ""
} |
import re
import math
import json
import collections
from functools import partial
def mean(xs):
    """Arithmetic mean of a non-empty sequence.

    Raises ZeroDivisionError for an empty sequence (callers in this module
    guard with a length check before calling).
    """
    return sum(xs) / len(xs)


def std(xs):
    """Population standard deviation of a non-empty sequence.

    Uses the E[x^2] - E[x]^2 identity; the variance is clamped at zero to
    guard against tiny negative values caused by floating-point cancellation,
    which would otherwise make math.sqrt raise a domain error.
    """
    return math.sqrt(max(0.0, mean([x ** 2 for x in xs]) - mean(xs) ** 2))
def stat_mean(arr, default=float('NaN'), ndigits=0):
    """Rounded mean of ``arr``, or ``default`` when ``arr`` is empty."""
    if not arr:
        return default
    return round(mean(arr), ndigits)
def stat_std(arr, default=float('NaN'), ndigits=0):
    """Rounded standard deviation of ``arr``, or ``default`` when empty."""
    if not arr:
        return default
    return round(std(arr), ndigits)
def stat_max(arr, default=float('NaN'), ndigits=0):
    """Rounded maximum of ``arr``, or ``default`` when ``arr`` is empty."""
    return default if not arr else round(max(arr), ndigits)
def stat_min(arr, default=float('NaN'), ndigits=0):
    """Rounded minimum of ``arr``, or ``default`` when ``arr`` is empty."""
    return default if not arr else round(min(arr), ndigits)
class Scanner:
    """Line-oriented scanner that runs a command for each regex match.

    ``token_spec`` maps token names to ``{"cmd": callable, "re": pattern}``
    entries; every pattern is compiled once at construction time.
    """

    def __init__(self, token_spec):
        self._ts = {
            name: {"cmd": spec["cmd"], "re": re.compile(spec["re"])}
            for name, spec in token_spec.items()
        }

    def scan(self, text):
        """Match every token pattern against each line of ``text`` and call
        the token's command with the match object when it matches."""
        for line in text.splitlines():
            for spec in self._ts.values():
                m = spec["re"].match(line)
                if m is not None:
                    spec["cmd"](m)
class JSONParser:
    """Accumulates key/value pairs from successive JSON documents."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard everything parsed so far."""
        self._context = {}

    def parse(self, content):
        """Decode the JSON object in ``content`` and merge it into the
        accumulated context (later keys overwrite earlier ones)."""
        data = json.loads(content)
        self._context.update(data)

    def result(self):
        """Return the merged mapping collected so far."""
        return self._context
class GCTimeParser:
    """Parses GC timing report lines and aggregates run statistics.

    Collects full run time, total GC time, average pause time, and
    collection counts across repeated runs, then summarizes them.
    """

    def __init__(self):
        def parse_int(key, match):
            # Append the named integer group to the per-key sample list.
            self._context[key] += [int(match.group(key))]

        # Raw strings: "\d" in a plain string is an invalid escape sequence
        # (DeprecationWarning); the compiled pattern is unchanged.
        token_spec = {
            "FULL_TIME": {"cmd": partial(parse_int, "full_time"), "re": r"Completed in (?P<full_time>\d*) ms"},
            "GC_TIME": {"cmd": partial(parse_int, "gc_time"), "re": r"Time spent in gc (?P<gc_time>\d*) ms"},
            "STW_TIME": {"cmd": partial(parse_int, "stw_time"), "re": r"Average pause time (?P<stw_time>\d*) us"},
            "GC_COUNT": {"cmd": partial(parse_int, "gc_count"), "re": r"Completed (?P<gc_count>\d*) collections"}
        }
        self._scanner = Scanner(token_spec)
        self.reset()

    def reset(self):
        """Drop all accumulated samples."""
        self._context = collections.defaultdict(list)

    def parse(self, content):
        """Scan ``content`` and accumulate any timing samples found."""
        self._scanner.scan(content)

    def result(self):
        """Return mean/std summaries; pause stats are converted us -> ms."""
        return {
            "full_time": {
                "mean": stat_mean(self._context["full_time"], ndigits=0),
                "std": stat_std(self._context["full_time"], ndigits=3)
            },
            "gc_time": {
                "mean": stat_mean(self._context["gc_time"], ndigits=0),
                "std": stat_std(self._context["gc_time"], ndigits=3)
            },
            "stw_time": {
                "mean": self._us_to_ms(stat_mean(self._context["stw_time"]), ndigits=3),
                "std": self._us_to_ms(stat_std(self._context["stw_time"]), ndigits=3),
                "min": self._us_to_ms(stat_min(self._context["stw_time"]), ndigits=3),
                "max": self._us_to_ms(stat_max(self._context["stw_time"]), ndigits=3)
            },
            "gc_count": stat_mean(self._context["gc_count"], default=0, ndigits=0)
        }

    @staticmethod
    def _us_to_ms(us, ndigits=3):
        """Convert microseconds to milliseconds, rounded to ``ndigits``."""
        return round(float(us) / 1000, ndigits)
class GCHeapParser:
    """Parses GC heap report lines (used/live/extra bytes, GC timestamps)."""

    def __init__(self):
        def parse_int(key, match):
            # Append the named integer group to the per-key sample list.
            self._context[key] += [int(match.group(key))]

        # Raw strings: "\s"/"\d" in plain strings are invalid escape
        # sequences (DeprecationWarning); the compiled patterns are unchanged.
        token_spec = {
            "USED": {"cmd": partial(parse_int, "used"), "re": r"mem used: \s*(?P<used>\d*)"},
            "LIVE": {"cmd": partial(parse_int, "live"), "re": r"mem live: \s*(?P<live>\d*)"},
            "EXTRA": {"cmd": partial(parse_int, "extra"), "re": r"mem extra: \s*(?P<extra>\d*)"},
            "GC_START": {"cmd": partial(parse_int, "gctime"), "re": r"GC start time: (?P<gctime>\d*)"},
            "GC_FINISH": {"cmd": partial(parse_int, "gctime"), "re": r"GC finish time: (?P<gctime>\d*)"}
        }
        self._scanner = Scanner(token_spec)
        self.reset()

    def reset(self):
        """Drop all accumulated samples."""
        self._context = collections.defaultdict(list)

    def parse(self, test_output):
        """Scan ``test_output`` and accumulate heap samples."""
        self._scanner.scan(test_output)

    def result(self):
        """Return live heap sizes, per-sample overhead, and GC timestamps.

        "heapextra" is used + extra - live for each aligned sample triple,
        i.e. the bytes allocated but not live.
        """
        return {
            "heapsize": self._context["live"],
            "heapextra": [(used + extra - live) for used, live, extra in zip(self._context["used"], self._context["live"], self._context["extra"])],
            "gctime": self._context["gctime"]
        }
class GCPauseTimeParser:
    """Parses individual GC pause lines, converting values to milliseconds."""

    def __init__(self):
        def parse_pause(match):
            pause = int(match.group("pause"))
            # Record the pause in ms with one decimal place.
            pause = round(pause / 1000, 1)
            self._context["pause"] += [pause]

        # Raw string: "\s"/"\d" in a plain string are invalid escape
        # sequences (DeprecationWarning); the compiled pattern is unchanged.
        token_spec = {
            "PAUSE": {"cmd": parse_pause, "re": r"pause time: \s*(?P<pause>\d*)"}
        }
        self._scanner = Scanner(token_spec)
        self.reset()

    def reset(self):
        """Drop all accumulated samples."""
        self._context = collections.defaultdict(list)

    def parse(self, test_output):
        """Scan ``test_output`` and accumulate pause samples."""
        self._scanner.scan(test_output)

    def result(self):
        """Return the raw mapping of collected pause samples."""
        return self._context
class BoehmStatsParser:
    """Parses Boehm GC log output into pause, heap, and timing samples."""

    def __init__(self):
        def parse_pause(match):
            pause = int(match.group("pause"))
            if pause == 0:
                # A reported 0 is recorded as 0.5 — presumably so sub-ms
                # pauses are not lost in the statistics.
                pause = 0.5
            self._context["pause"] += [pause]

        def parse_heap(match):
            before_gc = int(match.group("size")) * 1024  # KiB -> bytes
            after_gc = before_gc - int(match.group("freed"))
            # No live-size figure is reported; record zeros so the series
            # stays aligned with the heapextra samples.
            self._context["heapsize"] += [0, 0]
            self._context["heapextra"] += [before_gc, after_gc]

        def parse_time(match):
            self._context["gctime"] += [int(match.group("gctime"))]

        # Raw strings: "\d" in a plain string is an invalid escape sequence
        # (DeprecationWarning); the compiled patterns are unchanged.
        token_spec = {
            "HEAP": {"cmd": parse_heap, "re": r"GC #\d* freed (?P<freed>\d*) bytes, heap (?P<size>\d*) KiB"},
            "PAUSE": {"cmd": parse_pause, "re": r"Complete collection took (?P<pause>\d*) msecs"},
            "GC_START": {"cmd": parse_time, "re": r"GC start time: (?P<gctime>\d*)"},
            "GC_FINISH": {"cmd": parse_time, "re": r"GC finish time: (?P<gctime>\d*)"}
        }
        self._scanner = Scanner(token_spec)
        self.reset()

    def reset(self):
        """Drop all accumulated samples."""
        self._context = collections.defaultdict(list)

    def parse(self, content):
        """Scan ``content`` and accumulate Boehm GC samples."""
        self._scanner.scan(content)

    def result(self):
        """Return the raw mapping of collected samples."""
        return self._context
class MassifParser:
    """Parses Valgrind Massif snapshot lines (heap bytes and timestamps)."""
    def __init__(self):
        def parse_int(key, match):
            # Append the named integer group to the per-key sample list.
            self._context[key] += [int(match.group(key))]
        # NOTE(review): the patterns use "\d" inside non-raw strings, which
        # is an invalid escape sequence (DeprecationWarning) — consider raw
        # strings; the matched pattern itself is unaffected.
        token_spec = {
            "HEAP_SIZE" : {"cmd": partial(parse_int, "heapsize") , "re": "mem_heap_B=(?P<heapsize>\d*)"},
            "HEAP_EXTRA": {"cmd": partial(parse_int, "heapextra"), "re": "mem_heap_extra_B=(?P<heapextra>\d*)"},
            "GC_TIME": {"cmd": partial(parse_int, "gctime"), "re": "time=(?P<gctime>\d*)"}
        }
        self._scanner = Scanner(token_spec)
        self.reset()
    def reset(self):
        # Fresh per-key sample lists.
        self._context = collections.defaultdict(list)
    def parse(self, content):
        # Accumulate any snapshot fields found in ``content``.
        self._scanner.scan(content)
    def result(self):
        # Raw samples keyed "heapsize" / "heapextra" / "gctime".
        return self._context | {
"content_hash": "b0a19e2dee7b28112f2c0f06304c60e3",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 148,
"avg_line_length": 30.5,
"alnum_prop": 0.5302430751837196,
"repo_name": "eucpp/allocgc",
"id": "a303a871e86f707b480ca80b48b4010503b66f39",
"size": "7076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/parsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "337"
},
{
"name": "C#",
"bytes": "3785"
},
{
"name": "C++",
"bytes": "570114"
},
{
"name": "CMake",
"bytes": "21201"
},
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "2978"
}
],
"symlink_target": ""
} |
"""
Test that expr will time out and allow other threads to run if it blocks.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ExprDoesntDeadlockTestCase(TestBase):
    """Checks that evaluating a lock-taking expression completes instead of
    deadlocking the debugger."""

    mydir = TestBase.compute_mydir(__file__)

    @expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17946')
    @add_test_categories(["basic_process"])
    def test_with_run_command(self):
        """Test that expr will time out and allow other threads to run if it blocks."""
        self.build()
        exe = self.getBuildArtifact("a.out")

        # Create a target for the built binary.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Set a source-regex breakpoint before call_me_to_get_lock is called.
        main_file_spec = lldb.SBFileSpec("locking.cpp")
        bp = target.BreakpointCreateBySourceRegex('Break here', main_file_spec)
        if self.TraceOn():
            print("breakpoint:", bp)
        self.assertTrue(bp and bp.GetNumLocations() == 1, VALID_BREAKPOINT)

        # Launch the process without stopping at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)

        # The process should now be stopped at our breakpoint.
        from lldbsuite.test.lldbutil import get_stopped_thread
        thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
        self.assertTrue(
            thread.IsValid(),
            "There should be a thread stopped due to breakpoint condition")

        # Evaluating this call must complete rather than deadlock.
        frame = thread.GetFrameAtIndex(0)
        result = frame.EvaluateExpression("call_me_to_get_lock(get_int())")
        self.assertTrue(result.IsValid())
        self.assertEqual(result.GetValueAsSigned(0), 567)
| {
"content_hash": "98b57b562675b854717e26a6dba567b8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 35.70175438596491,
"alnum_prop": 0.654054054054054,
"repo_name": "endlessm/chromium-browser",
"id": "d7d963390b051e01fe8079d003be89605c4d23e3",
"size": "2035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/commands/expression/no-deadlock/TestExprDoesntBlock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
quote = "I think there is a world market for maybe five computers."

# Each (label, text) pair shows the quote under one string transformation;
# none of the transformations mutate the original string.
transformations = (
    ("Original quote:", quote),
    ("\nIn uppercase:", quote.upper()),
    ("\nIn lowercase:", quote.lower()),
    ("\nAs a title:", quote.title()),
    ("\nWith a minor replacement:", quote.replace("five", "millions of")),
    ("\nOriginal Quote is still:", quote),
)
for label, text in transformations:
    print(label)
    print(text)

input("\nPress the enter key to exit")
| {
"content_hash": "bfe003822e2ab1d927c8f476609fdc81",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.7050359712230215,
"repo_name": "Ry09/Python-projects",
"id": "e517baba1ae846884d62463520f5269e81035867",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programs & Challenges from Python for beginners book/Chapter 2/quoteManipulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13815"
}
],
"symlink_target": ""
} |
import xxx, yyy
| {
"content_hash": "9541b63048ed698088514c386f4677e9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 15,
"avg_line_length": 16,
"alnum_prop": 0.75,
"repo_name": "github/codeql",
"id": "4a22939b33361c5bb0b1555b1fa2349604714829",
"size": "16",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/src/Imports/ImportTwiceOnALine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
from google.cloud import vision_v1p3beta1
async def sample_delete_reference_image():
    """Call ProductSearch.DeleteReferenceImage with placeholder arguments."""
    client = vision_v1p3beta1.ProductSearchAsyncClient()

    # "name_value" is a placeholder resource name to be replaced by callers.
    request = vision_v1p3beta1.DeleteReferenceImageRequest(name="name_value")

    # Send the request; the sample discards any result.
    await client.delete_reference_image(request=request)
# [END vision_v1p3beta1_generated_ProductSearch_DeleteReferenceImage_async]
| {
"content_hash": "5b435b459782f6d82cd54205f2d6c039",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 26.823529411764707,
"alnum_prop": 0.7456140350877193,
"repo_name": "googleapis/python-vision",
"id": "64870e36ab75550565cc3bc3e6d26a235289ac39",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/vision_v1p3beta1_generated_product_search_delete_reference_image_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3254393"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, abort, redirect, request, \
g, url_for
from lektor.admin.utils import fs_path_to_url_path
from lektor.environment import PRIMARY_ALT
from werkzeug.wsgi import extract_path_info
# All admin pages are mounted under the /admin prefix.
bp = Blueprint('dash', __name__, url_prefix='/admin')
# (URL rule, endpoint name) pairs; each is registered against the same
# generic view in the loop at the bottom of this module.
endpoints = [
    ('/', 'index'),
    ('/publish', 'publish'),
    ('/<path>/edit', 'edit'),
    ('/<path>/delete', 'delete'),
    ('/<path>/preview', 'preview'),
    ('/<path>/add-child', 'add_child'),
    ('/<path>/upload', 'add_attachment'),
]
@bp.route('/edit')
def edit_redirect():
    """Resolve the ``?path=`` query argument to a record and redirect to
    that record's admin edit page; 404 if it cannot be resolved."""
    # Find out where we wanted to go to. We need to chop off the leading
    # /admin on this URL as this is where the admin thinks it's placed.
    admin_root = request.url_root.rstrip('/').rsplit('/', 1)[0]
    path = extract_path_info(admin_root, request.args.get('path', '/'))

    record = None
    if path is not None:
        record = g.admin_context.pad.resolve_url_path(path, alt_fallback=False)
    if record is None:
        abort(404)

    edit_path = fs_path_to_url_path(record.path)
    if record.alt != PRIMARY_ALT:
        # Non-primary alternatives are encoded as a "+alt" suffix.
        edit_path += '+' + record.alt
    return redirect(url_for('dash.edit', path=edit_path))
def generic_endpoint(**values):
    """Shared view for every dash endpoint: always render the admin shell."""
    return render_template('dash.html')
# Wire every (rule, endpoint) pair from `endpoints` to the shared view.
for path, endpoint in endpoints:
    bp.add_url_rule(path, endpoint, generic_endpoint)
| {
"content_hash": "2b7b04f6b26d894f3d67cc045a143aec",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 29.395833333333332,
"alnum_prop": 0.6236711552090716,
"repo_name": "mitsuhiko/lektor",
"id": "23e1e04b73dd152a9f66aeb8586f2be81cb336eb",
"size": "1411",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lektor/admin/modules/dash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "49"
},
{
"name": "CSS",
"bytes": "12727"
},
{
"name": "HTML",
"bytes": "4512"
},
{
"name": "JavaScript",
"bytes": "135255"
},
{
"name": "Makefile",
"bytes": "5009"
},
{
"name": "Python",
"bytes": "345731"
},
{
"name": "Shell",
"bytes": "8114"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from . import _TestHasProps, _TestModel
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.core.property.pandas as bcpp
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names bokeh.core.property.pandas is expected to export (checked below).
ALL = (
    'PandasDataFrame',
    'PandasGroupBy',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_PandasDataFrame(object):
    """Property-validation tests for bcpp.PandasDataFrame."""

    def test_valid(self, pd):
        prop = bcpp.PandasDataFrame()
        assert prop.is_valid(pd.DataFrame())

    def test_invalid(self):
        prop = bcpp.PandasDataFrame()
        # Same rejection checks as a chain of asserts, driven by one loop.
        for bad in (None, 1.0+1.0j, (), [], {}, _TestHasProps(), _TestModel()):
            assert not prop.is_valid(bad)
class Test_PandasGroupBy(object):
    """Property-validation tests for bcpp.PandasGroupBy."""

    def test_valid(self, pd):
        prop = bcpp.PandasGroupBy()
        assert prop.is_valid(pd.core.groupby.GroupBy(pd.DataFrame()))

    def test_invalid(self):
        prop = bcpp.PandasGroupBy()
        # Same rejection checks as a chain of asserts, driven by one loop.
        for bad in (None, 1.0+1.0j, (), [], {}, _TestHasProps(), _TestModel()):
            assert not prop.is_valid(bad)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpp, ALL)
| {
"content_hash": "a470ddd07fa401f363cd4ae84a3ea694",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 82,
"avg_line_length": 32.03896103896104,
"alnum_prop": 0.3980543169841913,
"repo_name": "timsnyder/bokeh",
"id": "86e2cb060789348ae409483cfe6bf3d54296c160",
"size": "2971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/core/property/tests/test_pandas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
"""
Surge unit tests.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import os
from decimal import Decimal, ROUND_HALF_EVEN, getcontext
import psycopg2 as db
import psycopg2.extensions as ext
from psycopg2.extras import RealDictCursor
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir, "surge"))
from surge import Surge
getcontext().rounding = ROUND_HALF_EVEN
getcontext().prec = 28
BITCOINAVERAGE_API = "https://api.bitcoinaverage.com/"
CRYPTOCOINCHARTS_API = "http://www.cryptocoincharts.info/v2/api/"
BITTREX_API = "https://bittrex.com/api/v1.1/"
class TestSurge(unittest.TestCase):
    """Smoke tests for the Surge updater (the update calls themselves are
    left commented out, so only construction/teardown is exercised)."""

    def setUp(self):
        # Quiet instance: unbounded retries, DB checks skipped.
        options = dict(
            verbose=False,
            update_all_coins=True,
            coin_list=None,
            interval=120,
            max_retry=-1,
            database_check=False,
        )
        self.surge = Surge(**options)

    def test_bittrex_orderbook_snapshot(self):
        # self.surge.bittrex_orderbook_snapshot()
        pass

    def test_update_bitcoinaverage(self):
        # self.surge.update_bitcoinaverage()
        pass

    def test_update_cryptocoincharts(self):
        # self.surge.update_cryptocoincharts()
        pass

    def test_update_loop(self):
        pass

    def tearDown(self):
        del self.surge
if __name__ == "__main__":
    # Run the suite with verbose per-test output.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestSurge))
| {
"content_hash": "c92412921789a10495d4b181f24610b8",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 26.82758620689655,
"alnum_prop": 0.6458868894601543,
"repo_name": "tinybike/Surge",
"id": "5015e2abc6d34b572e395bb8bf1a54aa4f6d5aac",
"size": "1578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18756"
}
],
"symlink_target": ""
} |
"""Manila base exception handling.
Includes decorator for re-raising Manila-type exceptions.
SHOULD include dedicated exception logging.
"""
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
import six
import webob.exc
from manila.i18n import _
from manila.i18n import _LE
LOG = log.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Whether to make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
ProcessExecutionError = processutils.ProcessExecutionError
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP error whose code, title, and explanation are supplied at
    construction time rather than as class-level constants."""

    def __init__(self, code=0, title="", explanation=""):
        # Set instance attributes first, then let webob initialize.
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class Error(Exception):
    """Plain base error; unlike ManilaException it carries no formatted
    message, HTTP code, or kwargs handling."""
    pass
class ManilaException(Exception):
    """Base Manila Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, detail_data=None, **kwargs):
        self.kwargs = kwargs
        # Previously ``detail_data={}`` — a mutable default shared by every
        # instance that did not pass one; give each instance its own dict.
        self.detail_data = {} if detail_data is None else detail_data

        # Propagate the class-level HTTP code into kwargs unless overridden.
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        # Stringify exception values so they format cleanly into messages.
        for k, v in six.iteritems(self.kwargs):
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)

        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation.'))
                for name, value in six.iteritems(kwargs):
                    LOG.error(_LE("%(name)s: %(value)s"), {
                        'name': name, 'value': value})
                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message
        elif isinstance(message, Exception):
            message = six.text_type(message)

        # Collapse a trailing ".." (not preceded by a third dot) to a single
        # period. Raw string avoids invalid-escape DeprecationWarnings.
        if re.match(r'.*[^\.]\.\.$', message):
            message = message[:-1]

        self.msg = message
        super(ManilaException, self).__init__(message)
class NetworkException(ManilaException):
message = _("Exception due to network failure.")
class NetworkBadConfigurationException(NetworkException):
message = _("Bad network configuration: %(reason)s.")
class NotAuthorized(ManilaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges.")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class Conflict(ManilaException):
message = _("%(err)s")
code = 409
class Invalid(ManilaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidDriverMode(Invalid):
message = _("Invalid driver mode: %(driver_mode)s.")
class InvalidAPIVersionString(Invalid):
message = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
message = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
message = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
class InvalidCapacity(Invalid):
message = _("Invalid capacity: %(name)s = %(value)s.")
class NotFound(ManilaException):
message = _("Resource could not be found.")
code = 404
safe = True
class InUse(ManilaException):
message = _("Resource is in use.")
class AvailabilityZoneNotFound(NotFound):
message = _("Availability zone %(id)s could not be found.")
class ShareNetworkNotFound(NotFound):
message = _("Share network %(share_network_id)s could not be found.")
class ShareServerNotFound(NotFound):
message = _("Share server %(share_server_id)s could not be found.")
class ShareServerNotFoundByFilters(ShareServerNotFound):
message = _("Share server could not be found by "
"filters: %(filters_description)s.")
class ShareServerInUse(InUse):
message = _("Share server %(share_server_id)s is in use.")
class InvalidShareServer(Invalid):
message = _("Share server %(share_server_id)s is not valid.")
class ShareMigrationFailed(ManilaException):
message = _("Share migration failed: %(reason)s")
class ServiceIPNotFound(ManilaException):
    # Previously duplicated ShareMigrationFailed's "Share migration failed"
    # text; this class is about a missing service IP address.
    message = _("Service IP address could not be determined: %(reason)s")
class ShareServerNotCreated(ManilaException):
message = _("Share server %(share_server_id)s failed on creation.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class ServiceIsDown(Invalid):
message = _("Service %(service)s is down.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler host filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler host weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found.")
class QuotaExists(ManilaException):
message = _("Quota exists for project %(project_id)s, "
"resource %(resource)s.")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
message = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(ManilaException):
message = _("Quota exceeded for resources: %(overs)s.")
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class MigrationError(ManilaException):
message = _("Migration error: %(reason)s.")
class MalformedRequestBody(ManilaException):
message = _("Malformed message body: %(reason)s.")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s.")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s.")
class NoValidHost(ManilaException):
message = _("No valid host was found. %(reason)s.")
class WillNotSchedule(ManilaException):
message = _("Host %(host)s is not up or doesn't exist.")
class QuotaError(ManilaException):
message = _("Quota exceeded: code=%(code)s.")
code = 413
headers = {'Retry-After': 0}
safe = True
class ShareSizeExceedsAvailableQuota(QuotaError):
message = _("Requested share exceeds allowed gigabytes quota.")
class SnapshotSizeExceedsAvailableQuota(QuotaError):
message = _("Requested snapshot exceeds allowed gigabytes quota.")
class ShareLimitExceeded(QuotaError):
message = _("Maximum number of shares allowed (%(allowed)d) exceeded.")
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded.")
class ShareNetworksLimitExceeded(QuotaError):
message = _("Maximum number of share-networks "
"allowed (%(allowed)d) exceeded.")
class GlusterfsException(ManilaException):
message = _("Unknown Gluster exception.")
class InvalidShare(Invalid):
message = _("Invalid share: %(reason)s.")
class InvalidShareInstance(Invalid):
message = _("Invalid share instance: %(reason)s.")
class ManageInvalidShare(InvalidShare):
message = _("Manage existing share failed due to "
"invalid share: %(reason)s")
class UnmanageInvalidShare(InvalidShare):
message = _("Unmanage existing share failed due to "
"invalid share: %(reason)s")
class PortLimitExceeded(QuotaError):
message = _("Maximum number of ports exceeded.")
class ShareAccessExists(ManilaException):
message = _("Share access %(access_type)s:%(access)s exists.")
class InvalidShareAccess(Invalid):
message = _("Invalid access_rule: %(reason)s.")
class InvalidShareAccessLevel(Invalid):
message = _("Invalid or unsupported share access level: %(level)s.")
class ShareIsBusy(ManilaException):
    # "%(share_name)s" (printf-style), not "$(share_name)", so the share
    # name actually interpolates into the message.
    message = _("Deleting %(share_name)s share that is in use.")
# --- Share backend / snapshot errors ---
class ShareBackendException(ManilaException):
    message = _("Share backend error: %(msg)s.")
class ShareSnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareSnapshotNotSupported(ManilaException):
    message = _("Share %(share_name)s does not support snapshots.")
class ShareSnapshotIsBusy(ManilaException):
    message = _("Deleting snapshot %(snapshot_name)s that has "
                "dependent shares.")
class InvalidShareSnapshot(Invalid):
    message = _("Invalid share snapshot: %(reason)s.")
# --- Share metadata errors ---
class ShareMetadataNotFound(NotFound):
    message = _("Metadata item is not found.")
class InvalidShareMetadata(Invalid):
    message = _("Invalid metadata.")
class InvalidShareMetadataSize(Invalid):
    message = _("Invalid metadata size.")
# --- Security service / share network errors ---
class SecurityServiceNotFound(NotFound):
    message = _("Security service %(security_service_id)s could not be found.")
class ShareNetworkSecurityServiceAssociationError(ManilaException):
    message = _("Failed to associate share network %(share_network_id)s"
                " and security service %(security_service_id)s: %(reason)s.")
class ShareNetworkSecurityServiceDissociationError(ManilaException):
    message = _("Failed to dissociate share network %(share_network_id)s"
                " and security service %(security_service_id)s: %(reason)s.")
# --- Volume / share type errors ---
class InvalidVolume(Invalid):
    message = _("Invalid volume.")
class InvalidShareType(Invalid):
    message = _("Invalid share type: %(reason)s.")
class InvalidExtraSpec(Invalid):
    message = _("Invalid extra_spec: %(reason)s.")
class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")
class VolumeSnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareTypeNotFound(NotFound):
    message = _("Share type %(share_type_id)s could not be found.")
class ShareTypeAccessNotFound(NotFound):
    message = _("Share type access not found for %(share_type_id)s / "
                "%(project_id)s combination.")
class ShareTypeNotFoundByName(ShareTypeNotFound):
    message = _("Share type with name %(share_type_name)s "
                "could not be found.")
class ShareTypeExtraSpecsNotFound(NotFound):
    message = _("Share Type %(share_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class ShareTypeInUse(ManilaException):
    message = _("Share Type %(share_type_id)s deletion is not allowed with "
                "shares present with the type.")
class ShareTypeExists(ManilaException):
    message = _("Share Type %(id)s already exists.")
class ShareTypeAccessExists(ManilaException):
    message = _("Share type access for %(share_type_id)s / "
                "%(project_id)s combination already exists.")
class ShareTypeCreateFailed(ManilaException):
    message = _("Cannot create share_type with "
                "name %(name)s and specs %(extra_specs)s.")
class ManageExistingShareTypeMismatch(ManilaException):
    message = _("Manage existing share failed due to share type mismatch: "
                "%(reason)s")
# --- Share resize errors ---
class ShareExtendingError(ManilaException):
    message = _("Share %(share_id)s could not be extended due to error "
                "in the driver: %(reason)s")
class ShareShrinkingError(ManilaException):
    message = _("Share %(share_id)s could not be shrunk due to error "
                "in the driver: %(reason)s")
class ShareShrinkingPossibleDataLoss(ManilaException):
    message = _("Share %(share_id)s could not be shrunk due to "
                "possible data loss")
# --- Service instance / generic storage resource errors ---
class InstanceNotFound(NotFound):
    message = _("Instance %(instance_id)s could not be found.")
class BridgeDoesNotExist(ManilaException):
    message = _("Bridge %(bridge)s does not exist.")
class ServiceInstanceException(ManilaException):
    message = _("Exception in service instance manager occurred.")
class ServiceInstanceUnavailable(ServiceInstanceException):
    message = _("Service instance is not available.")
class StorageResourceException(ManilaException):
    message = _("Storage resource exception.")
class StorageResourceNotFound(StorageResourceException):
    message = _("Storage resource %(name)s not found.")
class SnapshotNotFound(StorageResourceNotFound):
    message = _("Snapshot %(name)s not found.")
class SnapshotUnavailable(StorageResourceException):
    message = _("Snapshot %(name)s info not available.")
# --- Vendor driver errors (NetApp, EMC VNX, HP 3PAR) ---
class NetAppException(ManilaException):
    message = _("Exception due to NetApp failure.")
class VserverNotFound(NetAppException):
    message = _("Vserver %(vserver)s not found.")
class VserverNotSpecified(NetAppException):
    message = _("Vserver not specified.")
class EMCVnxXMLAPIError(Invalid):
    message = _("%(err)s")
class EMCVnxLockRequiredException(ManilaException):
    message = _("Unable to acquire lock(s).")
class HP3ParInvalidClient(Invalid):
    message = _("%(err)s")
class HP3ParInvalid(Invalid):
    message = _("%(err)s")
class HP3ParUnexpectedError(ManilaException):
    message = _("%(err)s")
class GaneshaCommandFailure(ProcessExecutionError):
    """Raised when a Ganesha management command fails.

    Supplies a default ``description`` when the caller passes none.
    """
    _description = _("Ganesha management command failed.")
    def __init__(self, **kw):
        # Fill in the class-level default description unless one was given.
        kw.setdefault('description', self._description)
        super(GaneshaCommandFailure, self).__init__(**kw)
# --- Miscellaneous driver errors (SQLite, SSH, HDFS, Quobyte, HNAS) ---
class InvalidSqliteDB(Invalid):
    message = _("Invalid Sqlite database.")
class SSHException(ManilaException):
    message = _("Exception in SSH protocol negotiation or logic.")
class HDFSException(ManilaException):
    message = _("HDFS exception occurred!")
class QBException(ManilaException):
    message = _("Quobyte exception occurred: %(msg)s")
class QBRpcException(ManilaException):
    """Quobyte backend specific exception."""
    message = _("Quobyte JsonRpc call to backend raised "
                "an exception: %(result)s, Quobyte error"
                " code %(qbcode)s")
class SSHInjectionThreat(ManilaException):
    message = _("SSH command injection detected: %(command)s")
class HNASBackendException(ManilaException):
    message = _("HNAS Backend Exception: %(msg)s")
class HNASConnException(ManilaException):
    message = _("HNAS Connection Exception: %(msg)s")
# ConsistencyGroup
class ConsistencyGroupNotFound(NotFound):
    message = _("ConsistencyGroup %(consistency_group_id)s could not be "
                "found.")
class CGSnapshotNotFound(NotFound):
    message = _("Consistency group snapshot %(cgsnapshot_id)s could not be "
                "found.")
class CGSnapshotMemberNotFound(NotFound):
    message = _("CG snapshot %(member_id)s could not be found.")
class InvalidConsistencyGroup(Invalid):
    message = _("Invalid ConsistencyGroup: %(reason)s")
class InvalidCGSnapshot(Invalid):
    message = _("Invalid CGSnapshot: %(reason)s")
| {
"content_hash": "3ba41115ae0801253023f56289af75a3",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 79,
"avg_line_length": 26.603343465045594,
"alnum_prop": 0.6778634675806913,
"repo_name": "jcsp/manila",
"id": "9da2a14e86c30d884e4e3fe3cc90f70414693d5b",
"size": "18237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "4993686"
},
{
"name": "Shell",
"bytes": "42913"
}
],
"symlink_target": ""
} |
class DirectoryNotifier:
    """Watch a directory for added/removed entries via fcntl F_NOTIFY.

    Python 2 recipe.  An instance is intended to be used as a signal
    handler (see ``__call__``): each invocation re-lists the directory
    and compares against the previously seen contents.
    """
    def __init__(self, dirname):
        if not os.path.isdir(dirname):
            raise RuntimeError, "you can only watch a directory."
        self.dirname = dirname
        # Keep a read-only fd open on the directory for fcntl notification.
        self.fd = os.open(dirname, 0)
        self.currentcontents = os.listdir(dirname)
        self.oldsig = fcntl.fcntl(self.fd, fcntl.F_GETSIG)
        # Signal 0 means deliver the default SIGIO on directory events.
        fcntl.fcntl(self.fd, fcntl.F_SETSIG, 0)
        # DN_MULTISHOT keeps the notification armed after each event.
        fcntl.fcntl(self.fd, fcntl.F_NOTIFY, fcntl.DN_DELETE|fcntl.DN_CREATE|fcntl.DN_MULTISHOT)
    def __del__(self):
        # fcntl.fcntl(self.fd, fcntl.F_SETSIG, self.oldsig)
        os.close(self.fd)
    def __str__(self):
        return "%s watching %s" % (self.__class__.__name__, self.dirname)
    # there are lots of race conditions here, but we'll live with that for now.
    # NOTE(review): a simultaneous add+remove leaves len() unchanged and is
    # reported as "no change".
    def __call__(self, frame):
        newcontents = os.listdir(self.dirname)
        if len(newcontents) > len(self.currentcontents):
            new = filter(lambda item: item not in self.currentcontents, newcontents)
            self.entry_added(new)
        elif len(newcontents) < len(self.currentcontents):
            rem = filter(lambda item: item not in newcontents, self.currentcontents)
            self.entry_removed(rem)
        else:
            self.no_change()
        self.currentcontents = newcontents
    # override these in a subclass
    def entry_added(self, added):
        print added, "added to", self.dirname
    def entry_removed(self, removed):
        print removed, "removed from", self.dirname
    def no_change(self):
        print "No change in", self.dirname
| {
"content_hash": "ed6db6704ffc0cc1b2dbec2573d7ef35",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 90,
"avg_line_length": 34.575,
"alnum_prop": 0.7093275488069414,
"repo_name": "ActiveState/code",
"id": "69456aa7810f2757ab70128f1fa87e50cbfafb2e",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/217829_Watching_directory_tree/recipe-217829.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
""" The module provides:
* functions used when evaluating signature's features
* regexp's constants used when evaluating signature's features
"""
import unicodedata
import regex as re
from talon.utils import to_unicode
from talon.signature.constants import SIGNATURE_MAX_LINES
rc = re.compile
RE_EMAIL = rc('\S@\S')
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
RE_URL = rc(r'''https?://|www\.[\S]+\.[\S]''')
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line matches the regular expression "^[\s]*---*[\s]*$".
RE_SEPARATOR = rc('^[\s]*---*[\s]*$')
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line has a sequence of 10 or more special characters.
RE_SPECIAL_CHARS = rc(('^[\s]*([\*]|#|[\+]|[\^]|-|[\~]|[\&]|[\$]|_|[\!]|'
'[\/]|[\%]|[\:]|[\=]){10,}[\s]*$'))
RE_SIGNATURE_WORDS = rc(('(T|t)hank.*,|(B|b)est|(R|r)egards|'
'^sent[ ]{1}from[ ]{1}my[\s,!\w]*$|BR|(S|s)incerely|'
'(C|c)orporation|Group'))
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
RE_NAME = rc('[A-Z][a-z]+\s\s?[A-Z][\.]?\s\s?[A-Z][a-z]+')
INVALID_WORD_START = rc('\(|\+|[\d]')
BAD_SENDER_NAMES = [
# known mail domains
'hotmail', 'gmail', 'yandex', 'mail', 'yahoo', 'mailgun', 'mailgunhq',
'example',
# first level domains
'com', 'org', 'net', 'ru',
# bad words
'mailto'
]
def binary_regex_search(prog):
    '''Wrap a compiled regex into a 0/1 "found anywhere" feature.

    The returned callable yields 1 when `prog` is found somewhere in the
    given string and 0 otherwise.

    >>> import regex as re
    >>> binary_regex_search(re.compile("12"))("12")
    1
    >>> binary_regex_search(re.compile("12"))("34")
    0
    '''
    def feature(s):
        return 1 if prog.search(s) else 0
    return feature
def binary_regex_match(prog):
    '''Wrap a compiled regex into a 0/1 "matches at the start" feature.

    The returned callable yields 1 when the string matches `prog` at its
    beginning and 0 otherwise.

    >>> import regex as re
    >>> binary_regex_match(re.compile("12"))("12 3")
    1
    >>> binary_regex_match(re.compile("12"))("3 12")
    0
    '''
    def feature(s):
        return 1 if prog.match(s) else 0
    return feature
def flatten_list(list_to_flatten):
    """Flatten exactly one level of nesting.

    >>> flatten_list([[1, 2], [3, 4, 5]])
    [1, 2, 3, 4, 5]
    >>> flatten_list([[1], [[2]]])
    [1, [2]]
    >>> flatten_list([1, [2]])
    Traceback (most recent call last):
    ...
    TypeError: 'int' object is not iterable
    """
    flat = []
    for sublist in list_to_flatten:
        flat.extend(sublist)
    return flat
def contains_sender_names(sender):
    '''Return a 0/1 feature that searches a line for the sender's name.

    Builds an alternation regex out of the names extracted from the
    ``From:`` header (each name in its original and capitalized form)
    and wraps it with :func:`binary_regex_search`.

    NOTE(review): ``'( |$)|'.join(...)`` inserts the boundary pattern
    only *between* alternatives, so the last name carries no boundary —
    presumably intentional, but worth confirming.

    >>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
    >>> feature("Sergey Obukhov")
    1
    >>> feature("BR, Sergey N.")
    1
    >>> feature("Sergey")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
    1
    '''
    names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
                                        for e in extract_names(sender)]))
    # Fall back to the raw sender string when no names were extracted.
    names = names or sender
    if names != '':
        return binary_regex_search(re.compile(names))
    # Empty sender: the feature is constantly 0.
    return lambda s: 0
def extract_names(sender):
    """Extract probable sender names from a ``From:`` header value.

    Besides actual names this may pick up e.g. a company name or
    pieces of the e-mail address.  The result order is arbitrary
    because duplicates are dropped via a set.

    >>> sorted(extract_names('Sergey N. Obukhov <serobnic@mail.ru>'))
    ['Obukhov', 'Sergey', 'serobnic']
    >>> extract_names('')
    []
    """
    sender = to_unicode(sender, precise=True)
    # Keep only letters; every other character becomes a word separator.
    cleaned = "".join(c if c.isalpha() else ' ' for c in sender)
    # Drop one-letter tokens and known junk words (mail domains, TLDs,
    # "mailto", ...).
    words = [w for w in cleaned.split()
             if len(w) > 1 and w not in BAD_SENDER_NAMES]
    # De-duplicate.
    return list(set(words))
def categories_percent(s, categories):
    '''Return the percentage of characters in the given Unicode categories.

    >>> categories_percent("qqq ggg hhh", ["Po"])
    0.0
    >>> categories_percent("q,w.", ["Po"])
    50.0
    >>> categories_percent("qqq ggg hhh", ["Nd"])
    0.0
    >>> categories_percent("q5", ["Nd"])
    50.0
    >>> categories_percent("s.s,5s", ["Po", "Nd"])
    50.0
    '''
    s = to_unicode(s, precise=True)
    matching = sum(1 for c in s if unicodedata.category(c) in categories)
    # Guard against division by zero on the empty string.
    return 100 * float(matching) / len(s) if len(s) else 0
def punctuation_percent(s):
    '''Returns punctuation percent.

    Percentage of characters in Unicode category "Po" (other punctuation).

    >>> punctuation_percent("qqq ggg hhh")
    0.0
    >>> punctuation_percent("q,w.")
    50.0
    '''
    return categories_percent(s, ['Po'])
def capitalized_words_percent(s):
    '''Return the percentage of capitalized words among the valid ones.

    A word is "valid" unless it starts with a digit, "(" or "+".  The
    percentage is computed over valid words only, and 0 is returned for
    single-word or all-invalid input.
    '''
    s = to_unicode(s, precise=True)
    words = [w for w in re.split(r'\s', s) if w.strip()]
    valid = [w for w in words if not INVALID_WORD_START.match(w)]
    capitalized = sum(1 for w in valid if w[0].isupper())
    if valid and len(words) > 1:
        return 100 * float(capitalized) / len(valid)
    return 0
def many_capitalized_words(s):
    """Return 1 if more than 66% of the words in ``s`` are capitalized,
    0 otherwise.

    (The previous docstring claimed this returned a function and used a
    65% threshold; the code returns an int and compares against 66.)
    """
    return 1 if capitalized_words_percent(s) > 66 else 0
def has_signature(body, sender):
    '''Check whether ``body`` ends with a signature.

    Scans the last SIGNATURE_MAX_LINES non-empty lines: a line that
    matches the sender's name is a strong signal (returns immediately);
    a short line containing exactly one of phone/e-mail/URL upvotes the
    candidate, and more than one upvote also counts as a signature.

    Returns True or False.
    '''
    non_empty = [line for line in body.splitlines() if line.strip()]
    candidate = non_empty[-SIGNATURE_MAX_LINES:]
    upvotes = 0
    # Hoisted out of the loop: the original rebuilt (and recompiled) the
    # sender-name feature for every candidate line.
    sender_feature = contains_sender_names(sender)
    for line in candidate:
        # Signature lines are short; skip anything longer than 27 chars.
        if len(line.strip()) > 27:
            continue
        elif sender_feature(line):
            return True
        elif (binary_regex_search(RE_RELAX_PHONE)(line) +
              binary_regex_search(RE_EMAIL)(line) +
              binary_regex_search(RE_URL)(line) == 1):
            upvotes += 1
        if upvotes > 1:
            return True
    # Bug fix: the original fell through returning None; the documented
    # contract is a boolean (None was merely falsy by accident).
    return False
| {
"content_hash": "14fffaafb7fcaa7f523e3cf1123a8354",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 30.22421524663677,
"alnum_prop": 0.5948071216617211,
"repo_name": "tgwizard/talon",
"id": "7085a7454a171f9563ad310c6d3750be513484fa",
"size": "6765",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "talon/signature/learning/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99429"
}
],
"symlink_target": ""
} |
import pytest
import os
from pelops.datasets.str import get_sa_cam_id
from pelops.datasets.str import get_sa_car_id
from pelops.datasets.str import int_from_string
from pelops.datasets.str import StrDataset
@pytest.fixture
def str_sa(tmpdir):
    """ Set up some test files and an instance of StrDataset(). """
    # Write a file to read back
    FILE_NAMES = (
        # filepath, car_id, cam_id, time, misc
        ("match00001_cam02.png", 1, 2, None, None),
        ("match00001_cam01_mask.png", None, None, None, None),
        ("match00010_cam01.png", 10, 1, None, None),
        ("match00011_cam02_mask.png", None, None, None, None)
    )
    # The contents of the files do not matter, the name is enough
    internal_dir = tmpdir.mkdir("crossCameraMatches")
    for name, _, _, _, _ in FILE_NAMES:
        out_file = internal_dir.join(name)
        out_file.write("TEST")
    # Setup the class
    # NOTE(review): relies on ``out_file`` surviving the loop above, i.e.
    # FILE_NAMES being non-empty.
    instantiated_class = StrDataset(os.path.dirname(out_file.dirname))
    # Rename filepath
    # Rebuild the tuples with absolute paths, since chips are keyed on them.
    FILE_NAMES = (
        (os.path.join(out_file.dirname, "match00001_cam02.png"), 1, 2, None, None),
        (os.path.join(out_file.dirname, "match00001_cam01_mask.png"), None, None, None, None),
        (os.path.join(out_file.dirname, "match00010_cam01.png"), 10, 1, None, None),
        (os.path.join(out_file.dirname, "match00011_cam02_mask.png"), None, None, None, None)
    )
    # Filter out the files that were not read (mask files have no car_id).
    RET_FILE_NAMES = tuple(t for t in FILE_NAMES if t[1] is not None)
    return (instantiated_class, RET_FILE_NAMES)
def test_str_sa_chips_len(str_sa):
    """StrDataset.chips is non-empty and matches the fixture size."""
    dataset, file_names = str_sa
    # chips must exist and hold exactly one entry per fixture record
    assert file_names
    assert len(dataset.chips) == len(file_names)
def test_str_sa_chips_vals(str_sa):
    """Chips parsed from filenames carry the expected field values."""
    dataset, file_names = str_sa
    for filepath, car_id, cam_id, _time, _misc in file_names:
        chip = dataset.chips[filepath]
        assert chip.car_id == car_id
        assert chip.cam_id == cam_id
        # The STR dataset carries neither timestamps nor misc data.
        assert chip.time is None
        assert chip.misc is None
        # The filepath itself must be populated.
        assert chip.filepath
def test_get_all_chips_by_car_id(str_sa):
    """get_all_chips_by_car_id() agrees with a manual scan over chips."""
    dataset, file_names = str_sa
    # One check per distinct car id in the fixture.
    for car_id in {record[1] for record in file_names}:
        expected = sorted(chip for chip in dataset.chips.values()
                          if chip.car_id == car_id)
        actual = sorted(dataset.get_all_chips_by_car_id(car_id))
        assert expected == actual
def test_get_all_chips_by_cam_id(str_sa):
    """get_all_chips_by_cam_id() agrees with a manual scan over chips."""
    dataset, file_names = str_sa
    # One check per distinct camera id in the fixture.
    for cam_id in {record[2] for record in file_names}:
        expected = sorted(chip for chip in dataset.chips.values()
                          if chip.cam_id == cam_id)
        actual = sorted(dataset.get_all_chips_by_cam_id(cam_id))
        assert expected == actual
def test_get_distinct_cams_by_car_id(str_sa):
    """Car 1 appears on exactly the expected cameras.

    Fix: the original zip()-based comparison silently passed when the
    dataset returned extra or missing cameras; compare whole lists.
    """
    dataset = str_sa[0]
    assert sorted(dataset.get_distinct_cams_by_car_id(1)) == [2]
def test_get_all_cam_ids(str_sa):
    """get_all_cam_ids() returns exactly the cameras seen in the fixture.

    Fix: the original zip()-based comparison silently passed when the
    dataset returned extra or missing ids; compare whole lists.
    """
    dataset = str_sa[0]
    assert sorted(dataset.get_all_cam_ids()) == [1, 2]
def test_get_all_car_ids(str_sa):
    """get_all_car_ids() returns exactly the cars seen in the fixture.

    Fix: the original zip()-based comparison silently passed when the
    dataset returned extra or missing ids; compare whole lists.
    """
    dataset = str_sa[0]
    assert sorted(dataset.get_all_car_ids()) == [1, 10]
def test_str_sa_iter(str_sa):
    """Iterating the dataset yields chips keyed by known filepaths."""
    dataset, file_names = str_sa
    known_paths = {record[0] for record in file_names}
    for chip in dataset:
        assert chip.filepath in known_paths
def test_int_from_string():
    """int_from_string() extracts a fixed-width int after a prefix."""
    cases = (
        # string, prefix, length, expected
        ("test_010_test", "test_", 3, 10),
        ("test_010_test", "FAIL_", 3, None),
        ("test_010", "test_", 3, 10),
        ("test_11_test", "test_", 2, 11),
        ("010_test", "", 3, 10),
        ("/foo/bar/bass/test_/test_010_test", "test_", 3, 10),
    )
    for string, prefix, length, expected in cases:
        assert int_from_string(string, prefix, length) == expected
def test_get_sa_cam_id():
    """get_sa_cam_id() parses the camera number out of a filename."""
    cases = {
        "match00001_cam02.png": 2,
        "match00001_cam01_mask.png": 1,
        "match00010_cam01.png": 1,
        "match00011_cam02_mask.png": 2,
    }
    for filename, expected in cases.items():
        assert get_sa_cam_id(filename) == expected
def test_get_sa_car_id():
    """get_sa_car_id() parses the car (match) number out of a filename."""
    cases = {
        "match00001_cam02.png": 1,
        "match00001_cam01_mask.png": 1,
        "match00010_cam01.png": 10,
        "match00011_cam02_mask.png": 11,
    }
    for filename, expected in cases.items():
        assert get_sa_car_id(filename) == expected
| {
"content_hash": "ede6a0bdf210addc51b1718350eb9d9a",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 104,
"avg_line_length": 33.898395721925134,
"alnum_prop": 0.5898406688752169,
"repo_name": "dave-lab41/pelops",
"id": "b5212715bcb7ca23c91cddd0b1659489400bc765",
"size": "6339",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testci/test_str.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "684624"
},
{
"name": "Makefile",
"bytes": "854"
},
{
"name": "Python",
"bytes": "255707"
},
{
"name": "Shell",
"bytes": "5739"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from BRWRY_bootstrap.forms import BRWRYForm, BRWRYTest, HardwareForm, InstructionForm
from BRWRY_bootstrap.models import Hardware, Instruction
from BRWRY_django.BRWRY_bootstrap.forms import BRWRYModelForm, BRWRYInlineForm, WidgetsForm
from BRWRY_django.BRWRY_bootstrap.updateInstruction import updateInstruction, updateName, removeBrw, updateList
def BRWRY_configure(request):
    """Render the hardware configuration page and handle form submission.

    On POST, binds the submitted data to the single Hardware record
    (pk=1) and saves it when valid; otherwise shows the record's form.
    Fix: removed the dead ``form = HardwareForm()`` assignment that was
    immediately overwritten.
    """
    hardware = Hardware.objects.get(pk=1)
    if request.method == 'POST':
        form = HardwareForm(request.POST, instance=hardware)
        if form.is_valid():
            form.save()
    else:
        form = HardwareForm(instance=hardware)
    return render_to_response('configure.html', RequestContext(request, {
        'form': form,
        'hardware': hardware,
    }))
def BRWRY_index(request):
    """Main brew page: dispatch GET actions and save instruction edits.

    GET parameters trigger side actions (``name`` renames the brew,
    ``stop`` removes it, ``list`` refreshes the brew list).  POST binds
    the instruction form to the singleton Instruction record and, when
    valid, persists it and pushes the update to the controller.
    Fix: removed the dead ``form = InstructionForm()`` assignments that
    were immediately overwritten.
    """
    hardware = Hardware.objects.get(pk=1)
    instructions = Instruction.objects.get(pk=1)
    brw_name = request.GET.get('name')
    if brw_name is not None:
        updateName(brw_name)
    if request.GET.get('stop') is not None:
        removeBrw()
    if request.GET.get('list') is not None:
        updateList()
    if request.method == 'POST':
        form = InstructionForm(request.POST, instance=instructions)
        if form.is_valid():
            form.save()
            updateInstruction(request.POST.copy())
    else:
        form = InstructionForm(instance=instructions)
    return render_to_response('index.html', RequestContext(request, {
        'form': form,
        'hardware': hardware,
        'instructions': instructions,
    }))
def BRWRY_history(request):
    """Render the brew history page for the file named in the query string."""
    return render_to_response('history.html', RequestContext(request, {
        'file': request.GET.get('file'),
    }))
def BRWRY_form_with_template(request):
    """Render the demo form using the template-driven layout page.

    Fix: removed the unused ``modelform = BRWRYModelForm()`` local — it
    was instantiated but never passed to the template.
    """
    layout = request.GET.get('layout')
    if not layout:
        layout = 'vertical'
    if request.method == 'POST':
        form = BRWRYForm(request.POST)
        # Trigger validation so errors render in the template.
        form.is_valid()
    else:
        form = BRWRYForm()
    return render_to_response('form_using_template.html', RequestContext(request, {
        'form': form,
        'layout': layout,
    }))
def BRWRY_form(request):
    """Render the demo form, bound to POST data when submitted."""
    # Missing or empty ?layout falls back to 'vertical'.
    layout = request.GET.get('layout') or 'vertical'
    if request.method == 'POST':
        form = BRWRYForm(request.POST)
        # Trigger validation so errors render in the template.
        form.is_valid()
    else:
        form = BRWRYForm()
    return render_to_response('form.html', RequestContext(request, {
        'form': form,
        'layout': layout,
        'test': 'This is a test',
    }))
def BRWRY_form_inline(request):
    """Render the inline form demo; layout is 'search' or 'inline'."""
    requested = request.GET.get('layout', '')
    layout = 'search' if requested == 'search' else 'inline'
    return render_to_response('form_inline.html', RequestContext(request, {
        'form': BRWRYInlineForm(),
        'layout': layout,
    }))
def BRWRY_tabs(request):
    """Render the tabs demo with two placeholder tabs."""
    layout = request.GET.get('layout') or 'tabs'
    tabs = [
        {'link': "#", 'title': 'Tab 1'},
        {'link': "#", 'title': 'Tab 2'},
    ]
    return render_to_response('tabs.html', RequestContext(request, {
        'tabs': tabs,
        'layout': layout,
    }))
def BRWRY_widgets(request):
layout = request.GET.get('layout', 'vertical')
form = WidgetsForm()
return render_to_response('form.html', RequestContext(request, {
'form': form,
'layout': layout,
})) | {
"content_hash": "b9de48605c36b4d181c6a3169fe14cd5",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 111,
"avg_line_length": 30.612903225806452,
"alnum_prop": 0.6101159114857745,
"repo_name": "oehokie/BRWRY-old",
"id": "896f1a36bc2756459ff1fc5b2808f9c9584e0dd0",
"size": "3796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BRWRY_django/BRWRY_bootstrap/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "7673"
},
{
"name": "Python",
"bytes": "36144"
},
{
"name": "Shell",
"bytes": "478"
}
],
"symlink_target": ""
} |
import os
import traceback

filename = 'file.txt'
try:
    # ``with`` guarantees the file is closed, replacing the manual
    # try/finally close.
    with open(filename, 'r') as f:
        print(f.read())
except (os.error, IOError) as ex:
    # Fix: traceback.print_exc() takes an optional *limit* int, not the
    # exception object — passing ``ex`` was a bug.  Inside an except
    # block, print_exc() already prints the active exception.
    traceback.print_exc()
| {
"content_hash": "b76c8eaeaafa4049ab2f8276371835ea",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 33,
"avg_line_length": 17.416666666666668,
"alnum_prop": 0.5933014354066986,
"repo_name": "janusnic/21v-python",
"id": "27425fc505e313be83b1716af1fbf769b37dabee",
"size": "232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_06/7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
} |
import logging
from cliff import command
class DeployPlugin(command.Command):
    """Overcloud deployment plugin."""

    auth_required = False
    # Fix: the logger was named ".BuildPlugin" (and the docstring said
    # "Image Build") — copy/paste leftovers from the build plugin.
    log = logging.getLogger(__name__ + ".DeployPlugin")

    def get_parser(self, prog_name):
        """Return the argument parser for this command."""
        return super(DeployPlugin, self).get_parser(prog_name)

    def take_action(self, parsed_args):
        """Log the invocation; deployment logic is not implemented yet."""
        self.log.debug("take_action(%s)" % parsed_args)
| {
"content_hash": "9418994f7f89f11c0f0d2c80431fe0f1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 24.941176470588236,
"alnum_prop": 0.6650943396226415,
"repo_name": "bcrochet/python-rdomanager-oscplugin",
"id": "5e001d98e519538d771c191a20a8780140fb183e",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdomanager_oscplugin/v1/overcloud_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44497"
}
],
"symlink_target": ""
} |
'''
Created on 1-Apr-2015
@author: Asawari.Vaidya
'''
# CGI sample: create a card-payment authorization via the NetBanx SDK.
import cgi
import cgitb
from PythonNetBanxSDK.CardPayments.Authorization import Authorization
from PythonNetBanxSDK.CardPayments.BillingDetails import BillingDetails
from PythonNetBanxSDK.CardPayments.Card import Card
from PythonNetBanxSDK.CardPayments.CardExpiry import CardExpiry
from PythonNetBanxSDK.OptimalApiClient import OptimalApiClient
from Config import Config
from RandomTokenGenerator import RandomTokenGenerator
# Show tracebacks in the browser (debugging aid; not for production).
cgitb.enable()
#from sample_application.RandomTokenGenerator import RandomTokenGenerator
#from sample_application.Config import Config
# Card number comes from the submitted HTML form.
form = cgi.FieldStorage()
card_num = form.getvalue('cardNumber')
optimal_obj = OptimalApiClient(Config.api_key, Config.api_password, Config.environment, Config.account_number)
# Build the authorization request object graph.
auth_obj = Authorization(None)
card_obj = Card(None)
cardExpiry_obj = CardExpiry(None)
billing_obj = BillingDetails(None)
# Unique merchant reference; amount is in minor units per the SDK docs.
auth_obj.merchantRefNum(RandomTokenGenerator().generateToken())
auth_obj.amount("100")
auth_obj.settleWithAuth("false")
#card_obj.cardNum("4530910000012345")
card_obj.cardNum(card_num)
card_obj.cvv("123")
auth_obj.card(card_obj)
cardExpiry_obj.month("2")
cardExpiry_obj.year("2017")
card_obj.cardExpiry(cardExpiry_obj)
# Hard-coded sample billing address.
billing_obj.street("Carlos Pellegrini 551")
billing_obj.city("Buenos Aires")
billing_obj.state("Zulia")
billing_obj.country("AR")
billing_obj.zip("C1009ABK")
auth_obj.billingDetails(billing_obj)
response_object = optimal_obj.card_payments_service_handler().create_authorization(auth_obj)
# Emit a minimal HTML page with the raw response.
print ('Content-Type: text/html')
print ()
print ('<html>')
print ('<head><title>Card Payments - Create Authorization</title></head>')
print ('<body>')
print (response_object.__dict__)
# NOTE(review): this assumes the response always has an ``error``
# attribute — presumably it raises AttributeError on success; confirm.
print (response_object.error.__dict__)
print ('</body></html>')
| {
"content_hash": "14cf0e96a29e7ad46a93ae2791651f1b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 110,
"avg_line_length": 28.838709677419356,
"alnum_prop": 0.7885906040268457,
"repo_name": "OptimalPayments/Python_SDK",
"id": "8b6bb1c78d73401e029bbbd23a2e0a73887e8949",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sample_application/CardPayment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "334349"
}
],
"symlink_target": ""
} |
from file_operation import read_all_from_file
from git_ignore_add import git_ignore_add_ignorelist
# git ignore which
def git_ignore_which(languages):
if len(languages)==0:
return
ignores = git_ignore_add_ignorelist(languages)
if len(ignores)==0:
print "no available git ignore file"
return
for ignore in ignores:
print ignore | {
"content_hash": "2d8f3c714ebb8bddd0c8bd91680dd51b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 22.733333333333334,
"alnum_prop": 0.7565982404692082,
"repo_name": "imwithye/git-ignore",
"id": "ba6d0004aac52fa6b74e2050b103295af70695e3",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git-ignore/git_ignore_which.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9117"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
    # --- Device status ---
    url(r'^votecollector/device/$',
        views.DeviceStatus.as_view(),
        name='votecollector_device'),
    # --- Start voting / elections / speaker list / ping ---
    url(r'^votecollector/start_voting/(?P<id>\d+)/$',
        views.StartYNA.as_view(), {
            'app': 'motions',
            'model': 'MotionPoll',
            'mode': 'YesNoAbstain',
            'resource': '/vote/'
        },
        name='votecollector_start_voting'),
    url(r'^votecollector/start_election/(?P<id>\d+)/(?P<options>\d+)/$',
        views.StartElection.as_view(), {
            'app': 'assignments',
            'model': 'AssignmentPoll',
            'mode': 'SingleDigit',
            'resource': '/candidate/'
        },
        name='votecollector_start_election'),
    url(r'^votecollector/start_election_one/(?P<id>\d+)/$',
        views.StartYNA.as_view(), {
            'app': 'assignments',
            'model': 'AssignmentPoll',
            'mode': 'YesNoAbstain',
            'resource': '/vote/'
        },
        name='votecollector_start_election_one'),
    url(r'^votecollector/start_speaker_list/(?P<id>\d+)/$',
        views.StartSpeakerList.as_view(), {
            'app': 'agenda',
            'model': 'Item',
            'mode': 'SpeakerList',
            'resource': '/speaker/'
        },
        name='votecollector_start_speaker_list'),
    url(r'^votecollector/start_ping/$',
        views.StartPing.as_view(), {
            'mode': 'Ping',
            'resource': '/keypad/'
        },
        name='votecollector_start_ping'),
    # --- Stop / clear ---
    url(r'^votecollector/stop/$',
        views.StopVoting.as_view(),
        name='votecollector_stop'),
    url(r'^votecollector/clear_voting/(?P<id>\d+)/$',
        views.ClearVotes.as_view(), {
            'app': 'motions',
            'model': 'MotionPoll'
        },
        name='votecollector_clear_voting'),
    url(r'^votecollector/clear_election/(?P<id>\d+)/$',
        views.ClearVotes.as_view(), {
            'app': 'assignments',
            'model': 'AssignmentPoll'
        },
        name='votecollector_clear_election'),
    # --- Status and results ---
    url(r'^votecollector/status/$',
        views.VotingStatus.as_view(),
        name='votecollector_status'),
    url(r'^votecollector/result_voting/(?P<id>\d+)/$',
        views.VotingResult.as_view(), {
            'app': 'motions',
            'model': 'MotionPoll'
        },
        name='votecollector_result_yna'),
    url(r'^votecollector/result_election/(?P<id>\d+)/$',
        views.VotingResult.as_view(), {
            'app': 'assignments',
            'model': 'AssignmentPoll'
        },
        name='votecollector_result_election'),
    # --- Callback endpoints hit by the VoteCollector device (no CSRF) ---
    url(r'^votecollector/vote/(?P<poll_id>\d+)/$',
        csrf_exempt(views.Votes.as_view()),
        name='votecollector_votes'),
    url(r'^votecollector/vote/(?P<poll_id>\d+)/(?P<keypad_id>\d+)/$',
        csrf_exempt(views.VoteCallback.as_view()),
        name='votecollector_vote'),
    url(r'^votecollector/candidate/(?P<poll_id>\d+)/$',
        csrf_exempt(views.Candidates.as_view()),
        name='votecollector_candidates'),
    url(r'^votecollector/candidate/(?P<poll_id>\d+)/(?P<keypad_id>\d+)/$',
        csrf_exempt(views.CandidateCallback.as_view()),
        name='votecollector_candidate'),
    url(r'^votecollector/speaker/(?P<item_id>\d+)/(?P<keypad_id>\d+)/$',
        csrf_exempt(views.SpeakerCallback.as_view()),
        name='votecollector_speaker'),
    url(r'^votecollector/keypad/$',
        csrf_exempt(views.Keypads.as_view()),
        name='votecollector_keypads'),
    url(r'^votecollector/keypad/(?P<keypad_id>\d+)/$',
        csrf_exempt(views.KeypadCallback.as_view()),
        name='votecollector_keypad'),
]
| {
"content_hash": "f37c509c77e949ff0fc6211699ef5634",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 74,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.549867374005305,
"repo_name": "jwinzer/openslides-votecollector",
"id": "36d09b6c54a11dd531253d9b1f33678e4cf43931",
"size": "3770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openslides_votecollector/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34258"
},
{
"name": "JavaScript",
"bytes": "73424"
},
{
"name": "Python",
"bytes": "74940"
}
],
"symlink_target": ""
} |
"""
This module contains the loss classes.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Author: Nicolas Hug
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import expit, logsumexp, xlogy
from .common import Y_DTYPE
from .common import G_H_DTYPE
from ._loss import _update_gradients_least_squares
from ._loss import _update_gradients_hessians_least_squares
from ._loss import _update_gradients_least_absolute_deviation
from ._loss import _update_gradients_hessians_least_absolute_deviation
from ._loss import _update_gradients_hessians_binary_crossentropy
from ._loss import _update_gradients_hessians_categorical_crossentropy
from ._loss import _update_gradients_hessians_poisson
from ...utils.stats import _weighted_percentile
class BaseLoss(ABC):
    """Abstract base class shared by all gradient-boosting losses."""

    # Set to True by subclasses whose leaf values must be post-processed
    # after the tree is trained. Trees predict a Newton-Raphson step (see
    # grower._finalize_leaf()), but some losses (e.g. least absolute
    # deviation) need a "line search" adjustment of the leaf values; see
    # Friedman, Greedy Function Approximation: A Gradient Boosting Machine
    # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf).
    need_update_leaves_values = False

    def __init__(self, hessians_are_constant):
        self.hessians_are_constant = hessians_are_constant

    def __call__(self, y_true, raw_predictions, sample_weight):
        """Return the weighted average loss over all samples."""
        per_sample = self.pointwise_loss(y_true, raw_predictions)
        return np.average(per_sample, weights=sample_weight)

    @abstractmethod
    def pointwise_loss(self, y_true, raw_predictions):
        """Return the loss value of each individual input sample."""

    def init_gradients_and_hessians(self, n_samples, prediction_dim,
                                    sample_weight):
        """Allocate the gradients and hessians arrays.

        Unless the hessians are constant, the arrays are allocated
        uninitialized.

        Parameters
        ----------
        n_samples : int
            The number of samples passed to `fit()`.
        prediction_dim : int
            The dimension of a raw prediction, i.e. the number of trees
            built at each iteration: 1 for regression and binary
            classification, K (number of classes) for multiclass.
        sample_weight : array-like of shape(n_samples,) default=None
            Weights of training data.

        Returns
        -------
        gradients : ndarray, shape (prediction_dim, n_samples)
            Uninitialized gradients array.
        hessians : ndarray, shape (prediction_dim, n_samples) or (1, 1)
            A (1, 1) array of ones when hessians are constant; otherwise
            an uninitialized array of the full shape.
        """
        full_shape = (prediction_dim, n_samples)
        gradients = np.empty(shape=full_shape, dtype=G_H_DTYPE)
        if not self.hessians_are_constant:
            hessians = np.empty(shape=full_shape, dtype=G_H_DTYPE)
        else:
            # Constant hessians are represented by a single 1. This is
            # correct for the half least-squares loss; for LAD the
            # hessians are actually 0, but they are always ignored anyway.
            hessians = np.ones(shape=(1, 1), dtype=G_H_DTYPE)
        return gradients, hessians

    @abstractmethod
    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        """Return the initial predictions (before the first iteration).

        Parameters
        ----------
        y_train : ndarray, shape (n_samples,)
            The target training values.
        sample_weight : array-like of shape(n_samples,) default=None
            Weights of training data.
        prediction_dim : int
            The dimension of one prediction: 1 for binary classification
            and regression, n_classes for multiclass classification.

        Returns
        -------
        baseline_prediction : float or ndarray, shape (1, prediction_dim)
            The baseline prediction.
        """

    @abstractmethod
    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        """Update the gradients and hessians arrays in place.

        Gradients (resp. hessians) are the first (resp. second) order
        derivatives of the loss with respect to the model predictions,
        evaluated at iteration ``i - 1``.

        Parameters
        ----------
        gradients : ndarray, shape (prediction_dim, n_samples)
            The gradients (treated as OUT array).
        hessians : ndarray, shape (prediction_dim, n_samples) or (1,)
            The hessians (treated as OUT array).
        y_true : ndarray, shape (n_samples,)
            The true target values of each training sample.
        raw_predictions : ndarray, shape (prediction_dim, n_samples)
            The raw predictions (tree-ensemble values) at iteration
            ``i - 1``.
        sample_weight : array-like of shape(n_samples,) default=None
            Weights of training data.
        """
class LeastSquares(BaseLoss):
    """Half least-squares loss, for regression.

    For a given sample x_i::

        loss(x_i) = 0.5 * (y_true_i - raw_pred_i)**2

    The 1/2 factor simplifies the gradients and yields a unit hessian
    (consistent with LightGBM).
    """

    def __init__(self, sample_weight):
        # With sample weights, gradients and hessians get multiplied by
        # the weights, so the hessians equal the weights and are only
        # constant when no weights are given.
        super().__init__(hessians_are_constant=sample_weight is None)

    def pointwise_loss(self, y_true, raw_predictions):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        residual = y_true - raw_predictions.reshape(-1)
        return 0.5 * residual ** 2

    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        # Start from the (weighted) mean target.
        return np.average(y_train, weights=sample_weight)

    @staticmethod
    def inverse_link_function(raw_predictions):
        # Identity link.
        return raw_predictions

    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        flat_preds = raw_predictions.reshape(-1)
        flat_grads = gradients.reshape(-1)
        if sample_weight is not None:
            _update_gradients_hessians_least_squares(
                flat_grads, hessians.reshape(-1), y_true, flat_preds,
                sample_weight)
        else:
            _update_gradients_least_squares(flat_grads, y_true, flat_preds)
class LeastAbsoluteDeviation(BaseLoss):
    """Least absolute deviation (L1) loss, for regression.

    For a given sample x_i::

        loss(x_i) = |y_true_i - raw_pred_i|
    """

    # Trees predict a Newton-Raphson step (see grower._finalize_leaf()),
    # but for LAD the leaf values must be re-adjusted after fitting to
    # perform the "line search" of the gradient-descent procedure; see
    # Friedman, Greedy Function Approximation: A Gradient Boosting Machine
    # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf).
    need_update_leaves_values = True

    def __init__(self, sample_weight):
        # With sample weights, gradients and hessians get multiplied by
        # the weights, so the hessians equal the weights and are only
        # constant when no weights are given.
        super().__init__(hessians_are_constant=sample_weight is None)

    def pointwise_loss(self, y_true, raw_predictions):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        return np.abs(y_true - raw_predictions.reshape(-1))

    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        # Start from the (weighted) median target.
        if sample_weight is not None:
            return _weighted_percentile(y_train, sample_weight, 50)
        return np.median(y_train)

    @staticmethod
    def inverse_link_function(raw_predictions):
        # Identity link.
        return raw_predictions

    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        flat_preds = raw_predictions.reshape(-1)
        flat_grads = gradients.reshape(-1)
        if sample_weight is not None:
            _update_gradients_hessians_least_absolute_deviation(
                flat_grads, hessians.reshape(-1), y_true, flat_preds,
                sample_weight)
        else:
            _update_gradients_least_absolute_deviation(
                flat_grads, y_true, flat_preds)

    def update_leaves_values(self, grower, y_true, raw_predictions,
                             sample_weight):
        # Set each leaf value to shrinkage * median(y_true - raw_predictions)
        # over the samples in that leaf; see need_update_leaves_values in
        # BaseLoss. Regularization is intentionally ignored here.
        # TODO: ideally this should be computed in parallel over the
        # leaves, similarly to _update_raw_predictions(), but that needs
        # a cython version of median().
        for leaf in grower.finalized_leaves:
            idx = leaf.sample_indices
            residuals = y_true[idx] - raw_predictions[idx]
            if sample_weight is None:
                median_res = np.median(residuals)
            else:
                median_res = _weighted_percentile(
                    residuals,
                    sample_weight=sample_weight[idx],
                    percentile=50)
            leaf.value = grower.shrinkage * median_res
class Poisson(BaseLoss):
    """Half Poisson deviance loss with log-link, for regression.

    For a given sample x_i::

        loss(x_i) = y_true_i * log(y_true_i/exp(raw_pred_i))
                    - y_true_i + exp(raw_pred_i)

    Half of the Poisson deviance, to simplify the gradients.
    """

    inverse_link_function = staticmethod(np.exp)

    def __init__(self, sample_weight):
        # Hessians depend on the predictions, so they are never constant.
        super().__init__(hessians_are_constant=False)

    def pointwise_loss(self, y_true, raw_predictions):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        preds = raw_predictions.reshape(-1)
        # The constant xlogy(y_true, y_true) term could be dropped for
        # speed; keeping it makes the minimum zero at preds == y_true.
        return (xlogy(y_true, y_true) - y_true * (preds + 1)
                + np.exp(preds))

    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        mean_target = np.average(y_train, weights=sample_weight)
        # Clip away from 0 so the log stays finite.
        tiny = np.finfo(y_train.dtype).eps
        return np.log(np.clip(mean_target, tiny, None))

    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        _update_gradients_hessians_poisson(
            gradients.reshape(-1), hessians.reshape(-1), y_true,
            raw_predictions.reshape(-1), sample_weight)
class BinaryCrossEntropy(BaseLoss):
    """Binary cross-entropy (logistic) loss, for binary classification.

    For a given sample x_i, the negative log-likelihood of the model::

        loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i

    See The Elements of Statistical Learning (Hastie, Tibshirani,
    Friedman), section 4.4.1 on logistic regression.
    """

    inverse_link_function = staticmethod(expit)

    def __init__(self, sample_weight):
        # Hessians depend on the predictions, so they are never constant.
        super().__init__(hessians_are_constant=False)

    def pointwise_loss(self, y_true, raw_predictions):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        preds = raw_predictions.reshape(-1)
        # np.logaddexp(0, x) computes log(1 + exp(x)) stably.
        return np.logaddexp(0, preds) - y_true * preds

    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        if prediction_dim > 2:
            raise ValueError(
                "loss='binary_crossentropy' is not defined for multiclass"
                " classification with n_classes=%d, use"
                " loss='categorical_crossentropy' instead" % prediction_dim)
        proba_positive_class = np.average(y_train, weights=sample_weight)
        tiny = np.finfo(y_train.dtype).eps
        proba_positive_class = np.clip(proba_positive_class, tiny, 1 - tiny)
        # Return the logit (inverse of the sigmoid): the link function of
        # the Binomial model.
        return np.log(proba_positive_class / (1 - proba_positive_class))

    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        _update_gradients_hessians_binary_crossentropy(
            gradients.reshape(-1), hessians.reshape(-1), y_true,
            raw_predictions.reshape(-1), sample_weight)

    def predict_proba(self, raw_predictions):
        # Flatten (1, n_samples) to (n_samples,); reshape(-1) is more
        # likely to return a view.
        preds = raw_predictions.reshape(-1)
        proba = np.empty((preds.shape[0], 2), dtype=Y_DTYPE)
        proba[:, 1] = expit(preds)
        proba[:, 0] = 1 - proba[:, 1]
        return proba
class CategoricalCrossEntropy(BaseLoss):
    """Categorical cross-entropy loss, for multiclass classification.

    The negative log-likelihood of the model; generalizes binary
    cross-entropy to more than two classes.
    """

    def __init__(self, sample_weight):
        # Hessians depend on the predictions, so they are never constant.
        super().__init__(hessians_are_constant=False)

    def pointwise_loss(self, y_true, raw_predictions):
        n_classes = raw_predictions.shape[0]
        one_hot = np.zeros_like(raw_predictions)
        for klass in range(n_classes):
            one_hot[klass, :] = (y_true == klass)
        # log-sum-exp of the scores minus the score of the true class.
        return (logsumexp(raw_predictions, axis=0)
                - (one_hot * raw_predictions).sum(axis=0))

    def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
        baseline = np.zeros(shape=(prediction_dim, 1), dtype=Y_DTYPE)
        tiny = np.finfo(y_train.dtype).eps
        for klass in range(prediction_dim):
            # (Weighted) empirical class frequency, clipped so the log
            # stays finite.
            proba_klass = np.average(y_train == klass, weights=sample_weight)
            baseline[klass, :] += np.log(np.clip(proba_klass, tiny, 1 - tiny))
        return baseline

    def update_gradients_and_hessians(self, gradients, hessians, y_true,
                                      raw_predictions, sample_weight):
        _update_gradients_hessians_categorical_crossentropy(
            gradients, hessians, y_true, raw_predictions, sample_weight)

    def predict_proba(self, raw_predictions):
        # Softmax computed as exp(log-softmax) for numerical stability.
        # TODO: this could be done in parallel.
        log_norm = logsumexp(raw_predictions, axis=0)[np.newaxis, :]
        return np.exp(raw_predictions - log_norm).T
# Registry mapping public `loss` parameter values to the implementing
# loss classes.
_LOSSES = {
    'squared_error': LeastSquares,
    'least_absolute_deviation': LeastAbsoluteDeviation,
    'binary_crossentropy': BinaryCrossEntropy,
    'categorical_crossentropy': CategoricalCrossEntropy,
    'poisson': Poisson,
}
| {
"content_hash": "04a3e1cf47f813198b9c402259fbc1ca",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 79,
"avg_line_length": 40.976580796252925,
"alnum_prop": 0.621592272961079,
"repo_name": "glemaitre/scikit-learn",
"id": "c336bd347e4cf963064008ce17d97e2ae37cc4fe",
"size": "17497",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/ensemble/_hist_gradient_boosting/loss.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "10011694"
},
{
"name": "Shell",
"bytes": "44168"
}
],
"symlink_target": ""
} |
import os
import sys
import datetime
from fabric.colors import red, green
from fabric.operations import local, prompt
# Project/app configuration shared by all tasks below.
PROJECT_NAME = 'ssheepdog'
APPS = ['ssheepdog']
# Space-separated app list passed to `manage.py test`.
TESTS = ' '.join(APPS)
# Comma-separated source list passed to `coverage run --source=...`.
COVERAGE_SOURCES = ','.join(APPS)
# Extra flags for `coverage report`.
# BUG FIX: the original string omitted the closing single quote of the
# --omit pattern, producing an unbalanced shell quotation.
COVERAGE_PARAMS = "--omit='*migrations*,*tests*'"
# Deployment targets: Heroku git remote and public URL per environment.
ENVS = {
    'dev': {
        'repo_url': 'git@heroku.com:sheltered-bastion-8737.git',
        'site_url': 'http://sheltered-bastion-8737.herokuapp.com'
    },
    'prod': {
        'repo_url': 'git@heroku.com:setsailforfail.git',
        'site_url': 'http://setsailforfail.herokuapp.com'
    },
}
def check_dest(fn):
    """
    Decorator that verifies whether kwarg 'dest' is present and is a valid
    environment name, prompting the user for one otherwise.
    """
    def validate_dest(value):
        # A destination is valid iff it is a configured environment name.
        if value not in ENVS:
            raise Exception('Invalid environment specified.')
        return value

    def wrapper(*args, **kwargs):
        if kwargs.get('dest'):
            validate_dest(kwargs['dest'])
        else:
            kwargs['dest'] = prompt(
                "Enter one of the following destinations [%s]:"
                % ', '.join(ENVS),
                validate=validate_dest)
        return fn(*args, **kwargs)
    return wrapper
##### Git remote repo management #####
def get_repos():
    """Return the names of the currently configured git remotes."""
    output = local("git remote", capture=True)
    return output.split("\n")
def _add_repo(repo, url, repos):
    """Register *repo* as a git remote at *url* unless already present."""
    if repo in repos:
        return
    local("git remote add %s %s" % (repo, url))
def _rm_repo(repo, url, repos):
    """Remove the git remote *repo* if it is currently configured."""
    if repo not in repos:
        return
    local("git remote rm %s" % repo)
def _reset_repo(repo, url, repos):
    # Re-create the remote from scratch: drop it if present, then add it
    # unconditionally (the empty list forces _add_repo to run).
    _rm_repo(repo, url, repos)
    _add_repo(repo, url, [])
def env_repos(action=None):
    """
    Perform an action ('add', 'reset' or 'rm') on each environment
    repository, prompting for the action when none is given.
    """
    handlers = {
        'add': _add_repo,
        'reset': _reset_repo,
        'rm': _rm_repo
    }

    def validate_action(value):
        if value not in handlers:
            raise Exception('Invalid action specified.')
        return value

    if not action:
        action = prompt(
            "Enter one of the following actions: <%s>" % ", ".join(handlers),
            validate=validate_action)
    else:
        validate_action(action)
    current = get_repos()
    # NOTE: iteritems() keeps this file Python-2 only, matching the rest
    # of the fabfile.
    for env_name, details in ENVS.iteritems():
        handlers[action](env_name, details['repo_url'], current)
def _get_local_branches():
    """Return local git branch names (the '* ' marker is kept)."""
    raw = local("git branch", capture=True)
    return [line.strip() for line in raw.split('\n')]
def _get_current_branch():
    """Return the checked-out branch name, or None when none is starred."""
    starred = [b for b in _get_local_branches() if b.startswith('* ')]
    if not starred:
        return None
    return starred[0].replace('* ', "")
@check_dest
def deploy(dest=''):
    """
    Deploy from current repo to respective environment
    """
    # Make sure the environment repos are registered as git remotes.
    env_repos('add')
    branch = _get_current_branch()
    if branch == "(no branch)":
        # Detached HEAD: push the current commit by its short hash instead.
        branch = local("git rev-parse --short HEAD", capture=True)
    # Push to the destination, then migrate and publish static files.
    local('git push %s %s:master --force' % (dest, branch))
    remote('syncdb --noinput', dest=dest)
    remote('migrate', dest=dest)
    deploy_static(dest=dest)
    check(dest=dest)
def run():
    """Start the local Django development server on all interfaces."""
    cmd = './manage.py runserver 0.0.0.0:8000'
    local(cmd)
##### Static file management #####
@check_dest
def deploy_static(dest=''):
    """
    Compress and upload static files to S3.
    """
    settings_flag = ' --settings=%s.settings.heroku.%s' % (PROJECT_NAME, dest)
    local('./manage.py collectstatic --noinput' + settings_flag)
    # Compression is currently disabled:
    # local('./manage.py compress --force' + settings_flag)
def _now():
return datetime.now().strftime('%Y%m%s-%H%M')
##### Database management #####
def reset_local_db():
    """Drop and re-create the local PostgreSQL database."""
    for command in ('dropdb %s', 'createdb %s'):
        local(command % PROJECT_NAME)
def try_migrations():
    """Rebuild the local DB, then run syncdb and South migrations."""
    reset_local_db()
    for cmd in ('./manage.py syncdb', './manage.py migrate'):
        local(cmd)
def try_clean():
    """Rebuild the local DB, then run a non-interactive syncdb + migrate."""
    reset_local_db()
    for cmd in ('./manage.py syncdb --noinput', './manage.py migrate'):
        local(cmd)
def reset_heroku_db():
    """Reset the Heroku PostgreSQL database."""
    cmd = 'heroku pg:reset'
    local(cmd)
def load_db():
    """
    Populate empty Heroku database via json fixture
    """
    # Interactive confirmation: this command is destructive (drops all
    # tables on the remote database).
    print red('This will drop all tables from the database.')
    print 'Please make sure you understand what this command does.'
    print 'Do you want to continue? [y/n]'
    answer = raw_input()
    if answer != 'y':
        print 'Aborting...'
        return
    # Run the full rebuild pipeline remotely in a single Heroku dyno so
    # the steps share one process/environment.
    commands = [
        './manage.py syncdb --noinput',
        './manage.py migrate',
        './manage.py droptables -y',
        './manage.py loaddata dump.json',
    ]
    local('heroku run "%s"' % '; '.join(commands))
def make_dump():
    """Dump the local database to a pretty-printed dump.json fixture."""
    cmd = './manage.py dumpdata | python -mjson.tool > dump.json'
    local(cmd)
##### Heroku specific helpers #####
@check_dest
def remote(cmd='', dest=''):
    """
    Run a manage.py command on Heroku using ``settings_heroku``
    Usage:
        $ fab remote:'sendtestemail adam@sheepdoginc.ca'
        $ fab remote:syncdb
    Or
        $ fab remote
        Command to run: syncdb
    """
    if not cmd:
        cmd = prompt('Command to run:')
    # NOTE: the trailing backslash continues the *string literal*, so the
    # shell command includes the next line's leading whitespace; harmless
    # between shell arguments.
    local("heroku run python manage.py %s \
            --settings=%s.settings.heroku.%s" % (cmd, PROJECT_NAME, dest))
##### Testing, coverage & site validation #####
def test():
    """
    Run unit tests for this Django Application
    """
    # Nothing to do when no apps are configured.
    if not APPS:
        return
    local('./manage.py test %s' % TESTS)
def coverage():
    """
    Generate Coverage report for this Django Application
    """
    if not APPS:
        return
    # BUG FIX: the format arguments must be a tuple applied with the %
    # operator; previously TESTS was passed as a second positional
    # argument to local(), raising a TypeError ("not enough arguments
    # for format string" / unexpected argument).
    local('coverage run --source=%s ./manage.py test %s'
          % (COVERAGE_SOURCES, TESTS))
    print('============================================')
    print('Coverage Results:')
    local('coverage report %s' % COVERAGE_PARAMS)
    local('rm .coverage')
@check_dest
def check(dest=''):
    """
    Check that the home page of the site returns an HTTP 200.
    """
    print('Checking site status...')
    headers = local('curl --silent -I "%s"' % ENVS[dest]['site_url'],
                    capture=True)
    if '200 OK' in headers:
        print(green('\nLooks good from here!\n'))
    else:
        print(red('\nSomething seems to have gone wrong!\n'))
##### Local utility tasks #####
def clean():
    """
    Remove all .pyc files
    """
    cmd = 'find . -name "*.pyc" -exec rm {} \;'
    local(cmd)
def debug():
    """
    Find files with debug symbols
    """
    clean()
    for needle in ("print", "console.log"):
        local('grep -ir "%s" *' % needle)
def todo():
    """
    Find all TODO and XXX
    """
    clean()
    for marker in ("TODO", "XXX"):
        local('grep -ir "%s" *' % marker)
def stats():
    """
    Show number of additions and deletions between 1.0 and now
    """
    diff_cmd = 'git diff 1.0..HEAD --shortstat'
    local(diff_cmd)
def freeze():
    """
    Generate a stable requirements.txt based on requirements.spec.txt.
    """
    pip_cmd = 'pip freeze -r requirements.spec.txt > requirements.txt'
    local(pip_cmd)
# Fabric tasks here use relative paths, so refuse to run unless the
# current working directory is the directory containing this fabfile.
try:
    assert os.getcwd() == os.path.dirname(os.path.abspath(__file__))
except AssertionError:
    print red('You must run this from the root of the project.')
    sys.exit(1)
| {
"content_hash": "74193bc5094db1bd2bcad5469f007bde",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 83,
"avg_line_length": 23.774086378737543,
"alnum_prop": 0.5824482951369481,
"repo_name": "SheepDogInc/ssheepdog",
"id": "255eff84a00e39b29c7f40f4e0fa3d4946b3eb79",
"size": "7156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1929"
},
{
"name": "Python",
"bytes": "140973"
},
{
"name": "Ruby",
"bytes": "1736"
},
{
"name": "Shell",
"bytes": "1610"
}
],
"symlink_target": ""
} |
import Sougou
import Ziguang
import Baidu
| {
"content_hash": "5eacb33c681041cf260049f92b58bd1a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 14,
"avg_line_length": 14,
"alnum_prop": 0.8571428571428571,
"repo_name": "zer4tul/RIME-Extend-Dict",
"id": "18eb6bc9b7033e14a2b6bd8760d188c3a0ef54d3",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/IME/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32834"
}
],
"symlink_target": ""
} |
from .NotifierClass import Notifier
import twitter
import re
class TVNotifier(Notifier):
    """Notifier that tweets morning game announcements airing on any of
    the TV channels listed in the section's ``tweetif`` option."""

    def __init__(self, cfgParser, insec):
        """Read Twitter OAuth credentials and the channel whitelist from
        section *insec* of *cfgParser*."""
        self.header = insec
        self.conskey = cfgParser.get(insec, "conskey").strip()
        self.conssecret = cfgParser.get(insec, "conssecret").strip()
        self.acctokenkey = cfgParser.get(insec, "acctokenkey").strip()
        self.acctokensecret = cfgParser.get(insec, "acctokensecret").strip()
        try:
            self.tweetif = cfgParser.get(insec, "tweetif").strip().split(",")
        except Exception:
            # "tweetif" option absent: no channels to tweet about.
            # (Narrowed from a bare except so KeyboardInterrupt etc. are
            # not swallowed.)
            self.tweetif = None

    def pushResults(self, newres):
        """Tweet each morning announcement whose TV listing includes at
        least one channel from ``self.tweetif``."""
        # Only morning announcements matter to this notifier.
        if not newres.get("morningAnnounce"):
            return
        # BUG FIX: guard against tweetif being None (option absent in the
        # config); previously `ch in self.tweetif` raised TypeError.
        if not self.tweetif:
            return
        toTweet = []
        for game in newres["morningAnnounce"]:
            lines = game.split("\n")
            gameOn = []
            for line in lines:
                if re.search("^TV: ", line):
                    channels = line.split("TV: ")[1].split(", ")
                    for ch in channels:
                        if ch in self.tweetif:
                            gameOn.append(ch)
            if gameOn:
                # First line of the announcement is the game headline.
                toTweet.append(lines[0] + " on " + ", ".join(gameOn))
        for msg in toTweet:
            self._tweet(msg)

    def _tweet(self, message):
        """Post *message* via the Twitter API using stored credentials."""
        api = twitter.Api(consumer_key=self.conskey,
                          consumer_secret=self.conssecret,
                          access_token_key=self.acctokenkey,
                          access_token_secret=self.acctokensecret)
        api.PostUpdate(message)
| {
"content_hash": "c42fefc3d5c29ccfb982178df0f09185",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 101,
"avg_line_length": 31.40909090909091,
"alnum_prop": 0.6714905933429812,
"repo_name": "joshcvt/natinal",
"id": "c2fd77a15aceb4bde2b02e71d5110bad7ab3a71f",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifiers/TVNotifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72104"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.operators.branch`."""
import warnings
# pylint: disable=unused-import
from airflow.operators.branch import BaseBranchOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.operators.branch`.", DeprecationWarning, stacklevel=2
)
| {
"content_hash": "b55dd201e77606555d7f69247808cfd3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 105,
"avg_line_length": 31.5,
"alnum_prop": 0.7714285714285715,
"repo_name": "sekikn/incubator-airflow",
"id": "218e26e77e8d9831a3e6ec206eca764c81c3ee1d",
"size": "1102",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/operators/branch_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import numpy as np
from matplotlib import colors
def rainbow(n):
    """Return an (n, 3) array of RGB colors evenly spaced in hue.

    Hues run from 0 (inclusive) to 1 (exclusive) with saturation and
    value fixed at 1, then the HSV triples are converted to RGB. Based
    on the Sage function of the same name.

    Parameters
    ----------
    n : int
        The number of colors to return.

    Returns
    -------
    R : (n,3) array
        Rows of RGB color values.

    Notes
    -----
    colorsys.hsv_to_rgb could alternatively be applied per color.
    """
    hsv = np.ones((1, n, 3))
    hsv[0, :, 0] = np.linspace(0, 1, n, endpoint=False)
    return colors.hsv_to_rgb(hsv).squeeze()
| {
"content_hash": "974cc807e15968c152aca7ba8bfa8c83",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.5904761904761905,
"repo_name": "pprett/statsmodels",
"id": "615cc4609fdb33faf7e31b9891e444bd6c4837fa",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/graphics/plottools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "JavaScript",
"bytes": "11143"
},
{
"name": "Python",
"bytes": "4135946"
},
{
"name": "R",
"bytes": "5412"
}
],
"symlink_target": ""
} |
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import os
import sys
import code
import logging
import types
import re
import optparse
import glob
import traceback
import fileutils
import settings
from utils import web2py_uuid
from compileapp import build_environment, read_pyc, run_models_in
from restricted import RestrictedError
from globals import Request, Response, Session
from storage import Storage
from admin import w2p_unpack
from dal import BaseAdapter
logger = logging.getLogger("web2py")
def exec_environment(
    pyfile='',
    request=None,
    response=None,
    session=None,
    ):
    """
    .. function:: gluon.shell.exec_environment([pyfile=''[, request=Request()
        [, response=Response[, session=Session()]]]])

    Environment builder and module loader.

    Builds a web2py environment and optionally executes a Python
    file into the environment.
    A Storage dictionary containing the resulting environment is returned.
    The working directory must be web2py root -- this is the web2py default.
    """
    # Provide fresh dummy request/response/session objects when the
    # caller did not supply them.
    if request is None: request = Request()
    if response is None: response = Response()
    if session is None: session = Session()
    if request.folder is None:
        # Derive the application folder from the file path when it lives
        # under applications/<appname>/; otherwise leave it empty.
        mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)
        if mo:
            appname = mo.group('appname')
            request.folder = os.path.join('applications', appname)
        else:
            request.folder = ''
    env = build_environment(request, response, session, store_current=False)
    if pyfile:
        # Prefer the byte-compiled file when one exists next to the source.
        # (Python 2 `exec ... in` statement syntax.)
        pycfile = pyfile + 'c'
        if os.path.isfile(pycfile):
            exec read_pyc(pycfile) in env
        else:
            execfile(pyfile, env)
    return Storage(env)
def env(
    a,
    import_models=False,
    c=None,
    f=None,
    dir='',
    extra_request={},
    ):
    """
    Return web2py execution environment for application (a), controller (c),
    function (f).
    If import_models is True the exec all application models into the
    environment.

    extra_request allows you to pass along any extra
    variables to the request object before your models
    get executed. This was mainly done to support
    web2py_utils.test_runner, however you can use it
    with any wrapper scripts that need access to the
    web2py environment.

    NOTE(review): extra_request has a mutable default ({}); it is only
    read here, never mutated, so this is currently harmless.
    """
    request = Request()
    response = Response()
    session = Session()
    request.application = a
    # Populate the dummy environment with sensible defaults.
    if not dir:
        request.folder = os.path.join('applications', a)
    else:
        request.folder = dir
    request.controller = c or 'default'
    request.function = f or 'index'
    response.view = '%s/%s.html' % (request.controller,
                                    request.function)
    request.env.path_info = '/%s/%s/%s' % (a, c, f)
    request.env.http_host = '127.0.0.1:8000'
    request.env.remote_addr = '127.0.0.1'
    request.env.web2py_runtime_gae = settings.global_settings.web2py_runtime_gae
    # Copy any caller-supplied extra variables onto the request.
    for k,v in extra_request.items():
        request[k] = v
    # Monkey patch so credentials checks pass.
    def check_credentials(request, other_application='admin'):
        return True
    fileutils.check_credentials = check_credentials
    environment = build_environment(request, response, session)
    if import_models:
        try:
            run_models_in(environment)
        except RestrictedError, e:
            # A model raised: report its traceback and abort the process.
            sys.stderr.write(e.traceback+'\n')
            sys.exit(1)
    # Make the environment look like a __main__ module for scripts.
    environment['__name__'] = '__main__'
    return environment
def exec_pythonrc():
    """Execute the user's PYTHONSTARTUP file, if one is configured."""
    rc_path = os.environ.get('PYTHONSTARTUP')
    if not (rc_path and os.path.isfile(rc_path)):
        return
    try:
        execfile(rc_path)
    except NameError:
        # Swallow NameError as the original did; the startup file is
        # best-effort only.
        pass
def run(
    appname,
    plain=False,
    import_models=False,
    startfile=None,
    bpython=False,
    python_code=False
    ):
    """
    Start interactive shell or run Python script (startfile) in web2py
    controller environment. appname is formatted like:

    a      web2py application name
    a/c    exec the controller c into the application environment
    """
    (a, c, f) = parse_path_info(appname)
    errmsg = 'invalid application name: %s' % appname
    if not a:
        die(errmsg)
    adir = os.path.join('applications', a)
    if not os.path.exists(adir):
        # Offer to scaffold a new application from the welcome package.
        if raw_input('application %s does not exist, create (y/n)?'
                     % a).lower() in ['y', 'yes']:
            os.mkdir(adir)
            w2p_unpack('welcome.w2p', adir)
            for subfolder in ['models','views','controllers', 'databases',
                              'modules','cron','errors','sessions',
                              'languages','static','private','uploads']:
                subpath =  os.path.join(adir,subfolder)
                if not os.path.exists(subpath):
                    os.mkdir(subpath)
            db = os.path.join(adir,'models/db.py')
            if os.path.exists(db):
                # Replace the placeholder secret with a fresh hmac key.
                data = fileutils.read_file(db)
                data = data.replace('<your secret key>','sha512:'+web2py_uuid())
                fileutils.write_file(db, data)
    if c:
        import_models = True
    _env = env(a, c=c, import_models=import_models)
    if c:
        # Load the controller source, falling back to its byte-compiled
        # form under applications/<a>/compiled/.
        cfile = os.path.join('applications', a, 'controllers', c + '.py')
        if not os.path.isfile(cfile):
            cfile = os.path.join('applications', a, 'compiled', "controllers_%s_%s.pyc" % (c,f))
            if not os.path.isfile(cfile):
                die(errmsg)
            else:
                exec read_pyc(cfile) in _env
        else:
            execfile(cfile, _env)
    if f:
        # Call the requested controller function and print its result
        # (Python 2 exec statement with a print statement inside).
        exec ('print %s()' % f, _env)
    elif startfile:
        # Run a script file inside the environment; commit on success,
        # roll back any open DB transactions on failure.
        exec_pythonrc()
        try:
            execfile(startfile, _env)
            if import_models: BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models: BaseAdapter.close_all_instances('rollback')
    elif python_code:
        # Same commit/rollback semantics for inline code strings.
        exec_pythonrc()
        try:
            exec(python_code, _env)
            if import_models: BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models: BaseAdapter.close_all_instances('rollback')
    else:
        # Interactive shell: try bpython or IPython first unless plain
        # was requested, then fall back to the stdlib REPL.
        if not plain:
            if bpython:
                try:
                    import bpython
                    bpython.embed(locals_=_env)
                    return
                except:
                    logger.warning(
                        'import bpython error; trying ipython...')
            else:
                try:
                    import IPython
                    if IPython.__version__ >= '0.11':
                        from IPython.frontend.terminal.embed import InteractiveShellEmbed
                        shell = InteractiveShellEmbed(user_ns=_env)
                        shell()
                        return
                    else:
                        # following 2 lines fix a problem with
                        # IPython; thanks Michael Toomim
                        if '__builtins__' in _env:
                            del _env['__builtins__']
                        shell = IPython.Shell.IPShell(argv=[],user_ns=_env)
                        shell.mainloop()
                        return
                except:
                    logger.warning(
                        'import IPython error; use default python shell')
        # Enable tab completion in the plain REPL when readline exists.
        try:
            import readline
            import rlcompleter
        except ImportError:
            pass
        else:
            readline.set_completer(rlcompleter.Completer(_env).complete)
            readline.parse_and_bind('tab:complete')
        exec_pythonrc()
        code.interact(local=_env)
def parse_path_info(path_info):
    """
    Parse path info formatted like a/c/f where c and f are optional
    and a leading / accepted.
    Return tuple (a, c, f). If invalid path_info a is set to None.
    If c or f are omitted they are set to None.
    """
    match = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$',
                     path_info)
    if not match:
        return (None, None, None)
    return (match.group('a'), match.group('c'), match.group('f'))
def die(msg):
    """Print *msg* (plus a newline) to stderr and exit with status 1.

    Rewritten to avoid the Python 2-only ``print >> sys.stderr`` statement
    syntax; the observable behavior (message on stderr, exit code 1) is
    unchanged and the function now also works under Python 3.
    """
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
def test(testpath, import_models=True, verbose=False):
    """
    Run doctests in web2py environment. testpath is formatted like:

        a      tests all controllers in application a
        a/c    tests controller c in application a
        a/c/f  tests function f in controller c, application a

    Where a, c and f are application, controller and function names
    respectively. If the testpath is a file name the file is tested.
    If a controller is specified models are executed by default.

    NOTE: this function relies on Python 2-only constructs (execfile,
    types.TypeType/ClassType/UnboundMethodType).
    """
    import doctest
    if os.path.isfile(testpath):
        # An explicit file path: recover the application name from the
        # 'applications/<a>/' component of the path.
        mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)
        if not mo:
            die('test file is not in application directory: %s'
                % testpath)
        a = mo.group('a')
        c = f = None
        files = [testpath]
    else:
        (a, c, f) = parse_path_info(testpath)
        errmsg = 'invalid test path: %s' % testpath
        if not a:
            die(errmsg)
        cdir = os.path.join('applications', a, 'controllers')
        if not os.path.isdir(cdir):
            die(errmsg)
        if c:
            cfile = os.path.join(cdir, c + '.py')
            if not os.path.isfile(cfile):
                die(errmsg)
            files = [cfile]
        else:
            # No controller given: test every controller in the app.
            files = glob.glob(os.path.join(cdir, '*.py'))
    for testfile in files:
        globs = env(a, import_models)
        # NOTE(review): elsewhere env() is called with keyword arguments
        # (c=..., import_models=...); here import_models is passed as the
        # second positional argument -- confirm it binds to the intended
        # parameter of env().
        ignores = globs.keys()
        execfile(testfile, globs)
        def doctest_object(name, obj):
            """doctest obj and enclosed methods and classes."""
            if type(obj) in (types.FunctionType, types.TypeType,
                             types.ClassType, types.MethodType,
                             types.UnboundMethodType):
                # Reload environment before each test.
                globs = env(a, c=c, f=f, import_models=import_models)
                execfile(testfile, globs)
                doctest.run_docstring_examples(obj, globs=globs,
                    name='%s: %s' % (os.path.basename(testfile),
                    name), verbose=verbose)
                if type(obj) in (types.TypeType, types.ClassType):
                    for attr_name in dir(obj):
                        # Execute . operator so decorators are executed.
                        o = eval('%s.%s' % (name, attr_name), globs)
                        doctest_object(attr_name, o)
        for (name, obj) in globs.items():
            # Only doctest names defined by the test file itself, and
            # restrict to function f when one was requested.
            if name not in ignores and (f is None or f == name):
                doctest_object(name, obj)
def get_usage():
    """Return the usage string shown by the option parser."""
    return """
    %prog [options] pythonfile
    """
def execute_from_command_line(argv=None):
    """Parse command-line options and launch the web2py shell.

    argv defaults to sys.argv; argv[0] is treated as the program name
    and ignored by option parsing.
    """
    if argv is None:
        argv = sys.argv
    parser = optparse.OptionParser(usage=get_usage())
    parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',
                      help='run web2py in interactive shell or IPython(if installed) ' + \
                      'with specified appname')
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option(
        '-B',
        '--bpython',
        action='store_true',
        default=False,
        dest='bpython',
        help=msg,
        )
    parser.add_option(
        '-P',
        '--plain',
        action='store_true',
        default=False,
        dest='plain',
        help='only use plain python shell, should be used with --shell option',
        )
    parser.add_option(
        '-M',
        '--import_models',
        action='store_true',
        default=False,
        dest='import_models',
        help='auto import model files, default is False, ' + \
            ' should be used with --shell option',
        )
    parser.add_option(
        '-R',
        '--run',
        dest='run',
        metavar='PYTHON_FILE',
        default='',
        help='run PYTHON_FILE in web2py environment, ' + \
            'should be used with --shell option',
        )
    (options, args) = parser.parse_args(argv[1:])
    # Bug fix: consult the argv actually passed in (not the global
    # sys.argv) when deciding whether any arguments were given.
    if len(argv) == 1:
        parser.print_help()
        sys.exit(0)
    # Bug fix: -R/--run was parsed but never used. A positional argument
    # still takes precedence, so existing invocations are unaffected
    # (options.run defaults to '').
    if len(args) > 0:
        startfile = args[0]
    else:
        startfile = options.run
    # NOTE(review): -M/--import_models is parsed but not forwarded; run()
    # appears to derive import_models internally -- confirm whether the
    # option should be passed through.
    run(options.shell, options.plain, startfile=startfile,
        bpython=options.bpython)
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    execute_from_command_line()
| {
"content_hash": "747a838e4f504a65db4a09388e3e2f7d",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 96,
"avg_line_length": 30.990453460620525,
"alnum_prop": 0.5609549480169427,
"repo_name": "SEA000/uw-empathica",
"id": "948d7e276b8275c5dff82e0878315968a0239a2a",
"size": "13032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empathica/gluon/shell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "127034"
},
{
"name": "JavaScript",
"bytes": "981904"
},
{
"name": "PHP",
"bytes": "15326"
},
{
"name": "Python",
"bytes": "3911190"
},
{
"name": "Shell",
"bytes": "31485"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema change for the lexicon 'source' model: drops the 'active',
    # 'type' and 'type_code' fields, clears the model's Meta options,
    # and adds a single 'deprecated' boolean flag (default False).
    dependencies = [
        ('lexicon', '306_0122_auto_20161007_0255'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='source',
            options={},
        ),
        migrations.RemoveField(
            model_name='source',
            name='active',
        ),
        migrations.RemoveField(
            model_name='source',
            name='type',
        ),
        migrations.RemoveField(
            model_name='source',
            name='type_code',
        ),
        migrations.AddField(
            model_name='source',
            name='deprecated',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "f52d12cc1485ebc0da4e0ea9d3678c5c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 53,
"avg_line_length": 23.441176470588236,
"alnum_prop": 0.5194479297365119,
"repo_name": "lingdb/CoBL-public",
"id": "451639fbc8929d806c51a405fe7f821888f55ee1",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ielex/lexicon/migrations/306_0123_auto_20161013_1557.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "76222"
},
{
"name": "HTML",
"bytes": "558967"
},
{
"name": "JavaScript",
"bytes": "189642"
},
{
"name": "Python",
"bytes": "858438"
},
{
"name": "Shell",
"bytes": "1258"
},
{
"name": "TeX",
"bytes": "119143"
},
{
"name": "Vim script",
"bytes": "870"
}
],
"symlink_target": ""
} |
import blameDBQuery as query
import sqlite3
import argparse
import os
# Command-line driver: report large files and diff spikes in a blame
# database that are not covered by the exclusion patterns.
parser = argparse.ArgumentParser(description = "check for files that should maybe be excluded")
parser.add_argument('database', help="which db to check")
# Typo fix in help text: "format at" -> "format as".
parser.add_argument('exclusions', help="file containing exclusions (same format as plot.py)")
parser.add_argument('-n', default=10, type=int, help='number of entries to print')
args = parser.parse_args()
if not os.path.exists(args.database):
    # Typo fix: messages previously read "does not exit!".
    # Single-argument print(...) behaves identically under Python 2.
    print("database file does not exist!")
    parser.print_help()
elif not os.path.exists(args.exclusions):
    print("exclusions file does not exist!")
    parser.print_help()
else:
    # Read one exclusion pattern per line, skipping blank lines.
    exclusions = []
    with open(args.exclusions, 'r') as infile:
        for line in infile:
            pattern = line.strip()
            if pattern != '':
                exclusions.append(pattern)
    with sqlite3.connect(args.database) as conn:
        cur = conn.cursor()
        query.PrintLargeFiles(cur, exclusions, num = args.n)
        query.PrintDiffSpikes(cur, exclusions, num = args.n)
| {
"content_hash": "e4586ca9b28f49bc61bb496e440bf44b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 95,
"avg_line_length": 32.9375,
"alnum_prop": 0.6840607210626186,
"repo_name": "bradneuman/BlameOverTime",
"id": "d8c78fbe0bbbecfc311b88c846efd70fe4bc0e76",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkExclusions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44730"
}
],
"symlink_target": ""
} |
"""REANA-Job-Controller errors."""
# Raised when an operation against a compute backend fails.
class ComputingBackendSubmissionError(Exception):
    """Operation to compute backend could not be performed."""
| {
"content_hash": "bd5ebf676be68c6257b7448892c80806",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 30,
"alnum_prop": 0.7533333333333333,
"repo_name": "tiborsimko/reana-job-controller",
"id": "e88673c857d919122e49ef5973a381e73d31f8cf",
"size": "388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reana_job_controller/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3563"
},
{
"name": "Python",
"bytes": "130603"
},
{
"name": "Shell",
"bytes": "4720"
}
],
"symlink_target": ""
} |
import os
from os import path as op
import shutil
import glob
import warnings
from nose.tools import assert_true, assert_raises
from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg,
mne_compute_proj_ecg, mne_compute_proj_eog,
mne_coreg, mne_kit2fiff,
mne_make_scalp_surfaces, mne_maxfilter,
mne_report, mne_surf2bem, mne_watershed_bem,
mne_compare_fiff, mne_flash_bem, mne_show_fiff,
mne_show_info)
from mne.utils import (run_tests_if_main, _TempDir, requires_mne, requires_PIL,
requires_mayavi, requires_tvtk, requires_freesurfer,
ArgvSetter, slow_test, ultra_slow_test)
from mne.io import Raw
from mne.datasets import testing, sample
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
subjects_dir = op.join(testing.data_path(download=False), 'subjects')
warnings.simplefilter('always')
def check_usage(module, force_help=False):
    """Helper to ensure we print usage"""
    # Run the command with no arguments (or --help when forced) and
    # verify that the captured stdout contains the usage banner.
    # SystemExit from option parsing is expected and swallowed.
    args = ('--help',) if force_help else ()
    with ArgvSetter(args) as out:
        try:
            module.run()
        except SystemExit:
            pass
        assert_true('Usage: ' in out.stdout.getvalue())
@slow_test
def test_browse_raw():
    """Test mne browse_raw"""
    # Only the usage/help output is checked; no browser is launched.
    check_usage(mne_browse_raw)
def test_bti2fiff():
    """Test mne bti2fiff"""
    # Usage output only; no conversion is performed.
    check_usage(mne_bti2fiff)
def test_compare_fiff():
    """Test mne compare_fiff"""
    # Usage output only; no files are compared.
    check_usage(mne_compare_fiff)
def test_show_fiff():
    """Test mne show_fiff"""
    # (Docstring fixed: it previously said "compare_fiff", a copy-paste
    # error from the test above.)
    check_usage(mne_show_fiff)
    with ArgvSetter((raw_fname,)):
        mne_show_fiff.run()
@requires_mne
def test_clean_eog_ecg():
    """Test mne clean_eog_ecg"""
    check_usage(mne_clean_eog_ecg)
    tempdir = _TempDir()
    # Concatenate the same raw file three times to get a longer recording.
    raw = Raw([raw_fname, raw_fname, raw_fname])
    raw.info['bads'] = ['MEG 2443']
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(('-i', use_fname, '--quiet')):
        mne_clean_eog_ecg.run()
    # Expect the command to have written projection and event files.
    fnames = glob.glob(op.join(tempdir, '*proj.fif'))
    assert_true(len(fnames) == 2)  # two projs
    fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
    assert_true(len(fnames) == 3)  # raw plus two projs
@slow_test
def test_compute_proj_ecg_eog():
    """Test mne compute_proj_ecg/eog"""
    # Both commands share the same CLI shape, so exercise them in a loop.
    for fun in (mne_compute_proj_ecg, mne_compute_proj_eog):
        check_usage(fun)
        tempdir = _TempDir()
        use_fname = op.join(tempdir, op.basename(raw_fname))
        bad_fname = op.join(tempdir, 'bads.txt')
        with open(bad_fname, 'w') as fid:
            fid.write('MEG 2443\n')
        shutil.copyfile(raw_fname, use_fname)
        with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname,
                         '--rej-eeg', '150')):
            fun.run()
        # One projection file and one event file should be produced.
        fnames = glob.glob(op.join(tempdir, '*proj.fif'))
        assert_true(len(fnames) == 1)
        fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
        assert_true(len(fnames) == 1)
def test_coreg():
    """Test mne coreg"""
    # GUI command: only verify that the module exposes a run() entry point.
    assert_true(hasattr(mne_coreg, 'run'))
def test_kit2fiff():
    """Test mne kit2fiff"""
    # Can't check
    # force_help=True: running without args would open a GUI, so only the
    # --help path is exercised.
    check_usage(mne_kit2fiff, force_help=True)
@requires_tvtk
@requires_mne
@testing.requires_testing_data
def test_make_scalp_surfaces():
    """Test mne make_scalp_surfaces"""
    check_usage(mne_make_scalp_surfaces)
    # Copy necessary files to avoid FreeSurfer call
    tempdir = _TempDir()
    surf_path = op.join(subjects_dir, 'sample', 'surf')
    surf_path_new = op.join(tempdir, 'sample', 'surf')
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(surf_path_new)
    os.mkdir(op.join(tempdir, 'sample', 'bem'))
    shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new)

    # Temporarily unset FREESURFER_HOME / MNE_ROOT to verify the command
    # fails cleanly when those environments are missing, then restore
    # them and check the success and no-overwrite paths.
    orig_fs = os.getenv('FREESURFER_HOME', None)
    orig_mne = os.getenv('MNE_ROOT')
    if orig_fs is not None:
        del os.environ['FREESURFER_HOME']
    cmd = ('-s', 'sample', '--subjects-dir', tempdir)
    os.environ['_MNE_TESTING_SCALP'] = 'true'
    try:
        with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False):
            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
            os.environ['FREESURFER_HOME'] = tempdir  # don't need it
            del os.environ['MNE_ROOT']
            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
            os.environ['MNE_ROOT'] = orig_mne
            mne_make_scalp_surfaces.run()
            assert_raises(IOError, mne_make_scalp_surfaces.run)  # no overwrite
    finally:
        # Always restore the original environment.
        if orig_fs is not None:
            os.environ['FREESURFER_HOME'] = orig_fs
        os.environ['MNE_ROOT'] = orig_mne
        del os.environ['_MNE_TESTING_SCALP']
def test_maxfilter():
    """Test mne maxfilter"""
    check_usage(mne_maxfilter)
    with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60',
                     '--trans', raw_fname)) as out:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            # _MNE_MAXFILTER_TEST makes the command print the call instead
            # of invoking the external maxfilter binary.
            os.environ['_MNE_MAXFILTER_TEST'] = 'true'
            try:
                mne_maxfilter.run()
            finally:
                del os.environ['_MNE_MAXFILTER_TEST']
            assert_true(len(w) == 1)
        # The printed command line should contain these options.
        for check in ('maxfilter', '-trans', '-movecomp'):
            assert_true(check in out.stdout.getvalue(), check)
@slow_test
@requires_mayavi
@requires_PIL
@testing.requires_testing_data
def test_report():
    """Test mne report"""
    check_usage(mne_report)
    tempdir = _TempDir()
    use_fname = op.join(tempdir, op.basename(raw_fname))
    shutil.copyfile(raw_fname, use_fname)
    with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir,
                     '-s', 'sample', '--no-browser', '-m', '30')):
        mne_report.run()
    # Exactly one HTML report should have been generated.
    fnames = glob.glob(op.join(tempdir, '*.html'))
    assert_true(len(fnames) == 1)
def test_surf2bem():
    """Test mne surf2bem"""
    # Usage output only; no surfaces are converted.
    check_usage(mne_surf2bem)
@ultra_slow_test
@requires_freesurfer
@testing.requires_testing_data
def test_watershed_bem():
    """Test mne watershed bem"""
    check_usage(mne_watershed_bem)
    # Copy necessary files to tempdir
    tempdir = _TempDir()
    mridata_path = op.join(subjects_dir, 'sample', 'mri')
    mridata_path_new = op.join(tempdir, 'sample', 'mri')
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(mridata_path_new)
    # Copy whichever T1 form the testing dataset provides (dir or .mgz).
    if op.exists(op.join(mridata_path, 'T1')):
        shutil.copytree(op.join(mridata_path, 'T1'), op.join(mridata_path_new,
                                                             'T1'))
    if op.exists(op.join(mridata_path, 'T1.mgz')):
        shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
                        op.join(mridata_path_new, 'T1.mgz'))

    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-o'),
                    disable_stdout=False, disable_stderr=False):
        mne_watershed_bem.run()
@ultra_slow_test
@requires_mne
@requires_freesurfer
@sample.requires_sample_data
def test_flash_bem():
    """Test mne flash_bem"""
    check_usage(mne_flash_bem, force_help=True)
    # Using the sample dataset
    # (local name deliberately shadows the module-level subjects_dir)
    subjects_dir = op.join(sample.data_path(download=False), 'subjects')
    # Copy necessary files to tempdir
    tempdir = _TempDir()
    mridata_path = op.join(subjects_dir, 'sample', 'mri')
    mridata_path_new = op.join(tempdir, 'sample', 'mri')
    os.makedirs(op.join(mridata_path_new, 'flash'))
    os.makedirs(op.join(tempdir, 'sample', 'bem'))
    shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
                    op.join(mridata_path_new, 'T1.mgz'))
    shutil.copyfile(op.join(mridata_path, 'brain.mgz'),
                    op.join(mridata_path_new, 'brain.mgz'))
    # Copy the available mri/flash/mef*.mgz files from the dataset
    files = glob.glob(op.join(mridata_path, 'flash', 'mef*.mgz'))
    for infile in files:
        shutil.copyfile(infile, op.join(mridata_path_new, 'flash',
                                        op.basename(infile)))
    # Test mne flash_bem with --noconvert option
    # (since there are no DICOM Flash images in dataset)
    currdir = os.getcwd()
    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'),
                    disable_stdout=False, disable_stderr=False):
        mne_flash_bem.run()
    os.chdir(currdir)
def test_show_info():
    """Test mne show_info"""
    check_usage(mne_show_info)
    # Also run the command on a real file to exercise the success path.
    with ArgvSetter((raw_fname,)):
        mne_show_info.run()
# Run this module's tests when executed directly.
run_tests_if_main()
| {
"content_hash": "9a90978f5d70c6f2843368f72e1c5fb4",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 33.98809523809524,
"alnum_prop": 0.6009340338587273,
"repo_name": "wronk/mne-python",
"id": "55de6ca2ebf407c5e15860ab22df728bfdbcd4e3",
"size": "8589",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/commands/tests/test_commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5079143"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
__author__ = 'Josh Allen Bosley'
import SocketServer
import sql_settings
import request_handler
# Query information
# Single-character codes the client may send as the first '^'-separated
# field of a request (update, insert, select, generate, ...).
recognized_queries = ['u', 'i', 's', 'g', 'a', 'ci'] # update, insert, select, generate
recent_queries = []
max_stored_queries = 25
class TCPConnectionHandler(SocketServer.BaseRequestHandler):
    """Handle one TCP request: parse a '^'-delimited query and reply."""
    def handle(self):
        reply = None
        # Receive data from client
        self.data = self.request.recv(sql_settings.BLOCK).split("^")
        # Dispatch recognized query codes to the request handler;
        # anything else gets a '?' reply and is logged to screen.
        if self.data[0] in recognized_queries:
            reply = request_handler.request_handler(self.data)
        if self.data[0] not in recognized_queries:
            reply = "?"
            sql_settings.output_update_to_screen("UNRECOGNIZED QUERY !")
            sql_settings.output_update_to_screen(self.data)
        if reply is not None:
            self.request.send(reply)
        self.request.close()
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server: one daemon thread per connection."""
    daemon_threads = True
    allow_reuse_address = True
    def __init__(self, server_address, RequestHandlerClass):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
def initiate_server_instance():
    """Create the server on the configured host/port and serve forever."""
    the_server = Server((sql_settings.HOST, sql_settings.PORT), TCPConnectionHandler)
    the_server.serve_forever()
    return
"content_hash": "fba2ef009b99d0e48d827c965e5ae20e",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 24.642857142857142,
"alnum_prop": 0.6376811594202898,
"repo_name": "LSSUHDTeam/ReservationServer",
"id": "a9ef375fe73c098f3ff6509a9e70067d66aeb86c",
"size": "1380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33637"
}
],
"symlink_target": ""
} |
""":mod:`wand` --- Simple `MagickWand API`_ binding for Python
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _MagickWand API: http://www.imagemagick.org/script/magick-wand.php
"""
| {
"content_hash": "2d81f3e07d40f3822ade419c3d44fa68",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 29,
"alnum_prop": 0.46798029556650245,
"repo_name": "tommo/gii",
"id": "c7992243745d06ab6cb5fbf98cc38ab12be1939e",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/mock/asset/tools/wand/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
} |
import sys
import joblib
import pytest
from joblib.testing import check_subprocess_call
def test_version():
    """Check that the joblib package exposes a __version__ attribute."""
    has_version = hasattr(joblib, '__version__')
    assert has_version, (
        "There are no __version__ argument on the joblib module")
def test_no_start_method_side_effect_on_import():
    """Run a fresh interpreter to prove the import leaves no start_method."""
    # check that importing joblib does not implicitly set the global
    # start_method for multiprocessing.
    code = """if True:
        import joblib
        import multiprocessing as mp
        # The following line would raise RuntimeError if the
        # start_method is already set.
        mp.set_start_method("loky")
    """
    check_subprocess_call([sys.executable, '-c', code])
def test_no_semaphore_tracker_on_import():
    """Run a fresh interpreter to prove the import spawns no tracker."""
    # check that importing joblib does not implicitly spawn a resource tracker
    # or a semaphore tracker
    code = """if True:
        import joblib
        from multiprocessing import semaphore_tracker
        # The following line would raise RuntimeError if the
        # start_method is already set.
        msg = "multiprocessing.semaphore_tracker has been spawned on import"
        assert semaphore_tracker._semaphore_tracker._fd is None, msg"""
    if sys.version_info >= (3, 8):
        # semaphore_tracker was renamed in Python 3.8:
        code = code.replace("semaphore_tracker", "resource_tracker")
    check_subprocess_call([sys.executable, '-c', code])
def test_no_resource_tracker_on_import():
    """Check importing joblib does not spawn loky's resource tracker."""
    code = """if True:
        import joblib
        from joblib.externals.loky.backend import resource_tracker
        # The following line would raise RuntimeError if the
        # start_method is already set.
        msg = "loky.resource_tracker has been spawned on import"
        assert resource_tracker._resource_tracker._fd is None, msg
    """
    check_subprocess_call([sys.executable, '-c', code])
| {
"content_hash": "367f1e10edeb1404bf910130af8933fb",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 36.64,
"alnum_prop": 0.6719432314410481,
"repo_name": "ryfeus/lambda-packs",
"id": "9c3b12b909f62ffb2b94e85374c6dadabc69c21e",
"size": "1832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/joblib/test/test_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from FPPbrowser import app
from flask import render_template, request, jsonify, flash
from matplotlib import pyplot as plt
from bokeh import plotting as bkplt
from bokeh.embed import file_html, components
from bokeh.resources import CDN
import numpy as np
import os, os.path
# import vespa
import json
import mpld3
# starpop = vespa.MultipleStarPopulation(1)
# Candidate column names for plotting (population currently disabled
# along with the vespa import above, so the list stays empty).
data_options = []
# for each_key in starpop.stars.keys():
#     if np.isfinite(getattr(starpop.stars,each_key)).any():
#         data_options.append(each_key)
# Create empty pyplot figure
fig = bkplt.figure()
# List the KOI result files bundled under static/fpp, skipping dotfiles.
KOI_files = []
curdir = os.path.dirname(__file__)
KOI_filepath = os.path.join(curdir,'static','fpp')
for each_file in os.listdir(KOI_filepath):
    if each_file[0] != '.':
        KOI_files.append(each_file)
@app.route('/')
def index():
    """Render the main page with an empty Bokeh figure and the KOI list."""
    # jsonfig = json.dumps(mpld3.fig_to_dict(fig))
    # return render_template('index.html', data_options=data_options, jsonfig=jsonfig)
    script, div = components(fig, CDN)
    return render_template('index.html', data_options=data_options, script=script, div=div, KOI_files=KOI_files)
@app.route('/loadKOIData', methods=['POST'])
def load_KOIdata():
    """Re-render the page with the KOI file selected in the POST form."""
    KOI_filename = request.form['KOIinput']
    return render_template('index.html', KOI_filename=KOI_filename, KOI_files=KOI_files)
@app.route('/_plot_data', methods=['POST'])
def plot_data():
    """Echo the posted KOI selection back into the rendered page.

    The commented-out code below is an unfinished Bokeh plotting path.
    """
    data = request.form['KOIinput']
    return render_template('index.html', data=data, KOI_files=KOI_files)
    # all_picks = request.args.getlist('checkbox_data')
    # x_pick = 'H_mag'#str(all_picks[0])
    # y_pick = 'H_mag'#str(all_picks[1])
    # x = starpop[x_pick]
    # y = starpop[y_pick]
    # fig.xaxis.axis_label = x_pick
    # fig.yaxis.axis_label = y_pick
    # # Plot data and convert to JSON
    # script, div = components(fig, CDN)
    # # jsonfig = jsonify(mpld3.fig_to_dict(fig))
    # # return render_template('index.html', data_options=data_options, jsonfig=jsonfig)
    # return div
| {
"content_hash": "e759e8da338385aeb6be3e16182a40b8",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 109,
"avg_line_length": 30.07936507936508,
"alnum_prop": 0.712401055408971,
"repo_name": "timothydmorton/vespa-visualization",
"id": "3aeac806c6b1a38163cd07e6961910ddcf7fd3ab",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FPPbrowser/FPPbrowser/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "1229054"
},
{
"name": "JavaScript",
"bytes": "4849"
},
{
"name": "Python",
"bytes": "3779"
}
],
"symlink_target": ""
} |
import functools
import inspect
import logging
from logging import handlers
import os
import sys
import time
from oslo.config import cfg
from sahara.openstack.common import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def _get_log_file_path(logfile):
    """Return the path of *logfile* inside the configured log directory.

    Falls back to the current directory when CONF.log_dir is unset.
    """
    logdir = CONF.log_dir or os.path.curdir
    return os.path.join(logdir, logfile)
# Attach a dedicated 'timing.log' file handler at DEBUG level so timing
# records are always written to their own file.
fh = handlers.WatchedFileHandler(_get_log_file_path('timing.log'))
fh.setLevel(logging.DEBUG)
LOG.logger.addHandler(fh)
def timed(f):
    """Decorator that logs the execution time of the wrapped callable.

    The log line is prefixed with one '-' per stack frame so nested timed
    calls read as a tree; exceptions are logged and re-raised.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwds):
        # Current stack depth drives the indentation of the log line.
        indent_level = len(inspect.stack()) - 1
        start = time.time()
        try:
            result = f(*args, **kwds)
        except Exception:
            LOG.info('Exception raised by invocation of {0}: {1}'
                     .format(f.__name__, sys.exc_info()[0]))
            raise
        finally:
            elapsed = time.time() - start
            # NOTE(review): args[0].__class__ assumes the wrapped callable
            # is a bound method (first argument is the instance); a plain
            # function called with no arguments would raise IndexError
            # here -- confirm intended usage.
            LOG.info('-' * indent_level + '{0}({1}), {2} seconds'.format(
                f.__name__, args[0].__class__.__name__, elapsed))
        return result
    return wrapper
| {
"content_hash": "3fea83d9330a7d6040d7cd3369c2d5fd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 23.78723404255319,
"alnum_prop": 0.59391771019678,
"repo_name": "tellesnobrega/storm_plugin",
"id": "0f029c052a1a6184bfbf806c0e9a6375466b7786",
"size": "1705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/utils/timing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Python",
"bytes": "1742534"
},
{
"name": "Shell",
"bytes": "15761"
}
],
"symlink_target": ""
} |
import visvis as vv
import OpenGL.GL as gl
import numpy as np
def getframe(ob):
    """ getframe(object)
    Get a snapshot of the current figure or axes or axesContainer.
    It is returned as a numpy array (color image).
    Also see vv.screenshot().
    """
    # Get figure
    fig = ob.GetFigure()
    if not fig:
        raise ValueError('Object is not present in any alive figures.')
    # Select the figure
    fig._SetCurrent() # works on all backends
    # we read the pixels as shown on screen.
    gl.glReadBuffer(gl.GL_FRONT)
    # establish rectangle to sample; y is converted from the GUI's
    # top-left origin to OpenGL's bottom-left origin.
    if isinstance(ob, vv.BaseFigure):
        x,y,w,h = 0, 0, ob.position.w, ob.position.h
    elif isinstance(ob, vv.AxesContainer):
        x,y = ob.position.absTopLeft
        w,h = ob.position.size
        y = fig.position.h - (y+h)
    elif isinstance(ob, vv.Axes):
        x,y = ob.position.absTopLeft
        w,h = ob.position.size
        y = fig.position.h - (y+h)
        x+=1; y+=1; w-=1; h-=1; # first pixel is the bounding box
    else:
        raise ValueError("The given object is not a figure nor an axes.")
    # read
    # use floats to prevent strides etc. uint8 caused crash on qt backend.
    im = gl.glReadPixels(x, y, w, h, gl.GL_RGB, gl.GL_FLOAT)
    # reshape, flip, and store
    im.shape = h,w,3
    im = np.flipud(im)
    # done
    return im
# Demo: snapshot a figure and one of its axes, then redisplay both images.
if __name__ == '__main__':
    import time
    # Prepare
    f = vv.figure()
    a1 = vv.subplot(211)
    a2 = vv.subplot(212)
    # Draw some data
    vv.plot([2,3,4,2,4,3], axes=a1)
    f.DrawNow()
    # Take snapshots
    im1 = vv.getframe(f)
    im2 = vv.getframe(a1)
    # clear and show snapshots
    a1.Clear()
    a2.Clear()
    vv.imshow(im1,axes=a1, clim=(0,1))
    vv.imshow(im2,axes=a2, clim=(0,1))
| {
"content_hash": "de0511902c8d516670daa1cdb377d44b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 24.958904109589042,
"alnum_prop": 0.5817782656421515,
"repo_name": "pbfy0/visvis",
"id": "93e95904509a04370751594f40ed82f935961b07",
"size": "1999",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "functions/getframe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "158972"
},
{
"name": "C++",
"bytes": "44817"
},
{
"name": "Python",
"bytes": "1475236"
}
],
"symlink_target": ""
} |
"""
Script that evaluates the `SunMoon` class.
For one location, this script computes solar event times for all of
the nights of one year by three methods:
1. with one call to get_solar_events_in_interval
2. with one call per day to get_solar_events
3. with one call per event to get_solar_event_time
For each method, the script reports how long it takes to get the
event times. It also checks that the three methods yield the same
events.
"""
from collections import namedtuple
import cProfile
import datetime
import itertools
import time
from vesper.ephem.sun_moon import SunMoonCache, Event
from vesper.util.date_range import DateRange
# When True, each event-fetching call runs under cProfile.
PROFILING_ENABLED = False
# Simple record describing an observation site.
Location = namedtuple(
    'Location', ('latitude', 'longitude', 'time_zone', 'name'))
ITHACA = Location(42.4440, -76.5019, 'US/Eastern', 'Ithaca')
MISSOULA = Location(46.8721, -113.9940, 'US/Mountain', 'Missoula')
YEAR = 2020
# Event names passed to `get_solar_event_time` by the by-name method.
SOLAR_EVENT_NAMES = (
    'Solar Midnight',
    'Astronomical Dawn',
    'Nautical Dawn',
    'Civil Dawn',
    'Sunrise',
    'Solar Noon',
    'Sunset',
    'Civil Dusk',
    'Nautical Dusk',
    'Astronomical Dusk',
)
# Maximum difference (in seconds) for two event times to count as equal.
TIME_EQUALITY_THRESHOLD = .001
def main():
    """Evaluate solar event computation for two locations, twice each."""
    cache = SunMoonCache()
    print('Getting events for two locations...')
    print()
    get_events_for_two_locations(cache)
    # We include this to test event caching. Getting solar events
    # should be fast here, since we computed all of them above.
    print('Getting events for two locations again...')
    print()
    get_events_for_two_locations(cache)
def get_events_for_two_locations(cache):
    """Run the per-location evaluation for each of the two test sites."""
    for site in (ITHACA, MISSOULA):
        sun_moon = cache.get_sun_moon(
            site.latitude, site.longitude, site.time_zone)
        get_events_for_location(site.name, sun_moon)
def get_events_for_location(location_name, sun_moon):
    """Run the day and night evaluations for one location, each twice."""
    for day in (True, False):
        get_events_for_location_aux(location_name, sun_moon, day, False)
        # We include this to test event caching. Getting solar events
        # should be fast here, since we computed all of them above.
        get_events_for_location_aux(location_name, sun_moon, day, True)
def get_events_for_location_aux(location_name, sun_moon, day, again):
    """Compute one year of events three ways, report timings, cross-check."""
    period = 'day' if day else 'night'
    suffix = ' again' if again else ''
    print(f'Getting {period} events for {location_name}{suffix}...')
    year_events, year_time = \
        get_events(get_events_by_year, sun_moon, day, 'year')
    date_events, date_time = \
        get_events(get_events_by_date, sun_moon, day, period)
    name_events, name_time = \
        get_events(get_events_by_name, sun_moon, day, 'name')
    show_elapsed_time('year', year_time)
    show_elapsed_time(period, date_time)
    show_elapsed_time('name', name_time)
    # The by-year result is the reference the other two are checked against.
    compare_events(period, date_events, 'year', year_events)
    compare_events('name', name_events, 'year', year_events)
    print()
def get_events(function, sun_moon, day, method):
    """Time one event-fetching method; return (events, elapsed_seconds).

    When PROFILING_ENABLED, the call is made through a Runner instance
    so cProfile.runctx can invoke it by name in the local namespace.
    """
    print(f'Getting events by {method}...')
    start_time = time.time()
    if PROFILING_ENABLED:
        runner = Runner(function, sun_moon, day)
        cProfile.runctx('runner.run()', globals(), locals())
        events = runner.result
    else:
        events = function(sun_moon, day)
    elapsed_time = time.time() - start_time
    return events, elapsed_time
def get_events_by_year(sun_moon, day):
    """Fetch a full year of solar events with a single interval query."""
    zone = sun_moon.time_zone
    boundary_hour = 0 if day else 12
    start = zone.localize(datetime.datetime(YEAR, 1, 1, boundary_hour))
    end = zone.localize(datetime.datetime(YEAR + 1, 1, 1, boundary_hour))
    return sun_moon.get_solar_events_in_interval(start, end)
def get_events_by_date(sun_moon, day):
    """Fetch a year of events with one `get_solar_events` call per date."""
    dates = DateRange(
        datetime.date(YEAR, 1, 1), datetime.date(YEAR + 1, 1, 1))
    events = []
    for date in dates:
        events.extend(sun_moon.get_solar_events(date, day=day))
    return events
def get_events_by_name(sun_moon, day):
    """Get a year of solar events one date and event name at a time."""
    first = datetime.date(YEAR, 1, 1)
    last = datetime.date(YEAR + 1, 1, 1)
    events = []
    for date in DateRange(first, last):
        events.extend(get_solar_events_by_name(sun_moon, date, day))
    return events
def get_solar_events_by_name(sun_moon, date, day):
    """Get one date's solar events by querying each event name separately."""
    candidates = (
        get_solar_event(sun_moon, date, name, day)
        for name in SOLAR_EVENT_NAMES)
    # Some events may not occur on a given date; drop the `None`s and
    # sort what remains.
    return sorted(event for event in candidates if event is not None)
def get_solar_event(sun_moon, date, event_name, day):
    """Get one named solar event, or None if it does not occur on `date`."""
    # Named `event_time` rather than `time` to avoid shadowing the
    # `time` module used elsewhere in this script.
    event_time = sun_moon.get_solar_event_time(date, event_name, day=day)
    return None if event_time is None else Event(event_time, event_name)
def show_elapsed_time(method, elapsed_time):
    """Report how long one event-getting method took, to tenths of a second."""
    seconds = f'{elapsed_time:.1f}'
    print(f'Getting events by {method} took {seconds} seconds.')
def compare_events(method_a, events_a, method_b, events_b):
    """Report whether two event lists agree in length, names, and times."""
    if len(events_a) != len(events_b):
        print(
            f'Got {len(events_a)} events by {method_a} but '
            f'{len(events_b)} by {method_b}.')
        return
    for a, b in zip(events_a, events_b):
        if a.name != b.name or times_differ(a.time, b.time):
            print(
                f'Events by {method_a} differ from events by {method_b}.')
            return
    # If we get here, the two sets of events were the same.
    print(f'Got same events by {method_a} and {method_b}.')
def times_differ(a, b):
    """Return True if times `a` and `b` differ by more than the threshold."""
    delta_seconds = (a - b).total_seconds()
    return abs(delta_seconds) > TIME_EQUALITY_THRESHOLD
class Runner:
    """Wraps a function call, storing its result as an attribute.

    Useful with `cProfile.runctx`, which discards the profiled
    expression's value: `run` stashes it in `self.result` instead.
    """

    def __init__(self, function, *args):
        self.function = function
        self.args = args

    def run(self):
        self.result = self.function(*self.args)
# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "c648ed7ee98c79e6e11a04fc23523aab",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 78,
"avg_line_length": 28.322727272727274,
"alnum_prop": 0.6127427379232868,
"repo_name": "HaroldMills/Vesper",
"id": "f7ccebc76b73ec797ad45545c54b0fb73f054e01",
"size": "6231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vesper/ephem/tests/scripts/evaluate_sun_moon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "92"
},
{
"name": "CSS",
"bytes": "9101"
},
{
"name": "Dockerfile",
"bytes": "1678"
},
{
"name": "HTML",
"bytes": "70614"
},
{
"name": "JavaScript",
"bytes": "410277"
},
{
"name": "Python",
"bytes": "2697554"
},
{
"name": "Shell",
"bytes": "2772"
},
{
"name": "TypeScript",
"bytes": "30001"
}
],
"symlink_target": ""
} |
from abc import ABC
from typing import TYPE_CHECKING
from azure.core.pipeline.transport import HttpRequest
from ._configuration import AzureReservationAPIConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core import AsyncPipelineClient
from .._serialization import Deserializer, Serializer
class MixinABC(ABC):
    """DO NOT use this class. It is for internal typing use only."""

    # Attribute declarations (no assignments) so that operation mixins
    # referencing the client internals type-check correctly.
    _client: "AsyncPipelineClient"
    _config: AzureReservationAPIConfiguration
    _serialize: "Serializer"
    _deserialize: "Deserializer"
| {
"content_hash": "4333dff587857f00f9fb518348c125e0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.7692307692307693,
"repo_name": "Azure/azure-sdk-for-python",
"id": "985979c1d804ce6c6bc8dc0c616269b7bb9be5df",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/aio/_vendor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Unit tests."""
import mock
import pytest
from google.cloud import trace_v1
from google.cloud.trace_v1.proto import trace_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface."""

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Record the call on the owning channel stub.
        self.channel_stub.requests.append((self.method, request))
        canned = self.channel_stub.responses
        response = canned.pop() if canned else None
        # Canned exceptions are raised, truthy responses returned, and
        # anything else yields None.
        if isinstance(response, Exception):
            raise response
        if response:
            return response
        return None
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Records every request made through it in ``self.requests`` and
    replays canned ``responses`` (popped from the end of the list).
    """

    def __init__(self, responses=None):
        # Avoid a mutable default argument: the canned-response list is
        # popped by MultiCallableStub, so a shared default list would
        # leak state between ChannelStub instances.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a callable stub that records calls on this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Arbitrary exception used to simulate RPC failures in these tests."""
class TestTraceServiceClient(object):
    """Unit tests for TraceServiceClient using a stubbed gRPC channel.

    Each test patches ``grpc_helpers.create_channel`` to return a
    ``ChannelStub`` preloaded with a canned response (or exception), then
    asserts on the request the client actually sent.
    """

    def test_list_traces(self):
        """list_traces returns the canned page and sends the right request."""
        # Setup Expected Response
        next_page_token = ""
        traces_element = {}
        traces = [traces_element]
        expected_response = {"next_page_token": next_page_token, "traces": traces}
        expected_response = trace_pb2.ListTracesResponse(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup Request
        project_id = "projectId-1969970175"
        paged_list_response = client.list_traces(project_id)
        # Iterating the pager drains the single canned page.
        resources = list(paged_list_response)
        assert len(resources) == 1
        assert expected_response.traces[0] == resources[0]
        assert len(channel.requests) == 1
        expected_request = trace_pb2.ListTracesRequest(project_id=project_id)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_traces_exception(self):
        """A canned exception propagates when the page is consumed."""
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup request
        project_id = "projectId-1969970175"
        paged_list_response = client.list_traces(project_id)
        with pytest.raises(CustomException):
            list(paged_list_response)

    def test_get_trace(self):
        """get_trace returns the canned trace and sends the right request."""
        # Setup Expected Response
        project_id_2 = "projectId2939242356"
        trace_id_2 = "traceId2987826376"
        expected_response = {"project_id": project_id_2, "trace_id": trace_id_2}
        expected_response = trace_pb2.Trace(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup Request
        project_id = "projectId-1969970175"
        trace_id = "traceId1270300245"
        response = client.get_trace(project_id, trace_id)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = trace_pb2.GetTraceRequest(
            project_id=project_id, trace_id=trace_id
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_trace_exception(self):
        """A canned exception propagates from get_trace."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup request
        project_id = "projectId-1969970175"
        trace_id = "traceId1270300245"
        with pytest.raises(CustomException):
            client.get_trace(project_id, trace_id)

    def test_patch_traces(self):
        """patch_traces sends the right request; no response is expected."""
        channel = ChannelStub()
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup Request
        project_id = "projectId-1969970175"
        traces = {}
        client.patch_traces(project_id, traces)
        assert len(channel.requests) == 1
        expected_request = trace_pb2.PatchTracesRequest(
            project_id=project_id, traces=traces
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_patch_traces_exception(self):
        """A canned exception propagates from patch_traces."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = trace_v1.TraceServiceClient()
        # Setup request
        project_id = "projectId-1969970175"
        traces = {}
        with pytest.raises(CustomException):
            client.patch_traces(project_id, traces)
| {
"content_hash": "35121d5d557f735ed71e15b89d43e2cf",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 87,
"avg_line_length": 33.868263473053894,
"alnum_prop": 0.6412659123055162,
"repo_name": "tswast/google-cloud-python",
"id": "a2601fb1848d5ee409750bd1d0ff47804282a9b2",
"size": "6258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trace/tests/unit/gapic/v1/test_trace_service_client_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import os
import sys
from django.core.management.base import AppCommand
from django_extensions.management.utils import _make_writeable
from optparse import make_option
class Command(AppCommand):
    """Management command that scaffolds a template tags package for an app."""

    option_list = AppCommand.option_list + (
        make_option(
            '--name', '-n', action='store', dest='tag_library_name',
            default='appname_tags',
            help='The name to use for the template tag base name. Defaults to `appname`_tags.'),
        make_option(
            '--base', '-b', action='store', dest='base_command',
            default='Base',
            help='The base class used for implementation of this command. Should be one of Base, App, Label, or NoArgs'),
    )
    help = ("Creates a Django template tags directory structure for the given app name"
            " in the apps's directory")
    args = "[appname]"
    label = 'application name'
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True

    def handle_app(self, app, **options):
        """Create the templatetags structure inside the given app's directory."""
        app_dir = os.path.dirname(app.__file__)
        tag_library_name = options.get('tag_library_name')
        if tag_library_name == 'appname_tags':
            # Default: derive `<appname>_tags` from the app directory name.
            tag_library_name = '%s_tags' % os.path.basename(app_dir)
        copy_template('template_tags_template', app_dir, tag_library_name)
def copy_template(template_name, copy_to, tag_library_name):
    """Copy the named template directory tree to the `copy_to` location.

    File names containing 'sample' are renamed using `tag_library_name`,
    a trailing '.tmpl' extension is removed, hidden directories are
    skipped, and existing destination files are never overwritten.
    """
    import django_extensions
    import shutil
    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
    # walks the template structure and copies it
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir) + 1:]
        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
            os.mkdir(os.path.join(copy_to, relative_dir))
        # Prune hidden directories in place so os.walk does not descend
        # into them. (The previous `del subdirs[i]` inside `enumerate`
        # skipped the element after each deletion, leaving consecutive
        # hidden directories unpruned.)
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
        for f in files:
            if f.endswith('.pyc') or f.startswith('.DS_Store'):
                continue
            path_old = os.path.join(d, f)
            path_new = os.path.join(copy_to, relative_dir, f.replace('sample', tag_library_name))
            if os.path.exists(path_new):
                # Renamed target exists; fall back to the original name.
                path_new = os.path.join(copy_to, relative_dir, f)
            if os.path.exists(path_new):
                # Never overwrite an existing file.
                continue
            # Strip only an actual '.tmpl' suffix. (The previous
            # `rstrip(".tmpl")` stripped *characters* from the set
            # {'.', 't', 'm', 'p', 'l'}, mangling names such as
            # 'foo.html.tmpl' into 'foo.h'.)
            if path_new.endswith('.tmpl'):
                path_new = path_new[:-len('.tmpl')]
            # Context managers guarantee the handles are closed even if
            # the read or write raises.
            with open(path_old, 'r') as fp_old:
                with open(path_new, 'w') as fp_new:
                    fp_new.write(fp_old.read())
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| {
"content_hash": "ecb36fd2e694fc640d94fc54934d13a0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 156,
"avg_line_length": 44.161764705882355,
"alnum_prop": 0.6087246087246088,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "d8f33c6cc05971ce310f72837968915ae64d3d0b",
"size": "3003",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Lib/site-packages/django_extensions/management/commands/create_template_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
} |
"""This code example deactivates all active placements.
To determine which placements exist, run get_all_placements.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
PLACEMENT_ID = 'INSERT_PLACEMENT_ID_HERE'
def main(client, placement_id):
  """Deactivate the placement with the given id via the Ad Manager API.

  Note: this example uses Python 2 syntax (print statements, long()).
  """
  # Initialize appropriate service.
  placement_service = client.GetService('PlacementService', version='v201805')
  # Create query.
  # Bind the id as a variable and limit to one row, since ids are unique.
  statement = (ad_manager.StatementBuilder(version='v201805')
               .Where('id = :placementId')
               .WithBindVariable('placementId', long(placement_id))
               .Limit(1))
  # Get placements by statement.
  placements = placement_service.getPlacementsByStatement(
      statement.ToStatement())
  for placement in placements:
    print ('Placement with id "%s", name "%s", and status "%s" will be '
           'deactivated.' % (placement['id'], placement['name'],
                             placement['status']))
  # Perform action.
  result = placement_service.performPlacementAction(
      {'xsi_type': 'DeactivatePlacements'}, statement.ToStatement())
  # Display results.
  if result and int(result['numChanges']) > 0:
    print 'Number of placements deactivated: %s' % result['numChanges']
  else:
    print 'No placements were deactivated.'
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/config are loaded from stored configuration
  # (LoadFromStorage) — see the googleads client library docs.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, PLACEMENT_ID)
| {
"content_hash": "c6df26cc34d7f20c892f8ebff07c2454",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 31.97826086956522,
"alnum_prop": 0.681169272603671,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "7b031b549854c8a94b7a644d628f093ecda7355a",
"size": "2093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201805/placement_service/deactivate_placements.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2012 Diego Torres Milano
Created on Aug 31, 2013
@author: diego
'''
import sys
import os
from com.dtmilano.android.viewclient import ViewClient
# Require at least the output file name argument.
if len(sys.argv) < 2:
    sys.exit("usage: %s filename.png [serialno]" % sys.argv[0])
# Pop the file name from argv; the optional remaining argument
# (serialno) is presumably consumed by connectToDeviceOrExit — confirm
# against the AndroidViewClient documentation.
filename = sys.argv.pop(1)
device, serialno = ViewClient.connectToDeviceOrExit(verbose=False)
device.takeSnapshot().save(filename, 'PNG')
| {
"content_hash": "7f47b14684564a1a1ad62c85633a8662",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 20.68421052631579,
"alnum_prop": 0.7302798982188295,
"repo_name": "jyx140521/AndroidViewClient",
"id": "5656379c380c66fd71f349c8dda78742d9507742",
"size": "416",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/screenshot.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "9419"
},
{
"name": "Python",
"bytes": "977248"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Env and Knowledge models (with a
    # many-to-many link from Knowledge to Env) and makes
    # Classification.c_name and Tag.tag_name unique.

    dependencies = [
        ('myblog', '0006_auto_20150423_1231'),
    ]
    operations = [
        migrations.CreateModel(
            name='Env',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('create_time', models.DateTimeField(auto_now=True)),
                ('modify_time', models.DateTimeField(auto_now_add=True)),
                ('content', models.CharField(unique=True, max_length=50)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Knowledge',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('create_time', models.DateTimeField(auto_now=True)),
                ('modify_time', models.DateTimeField(auto_now_add=True)),
                ('question', models.CharField(max_length=200)),
                ('answer', models.TextField()),
                ('env', models.ManyToManyField(to='myblog.Env')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AlterField(
            model_name='classification',
            name='c_name',
            field=models.CharField(unique=True, max_length=30),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='tag',
            name='tag_name',
            field=models.CharField(unique=True, max_length=20),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "49ec6b3effc678442372491161e08728",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 114,
"avg_line_length": 34.528301886792455,
"alnum_prop": 0.5207650273224044,
"repo_name": "madarou/angular-django",
"id": "d17c3379c56107003df754e4b8fca44421fe5b21",
"size": "1854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/myblog/migrations/0007_auto_20150423_1527.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "356826"
},
{
"name": "HTML",
"bytes": "77340"
},
{
"name": "Python",
"bytes": "1816255"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns,url,include
from django.conf import settings
from django.views.generic.base import RedirectView,TemplateView
from .leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
    '',
    # FOR DEBUG AND TEST ONLY
    url(r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$',
        'goflow.workflow.views.debug_switch_user', {'redirect': '/leave/'}),
    url(r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$',
        'goflow.workflow.views.debug_switch_user'),
    # user connection
    url(r'^.*/logout/$', 'django.contrib.auth.views.logout'),
    url(r'^.*/accounts/login/$', 'django.contrib.auth.views.login',
        {'template_name': 'goflow/login.html'}),
    # static
    url(r'^images/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
    url(r'^files/(.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    # home redirection
    url(r'^.*/home/$', RedirectView.as_view(url='/leave/')),
    # home page
    url(r'^leave/$', TemplateView.as_view(template_name='leave.html')),
    # starting application
    url(r'^leave/start/$', 'goflow.apptools.views.start_application',
        {'process_name': 'leave',
         'form_class': StartRequestForm,
         'template': 'start_leave.html'}),
    # applications
    url(r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model',
        {'form_class': CheckRequestForm,
         'template': 'checkstatus.html'}),
    url(r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto',
        {'notif_user': True}),
    url(r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model',
        {'form_class': RequesterForm,
         'template': 'refine.html'}),
    url(r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model',
        {'form_class': CheckRequestForm,
         'template': 'approval.html'}),
    url(r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application',
        {'template': 'hrform.html'}),
    url(r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
    url(r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application',
        {'template': 'finalinfo.html'}),
    # administration
    url(r'^leave/admin/apptools/', include('goflow.apptools.urls_admin')),
    # NOTE(review): this includes apptools.urls_admin under the workflow
    # prefix as well — possibly intended to be goflow.workflow.urls_admin;
    # behavior preserved, confirm with the goflow project.
    url(r'^leave/admin/workflow/', include('goflow.apptools.urls_admin')),
    url(r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
    url(r'^leave/admin/(.*)', admin.site.urls),
    # Goflow pages
    url(r'^leave/', include('goflow.urls')),
    url(r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
| {
"content_hash": "ce9816ea18a353756a8c98a95a01d3ab",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 142,
"avg_line_length": 53.30508474576271,
"alnum_prop": 0.5761526232114468,
"repo_name": "mikewolfli/django-goflow",
"id": "b9f8d18016f4a9d4e3e5159a035a23bb896a281a",
"size": "3145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leavedemo/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43781"
},
{
"name": "JavaScript",
"bytes": "154850"
},
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "158420"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
} |
""":synopsis: Common functions for text encodings.
:module: mom.codec.text
Text encoding
-------------
::
"There is no such thing as plain text."
- Plain Text.
UTF-8 is one of the many ways in which Unicode strings can be represented as
a *sequence of bytes*, and because UTF-8 is more portable between diverse
systems, you must ensure to convert your Unicode strings to UTF-8 encoded
bytes before they leave your system and ensure to decode UTF-8 encoded bytes
back into Unicode strings before you start working with them in your
code--that is, if you know those bytes are UTF-8 encoded.
Terminology
~~~~~~~~~~~
* The process of **encoding** is that of converting a Unicode string into a
sequence of bytes. The **method** using which this conversion is done is
*also* called an **encoding**::
Unicode string -> Encoded bytes
---------------------------------
"深入 Python" -> b"\\xe6\\xb7\\xb1\\xe5\\x85\\xa5 Python"
The **encoding** (method) used to *encode* in this example is UTF-8.
* The process of **decoding** is that of converting a sequence of bytes into a
Unicode string::
Encoded bytes -> Unicode string
----------------------------------------------------
b"\\xe6\\xb7\\xb1\\xe5\\x85\\xa5 Python" -> "深入 Python"
The **encoding** (method) used to *decode* in this example is UTF-8.
A very crude explanation of when to use what
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Essentially, inside your own system, work with::
"深入 Python"
and not::
b"\\xe6\\xb7\\xb1\\xe5\\x85\\xa5 Python"
but when sending things out to other systems that may not see `"深入 Python"`
the way Python does, you encode it into UTF-8 bytes::
b"\\xe6\\xb7\\xb1\\xe5\\x85\\xa5 Python"
**and tell** those systems that you're using UTF-8 to encode your Unicode
strings so that those systems can decode the bytes you sent appropriately.
When receiving text from other systems, ask for their encodings.
Decode the text using the appropriate encoding method as soon as you receive
it and then operate on the resulting Unicode text.
Read these before you begin to use these functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. http://www.joelonsoftware.com/articles/Unicode.html
2. http://diveintopython3.org/strings.html
3. http://docs.python.org/howto/unicode.html
4. http://docs.python.org/library/codecs.html
.. autofunction:: utf8_encode
.. autofunction:: utf8_decode
.. autofunction:: utf8_encode_if_unicode
.. autofunction:: utf8_decode_if_bytes
.. autofunction:: utf8_encode_recursive
.. autofunction:: utf8_decode_recursive
.. autofunction:: bytes_to_unicode
.. autofunction:: bytes_to_unicode_recursive
.. autofunction:: to_unicode_if_bytes
.. autofunction:: ascii_encode
.. autofunction:: latin1_encode
"""
from __future__ import absolute_import
from mom import builtins
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
__all__ = [
"ascii_encode",
"bytes_to_unicode",
"bytes_to_unicode_recursive",
"latin1_encode",
"to_unicode_if_bytes",
"utf8_decode",
"utf8_decode_if_bytes",
"utf8_decode_recursive",
"utf8_encode",
"utf8_encode_if_unicode",
"utf8_encode_recursive",
]
def utf8_encode(unicode_text):
    """UTF-8 encode a Unicode string into bytes.

    :param unicode_text:
        Unicode string to encode. Byte strings and None pass through
        unchanged.
    :returns:
        UTF-8 encoded bytes (or the argument itself if bytes/None).
    :raises TypeError:
        If the argument is neither None, bytes, nor a Unicode string.
    """
    if unicode_text is None or builtins.is_bytes(unicode_text):
        return unicode_text
    if builtins.is_unicode(unicode_text):
        return unicode_text.encode("utf-8")
    raise TypeError("unsupported argument type: %r" %
                    type(unicode_text).__name__)
def utf8_decode(utf8_encoded_bytes):
    """Decode UTF-8 encoded bytes into a Unicode string.

    Unicode strings and None pass through unchanged (see
    :func:`bytes_to_unicode`).

    :param utf8_encoded_bytes: UTF-8 encoded bytes.
    :returns: Unicode string.
    """
    return bytes_to_unicode(utf8_encoded_bytes)
def utf8_encode_if_unicode(obj):
    """Return UTF-8 bytes if `obj` is a Unicode string, else `obj` unchanged."""
    if builtins.is_unicode(obj):
        return utf8_encode(obj)
    return obj
def utf8_decode_if_bytes(obj):
    """Decode `obj` from UTF-8 if it is a byte string, else return it unchanged.

    Thin alias for :func:`to_unicode_if_bytes` with the default encoding.
    """
    return to_unicode_if_bytes(obj)
def to_unicode_if_bytes(obj, encoding="utf-8"):
    """Decode `obj` using `encoding` if it is a byte string, else return it.

    :param obj: value to (maybe) decode.
    :param encoding: codec used to decode bytes; defaults to UTF-8.
    """
    if builtins.is_bytes(obj):
        return bytes_to_unicode(obj, encoding)
    return obj
def bytes_to_unicode(raw_bytes, encoding="utf-8"):
    """Decode a byte string into Unicode using `encoding` (default UTF-8).

    Unicode strings and None are returned unchanged.

    :raises TypeError: if the argument is not None, bytes, or Unicode.
    """
    if raw_bytes is None or builtins.is_unicode(raw_bytes):
        return raw_bytes
    if builtins.is_bytes(raw_bytes):
        return raw_bytes.decode(encoding)
    raise TypeError("unsupported argument type: %r" % type(raw_bytes).__name__)
def utf8_encode_recursive(obj):
    """Recursively UTF-8 encode all Unicode strings in a simple structure.

    Supports dicts, lists, and tuples; any other value is returned
    unchanged (after Unicode strings are encoded).
    """
    recurse = utf8_encode_recursive
    if isinstance(obj, dict):
        return {recurse(key): recurse(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [recurse(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(recurse(item) for item in obj)
    if builtins.is_unicode(obj):
        return utf8_encode(obj)
    return obj
def bytes_to_unicode_recursive(obj, encoding="utf-8"):
    """Recursively decode all byte strings in a simple structure to Unicode.

    Supports dicts, lists, and tuples; other values are left unchanged.

    :param obj:
        The Python data structure to walk recursively looking for
        byte strings.
    :param encoding:
        The encoding used to decode byte strings. Default UTF-8.
    :returns:
        `obj` with all byte strings converted to Unicode strings.
    """
    if isinstance(obj, dict):
        # Propagate `encoding` on every recursive call. Previously the
        # recursion dropped it, so nested byte strings were always
        # decoded with the default UTF-8 codec regardless of the
        # caller's `encoding` argument.
        return dict((bytes_to_unicode_recursive(k, encoding=encoding),
                     bytes_to_unicode_recursive(v, encoding=encoding))
                    for (k, v) in obj.items())
    elif isinstance(obj, list):
        return list(bytes_to_unicode_recursive(i, encoding=encoding)
                    for i in obj)
    elif isinstance(obj, tuple):
        return tuple(bytes_to_unicode_recursive(i, encoding=encoding)
                     for i in obj)
    elif builtins.is_bytes(obj):
        return bytes_to_unicode(obj, encoding=encoding)
    else:
        return obj
def utf8_decode_recursive(obj):
    """Recursively decode all UTF-8 byte strings in a simple structure.

    Thin alias for :func:`bytes_to_unicode_recursive` with the default
    UTF-8 encoding.
    """
    return bytes_to_unicode_recursive(obj)
def ascii_encode(obj):
    """Encode a string as ASCII bytes.

    :param obj: string to encode.
    :returns: ASCII-encoded bytes.
    :raises UnicodeEncodeError: if `obj` contains non-ASCII characters.
    """
    return obj.encode("ascii")
def latin1_encode(obj):
    """Encode a string as LATIN-1 (ISO-8859-1) bytes.

    :param obj: string to encode.
    :returns: LATIN-1 encoded bytes.
    :raises UnicodeEncodeError: if `obj` has code points above U+00FF.
    """
    return obj.encode("latin1")
| {
"content_hash": "cbaf3a037a634d568c0a55d43aaa51eb",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 79,
"avg_line_length": 29.257627118644066,
"alnum_prop": 0.6765148881937203,
"repo_name": "gorakhargosh/mom",
"id": "45dc78e987ee2bcc474ecbcd0efd8e9745a91ccc",
"size": "9379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mom/codec/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "626298"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
import time
from cupboard import Cupboard
from redis import ConnectionError
from test_env import INVARIANT_ENVS, INVARIANT_KEYS, INVARIANT_VALUES, filename
def test_expiry():
    """Keys set with ex=1 should expire and raise KeyError afterward."""
    board = INVARIANT_ENVS[0](Cupboard)
    board.rmkeys()
    with board.pass_arguments(ex=1):
        board['a'] = 'b'
    assert board['a'] == 'b'
    # Wait past the one-second expiry window.
    time.sleep(2)
    with pytest.raises(KeyError):
        _ = board['a']
    board.rmkeys()
def test_nx():
    """With nx=True, setting an existing key must not overwrite it."""
    board = INVARIANT_ENVS[0](Cupboard)
    board.rmkeys()
    with board.pass_arguments(nx=True):
        board['a'] = 'b'
    assert board['a'] == 'b'
    with board.pass_arguments(nx=True):
        board['a'] = 'other'
    # The original value survives the nx write.
    assert board['a'] == 'b'
    board.rmkeys()
| {
"content_hash": "3b3e15cae547fe0deb2acf5f693c0b6f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 17.29268292682927,
"alnum_prop": 0.5881523272214386,
"repo_name": "lukedeo/Cupboard",
"id": "404a0bc6ffcd13078ce7479ba7884a3af74fe739",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "32871"
},
{
"name": "Python",
"bytes": "48133"
},
{
"name": "Shell",
"bytes": "181"
}
],
"symlink_target": ""
} |
"""Client for interacting with the Google Cloud DNS API."""
from google.api_core import page_iterator
from google.api_core import client_options as client_options_mod
from google.cloud.client import ClientWithProject
from google.cloud.dns._http import Connection
from google.cloud.dns.zone import ManagedZone
class Client(ClientWithProject):
    """Bundle the configuration needed to talk to the Cloud DNS API.

    :type project: str
    :param project: (Optional) the project which the client acts on behalf
        of; passed when creating a zone. Falls back to the default inferred
        from the environment when omitted.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) OAuth2 credentials for this client. When
        neither ``credentials`` nor ``_http`` is given, the default
        credentials inferred from the environment are used.

    :type _http: :class:`~requests.Session`
    :param _http: (Optional) HTTP object used to make requests; any object
        exposing ``request()`` with the same interface as
        :meth:`requests.Session.request` works. If not passed, an ``_http``
        object bound to the ``credentials`` is created. This parameter is
        considered private and could change in the future.

    :type client_info: :class:`~google.api_core.client_info.ClientInfo`
    :param client_info: (Optional) client info used to send a user-agent
        string along with API requests. Defaults are used when ``None``.

    :type client_options: :class:`~google.api_core.client_options.ClientOptions`
        or :class:`dict`
    :param client_options: (Optional) client options used to set user options
        on the client; the API endpoint should be set through it.
    """

    SCOPE = ("https://www.googleapis.com/auth/ndev.clouddns.readwrite",)
    """The scopes required for authenticating as a Cloud DNS consumer."""

    def __init__(
        self,
        project=None,
        credentials=None,
        _http=None,
        client_info=None,
        client_options=None,
    ):
        super(Client, self).__init__(
            project=project, credentials=credentials, _http=_http
        )

        connection_kwargs = {"client_info": client_info}
        if client_options:
            # Accept either a ClientOptions instance or a plain mapping.
            if isinstance(client_options, dict):
                client_options = client_options_mod.from_dict(client_options)
            if client_options.api_endpoint:
                connection_kwargs["api_endpoint"] = client_options.api_endpoint
        self._connection = Connection(self, **connection_kwargs)

    def quotas(self):
        """Return DNS quotas for the project associated with this client.

        See https://cloud.google.com/dns/api/v1/projects/get

        :rtype: mapping
        :returns: the ``quota`` sub-mapping of the project resource with all
            ``kind`` entries stripped.
        """
        resp = self._connection.api_request(
            method="GET", path="/projects/%s" % (self.project,)
        )

        quota_info = resp["quota"]
        # The API decorates resources with a "kind" discriminator; drop it so
        # only actual quota values remain.
        # https://cloud.google.com/dns/docs/reference/v1/projects#resource
        quota_info.pop("kind", None)

        # "whitelistedKeySpecs", when present, is a list of key-spec mappings
        # that carry their own "kind" entries.
        for key_spec in quota_info.get("whitelistedKeySpecs", ()):
            key_spec.pop("kind", None)

        return quota_info

    def list_zones(self, max_results=None, page_token=None):
        """List managed zones for the project associated with this client.

        See https://cloud.google.com/dns/api/v1/managedZones/list

        :type max_results: int
        :param max_results: maximum number of zones to return; if not passed,
            defaults to a value chosen by the API.

        :type page_token: str
        :param page_token: (Optional) token for the next batch of zones, as
            returned in a previous response's ``nextPageToken``. Deprecated:
            prefer the ``pages`` property of the returned iterator.

        :rtype: :class:`~google.api_core.page_iterator.Iterator`
        :returns: Iterator of :class:`~google.cloud.dns.zone.ManagedZone`
            belonging to this project.
        """
        return page_iterator.HTTPIterator(
            client=self,
            api_request=self._connection.api_request,
            path="/projects/%s/managedZones" % (self.project,),
            item_to_value=_item_to_zone,
            items_key="managedZones",
            page_token=page_token,
            max_results=max_results,
        )

    def zone(self, name, dns_name=None, description=None):
        """Construct a zone bound to this client.

        :type name: str
        :param name: name of the zone.

        :type dns_name: str
        :param dns_name: (Optional) DNS name of the zone; without it, calls
            to :meth:`zone.create` will fail.

        :type description: str
        :param description: (Optional) description for the zone; defaults to
            the value of ``dns_name``.

        :rtype: :class:`google.cloud.dns.zone.ManagedZone`
        :returns: a new ``ManagedZone`` instance.
        """
        return ManagedZone(name, dns_name, client=self, description=description)
def _item_to_zone(iterator, resource):
    """Convert one managed-zone JSON resource into a ``ManagedZone``.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type resource: dict
    :param resource: An item to be converted to a managed zone.

    :rtype: :class:`.ManagedZone`
    :returns: The next managed zone in the page.
    """
    owning_client = iterator.client
    return ManagedZone.from_api_repr(resource, owning_client)
| {
"content_hash": "29e16814446dec2df1afca09cd3c5600",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 80,
"avg_line_length": 38.853658536585364,
"alnum_prop": 0.6216258631512869,
"repo_name": "googleapis/python-dns",
"id": "780849d7037751a810d9962e0f6066fc9f862055",
"size": "6947",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dns/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "127282"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
"""
Save test plots for all styles defined in `mpltools.style`.
Note that `test_artists_plot` calls `matplotlib.pyplot.tight_layout` so subplot
spacing is not tested for this plot.
"""
from __future__ import print_function
import os
import os.path as pth
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpltools import style
# Directory holding this script; all output folders are created beneath it.
PATH = pth.abspath(pth.dirname(__file__))
TEST_DIRS = ('test_artists_png', 'test_artists_pdf',
             'test_simple_png', 'test_simple_pdf')

# Make sure every output directory exists before any figure is saved.
for dir_name in TEST_DIRS:
    full_path = pth.join(PATH, dir_name)
    if not pth.exists(full_path):
        os.mkdir(full_path)
def test_artists_plot():
    """Build a 2x2 figure exercising lines, patches, images, the color
    cycle, and text labels, so each style can be compared on the same set
    of artists.

    NOTE(review): reads ``plt.rcParams['axes.color_cycle']``, which exists
    only in old matplotlib releases (later replaced by ``axes.prop_cycle``)
    -- confirm the matplotlib version this repo is pinned to.
    """
    fig, axes = plt.subplots(2, 2)
    axes = axes.ravel()
    x = np.linspace(0, 1)
    axes[0].plot(x, np.sin(2*np.pi*x), label='line')
    c = plt.Circle((0.25, 0), radius=0.1, label='patch')
    axes[0].add_patch(c)
    axes[0].grid()
    axes[0].legend()
    img = axes[1].imshow(np.random.random(size=(20, 20)))
    axes[1].set_title('image')
    # Draw one phase-shifted sine per color in the active style's cycle.
    ncolors = len(plt.rcParams['axes.color_cycle'])
    phi = np.linspace(0, 2*np.pi, ncolors + 1)[:-1]
    for p in phi:
        axes[2].plot(x, np.sin(2*np.pi*x + p))
    axes[2].set_title('color cycle')
    axes[3].text(0, 0, 'hello world')
    axes[3].set_xlabel('x-label')
    axes[3].set_ylabel('y-label')
    axes[3].set_title('title')
    try:
        # tight_layout is missing from very old matplotlib versions.
        fig.tight_layout()
    except AttributeError:
        pass
    # `colorbar` should be called after `tight_layout`.
    fig.colorbar(img, ax=axes[1])
    return fig
def test_simple_plot():
    """Return a minimal labelled line plot for quick style comparison."""
    fig, ax = plt.subplots()
    ax.plot([0, 1])
    ax.set_title('title')
    ax.set_xlabel('x-label')
    ax.set_ylabel('y-label')
    return fig
# Only show styles defined by package, not by user.
base_styles = list(style.baselib.keys())
for sty in base_styles:
    # Reset matplotlib defaults before applying each new style so the
    # previous style's settings do not leak into the next plot.
    plt.rcdefaults()
    style.use(sty, use_baselib=True)

    print("Plotting tests for '%s' style" % sty)
    fig = test_artists_plot()
    fig.savefig(pth.join(PATH, 'test_artists_png', sty + '.png'))
    fig.savefig(pth.join(PATH, 'test_artists_pdf', sty + '.pdf'))
    # Close each figure once it is saved; otherwise one open figure
    # accumulates per style and matplotlib holds the memory for all of them.
    plt.close(fig)

    fig = test_simple_plot()
    fig.savefig(pth.join(PATH, 'test_simple_png', sty + '.png'))
    fig.savefig(pth.join(PATH, 'test_simple_pdf', sty + '.pdf'))
    plt.close(fig)
| {
"content_hash": "1293fd6d4f342bee5c39a4327bfc96c4",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 25.582417582417584,
"alnum_prop": 0.6280068728522337,
"repo_name": "matteoicardi/mpltools",
"id": "1cb7f86dff4a8f233d7fd461488f8ccc52fa15ca",
"size": "2328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/plot_all_styles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "144"
},
{
"name": "Python",
"bytes": "73729"
}
],
"symlink_target": ""
} |
# Sample script: fetch the current forecast for Budapest from forecast.io
# (Dark Sky) and decode the JSON response.
import requests

# Budapest
longitude = 19.05
latitude = 47.53

url_template = 'https://api.forecast.io/forecast/{api}/{lat},{lon}'
# NOTE(review): API key is hard-coded in source; move it to an environment
# variable or config file before sharing/committing this script.
apikey = 'e2fd60c047ae3fac95a0618a98a9e5fd'
url = url_template.format(api=apikey, lat=latitude, lon=longitude)
# print url

# Ask the API for SI units instead of the default imperial ones.
params_dict = {
    'units': 'si'
}
r = requests.get(url, params=params_dict)
# Only decode on a 2xx response; otherwise `data` stays undefined.
if r.ok:
    data = r.json()

# `r.json()` is equivalent to json.loads(r.text), demonstrated below:
# import json
# data2 = json.loads(r.text)
# print data == data2
| {
"content_hash": "e0f387d7b5bf95d8b14f0d7d415a56be",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 17.8,
"alnum_prop": 0.6651685393258427,
"repo_name": "Pylvax/code",
"id": "5c362194c59f55fd08a22913df6680d8ff3cbe1d",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http_client/forecast-sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6188"
}
],
"symlink_target": ""
} |
import datetime
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import (
CreateView, DetailView, UpdateView, View, )
from base.views import CurrentSiteMixin, PaginateListView
from .forms import (
BookCreateForm, BookForm,
CollectionCreateForm, CollectionForm,
ItemCreateForm, ItemForm, )
from .models import Book, Collection, Item
from .tasks import compile_book
from .gedcom import GEDCOMWriter
class PublicBookList(LoginRequiredMixin, CurrentSiteMixin, PaginateListView):
    """Paginated listing of every book that was marked public."""

    model = Book
    paginate_by = 12

    def get_queryset(self, *args, **kwargs):
        # Restrict the base queryset to publically visible books.
        base_qs = super(PublicBookList, self).get_queryset(*args, **kwargs)
        return base_qs.filter(public=True)

    def get_context_data(self, **kwargs):
        # Flag the template context as the public listing and set its title.
        ctx = super(PublicBookList, self).get_context_data(**kwargs)
        ctx['public_books'] = True
        ctx['page_title'] = 'Öffentlich verfügbare Bücher'
        return ctx
class UserBookList(LoginRequiredMixin, CurrentSiteMixin, PaginateListView):
    """Paginated listing of the book projects authored by the current user."""

    model = Book
    paginate_by = 12

    def get_queryset(self, *args, **kwargs):
        # Only books the requesting user co-authors.
        base_qs = super(UserBookList, self).get_queryset(*args, **kwargs)
        return base_qs.filter(authors=self.request.user)

    def get_context_data(self, **kwargs):
        ctx = super(UserBookList, self).get_context_data(**kwargs)
        ctx['page_title'] = 'Meine Buchprojekte'
        return ctx
class BookDetail(LoginRequiredMixin, CurrentSiteMixin, UpdateView):
    """Edit view for one book; restricted to the book's authors."""

    model = Book
    form_class = BookForm
    template_name_suffix = '_detail'

    def get_queryset(self):
        # Limit editable books to those authored by the requesting user.
        return super(BookDetail, self).get_queryset().filter(
            authors=self.request.user)

    def form_valid(self, form):
        # Guard clause: refuse the save unless the user is an author.
        if self.request.user not in self.get_object().authors.all():
            raise PermissionDenied
        return super(BookDetail, self).form_valid(form)
class BookCreateView(LoginRequiredMixin, CreateView):
    """Create a new book project owned by the requesting user."""

    model = Book
    form_class = BookCreateForm
    template_name_suffix = '_detail'

    def form_valid(self, form):
        # Persist the book first so the m2m author relation can be set.
        new_book = form.save(commit=False)
        new_book.save()
        new_book.authors.add(self.request.user)
        populate_mode = form.cleaned_data['populate']
        if populate_mode != '-':
            # '-' means "do not pre-fill"; anything else selects a template.
            new_book.populate(populate_mode, form.cleaned_data['reference'])
        return super(BookCreateView, self).form_valid(form)
class CreatePDFView(LoginRequiredMixin, View):
    """Trigger PDF compilation for a book, then return to its detail page."""

    def get(self, *args, **kwargs):
        # pylint: disable=unsubscriptable-object
        book_id = int(self.kwargs['id'])
        compile_book(book_id)
        return HttpResponseRedirect(
            reverse('book-detail', kwargs={'pk': book_id}))
class CollectionDetail(LoginRequiredMixin, CurrentSiteMixin, UpdateView):
    """Edit view for a collection; restricted to authors of its book.

    Besides the regular form fields, the form may carry an ``ordering`` JSON
    payload produced by the drag-and-drop UI which reorders (and implicitly
    deletes) child items and sub-collections.
    """
    model = Collection
    form_class = CollectionForm
    template_name_suffix = '_detail'
    def get_queryset(self):
        # Only collections whose book the requesting user co-authors.
        qs = super(CollectionDetail, self).get_queryset()
        qs = qs.filter(book__authors=self.request.user)
        return qs
    def form_valid(self, form):
        """Save the collection and apply the submitted child ordering.

        ``ordering`` maps to ``{"items": [...], "collections": [...]}``;
        each entry is a string whose first 5 characters are a prefix
        (presumably e.g. ``item-<id>`` -- confirm against the template)
        stripped via ``x[5:]``.  Children listed get their ``position``
        updated; children missing from the payload are deleted.
        """
        if self.request.user in self.get_object().book.authors.all():
            # analyze ordering and reorder/delete items, subcollections
            if form.cleaned_data['ordering']:
                ordering = json.loads(form.cleaned_data['ordering'])
                item_ids = [int(x[5:]) for x in ordering['items']]
                for item in self.get_object().item_set.all():
                    try:
                        # position = index in the submitted ordering
                        item.position = item_ids.index(item.id)
                        item.save()
                    except ValueError:
                        # not listed any more -> removed by the user
                        item.delete()
                collection_ids = [int(x[5:]) for x in ordering['collections']]
                for collection in self.get_object().collection_set.all():
                    try:
                        collection.position = collection_ids.index(
                            collection.id)
                        collection.save()
                    except ValueError:
                        collection.delete()
            return super(CollectionDetail, self).form_valid(form)
        else:
            raise PermissionDenied
    def get_initial(self):
        # Expose the collection's current flags to the form as JSON.
        result = super(CollectionDetail, self).get_initial()
        result.update({'c_flags': self.get_object().get_flags_json(), })
        return result
class CollectionCreateView(LoginRequiredMixin, CreateView):
    """Create a new sub-collection below an existing parent collection."""

    model = Collection
    form_class = CollectionCreateForm
    template_name_suffix = '_detail'

    def form_valid(self, form):
        # Append the new collection at the end of its parent's children.
        new_collection = form.save(commit=False)
        parent = form.cleaned_data['parent']
        new_collection.position = Collection.objects.filter(
            parent=parent).count()
        new_collection.parent = parent
        new_collection.save()
        if form.cleaned_data['model']:
            # A model was chosen, so pre-fill the collection from it.
            new_collection.populate()
        return super(CollectionCreateView, self).form_valid(form)

    def get_initial(self):
        # Seed the form with the parent id and the parent's flags.
        initial = super(CollectionCreateView, self).get_initial()
        # pylint: disable=unsubscriptable-object
        parent = Collection.objects.get(id=int(self.kwargs['parent']))
        initial['parent'] = self.kwargs['parent']
        initial['c_flags'] = parent.get_flags_json()
        return initial
class ItemDetail(LoginRequiredMixin, CurrentSiteMixin, UpdateView):
    """Edit view for one item; restricted to authors of the owning book."""

    model = Item
    form_class = ItemForm
    template_name_suffix = '_detail'

    def get_queryset(self):
        # Only items whose parent collection's book the user co-authors.
        return super(ItemDetail, self).get_queryset().filter(
            parent__book__authors=self.request.user)

    def form_valid(self, form):
        # Guard clause: refuse the save unless the user is an author.
        if self.request.user not in self.get_object().parent.book.authors.all():
            raise PermissionDenied
        return super(ItemDetail, self).form_valid(form)

    def get_initial(self):
        # Expose the item's current flags to the form as JSON.
        initial = super(ItemDetail, self).get_initial()
        initial['c_flags'] = self.get_object().get_flags_json()
        return initial
class ItemCreateView(LoginRequiredMixin, CreateView):
    """Create a new item at the end of an existing parent collection."""

    model = Item
    form_class = ItemCreateForm
    template_name_suffix = '_detail'

    def form_valid(self, form):
        # Append the new item at the end of its parent's children.
        new_item = form.save(commit=False)
        parent = form.cleaned_data['parent']
        new_item.position = Item.objects.filter(parent=parent).count()
        new_item.parent = parent
        new_item.save()
        return super(ItemCreateView, self).form_valid(form)

    def get_initial(self):
        # Seed the form with the parent id and the parent's flags.
        initial = super(ItemCreateView, self).get_initial()
        # pylint: disable=unsubscriptable-object
        parent = Collection.objects.get(id=int(self.kwargs['parent']))
        initial['parent'] = self.kwargs['parent']
        initial['c_flags'] = parent.get_flags_json()
        return initial
class ItemRetrieveText(LoginRequiredMixin, DetailView):
    """Re-generate an item's text from its template, then show the item."""

    model = Item

    def get(self, *args, **kwargs):
        item = self.get_object()
        item.set_text_from_template()
        return HttpResponseRedirect(item.get_absolute_url())
class ExportGEDCOMView(View):
    """Stream a book's genealogical data as a downloadable GEDCOM file."""

    def get(self, *args, **kwargs):
        # pylint: disable=unsubscriptable-object
        book = Book.objects.get(id=self.kwargs['id'])
        # Collect the distinct records reachable from the book's root.
        data = {key: set([]) for key in ('persons', 'families', 'notes',
                                         'events')}
        book.root.get_gedcom_data(data)
        writer = GEDCOMWriter(**data)
        # Date-stamped download name, e.g. data-2020-01-31.ged
        filename = 'data-{0:%Y}-{0:%m}-{0:%d}.ged'.format(
            datetime.datetime.today())
        response = HttpResponse(
            writer.export(), content_type='text/plain; charset=utf8')
        response['Content-Disposition'] =\
            'attachment; filename="{fn}"'.format(fn=filename)
        return response
| {
"content_hash": "2f12867d9cf165eebf28ecffee47b4d1",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 78,
"avg_line_length": 32.3307392996109,
"alnum_prop": 0.613431219159947,
"repo_name": "ugoertz/django-familio",
"id": "f6e6cfa20b9194838de488d6c7cd22aa20ce4a3a",
"size": "8336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61023"
},
{
"name": "HTML",
"bytes": "632961"
},
{
"name": "JavaScript",
"bytes": "1352913"
},
{
"name": "Makefile",
"bytes": "1735"
},
{
"name": "Python",
"bytes": "532976"
},
{
"name": "Shell",
"bytes": "352"
},
{
"name": "TeX",
"bytes": "16522"
}
],
"symlink_target": ""
} |
from math import atan2, cos, radians, sin, sqrt
import re
# Default path to the IGRA station list (NOTE(review): hard-coded NFS path;
# only resolvable on the group's file server).
station_list_search='/nfs/a90/eepdw/Data/Observations/Radiosonde_downloaded_from_NOAA_GUAN/igra-stations.txt'
def StationListParse(station_list_search):
    """Parse an IGRA station-list file into a list of token lists.

    Each line is stripped, multi-word upper-case station names are joined
    with underscores (both single- and double-space separated), and the line
    is then whitespace-split.

    Args:
        station_list_search: path to the station list text file.

    Returns:
        List of lists of strings, one inner list per line of the file.
    """
    station_metadata = []
    # Context manager guarantees the file is closed even if parsing raises
    # (the original opened/closed the handle manually).
    with open(station_list_search, 'r') as f:
        for line in f:
            line = line.strip()
            # Join "FOO BAR" -> "FOO_BAR" so split() keeps names together.
            line = re.sub(r'([A-Z])\s([A-Z])', r'\1_\2', line)
            line = re.sub(r'([A-Z])\s\s([A-Z])', r'\1_\2', line)
            station_metadata.append(line.split())
    return station_metadata
# Module-level table used by StationInfoSearch below.
# NOTE(review): reads the NFS station list at import time; importing this
# module fails off the group's file server.
station_metadata = StationListParse(station_list_search)
def StationInfoSearch(stat):
    """Look up a station by id in the module-level ``station_metadata``.

    Returns ``(name, lat, lon, height)`` for the first row containing the
    stringified ``stat``, where the name has underscores replaced by spaces
    and title-casing applied.  Returns ``None`` implicitly when no row
    matches.
    """
    wanted = "%s" % stat
    for row in station_metadata:
        if wanted in row:
            name = row[2].lower().title().replace('_', ' ')
            lon = float(row[3])
            lat = float(row[4])
            height = float(row[5])
            return name, lat, lon, height
def CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon):
    """Great-circle (haversine) distance in km between two lon/lat points.

    Fixes a latent NameError: ``radians``/``sin``/``cos``/``atan2``/``sqrt``
    were used without ever being imported (the module only imported ``re``).

    Args:
        stat: unused; kept for backwards compatibility with existing callers.
        first_station_lon: reference-point longitude, degrees.
        first_station_lat: reference-point latitude, degrees.
        station_lat: target-point latitude, degrees.
        station_lon: target-point longitude, degrees.

    Returns:
        Distance in kilometres (mean Earth radius taken as 6371 km).
    """
    fslat_rad = radians(first_station_lat)
    fslon_rad = radians(first_station_lon)
    lat_rad = radians(station_lat)
    lon_rad = radians(station_lon)

    # Haversine formula
    a = (sin((lat_rad - fslat_rad) / 2) ** 2
         + cos(lat_rad) * cos(fslat_rad) * sin((lon_rad - fslon_rad) / 2) ** 2)
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    d = 6371 * c
    return d
| {
"content_hash": "dfecce3c444071912d8a687d6439648e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 109,
"avg_line_length": 28.29787234042553,
"alnum_prop": 0.5977443609022557,
"repo_name": "peterwilletts24/Python-Scripts",
"id": "dc1c5b7efe9b22ceb3ef2ba21b7fd93c35e98fa4",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/GeogFunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2242925"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build and return the shared perlek creature template object."""
    mob = Creature()
    mob.template = "object/mobile/shared_perlek.iff"
    mob.attribute_template_id = 9
    mob.stfName("monster_name","perlek")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return mob
"content_hash": "96b9a43082ab655e8266fc8a649d2628",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 21.615384615384617,
"alnum_prop": 0.6868327402135231,
"repo_name": "obi-two/Rebelion",
"id": "fb7f4968392e71e2f67c5df898a5f08dbe5bfeab",
"size": "426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_perlek.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import sqlalchemy as sa
from alembic import op
"""empty message
Revision ID: 30572c3dd213
Revises: None
Create Date: 2016-06-12 15:05:34.114215
"""
# revision identifiers, used by Alembic.
revision = '30572c3dd213'
down_revision = None
def upgrade():
    """Create the initial user / bucket_list / bucket_list_item schema."""
    op.create_table(
        'user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=100), nullable=True),
        sa.Column('password', sa.String(length=20), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username'),
    )
    # bucket_list references user via created_by.
    op.create_table(
        'bucket_list',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('created_by', sa.Integer(), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['created_by'], ['user.id']),
        sa.PrimaryKeyConstraint('id'),
    )
    # bucket_list_item references bucket_list via bucketlist_id.
    op.create_table(
        'bucket_list_item',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('done', sa.Boolean(), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.Column('date_modified', sa.DateTime(), nullable=True),
        sa.Column('bucketlist_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['bucketlist_id'], ['bucket_list.id']),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Drop all tables created by :func:`upgrade`.

    Tables are dropped in reverse dependency order: the original dropped
    ``user`` first, which fails on databases that enforce foreign keys
    because ``bucket_list.created_by`` still references it.
    """
    op.drop_table('bucket_list_item')
    op.drop_table('bucket_list')
    op.drop_table('user')
| {
"content_hash": "d7921c136c3b2603daad6a166b033c2e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 40.018181818181816,
"alnum_prop": 0.5329395729213994,
"repo_name": "andela-hoyeboade/bucketlist-api",
"id": "e8b609ff71107b482d6bdd7778440977baf9b690",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/30572c3dd213_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "40984"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from autogp import util
from . import likelihood
class Softmax(likelihood.Likelihood):
    """Categorical (softmax) likelihood for multi-class classification."""

    def __init__(self, num_samples=2000):
        # num_samples: Monte Carlo samples used by predict() to estimate the
        # expected softmax output and its variance.
        self.num_samples = num_samples

    def log_cond_prob(self, outputs, latent):
        # log p(y | f) = sum_k y_k * f_k - logsumexp_k(f_k), reduced over
        # axis 2.  Assumes `outputs` is one-hot along that axis -- TODO
        # confirm against callers.
        return tf.reduce_sum(outputs * latent, 2) - util.logsumexp(latent, 2)

    def get_params(self):
        # The softmax likelihood has no trainable parameters.
        return []

    def predict(self, latent_means, latent_vars):
        # Generate samples to estimate the expected value and variance of outputs.
        num_points = tf.shape(latent_means)[0]
        output_dims = tf.shape(latent_means)[1]
        # Sample latent function values: mean + std * eps, with an extra
        # leading sample dimension of size num_samples.
        latent = (latent_means + tf.sqrt(latent_vars) *
                   tf.random_normal([self.num_samples, num_points, output_dims]))
        # Compute the softmax of all generated latent values in a stable fashion.
        softmax = tf.exp(latent - tf.expand_dims(util.logsumexp(latent, 2), 2))
        # Estimate the expected value of the softmax and the variance through sampling.
        pred_means = tf.reduce_mean(softmax, 0)
        # Unbiased sample variance (divides by num_samples - 1).
        pred_vars = tf.reduce_sum((softmax - pred_means) ** 2, 0) / (self.num_samples - 1.0)
        return pred_means, pred_vars
| {
"content_hash": "8a7abb190890694dd984b8efc2bb2a44",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 36.96969696969697,
"alnum_prop": 0.6581967213114754,
"repo_name": "ebonilla/AutoGP",
"id": "39cce1228252754b0da42623929a851614749a12",
"size": "1220",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autogp/likelihoods/softmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "85968"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
"""Utilities of visualising an environment."""
from collections import deque
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import gymnasium as gym
from gymnasium import Env, logger
from gymnasium.core import ActType, ObsType
from gymnasium.error import DependencyNotInstalled
from gymnasium.logger import deprecation
try:
import pygame
from pygame import Surface
from pygame.event import Event
from pygame.locals import VIDEORESIZE
except ImportError:
raise gym.error.DependencyNotInstalled(
"Pygame is not installed, run `pip install gymnasium[classic_control]`"
)
try:
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
except ImportError:
logger.warn("Matplotlib is not installed, run `pip install gymnasium[other]`")
matplotlib, plt = None, None
class MissingKeysToAction(Exception):
    """Raised when an environment has no default ``keys_to_action`` mapping.

    Raised by :class:`PlayableGame` and :func:`play` when no mapping is
    supplied and neither the environment nor its unwrapped form implements
    ``get_keys_to_action``.
    """
class PlayableGame:
    """Wraps an environment allowing keyboard inputs to interact with the environment."""

    def __init__(
        self,
        env: Env,
        keys_to_action: Optional[Dict[Tuple[int, ...], int]] = None,
        zoom: Optional[float] = None,
    ):
        """Wraps an environment with a dictionary of keyboard buttons to action and if to zoom in on the environment.

        Args:
            env: The environment to play
            keys_to_action: The dictionary of keyboard tuples and action value
            zoom: If to zoom in on the environment render

        Raises:
            ValueError: If the environment's render mode is not ``rgb_array``
                or ``rgb_array_list``.
        """
        if env.render_mode not in {"rgb_array", "rgb_array_list"}:
            raise ValueError(
                "PlayableGame wrapper works only with rgb_array and rgb_array_list render modes, "
                f"but your environment render_mode = {env.render_mode}."
            )

        self.env = env
        self.relevant_keys = self._get_relevant_keys(keys_to_action)
        self.video_size = self._get_video_size(zoom)
        self.screen = pygame.display.set_mode(self.video_size)
        self.pressed_keys = []
        self.running = True

    def _get_relevant_keys(
        self, keys_to_action: Optional[Dict[Tuple[int], int]] = None
    ) -> set:
        # Collect the set of key codes that can influence the action; used to
        # filter KEYDOWN events.
        if keys_to_action is None:
            if hasattr(self.env, "get_keys_to_action"):
                keys_to_action = self.env.get_keys_to_action()
            elif hasattr(self.env.unwrapped, "get_keys_to_action"):
                keys_to_action = self.env.unwrapped.get_keys_to_action()
            else:
                assert self.env.spec is not None
                raise MissingKeysToAction(
                    f"{self.env.spec.id} does not have explicit key to action mapping, "
                    "please specify one manually"
                )
        assert isinstance(keys_to_action, dict)
        relevant_keys = set(sum((list(k) for k in keys_to_action.keys()), []))
        return relevant_keys

    def _get_video_size(self, zoom: Optional[float] = None) -> Tuple[int, int]:
        # Derive the window size from a rendered frame (last frame for
        # rgb_array_list), optionally scaled by `zoom`.
        rendered = self.env.render()
        if isinstance(rendered, List):
            rendered = rendered[-1]
        assert rendered is not None and isinstance(rendered, np.ndarray)
        video_size = (rendered.shape[1], rendered.shape[0])

        if zoom is not None:
            video_size = (int(video_size[0] * zoom), int(video_size[1] * zoom))

        return video_size

    def process_event(self, event: Event):
        """Processes a PyGame event.

        In particular, this function is used to keep track of which buttons are currently pressed
        and to exit the :func:`play` function when the PyGame window is closed.

        Args:
            event: The event to process
        """
        if event.type == pygame.KEYDOWN:
            if event.key in self.relevant_keys:
                # Guard against key-repeat events appending duplicates,
                # which would corrupt the sorted-tuple action lookup.
                if event.key not in self.pressed_keys:
                    self.pressed_keys.append(event.key)
            elif event.key == pygame.K_ESCAPE:
                self.running = False
        elif event.type == pygame.KEYUP:
            # The key may never have been recorded (e.g. pressed before the
            # window gained focus); an unconditional ``list.remove`` would
            # raise ValueError in that case.
            if event.key in self.pressed_keys:
                self.pressed_keys.remove(event.key)
        elif event.type == pygame.QUIT:
            self.running = False
        elif event.type == VIDEORESIZE:
            self.video_size = event.size
            self.screen = pygame.display.set_mode(self.video_size)
def display_arr(
    screen: Surface, arr: np.ndarray, video_size: Tuple[int, int], transpose: bool
):
    """Displays a numpy array on screen.

    Args:
        screen: The screen to show the array on
        arr: The array to show
        video_size: The video size of the screen
        transpose: If to transpose the array on the screen
    """
    arr_min, arr_max = np.min(arr), np.max(arr)
    if arr_max > arr_min:
        # Normalize to the 0-255 range expected by make_surface.
        arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)
    else:
        # A constant image would divide by zero and produce NaNs;
        # render it as black instead.
        arr = np.zeros_like(arr, dtype=np.float64)
    pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)
    pyg_img = pygame.transform.scale(pyg_img, video_size)
    screen.blit(pyg_img, (0, 0))
def play(
    env: Env,
    transpose: Optional[bool] = True,
    fps: Optional[int] = None,
    zoom: Optional[float] = None,
    callback: Optional[Callable] = None,
    keys_to_action: Optional[Dict[Union[Tuple[Union[str, int]], str], ActType]] = None,
    seed: Optional[int] = None,
    noop: ActType = 0,
):
    """Allows one to play the game using keyboard.

    Example::

        >>> import gymnasium as gym
        >>> from gymnasium.utils.play import play
        >>> play(gym.make("CarRacing-v1", render_mode="rgb_array"), keys_to_action={
        ...     "w": np.array([0, 0.7, 0]),
        ...     "a": np.array([-1, 0, 0]),
        ...     "s": np.array([0, 0, 1]),
        ...     "d": np.array([1, 0, 0]),
        ...     "wa": np.array([-1, 0.7, 0]),
        ...     "dw": np.array([1, 0.7, 0]),
        ...     "ds": np.array([1, 0, 1]),
        ...     "as": np.array([-1, 0, 1]),
        ... }, noop=np.array([0,0,0]))

    Above code works also if the environment is wrapped, so it's particularly useful in
    verifying that the frame-level preprocessing does not render the game
    unplayable.

    If you wish to plot real time statistics as you play, you can use
    :class:`gym.utils.play.PlayPlot`. Here's a sample code for plotting the reward
    for last 150 steps.

        >>> import gymnasium as gym
        >>> def callback(obs_t, obs_tp1, action, rew, terminated, truncated, info):
        ...     return [rew,]
        >>> plotter = PlayPlot(callback, 150, ["reward"])
        >>> play(gym.make("CartPole-v1"), callback=plotter.callback)

    Args:
        env: Environment to use for playing.
        transpose: If this is ``True``, the output of observation is transposed. Defaults to ``True``.
        fps: Maximum number of steps of the environment executed every second. If ``None`` (the default),
            ``env.metadata["render_fps""]`` (or 30, if the environment does not specify "render_fps") is used.
        zoom: Zoom the observation in, ``zoom`` amount, should be positive float
        callback: If a callback is provided, it will be executed after every step. It takes the following input:
            obs_t: observation before performing action
            obs_tp1: observation after performing action
            action: action that was executed
            rew: reward that was received
            terminated: whether the environment is terminated or not
            truncated: whether the environment is truncated or not
            info: debug info
        keys_to_action: Mapping from keys pressed to action performed.
            Different formats are supported: Key combinations can either be expressed as a tuple of unicode code
            points of the keys, as a tuple of characters, or as a string where each character of the string represents
            one key. For example if pressing 'w' and space at the same time is supposed
            to trigger action number 2 then ``key_to_action`` dict could look like this:

                >>> {
                ...    # ...
                ...    (ord('w'), ord(' ')): 2
                ...    # ...
                ... }

            or like this:

                >>> {
                ...    # ...
                ...    ("w", " "): 2
                ...    # ...
                ... }

            or like this:

                >>> {
                ...    # ...
                ...    "w ": 2
                ...    # ...
                ... }

            If ``None``, default ``key_to_action`` mapping for that environment is used, if provided.
        seed: Random seed used when resetting the environment. If None, no seed is used.
        noop: The action used when no key input has been entered, or the entered key combination is unknown.
    """
    env.reset(seed=seed)

    if keys_to_action is None:
        if hasattr(env, "get_keys_to_action"):
            keys_to_action = env.get_keys_to_action()
        elif hasattr(env.unwrapped, "get_keys_to_action"):
            keys_to_action = env.unwrapped.get_keys_to_action()
        else:
            assert env.spec is not None
            raise MissingKeysToAction(
                f"{env.spec.id} does not have explicit key to action mapping, "
                "please specify one manually"
            )
    assert keys_to_action is not None

    # Normalize key combinations to sorted tuples of key codes so they can be
    # matched against the sorted pressed-key tuple below.
    key_code_to_action = {}
    for key_combination, action in keys_to_action.items():
        key_code = tuple(
            sorted(ord(key) if isinstance(key, str) else key for key in key_combination)
        )
        key_code_to_action[key_code] = action

    game = PlayableGame(env, key_code_to_action, zoom)

    if fps is None:
        fps = env.metadata.get("render_fps", 30)

    done, obs = True, None
    clock = pygame.time.Clock()

    while game.running:
        if done:
            done = False
            # ``Env.reset`` returns an ``(observation, info)`` tuple; keep
            # only the observation (the original stored the whole tuple,
            # handing the callback an inconsistent ``prev_obs``).
            obs, _ = env.reset(seed=seed)
        else:
            action = key_code_to_action.get(tuple(sorted(game.pressed_keys)), noop)
            prev_obs = obs
            obs, rew, terminated, truncated, info = env.step(action)
            done = terminated or truncated
            if callback is not None:
                callback(prev_obs, obs, action, rew, terminated, truncated, info)
        if obs is not None:
            rendered = env.render()
            if isinstance(rendered, List):
                rendered = rendered[-1]
            assert rendered is not None and isinstance(rendered, np.ndarray)
            display_arr(
                game.screen, rendered, transpose=transpose, video_size=game.video_size
            )

        # process pygame events
        for event in pygame.event.get():
            game.process_event(event)

        pygame.display.flip()
        clock.tick(fps)
    pygame.quit()
class PlayPlot:
    """Provides a callback to create live plots of arbitrary metrics when using :func:`play`.

    This class is instantiated with a function that accepts information about a single environment transition:
    - obs_t: observation before performing action
    - obs_tp1: observation after performing action
    - action: action that was executed
    - rew: reward that was received
    - terminated: whether the environment is terminated or not
    - truncated: whether the environment is truncated or not
    - info: debug info

    It should return a list of metrics that are computed from this data.
    For instance, the function may look like this::

        >>> def compute_metrics(obs_t, obs_tp, action, reward, terminated, truncated, info):
        ...     return [reward, info["cumulative_reward"], np.linalg.norm(action)]

    :class:`PlayPlot` provides the method :meth:`callback` which will pass its arguments along to that function
    and uses the returned values to update live plots of the metrics.

    Typically, this :meth:`callback` will be used in conjunction with :func:`play` to see how the metrics evolve as you play::

        >>> plotter = PlayPlot(compute_metrics, horizon_timesteps=200,
        ...                    plot_names=["Immediate Rew.", "Cumulative Rew.", "Action Magnitude"])
        >>> play(your_env, callback=plotter.callback)
    """

    def __init__(
        self, callback: callable, horizon_timesteps: int, plot_names: List[str]
    ):
        """Constructor of :class:`PlayPlot`.

        The function ``callback`` that is passed to this constructor should return
        a list of metrics that is of length ``len(plot_names)``.

        Args:
            callback: Function that computes metrics from environment transitions
            horizon_timesteps: The time horizon used for the live plots
            plot_names: List of plot titles

        Raises:
            DependencyNotInstalled: If matplotlib is not installed
        """
        # The whole class is deprecated; warn once at construction time.
        deprecation(
            "`PlayPlot` is marked as deprecated and will be removed in the near future."
        )
        self.data_callback = callback
        self.horizon_timesteps = horizon_timesteps
        self.plot_names = plot_names

        # `plt` is a module-level name; presumably it is set to None when
        # matplotlib failed to import, so the dependency is enforced here.
        if plt is None:
            raise DependencyNotInstalled(
                "matplotlib is not installed, run `pip install gymnasium[other]`"
            )

        num_plots = len(self.plot_names)
        self.fig, self.ax = plt.subplots(num_plots)
        # plt.subplots returns a bare Axes (not a sequence) when asked for a
        # single plot; normalize so the zip below always works.
        if num_plots == 1:
            self.ax = [self.ax]
        for axis, name in zip(self.ax, plot_names):
            axis.set_title(name)

        # t counts transitions seen so far; cur_plot keeps the current
        # scatter artist per metric (so it can be removed before redrawing);
        # data holds a sliding window of the last `horizon_timesteps` values.
        self.t = 0
        self.cur_plot: List[Optional[plt.Axes]] = [None for _ in range(num_plots)]
        self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)]

    def callback(
        self,
        obs_t: ObsType,
        obs_tp1: ObsType,
        action: ActType,
        rew: float,
        terminated: bool,
        truncated: bool,
        info: dict,
    ):
        """The callback that calls the provided data callback and adds the data to the plots.

        Args:
            obs_t: The observation at time step t
            obs_tp1: The observation at time step t+1
            action: The action
            rew: The reward
            terminated: If the environment is terminated
            truncated: If the environment is truncated
            info: The information from the environment
        """
        # Ask the user-supplied function for one value per configured plot.
        points = self.data_callback(
            obs_t, obs_tp1, action, rew, terminated, truncated, info
        )
        for point, data_series in zip(points, self.data):
            data_series.append(point)
        self.t += 1

        # Only the most recent `horizon_timesteps` steps are displayed.
        xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t

        for i, plot in enumerate(self.cur_plot):
            # Remove the previous scatter artist before drawing the new one.
            if plot is not None:
                plot.remove()
            self.cur_plot[i] = self.ax[i].scatter(
                range(xmin, xmax), list(self.data[i]), c="blue"
            )
            self.ax[i].set_xlim(xmin, xmax)

        # Defensive re-check; __init__ already raised if matplotlib is absent.
        if plt is None:
            raise DependencyNotInstalled(
                "matplotlib is not installed, run `pip install gymnasium[other]`"
            )
        # A tiny pause forces matplotlib to process GUI events and redraw.
        plt.pause(0.000001)
| {
"content_hash": "aa37706019c5ad98ba1a8dcf221dbc6c",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 126,
"avg_line_length": 39.42239185750636,
"alnum_prop": 0.5759375201703996,
"repo_name": "Farama-Foundation/Gymnasium",
"id": "65803b4f7b5683fb7584942198b68f9cc367d812",
"size": "15493",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gymnasium/utils/play.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "888"
},
{
"name": "Python",
"bytes": "1188231"
},
{
"name": "Shell",
"bytes": "484"
}
],
"symlink_target": ""
} |
from rpython.rlib import jit
# Driver for RPython's meta-tracing JIT: `self` (the regex tree, constant
# per trace) is green; the loop variables `i` and `s` are red (varying).
jitdriver = jit.JitDriver(reds=["i", "s"], greens=["self"])
class Regex(object):
    """Base class for the mark-shifting (Glushkov-style) regex matchers.

    Matching works by shifting a "mark" bit through the regex tree, one
    input character at a time; subclasses implement `_shift`.
    """

    # RPython hint: `empty` never changes after construction.
    _immutable_fields_ = ["empty"]

    def __init__(self, empty):
        # empty denotes whether the regular expression
        # can match the empty string
        self.empty = empty
        # mark that is shifted through the regex
        self.marked = 0

    def reset(self):
        """ reset all marks in the regular expression """
        self.marked = 0

    def shift(self, c, mark):
        """ shift the mark from left to right, matching character c."""
        # _shift is implemented in the concrete classes
        marked = self._shift(c, mark)
        self.marked = marked
        return marked

    def match(self, s):
        """Match the whole string `s`; returns 1 (matched) or 0.

        An empty input matches iff the regex accepts the empty string.
        """
        if not s:
            return self.empty
        # shift a mark in from the left
        result = self.shift(s[0], 1)
        i = 1
        while i < len(s):
            # Hot-loop marker for the tracing JIT (see `jitdriver` above).
            jitdriver.jit_merge_point(i=i, s=s, self=self)
            # shift the internal marks around
            result = self.shift(s[i], 0)
            i += 1
        # Clear all marks so this regex object can be reused.
        self.reset()
        return result
class Char(Regex):
    """Matches exactly one fixed character."""

    _immutable_fields_ = ["c"]

    def __init__(self, c):
        # A single-character regex can never match the empty string.
        Regex.__init__(self, 0)
        self.c = c

    def _shift(self, c, mark):
        # The incoming mark survives only when the character matches.
        if c == self.c:
            return mark & 1
        return 0
class Epsilon(Regex):
    """Matches only the empty string."""

    def __init__(self):
        # Accepts "" by definition.
        Regex.__init__(self, empty=1)

    def _shift(self, char, mark):
        # Consuming any character kills the mark: epsilon matches no input.
        return 0
class Binary(Regex):
    """Shared base for regex nodes with two children."""

    _immutable_fields_ = ["left", "right"]

    def __init__(self, left, right, empty):
        Regex.__init__(self, empty)
        self.right = right
        self.left = left

    def reset(self):
        # Recursively clear the children, then this node's own mark.
        self.left.reset()
        self.right.reset()
        Regex.reset(self)
class Alternative(Binary):
    """Union: matches whatever either the left or the right child matches."""

    def __init__(self, left, right):
        # The union matches the empty string if either side does.
        Binary.__init__(self, left, right, left.empty | right.empty)

    def _shift(self, c, mark):
        # Feed the mark to both branches (left first, preserving the
        # original side-effect order); a mark in either branch survives.
        return self.left.shift(c, mark) | self.right.shift(c, mark)
class Repetition(Regex):
    """Kleene star: zero or more repetitions of the inner regex."""

    _immutable_fields_ = ["re"]

    def __init__(self, re):
        # Zero repetitions are allowed, so "" always matches.
        Regex.__init__(self, 1)
        self.re = re

    def _shift(self, c, mark):
        # A completed inner match (self.marked from the previous shift)
        # may restart the repetition alongside a freshly entering mark.
        return self.re.shift(c, mark | self.marked)

    def reset(self):
        self.re.reset()
        Regex.reset(self)
class Sequence(Binary):
    """Concatenation: the left sub-regex followed by the right one."""

    def __init__(self, left, right):
        # The concatenation matches "" only if both halves do.
        Binary.__init__(self, left, right, left.empty & right.empty)

    def _shift(self, c, mark):
        # Snapshot the left child's mark *before* shifting it: the right
        # child must see the mark from the previous step, not this one.
        left_was_marked = self.left.marked
        new_left = self.left.shift(c, mark)
        # The right side is entered when the left side just finished, or
        # directly when the left side can match the empty string.
        new_right = self.right.shift(
            c, left_was_marked | (mark & self.left.empty))
        # Matched if the right side matched, or the left side matched and
        # the right side accepts the empty string.
        return (new_left & self.right.empty) | new_right
def make_regex(n):
    """Build the pattern (a|b)* a (a|b)^n a (a|b)*.

    That is: any string over {a, b} containing an 'a', then exactly ``n``
    arbitrary characters, then another 'a'.

    Args:
        n: number of (a|b) atoms between the two anchoring 'a's (n >= 0;
           with n == 0 the middle part is a single (a|b) atom built below).

    Returns:
        The root :class:`Sequence` node of the regex tree.
    """
    def a():
        return Char("a")

    def aorb():
        return Alternative(a(), Char("b"))

    # Renamed from `any` to avoid shadowing the builtin.
    def any_char():
        return Repetition(aorb())

    # Chain n copies of (a|b): one built here plus n-1 added in the loop.
    aorbn = aorb()
    for _ in range(n - 1):
        aorbn = Sequence(aorb(), aorbn)
    return Sequence(Sequence(Sequence(Sequence(any_char(), a()), aorbn), a()), any_char())
# Pattern built once at module (translation) time and reused by main().
# NOTE(review): the name `re` shadows the stdlib `re` module in this file.
re = make_regex(20)
def main(args):
    """Entry point: read all of stdin and match it against the module-level
    regex; prints the input length and the match result (1 or 0).

    This is RPython / Python 2 code (note the print statements), so it
    must not be modernized without re-testing under the translator.
    """
    import os
    chunks = []
    # use os.read to be RPython compatible
    while True:
        # Read stdin (fd 0) in 4 KiB chunks until EOF.
        s = os.read(0, 4096)
        if not s:
            break
        chunks.append(s)
    s = "".join(chunks)
    print len(s)
    print re.match(s)
    # make the jit happier
    print Epsilon().match(s[:4])
    return 0
# needed for the PyPy translation toolchain
def target(*args):
    # The translator calls this to obtain the entry-point function; the
    # second element of the returned tuple is unused here.
    return main, None
def jitpolicy(*args):
    # Hook read by the RPython toolchain: enables JIT code generation
    # when translating with the JIT option.
    from rpython.jit.codewriter.policy import JitPolicy
    return JitPolicy()
# Untranslated (plain interpreter) execution for quick testing.
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| {
"content_hash": "ddd1bb3b36cb92ee2c38497787d263a5",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 81,
"avg_line_length": 26.09722222222222,
"alnum_prop": 0.5620010643959553,
"repo_name": "fhahn/miniregex",
"id": "2da90d0213c679a42117710b67545f1eef77a802",
"size": "3758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jitregex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4913"
},
{
"name": "Java",
"bytes": "9498"
},
{
"name": "Python",
"bytes": "18270"
}
],
"symlink_target": ""
} |
import random
import pylab
import pickle as pkl
import numpy as np
import pandas as pd
from scipy.misc import imread, imresize
from lasagne import layers
from theano.tensor.nnet import softmax
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from scipy.misc import imread as ims
# Absolute paths to the project layout.
# NOTE(review): hard-coded user-specific paths — consider making these
# configurable (environment variable or config file).
script_root = '/home/shraddha/Project/scripts/'
data_root = '/home/shraddha/Project/datasets/'
model_root = '/home/shraddha/Project/models/'
class Recognition(object):
    """Loads the character-image dataset listing and shows random samples."""

    def load_dataset(self):
        """Read the space-separated listing file into ``self.data``.

        Column 0 is used below as an image path relative to the
        ``English/`` dataset directory — TODO confirm against LISTFILE.txt.
        """
        self.data = pd.read_csv(script_root + 'LISTFILE.txt', sep = ' ', header = None)

    def visualize_dataset(self):
        """Load the listing (if needed) and display one random image."""
        # Bug fix: the original called self.Load_dataset() (capital L),
        # which does not exist and raised AttributeError at runtime.
        self.load_dataset()
        # .count() is the number of non-null entries in column 0, so the
        # random index stays within the listed files.
        i = random.randrange(0, self.data[0].count())
        img = ims(data_root + 'English/' + self.data[0][i])
        pylab.imshow(img)
        pylab.show()
| {
"content_hash": "8173c447411b650d4542dae421003c55",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 31.533333333333335,
"alnum_prop": 0.7209302325581395,
"repo_name": "ManasiKhapke/Project",
"id": "07129fe1ad2a74ef9894b1d6fb71bfba075fcab1",
"size": "993",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/packages/recognition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1483102"
},
{
"name": "Python",
"bytes": "7524"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
# from StringIO import StringIO
# import global_functions
# Importing data set
datasetA = pd.read_csv('lc_issue_a.csv', skiprows = [0])
#datasetB = pd.read_csv('lc_rej_a.csv', skiprows = [0])
#dataset = pd.concat([datasetA, datasetB], axis = 0)
# Keep only the features used for modelling plus the label source column.
columnsToRemain = ['loan_amnt', 'term', 'emp_length',
                   'annual_inc', 'int_rate',
                   'grade', 'total_acc', 'loan_status']
dataset = datasetA[columnsToRemain]
# Cleaning data
# Map every LendingClub loan status onto a binary health label
# (1 = healthy, 0 = delinquent/charged off).
# NOTE(review): 'Default' maps to 1 while 'Charged Off' maps to 0 —
# confirm this asymmetry is intentional.
mapStatus = {'Fully Paid' : 1, 'Current' : 1, 'Charged Off' : 0,
             'Default' : 1, 'In Grace Period' : 1,
             'Late (31-120 days)' : 0, 'Late (16-30 days)' : 0,
             'Does not meet the credit policy. Status:Fully Paid' : 1,
             'Does not meet the credit policy. Status:Charged Off' : 0}
# Drop rows that are entirely empty.
datasetC = dataset.dropna(how = 'all')
# Extract the numeric month count from strings like '36 months'.
# NOTE(review): the result is left as a string here (no astype) — verify
# downstream code treats 'term' as numeric.  These in-place column writes
# on a derived frame may trigger pandas' SettingWithCopyWarning.
datasetC['term'] = datasetC['term'].str.extract('(\d\d)', expand=True)
datasetC['emp_length'] = datasetC['emp_length'].str.extract('([0-9]+)', expand=True).astype('float')
# '13.5%' -> 0.135
datasetC['int_rate'] = datasetC['int_rate'].replace('%', '', regex=True).astype('float')/100
datasetC['health'] = datasetC['loan_status'].map(mapStatus)
# Features are all columns except the last two (loan_status, health);
# the label is the final 'health' column.
X = datasetC.iloc[:, 0:-2].values
y = datasetC.iloc[:, -1].values
# Impute missing values in the numeric columns 2, 3 and 6
# (emp_length, annual_inc, total_acc) with the column median.
imputer = Imputer(missing_values = 'NaN', strategy = 'median', axis = 0)
imputer = imputer.fit(X[:, [2, 3, 6]])
X[:, [2, 3, 6]] = imputer.transform(X[:, [2, 3, 6]])
# Encode categorical variables in X and y
# Column 5 is 'grade': label-encode, then one-hot encode it.
labelencoder_X = LabelEncoder()
X[:, 5] = labelencoder_X.fit_transform(X[:, 5])
onehotencoder_X = OneHotEncoder(categorical_features = [5])
X = onehotencoder_X.fit_transform(X).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
# split into train & test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# feature scaling transformation
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# NOTE(review): scaling the binary class labels is unusual, and the test
# labels are never transformed — confirm a scaled y_train is what the
# downstream model expects.
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)
| {
"content_hash": "1eecc9121ec57823529cee7d8e1d0466",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 100,
"avg_line_length": 32.58571428571429,
"alnum_prop": 0.6650591845681718,
"repo_name": "lancezlin/ml_template_py",
"id": "20db4b4dfdd35eb0b4c394fbe27e6cf0f6bd2c9c",
"size": "2300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DecisionTreeReg/dtr_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
} |
"""
split and interactively page a string or file of text
"""
def more(text, numlines=15):
    """Print *text* one page (``numlines`` lines) at a time.

    Between pages the user is prompted; any answer other than 'y'/'Y'
    stops the paging.  Returns None.
    """
    remaining = text.splitlines()  # like split('\n') but no '' at end
    while remaining:
        page, remaining = remaining[:numlines], remaining[numlines:]
        for line in page:
            print(line)
        # Only prompt when there is more left to show.
        if remaining and input('More?') not in ('y', 'Y'):
            break
if __name__ == '__main__':
    import sys                              # when run, not imported
    # Fix: use a context manager so the file is closed deterministically
    # (the original left the file object for the GC to collect).
    with open(sys.argv[1]) as f:
        more(f.read(), 10)                  # page contents of file on cmdline
| {
"content_hash": "d9bf9b823875a6a299acc3a3cc381336",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 34.86666666666667,
"alnum_prop": 0.5544933078393881,
"repo_name": "yubo/program",
"id": "e1c264c3b7d4328daf308dfa3ccbe3dcd9ad65a0",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pp4e/02_system_programming/01_more.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "75417"
},
{
"name": "Awk",
"bytes": "5739"
},
{
"name": "C",
"bytes": "3346469"
},
{
"name": "C++",
"bytes": "867833"
},
{
"name": "CMake",
"bytes": "46139"
},
{
"name": "E",
"bytes": "744"
},
{
"name": "GDB",
"bytes": "86"
},
{
"name": "Gnuplot",
"bytes": "122"
},
{
"name": "Go",
"bytes": "541317"
},
{
"name": "Groovy",
"bytes": "768"
},
{
"name": "HTML",
"bytes": "1706744"
},
{
"name": "Java",
"bytes": "5363664"
},
{
"name": "JavaScript",
"bytes": "30531"
},
{
"name": "Lex",
"bytes": "4326"
},
{
"name": "Lua",
"bytes": "421"
},
{
"name": "M4",
"bytes": "3025"
},
{
"name": "Makefile",
"bytes": "238992"
},
{
"name": "Objective-C",
"bytes": "4254"
},
{
"name": "PHP",
"bytes": "41"
},
{
"name": "Python",
"bytes": "22955"
},
{
"name": "Roff",
"bytes": "271184"
},
{
"name": "Shell",
"bytes": "60487"
},
{
"name": "Smarty",
"bytes": "706"
},
{
"name": "Yacc",
"bytes": "9248"
}
],
"symlink_target": ""
} |
# RoboFont (Python 2) helper script: force-reload the dialog module so
# edits to hTools2 take effect without restarting the editor, then open
# the copy/paste glyph dialog.
import hTools2.dialogs.glyphs.copy_paste
reload(hTools2.dialogs.glyphs.copy_paste)  # reload() is a builtin in Python 2
hTools2.dialogs.glyphs.copy_paste.copyPasteGlyphDialog()
| {
"content_hash": "6ff694df3140135710aa319751bf9ac4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 56,
"avg_line_length": 35.25,
"alnum_prop": 0.8439716312056738,
"repo_name": "gferreira/hTools2_extension",
"id": "86ff1581adfdbc6ba1c6c689e6a0d1e115b08d15",
"size": "161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hTools2.roboFontExt/lib/Scripts/selected glyphs/actions/copy & paste.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18852"
},
{
"name": "HTML",
"bytes": "1477535"
},
{
"name": "JavaScript",
"bytes": "98858"
},
{
"name": "Python",
"bytes": "686182"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.