blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e43bbffa55609202fd7f452a6ba9bdc2d6717f6 | c9a809c5ef2a6b5e7e50da548c182510d203f430 | /salt/modules/win_pki.py | ca17b6d626c1a33c1da8964e62e336fdbe4a1266 | [
"Apache-2.0"
] | permissive | andyyumiao/saltx | 676a44c075ce06d5ac62fc13de6dcd750b3d0d74 | a05c22a60706b5c4389adbd77581b5cf985763b5 | refs/heads/master | 2022-02-24T00:51:42.420453 | 2022-02-09T06:46:40 | 2022-02-09T06:46:40 | 231,860,568 | 1 | 5 | NOASSERTION | 2022-02-09T06:46:40 | 2020-01-05T03:10:15 | Python | UTF-8 | Python | false | false | 16,167 | py | # -*- coding: utf-8 -*-
'''
Microsoft certificate management via the PKI Client PowerShell module.
https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient
The PKI Client PowerShell module is only available on Windows 8+ and Windows
Server 2012+.
https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx
:platform: Windows
:depends:
- PowerShell 4
- PKI Client Module (Windows 8+ / Windows Server 2012+)
.. versionadded:: 2016.11.0
'''
# Import python libs
from __future__ import absolute_import
import json
import logging
# Import salt libs
from salt.exceptions import SaltInvocationError
import ast
import os
import salt.utils
import salt.utils.powershell
# Module-wide defaults for the certificate store location context, the
# on-disk certificate format, and the store name.
_DEFAULT_CONTEXT = 'LocalMachine'
_DEFAULT_FORMAT = 'cer'
_DEFAULT_STORE = 'My'
# Module-level logger.
_LOG = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'win_pki'
def __virtual__():
    '''
    Decide whether this execution module should load.

    Requires a Windows host running Windows 8+ / Windows Server 2012+, with
    PowerShell and the PKI Client PowerShell module available. Returns the
    virtual module name on success, or ``(False, reason)`` otherwise.
    '''
    # Each guard is a lazily-evaluated predicate paired with the reason
    # reported when it fails; evaluation order matches the original checks.
    guards = (
        (lambda: salt.utils.is_windows(),
         'Only available on Windows Systems'),
        (lambda: salt.utils.version_cmp(__grains__['osversion'], '6.2.9200') != -1,
         'Only available on Windows 8+ / Windows Server 2012 +'),
        (lambda: __salt__['cmd.shell_info']('powershell')['installed'],
         'Powershell not available'),
        (lambda: salt.utils.powershell.module_exists('PKI'),
         'PowerShell PKI module not available'),
    )
    for check, reason in guards:
        if not check():
            return False, reason
    return __virtualname__
def _cmd_run(cmd, as_json=False):
    '''
    Run ``cmd`` in PowerShell with the PKI module pre-imported.

    When ``as_json`` is True the command output is piped through
    ConvertTo-Json and decoded before being returned; otherwise the raw
    stdout string is returned.
    '''
    if as_json:
        full_cmd = ('Import-Module -Name PKI; '
                    r'ConvertTo-Json -Compress -Depth 4 -InputObject '
                    r'@({0})'.format(cmd))
    else:
        full_cmd = 'Import-Module -Name PKI; ' + cmd
    cmd_ret = __salt__['cmd.run_all'](
        full_cmd, shell='powershell', python_shell=True)
    if cmd_ret['retcode'] != 0:
        _LOG.error('Unable to execute command: %s\nError: %s', cmd,
                   cmd_ret['stderr'])
    if as_json:
        try:
            return json.loads(cmd_ret['stdout'], strict=False)
        except ValueError:
            # Fall through and hand back the raw stdout for the caller to see.
            _LOG.error('Unable to parse return data as Json.')
    return cmd_ret['stdout']
def _validate_cert_path(name):
    '''
    Raise SaltInvocationError unless ``name`` is a path that exists
    according to PowerShell's Test-Path.
    '''
    exists = ast.literal_eval(
        _cmd_run(cmd=r"Test-Path -Path '{0}'".format(name)))
    if not exists:
        raise SaltInvocationError(r"Invalid path specified: {0}".format(name))
def _validate_cert_format(name):
'''
Ensure that the certificate format, as determind from user input, is valid.
'''
cert_formats = ['cer', 'pfx']
if name not in cert_formats:
message = ("Invalid certificate format '{0}' specified. Valid formats:"
' {1}').format(name, cert_formats)
raise SaltInvocationError(message)
def get_stores():
    '''
    Get the certificate location contexts and their corresponding stores.

    :return: A dictionary of the certificate location contexts and stores.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.get_stores
    '''
    cmd = (r"Get-ChildItem -Path 'Cert:\' | "
           r"Select-Object LocationName, StoreNames")
    items = _cmd_run(cmd=cmd, as_json=True)
    # Map each location context to the list of store names beneath it.
    return {item['LocationName']: [store for store in item['StoreNames']]
            for item in items}
def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    '''
    Get the available certificates in the given store.

    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :return: A dictionary of the certificate thumbprints and properties.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.get_certs
    '''
    store_path = r'Cert:\{0}\{1}'.format(context, store)
    _validate_cert_path(name=store_path)
    cmd = (r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)
           + ' DnsNameList, SerialNumber, Subject, Thumbprint, Version')
    ret = dict()
    for item in _cmd_run(cmd=cmd, as_json=True):
        # Lower-case all property names except DnsNameList, which is
        # flattened into a plain list of unicode names below.
        cert_info = {key.lower(): value for key, value in item.items()
                     if key != 'DnsNameList'}
        cert_info['dnsnames'] = [entry['Unicode']
                                 for entry in item['DnsNameList']]
        ret[item['Thumbprint']] = cert_info
    return ret
def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''):
    '''
    Get the details of the certificate file.

    :param str name: The filesystem path of the certificate file.
    :param str cert_format: The certificate format. Specify 'cer' for X.509, or
        'pfx' for PKCS #12.
    :param str password: The password of the certificate. Only applicable to pfx
        format. Note that if used interactively, the password will be seen by all
        minions. To protect the password, use a state and get the password from
        pillar.
    :return: A dictionary of the certificate thumbprints and properties.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer'
    '''
    ret = dict()
    cmd = list()
    blacklist_keys = ['DnsNameList']
    cert_format = cert_format.lower()
    _validate_cert_format(name=cert_format)
    if not name or not os.path.isfile(name):
        _LOG.error('Path is not present: %s', name)
        return ret
    if cert_format == 'pfx':
        if password:
            # Password-protected pfx: load via the .NET X509Certificate2 API,
            # because Get-PfxCertificate would prompt interactively.
            cmd.append('$CertObject = New-Object')
            cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')
            cmd.append(r" $CertObject.Import('{0}'".format(name))
            cmd.append(",'{0}'".format(password))
            cmd.append(",'DefaultKeySet') ; $CertObject")
            cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                       'Thumbprint, Version')
        else:
            cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name))
            cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                       'Thumbprint, Version')
    else:
        cmd.append('$CertObject = New-Object')
        cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')
        cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name))
        cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                   'Thumbprint, Version')
    items = _cmd_run(cmd=str().join(cmd), as_json=True)
    for item in items:
        for key in item:
            if key not in blacklist_keys:
                ret[key.lower()] = item[key]
        # FIX: use a distinct comprehension variable. Under Python 2 a
        # list-comprehension variable leaks into the enclosing scope, so the
        # original ``for name in item['DnsNameList']`` clobbered the ``name``
        # parameter, making the log messages below report a DNS-entry dict
        # instead of the file path.
        ret['dnsnames'] = [dns_entry['Unicode']
                           for dns_entry in item['DnsNameList']]
    if ret:
        _LOG.debug('Certificate thumbprint obtained successfully: %s', name)
    else:
        _LOG.error('Unable to obtain certificate thumbprint: %s', name)
    return ret
def import_cert(name,
                cert_format=_DEFAULT_FORMAT,
                context=_DEFAULT_CONTEXT,
                store=_DEFAULT_STORE,
                exportable=True,
                password='',
                saltenv='base'):
    '''
    Import the certificate file into the given certificate store.

    :param str name: The path of the certificate file to import.
    :param str cert_format: The certificate format. Specify 'cer' for X.509, or
        'pfx' for PKCS #12.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param bool exportable: Mark the certificate as exportable. Only applicable
        to pfx format.
    :param str password: The password of the certificate. Only applicable to pfx
        format. Note that if used interactively, the password will be seen by all
        minions. To protect the password, use a state and get the password from
        pillar.
    :param str saltenv: The environment the file resides in.
    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.import_cert name='salt://cert.cer'
    '''
    cmd = list()
    thumbprint = None
    store_path = r'Cert:\{0}\{1}'.format(context, store)
    cert_format = cert_format.lower()
    _validate_cert_format(name=cert_format)
    # Fetch the (possibly remote, e.g. salt://) source file to the minion cache.
    cached_source_path = __salt__['cp.cache_file'](name, saltenv)
    if not cached_source_path:
        _LOG.error('Unable to get cached copy of file: %s', name)
        return False
    if password:
        cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password)
    else:
        cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format)
    current_certs = get_certs(context=context, store=store)
    # NOTE(review): if get_cert_file failed it returns an empty dict, and this
    # lookup raises KeyError rather than returning False — confirm intended.
    if cert_props['thumbprint'] in current_certs:
        # Idempotent short-circuit: the certificate is already installed.
        _LOG.debug("Certificate thumbprint '%s' already present in store: %s",
                   cert_props['thumbprint'], store_path)
        return True
    if cert_format == 'pfx':
        # In instances where an empty password is needed, we use a
        # System.Security.SecureString object since ConvertTo-SecureString will
        # not convert an empty string.
        if password:
            cmd.append(r"$Password = ConvertTo-SecureString "
                       r"-String '{0}'".format(password))
            cmd.append(' -AsPlainText -Force; ')
        else:
            cmd.append('$Password = New-Object System.Security.SecureString; ')
        cmd.append(r"Import-PfxCertificate "
                   r"-FilePath '{0}'".format(cached_source_path))
        cmd.append(r" -CertStoreLocation '{0}'".format(store_path))
        cmd.append(r" -Password $Password")
        if exportable:
            cmd.append(' -Exportable')
    else:
        cmd.append(r"Import-Certificate "
                   r"-FilePath '{0}'".format(cached_source_path))
        cmd.append(r" -CertStoreLocation '{0}'".format(store_path))
    _cmd_run(cmd=str().join(cmd))
    # Success is detected by diffing the store contents before and after the
    # import rather than by parsing the PowerShell output.
    new_certs = get_certs(context=context, store=store)
    for new_cert in new_certs:
        if new_cert not in current_certs:
            thumbprint = new_cert
    if thumbprint:
        _LOG.debug('Certificate imported successfully: %s', name)
        return True
    _LOG.error('Unable to import certificate: %s', name)
    return False
def export_cert(name,
                thumbprint,
                cert_format=_DEFAULT_FORMAT,
                context=_DEFAULT_CONTEXT,
                store=_DEFAULT_STORE,
                password=''):
    '''
    Export the certificate to a file from the given certificate store.

    :param str name: The destination path for the exported certificate file.
    :param str thumbprint: The thumbprint value of the target certificate.
    :param str cert_format: The certificate format. Specify 'cer' for X.509, or
        'pfx' for PKCS #12.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param str password: The password of the certificate. Only applicable to pfx
        format. Note that if used interactively, the password will be seen by all
        minions. To protect the password, use a state and get the password from
        pillar.
    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000'
    '''
    cmd = list()
    # Thumbprints are stored upper-case in the Cert: drive paths.
    thumbprint = thumbprint.upper()
    cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint)
    cert_format = cert_format.lower()
    _validate_cert_path(name=cert_path)
    _validate_cert_format(name=cert_format)
    if cert_format == 'pfx':
        # In instances where an empty password is needed, we use a
        # System.Security.SecureString object since ConvertTo-SecureString will
        # not convert an empty string.
        if password:
            cmd.append(r"$Password = ConvertTo-SecureString "
                       r"-String '{0}'".format(password))
            cmd.append(' -AsPlainText -Force; ')
        else:
            cmd.append('$Password = New-Object System.Security.SecureString; ')
        cmd.append(r"Export-PfxCertificate "
                   r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name))
        cmd.append(r" -Password $Password")
    else:
        cmd.append(r"Export-Certificate "
                   r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name))
    # Suppress the cmdlet output, then report success by testing that the
    # destination file now exists; Test-Path prints True/False, which
    # ast.literal_eval turns into a bool.
    cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name))
    ret = ast.literal_eval(_cmd_run(cmd=str().join(cmd)))
    if ret:
        _LOG.debug('Certificate exported successfully: %s', name)
    else:
        _LOG.error('Unable to export certificate: %s', name)
    return ret
def test_cert(thumbprint,
              context=_DEFAULT_CONTEXT,
              store=_DEFAULT_STORE,
              untrusted_root=False,
              dns_name='',
              eku=''):
    '''
    Check the certificate for validity via Test-Certificate.

    :param str thumbprint: The thumbprint value of the target certificate.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param bool untrusted_root: Whether the root certificate is required to be
        trusted in chain building.
    :param str dns_name: The DNS name to verify as valid for the certificate.
    :param str eku: The enhanced key usage object identifiers to verify for the
        certificate chain.
    :return: A boolean representing whether the certificate was considered
        valid.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test'
    '''
    cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint.upper())
    parts = [r"Test-Certificate -Cert '{0}'".format(cert_path)]
    _validate_cert_path(name=cert_path)
    # Optional switches are appended only when requested.
    if untrusted_root:
        parts.append(' -AllowUntrustedRoot')
    if dns_name:
        parts.append(" -DnsName '{0}'".format(dns_name))
    if eku:
        parts.append(" -EKU '{0}'".format(eku))
    parts.append(' -ErrorAction SilentlyContinue')
    # Test-Certificate prints True/False; literal_eval converts it to a bool.
    return ast.literal_eval(_cmd_run(cmd=''.join(parts)))
def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    '''
    Remove the certificate from the given certificate store.

    :param str thumbprint: The thumbprint value of the target certificate.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.remove_cert thumbprint='AAA000'
    '''
    # Thumbprints are stored upper-case in the Cert: drive paths.
    thumbprint = thumbprint.upper()
    store_path = r'Cert:\{0}\{1}'.format(context, store)
    cert_path = r'{0}\{1}'.format(store_path, thumbprint)
    cmd = r"Remove-Item -Path '{0}'".format(cert_path)
    current_certs = get_certs(context=context, store=store)
    # Idempotent short-circuit: nothing to do if the cert is already gone.
    if thumbprint not in current_certs:
        _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint,
                   store_path)
        return True
    _validate_cert_path(name=cert_path)
    _cmd_run(cmd=cmd)
    # Verify removal by re-reading the store rather than trusting the
    # command's own output.
    new_certs = get_certs(context=context, store=store)
    if thumbprint in new_certs:
        _LOG.error('Unable to remove certificate: %s', cert_path)
        return False
    _LOG.debug('Certificate removed successfully: %s', cert_path)
    return True
| [
"yumiao3@jd.com"
] | yumiao3@jd.com |
d8485188dea7802b0cc8718c8456babc2e37dbd4 | ae78ed1ea1cac59ccc9a9b50e4920c74947ef569 | /pretrained-model/alxlnet/xlnet.py | 1f6c2eacf276f873e0bd845c0229fe2d4aad797c | [
"MIT",
"Python-2.0"
] | permissive | kheeirul96/Malaya | d4aafd8844f5a131854a5ede18068ba829b229a4 | ceb7de556cd1077efa749e7e369ffd75b1a0dfd1 | refs/heads/master | 2020-08-20T15:47:36.990840 | 2019-10-18T03:50:58 | 2019-10-18T03:50:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,200 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow as tf
import modeling
def _get_initializer(FLAGS):
"""Get variable intializer."""
if FLAGS.init == 'uniform':
initializer = tf.initializers.random_uniform(
minval = -FLAGS.init_range, maxval = FLAGS.init_range, seed = None
)
elif FLAGS.init == 'normal':
initializer = tf.initializers.random_normal(
stddev = FLAGS.init_std, seed = None
)
else:
raise ValueError('Initializer {} not supported'.format(FLAGS.init))
return initializer
class XLNetConfig(object):
    """Checkpoint-level hyperparameters.

    These values must be identical between pretraining and finetuning:
    n_layer (layer count), d_model (hidden size), n_head (attention heads),
    d_head (per-head dimension), d_inner (feed-forward hidden size),
    ff_activation ("relu" or "gelu"), untie_r (untie attention biases),
    n_token (vocab size).
    """

    def __init__(self, FLAGS = None, json_path = None):
        """Build an XLNetConfig from FLAGS and/or a JSON file.

        At least one of ``FLAGS`` / ``json_path`` must be given; when both
        are given, the JSON values are applied last and win.
        """
        assert FLAGS is not None or json_path is not None
        self.keys = [
            'n_layer',
            'd_model',
            'n_head',
            'd_head',
            'd_inner',
            'ff_activation',
            'untie_r',
            'n_token',
        ]
        if FLAGS is not None:
            self.init_from_flags(FLAGS)
        if json_path is not None:
            self.init_from_json(json_path)

    def init_from_flags(self, FLAGS):
        """Copy each known hyperparameter from a FLAGS object."""
        for key in self.keys:
            setattr(self, key, getattr(FLAGS, key))

    def init_from_json(self, json_path):
        """Load the known hyperparameters from a JSON file."""
        with tf.gfile.Open(json_path) as f:
            json_data = json.load(f)
        for key in self.keys:
            setattr(self, key, json_data[key])

    def to_json(self, json_path):
        """Save XLNetConfig to a json file, creating the directory if needed."""
        json_data = {key: getattr(self, key) for key in self.keys}
        json_dir = os.path.dirname(json_path)
        if not tf.gfile.Exists(json_dir):
            tf.gfile.MakeDirs(json_dir)
        with tf.gfile.Open(json_path, 'w') as f:
            json.dump(json_data, f, indent = 4, sort_keys = True)
def create_run_config(is_training, is_finetune, FLAGS):
    """Build a RunConfig from command-line FLAGS.

    Memory / bidirectional-data options only apply during pretraining, so
    they are added only when ``is_finetune`` is False.
    """
    kwargs = {
        'is_training': is_training,
        'use_tpu': FLAGS.use_tpu,
        'use_bfloat16': FLAGS.use_bfloat16,
        'dropout': FLAGS.dropout,
        'dropatt': FLAGS.dropatt,
        'init': FLAGS.init,
        'init_range': FLAGS.init_range,
        'init_std': FLAGS.init_std,
        'clamp_len': FLAGS.clamp_len,
    }
    if not is_finetune:
        kwargs['mem_len'] = FLAGS.mem_len
        kwargs['reuse_len'] = FLAGS.reuse_len
        kwargs['bi_data'] = FLAGS.bi_data
        kwargs['clamp_len'] = FLAGS.clamp_len
        kwargs['same_length'] = FLAGS.same_length
    return RunConfig(**kwargs)
class RunConfig(object):
    """Run-level hyperparameters.

    Unlike XLNetConfig, these may differ between pretraining and finetuning
    and from run to run, so they are kept in a separate object.
    """

    def __init__(
        self,
        is_training,
        use_tpu,
        use_bfloat16,
        dropout,
        dropatt,
        init = 'normal',
        init_range = 0.1,
        init_std = 0.02,
        mem_len = None,
        reuse_len = None,
        bi_data = False,
        clamp_len = -1,
        same_length = False,
    ):
        """
        Args:
          is_training: bool, whether in training mode.
          use_tpu: bool, whether TPUs are used.
          use_bfloat16: bool, use bfloat16 instead of float32.
          dropout: float, dropout rate.
          dropatt: float, dropout rate on attention probabilities.
          init: str, initialization scheme, either "normal" or "uniform".
          init_range: float, uniform-init range [-init_range, init_range];
            only effective when init="uniform".
          init_std: float, normal-init stddev; only effective when
            init="normal".
          mem_len: int, the number of tokens to cache.
          reuse_len: int, number of tokens in the current batch to cache and
            reuse in the future.
          bi_data: bool, whether to use the bidirectional input pipeline
            (usually True for pretraining, False for finetuning).
          clamp_len: int, clamp relative distances larger than this;
            -1 means no clamping.
          same_length: bool, use the same attention length for each token.
        """
        # Initialization scheme.
        self.init = init
        self.init_range = init_range
        self.init_std = init_std
        # Training mode and hardware options.
        self.is_training = is_training
        self.use_tpu = use_tpu
        self.use_bfloat16 = use_bfloat16
        # Regularization.
        self.dropout = dropout
        self.dropatt = dropatt
        # Memory / attention layout.
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
class XLNetModel(object):
    """A wrapper of the XLNet model used during both pretraining and finetuning."""

    def __init__(
        self,
        xlnet_config,
        run_config,
        input_ids,
        seg_ids,
        input_mask,
        mems = None,
        perm_mask = None,
        target_mapping = None,
        inp_q = None,
        **kwargs
    ):
        """Build the transformer-XL graph and keep handles to its outputs.

        Args:
          xlnet_config: XLNetConfig.
          run_config: RunConfig.
          input_ids: int32 Tensor in shape [len, bsz], the input token IDs.
          seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs.
          input_mask: float32 Tensor in shape [len, bsz], the input mask.
            0 for real tokens and 1 for padding.
          mems: a list of float32 Tensors in shape [mem_len, bsz, d_model],
            memory from previous batches. The length of the list equals
            n_layer. If None, no memory is used.
          perm_mask: float32 Tensor in shape [len, len, bsz].
            If perm_mask[i, j, k] = 0, i attend to j in batch k;
            if perm_mask[i, j, k] = 1, i does not attend to j in batch k.
            If None, each position attends to all the others.
          target_mapping: float32 Tensor in shape [num_predict, len, bsz].
            If target_mapping[i, j, k] = 1, the i-th predict in batch k is
            on the j-th token. Only used during pretraining for partial
            prediction. Set to None during finetuning.
          inp_q: float32 Tensor in shape [len, bsz].
            1 for tokens with losses and 0 for tokens without losses.
            Only used during pretraining for two-stream attention.
            Set to None during finetuning.
        """
        # NOTE(review): the initializer is derived from run_config here (not
        # from command-line FLAGS) — run_config carries the same init fields.
        initializer = _get_initializer(run_config)
        # Static (architecture + run) arguments for modeling.transformer_xl.
        tfm_args = dict(
            n_token = xlnet_config.n_token,
            initializer = initializer,
            attn_type = 'bi',
            n_layer = xlnet_config.n_layer,
            d_model = xlnet_config.d_model,
            n_head = xlnet_config.n_head,
            d_head = xlnet_config.d_head,
            d_inner = xlnet_config.d_inner,
            ff_activation = xlnet_config.ff_activation,
            untie_r = xlnet_config.untie_r,
            is_training = run_config.is_training,
            use_bfloat16 = run_config.use_bfloat16,
            use_tpu = run_config.use_tpu,
            dropout = run_config.dropout,
            dropatt = run_config.dropatt,
            mem_len = run_config.mem_len,
            reuse_len = run_config.reuse_len,
            bi_data = run_config.bi_data,
            clamp_len = run_config.clamp_len,
            same_length = run_config.same_length,
        )
        # Per-batch tensor inputs, merged into the same kwargs dict.
        input_args = dict(
            inp_k = input_ids,
            seg_id = seg_ids,
            input_mask = input_mask,
            mems = mems,
            perm_mask = perm_mask,
            target_mapping = target_mapping,
            inp_q = inp_q,
        )
        tfm_args.update(input_args)
        with tf.variable_scope('model', reuse = tf.AUTO_REUSE):
            # transformer_xl returns two lookup tables here — presumably the
            # ALXLNet variant's separate embedding tables; confirm against
            # modeling.transformer_xl.
            (
                self.output,
                self.new_mems,
                self.lookup_table,
                self.lookup_table_2,
            ) = modeling.transformer_xl(**tfm_args)
        self.input_mask = input_mask
        self.initializer = initializer
        self.xlnet_config = xlnet_config
        self.run_config = run_config

    def get_pooled_out(self, summary_type, use_summ_proj = True):
        """
        Args:
          summary_type: str, "last", "first", "mean", or "attn". The method
            to pool the input to get a vector representation.
          use_summ_proj: bool, whether to use a linear projection during
            pooling.
        Returns:
          float32 Tensor in shape [bsz, d_model], the pooled representation.
        """
        xlnet_config = self.xlnet_config
        run_config = self.run_config
        # Reuse the same 'model' scope so the summary variables are shared
        # with (and checkpointed alongside) the transformer variables.
        with tf.variable_scope('model', reuse = tf.AUTO_REUSE):
            summary = modeling.summarize_sequence(
                summary_type = summary_type,
                hidden = self.output,
                d_model = xlnet_config.d_model,
                n_head = xlnet_config.n_head,
                d_head = xlnet_config.d_head,
                dropout = run_config.dropout,
                dropatt = run_config.dropatt,
                is_training = run_config.is_training,
                input_mask = self.input_mask,
                initializer = self.initializer,
                use_proj = use_summ_proj,
            )
        return summary

    def get_sequence_output(self):
        """
        Returns:
          float32 Tensor in shape [len, bsz, d_model]. The last layer hidden
          representation of XLNet.
        """
        return self.output

    def get_new_memory(self):
        """
        Returns:
          list of float32 Tensors in shape [mem_len, bsz, d_model], the new
          memory that concatenates the previous memory with the current input
          representations. The length of the list equals n_layer.
        """
        return self.new_mems

    def get_embedding_table(self):
        """
        Returns:
          float32 Tensor in shape [n_token, d_model]. The first embedding
          lookup table. Used for tying embeddings between input and output
          layers.
        """
        return self.lookup_table

    def get_embedding_table2(self):
        """
        Returns:
          float32 Tensor. The second embedding lookup table returned by
          modeling.transformer_xl (shape not established here — confirm
          against modeling.transformer_xl).
        """
        return self.lookup_table_2

    def get_initializer(self):
        """
        Returns:
          A tf initializer. Used to initialize variables in layers on top of
          XLNet.
        """
        return self.initializer
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
a45c2d75d6cea18de733b89e8760a8a7a827b8f0 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/concat82/728-tideGauge.py | eefcf25d50aba7a69c82559eeee6d526db001363 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd
#%% define directories
# Concatenate yearly per-predictor CSVs (slp, wnd_u, wnd_v) for one tide
# gauge (index range [x, y)) and write one combined CSV per predictor under
# out_path/<tg>/<pred>.csv. Pure filesystem side effects; no functions.
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'
#cd to the home dir to get TG information
os.chdir(home)
tg_list = os.listdir()
x = 728
y = 729
#looping through TGs
for t in range(x, y):
    tg = tg_list[t]
    print(tg)
    #concatenate folder paths
    os.chdir(os.path.join(home, tg))
    #defining the folders for predictors
    #choose only u, v, and slp
    where = os.getcwd()
    csv_path = {'slp' : os.path.join(where, 'slp'),\
                "wnd_u": os.path.join(where, 'wnd_u'),\
                'wnd_v' : os.path.join(where, 'wnd_v')}
    #%%looping through predictors
    for pred in csv_path.keys():
        os.chdir(os.path.join(home, tg))
        # print(tg, ' ', pred, '\n')
        #cd to the chosen predictor
        os.chdir(pred)
        #%%looping through the yearly csv files
        # NOTE(review): if a predictor directory is empty, `dat` is never
        # bound and the print/to_csv below raise NameError — confirm inputs.
        count = 1
        for yr in os.listdir():
            print(pred, ' ', yr)
            if count == 1:
                dat = pd.read_csv(yr)
                # print('original size is: {}'.format(dat.shape))
            else:
                #remove the header of the subsequent csvs before merging
                # dat_yr = pd.read_csv(yr, header=None).iloc[1:,:]
                dat_yr = pd.read_csv(yr)
                dat_yr.shape
                dat = pd.concat([dat, dat_yr], axis = 0)
                # print('concatenated size is: {}'.format(dat.shape))
            count+=1
        print(dat.shape)
        #saving concatenated predictor
        #cd to the saving location
        os.chdir(out_path)
        #create/cd to the tg folder
        try:
            os.makedirs(tg)
            os.chdir(tg) #cd to it after creating it
        except FileExistsError:
            #directory already exists
            os.chdir(tg)
        #save as csv
        pred_name = '.'.join([pred, 'csv'])
        dat.to_csv(pred_name)
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
6d907810b817e314675bd275c3313d21111ee5a3 | 781e2692049e87a4256320c76e82a19be257a05d | /intervention/results/control_120802_1449613710_39_13.75.py | cb40cf5e2e0afecd0c7c8544b6db900f76291663 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 227 | py |
def num_common_letters(goal_word, guess):
    """Return the number of distinct letters shared by goal_word and guess.

    Each letter counts at most once regardless of how often it appears in
    either word; empty inputs yield 0.
    """
    # Set intersection expresses "distinct shared letters" directly,
    # replacing the original's manual cache/add/remove bookkeeping (which
    # also performed a redundant membership test before set.add).
    return len(set(goal_word) & set(guess))
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
869ca60250a2409dbfa492cac38092f9ed2dd74c | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /srm/651-675/660/Coversta.py | 3322d7823065e870dd37931908d60355b24de092 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #SRM660 Div1 Easy
import math,string,itertools as it,fractions,heapq,collections as collect,re,array,bisect,random,sys
class Coversta:
    # TopCoder SRM 660 Div1 Easy. Python 2 code: relies on xrange, and on
    # map()/zip() returning lists.
    def place(self, a, x, y):
        """Return the best combined coverage value of two station placements.

        a: list of digit strings (the value grid); x, y: parallel lists of
        coverage offsets relative to a station's cell. Picks two distinct
        positions maximizing total covered value, counting overlap once.
        """
        n, m, k = len(a), len(a[0]), len(x)
        # Convert each row of digit characters to a list of ints (py2 map).
        a = [map(int, list(a[i])) for i in xrange(n)]
        # Coverage offsets as (dw, dr) pairs; reused below (py2 zip -> list).
        dwr = zip(x, y)
        scores = []
        # Score every cell: sum of in-bounds covered values for one station.
        for w in xrange(n):
            for r in xrange(m):
                tmp = 0
                for dw, dr in dwr:
                    nw, nr = w + dw, r + dr
                    if 0 <= nw < n and 0 <= nr < m:
                        tmp += a[nw][nr]
                scores.append([tmp, w, r])
        scores = sorted(scores, key = lambda z:z[0], reverse = True)
        ans = 0
        # Only the top k*k+1 single-station scores need be considered: two
        # stations overlap in at most k*k cells, so an optimal pair lies
        # within this candidate set. Try all pairs among the candidates,
        # subtracting values covered by both so overlap counts once.
        for i in xrange(min(k * k + 1, n * m)):
            s, w, r = scores[i]
            cover1 = set((w + dw, r + dr) for dw, dr in dwr)
            for j in xrange(i + 1, min(k * k + 1, n * m)):
                ss, ww, rr = scores[j]
                cover2 = set((ww + dw, rr + dr) for dw, dr in dwr)
                tmp = s + ss
                for p, q in cover1 & cover2:
                    if 0 <= p < n and 0 <= q < m:
                        tmp -= a[p][q]
                ans = max(ans, tmp)
        return ans
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
4edc855bd5476a2b9bcab34e8779b09af4dec769 | 485816a0a8b86818e4f2cefec517e6316e2252d6 | /posthog/api/plugin_log_entry.py | 45e70682d86c77170aeadcbe9881f5cb8d48ac7f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abhijitghate/posthog | 3647443274aee6431e7fecf6902644a9fa7eb9d8 | 68dc4d2730600efb00d3708fb7fba70d85612760 | refs/heads/master | 2023-04-19T15:17:25.033992 | 2021-05-13T09:48:59 | 2021-05-13T09:48:59 | 279,130,099 | 1 | 0 | MIT | 2020-07-12T19:04:15 | 2020-07-12T19:04:14 | null | UTF-8 | Python | false | false | 2,275 | py | from typing import Optional
from django.utils import timezone
from rest_framework import exceptions, generics, mixins, request, serializers, status, viewsets
from rest_framework.permissions import IsAuthenticated
from posthog.api.plugin import PluginOwnershipPermission, PluginsAccessLevelPermission
from posthog.api.routing import StructuredViewSetMixin
from posthog.models.plugin import PluginLogEntry, fetch_plugin_log_entries
from posthog.permissions import ProjectMembershipNecessaryPermissions
class PluginLogEntrySerializer(serializers.ModelSerializer):
    """Read-only serializer exposing PluginLogEntry rows to the API."""

    class Meta:
        model = PluginLogEntry
        fields = ["id", "team_id", "plugin_id", "timestamp", "source", "type", "message", "instance_id"]
        # All fields are read-only: log entries are never created via the API.
        read_only_fields = fields
class PluginLogEntryViewSet(StructuredViewSetMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
    """List-only endpoint for a plugin config's log entries.

    Supports ``limit``, ``after``/``before`` (ISO-8601, 'Z' suffix allowed)
    and ``search`` query parameters; scoping comes from the nested route
    (team_id / plugin_config_id) via StructuredViewSetMixin.
    """

    serializer_class = PluginLogEntrySerializer
    permission_classes = [
        IsAuthenticated,
        ProjectMembershipNecessaryPermissions,
        PluginsAccessLevelPermission,
        PluginOwnershipPermission,
    ]

    @staticmethod
    def _parse_iso_datetime(raw: Optional[str]) -> Optional[timezone.datetime]:
        """Parse an ISO-8601 query param value; None when the param is absent.

        Accepts a trailing 'Z' by translating it to '+00:00' for
        ``fromisoformat``. The duplicated after/before parsing of the
        original is consolidated here.
        """
        if raw is None:
            return None
        return timezone.datetime.fromisoformat(raw.replace("Z", "+00:00"))

    def get_queryset(self):
        limit_raw = self.request.GET.get("limit")
        limit: Optional[int] = None
        if limit_raw:
            try:
                limit = int(limit_raw)
            except ValueError:
                raise exceptions.ValidationError("Query param limit must be omitted or an integer!")

        after = self._parse_iso_datetime(self.request.GET.get("after"))
        before = self._parse_iso_datetime(self.request.GET.get("before"))

        parents_query_dict = self.get_parents_query_dict()
        return fetch_plugin_log_entries(
            team_id=parents_query_dict["team_id"],
            plugin_config_id=parents_query_dict["plugin_config_id"],
            after=after,
            before=before,
            search=self.request.GET.get("search"),
            limit=limit,
        )
| [
"noreply@github.com"
] | abhijitghate.noreply@github.com |
3aba8ae13b2626bfcc8c03954c7a12d1b3c28e99 | 46265d6f2804c9f74d6dc25723ec62ae349b5dc3 | /manage.py | a65d7a8de7f58dd811bb3b452a092a6fb8ae07f0 | [] | no_license | mdShakilHossainNsu2018/eCommerce_site | ea68c98f3d55ccbade29bbc29598c4d72fdccbec | 8ac90e84e7ba699f54f880c863de30806994d960 | refs/heads/master | 2022-10-26T00:56:39.944312 | 2020-06-16T07:56:04 | 2020-06-16T07:56:04 | 268,805,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative tasks for this project."""
    # Point Django at this project's settings module unless the caller
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eCommerce_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"shakilnsu2018@gmail.com"
] | shakilnsu2018@gmail.com |
3049a52f0e3824b7e9183c34e6fbbd82deb72726 | ecf1ce6f8b592f76c7b7c253608c1264ae0676a3 | /days/day023/bite_022_Write_a_decorator_with_argument/test_bite_022.py | cb84a294a8948552c5e32652a8d195cd737cdd88 | [] | permissive | alex-vegan/100daysofcode-with-python-course | 94e99880a50ac412e398ad209ed53796f253641f | b6c12316abe18274b7963371b8f0ed2fd549ef07 | refs/heads/master | 2021-07-20T23:05:59.721661 | 2019-01-21T16:18:25 | 2019-01-21T16:18:25 | 150,115,516 | 0 | 0 | MIT | 2018-09-24T14:28:16 | 2018-09-24T14:28:15 | null | UTF-8 | Python | false | false | 248 | py | from bite_022 import make_html
def test_make_html():
    """Stacked make_html decorators must nest tags outermost-first."""
    @make_html('p')
    @make_html('strong')
    def sample(text='I code with PyBites'):
        return text
    expected = '<p><strong>I code with PyBites</strong></p>'
    assert sample() == expected
| [
"alex-vegan@outlook.com"
] | alex-vegan@outlook.com |
ddba2951e6d7f604b3af9174b0cd0fd2bc89d4e9 | ecc4e8c9794c8ecddfd3774a396d4a06881c1245 | /test/docker_test.py | 72d60addf7437caf5bb641bfc13d1b97b21c6640 | [
"MIT"
] | permissive | cheminfo-py/crystal_analysis_webservice | d244bd01523c1edfc428458e6dea42b063e3edbd | 444c91adb645aa99401dbb3d8f30d8b98e2343c5 | refs/heads/master | 2023-01-10T20:58:34.389784 | 2020-11-19T07:06:27 | 2020-11-19T07:06:27 | 303,760,806 | 0 | 0 | MIT | 2020-11-19T06:40:05 | 2020-10-13T16:17:15 | Python | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
"""Test if the API responds in the Docker image"""
import sys
import requests
import os
import json
# Directory containing this script; used to locate the CIF fixture file.
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Smoke test 1: the /version endpoint must respond with a "version" key.
r = requests.get("http://localhost:8091/version/")
keys = r.json().keys()
if "version" in keys:
    print("OK")
else:
    print("error")
    sys.exit(1)
# Smoke test 2: posting the HKUST-1 structure to /topology must identify
# the expected RCSR topology code ("tbo").
with open(os.path.join(THIS_DIR, "HKUST-1.cif"), "r") as fh:
    content = fh.read()
r = requests.post(
    "http://localhost:8091/topology/", data=json.dumps({"fileContent": content})
)
response = r.json()
if response["rcsrName"] == "tbo":
    print("tbo found for HKUST")
    sys.exit(0)
else:
    print("error")
    sys.exit(1)
| [
"kevin.jablonka@epfl.ch"
] | kevin.jablonka@epfl.ch |
b67776dddfb11a06e921bfc4315136dadb462d10 | e3d6f803beece2ecc2cde8de795fdd20291213ff | /nova/tests/functional/notification_sample_tests/test_aggregate.py | 61a4a20565b9da0c0ca69ba8232bfea12032390e | [
"Apache-2.0"
] | permissive | panguan737/nova | 437c1adb81f3e9ef82c28ad957144623db13ba52 | 0d177185a439baa228b42c948cab4e934d6ac7b8 | refs/heads/main | 2023-01-07T00:08:44.069599 | 2020-11-01T14:00:42 | 2020-11-01T14:00:42 | 309,332,719 | 0 | 0 | Apache-2.0 | 2020-11-02T10:17:13 | 2020-11-02T10:17:13 | null | UTF-8 | Python | false | false | 4,047 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestAggregateNotificationSample(
        notification_sample_base.NotificationSampleTestBase):
    """Verify the versioned notification samples emitted by aggregate
    create/delete and host add/remove operations."""

    @staticmethod
    def _aggregate_request():
        # Fresh request body per test so the API can never mutate a
        # shared structure between tests.
        return {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}

    def _assert_notifications(self, expected_total, checks):
        """Assert the captured notification count, then verify each
        (sample_name, replacements, index) triple in *checks*."""
        self.assertEqual(expected_total,
                         len(fake_notifier.VERSIONED_NOTIFICATIONS))
        for sample_name, replacements, index in checks:
            self._verify_notification(
                sample_name,
                replacements=replacements,
                actual=fake_notifier.VERSIONED_NOTIFICATIONS[index])

    def test_aggregate_create_delete(self):
        aggregate = self.admin_api.post_aggregate(self._aggregate_request())
        ids = {'uuid': aggregate['uuid'], 'id': aggregate['id']}
        # The create-start sample is emitted before the id is assigned,
        # so it only carries the uuid.
        self._assert_notifications(2, [
            ('aggregate-create-start', {'uuid': aggregate['uuid']}, 0),
            ('aggregate-create-end', ids, 1),
        ])
        self.admin_api.delete_aggregate(aggregate['id'])
        self._assert_notifications(4, [
            ('aggregate-delete-start', ids, 2),
            ('aggregate-delete-end', ids, 3),
        ])

    def test_aggregate_add_remove_host(self):
        aggregate = self.admin_api.post_aggregate(self._aggregate_request())
        # Drop the create notifications so indices below start at 0.
        fake_notifier.reset()
        ids = {'uuid': aggregate['uuid'], 'id': aggregate['id']}
        self.admin_api.post_aggregate_action(
            aggregate['id'], {"add_host": {"host": "compute"}})
        self._assert_notifications(2, [
            ('aggregate-add_host-start', ids, 0),
            ('aggregate-add_host-end', ids, 1),
        ])
        self.admin_api.post_aggregate_action(
            aggregate['id'], {"remove_host": {"host": "compute"}})
        self._assert_notifications(4, [
            ('aggregate-remove_host-start', ids, 2),
            ('aggregate-remove_host-end', ids, 3),
        ])
        self.admin_api.delete_aggregate(aggregate['id'])
| [
"147360410@qq.com"
] | 147360410@qq.com |
37c3c598b02fad75e178c54e31aae90a0d649116 | 8f0ce1be6cc093d962c64179eec99c7ccc20ffc4 | /fabrication/migrations/0007_fabricationprojectpage_items.py | f585382451b27a14eb873908ea552ac48e3a6fb3 | [] | no_license | dmeehan/futuregreenstudio | cf5e12c6ead8f0c7023ba09d5868749888068b72 | e6e2b7f7ffa2ed251d21e6b1d07573ab4f70782f | refs/heads/master | 2023-08-30T20:12:24.814970 | 2023-08-28T14:55:26 | 2023-08-28T14:55:26 | 89,943,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-26 04:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional free-text "items"
    # CharField to FabricationProjectPage.
    dependencies = [
        ('fabrication', '0006_auto_20210726_0440'),
    ]
    operations = [
        migrations.AddField(
            model_name='fabricationprojectpage',
            name='items',
            field=models.CharField(blank=True, max_length=250),
        ),
    ]
| [
"dmeehan@gmail.com"
] | dmeehan@gmail.com |
eab1cf90ff876af4f0683c8f49fa175a879378ac | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /delivery/tests/__init__.py | 8a969170bf17e9fd0f8173deea33b83546ba2af9 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing detailsself.
from . import test_delivery_cost, test_delivery_stock_move
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
aef82e74b3d77b46a8daf0528299813a33884d69 | 1180c0bfe29959d95f3c131e6e839950e528d4ee | /05/bmiraski/similar_tweeters.py | 370a3f3daee4af7e42956a3ee85a630156a63fbe | [] | no_license | pybites/challenges | e3e461accd8e7f890aee8007ba5070086ef983fc | 02b77652d0901e6e06cb9b1e7cb3e59c675445c2 | refs/heads/community | 2023-08-20T18:19:02.982214 | 2022-11-17T09:23:31 | 2022-11-17T09:23:31 | 78,264,928 | 764 | 3,115 | null | 2023-07-21T05:58:19 | 2017-01-07T07:17:50 | Jupyter Notebook | UTF-8 | Python | false | false | 2,284 | py | """Compare tweet similarity from two users."""
from nltk.corpus import stopwords
import spacy
from string import ascii_lowercase
from string import digits
from string import punctuation
import sys
import usertweets
stoplist = set(stopwords.words('english'))
nlp = spacy.load('en_core_web_md')
def similar_tweeters(user1, user2):
    """Print and return the spaCy similarity of two users' recent tweets.

    Each user's last tweets are reduced to a de-duplicated bag of
    "important" words (see the helpers below) and compared via spaCy's
    document similarity. Previously the score was only printed; it is
    now also returned so callers can use it programmatically.
    """
    def _doc_for(handle):
        # Build a spaCy doc from the user's cleaned, de-duplicated words.
        tweets = usertweets.UserTweets(handle)
        words = _word_set(
            _remove_solo_words(_get_important_tweet_words(tweets)))
        return nlp(" ".join(words))

    sim = _doc_for(user1).similarity(_doc_for(user2))
    print(sim)
    return sim
def _get_important_tweet_words(user):
    """Return, per tweet, the list of cleaned 'important' words.

    Each tweet is lowercased and split; every character that is not an
    ASCII lowercase letter is stripped from each word (the original code
    tested punctuation and digits separately, but both are subsumed by
    the ascii-letter check). Words are then filtered: URL remnants
    (which still start with 'http' after stripping ':/.'), stopwords and
    words shorter than 3 characters are dropped.

    NOTE(review): relies on the private ``user._tweets`` attribute of
    UserTweets, as before.
    """
    cleaned_tweets = []
    for tweet in user._tweets:
        words = []
        for raw_word in tweet.text.lower().split():
            # Keep only ascii lowercase letters; everything else
            # (digits, punctuation, unicode) is dropped.
            words.append("".join(c for c in raw_word
                                 if c in ascii_lowercase))
        cleaned_tweets.append(words)
    return [[word for word in tweet if not word.startswith('http') and
             word not in stoplist and len(word) >= 3]
            for tweet in cleaned_tweets]
def _remove_solo_words(tt):
"""Remove words occurring once in the array."""
from collections import defaultdict
frequency = defaultdict(int)
for tweet in tt:
for token in tweet:
frequency[token] += 1
return [[token for token in tweet if frequency[token] > 1]
for tweet in tt]
def _word_set(tt):
"""Return a single list of all the words from array."""
words = []
for tweet in tt:
for word in tweet:
words.append(word)
return words
if __name__ == "__main__":
    # Require exactly two twitter handles on the command line.
    if len(sys.argv) < 3:
        print('Usage: {} <user1> <user2>'.format(sys.argv[0]))
        sys.exit(1)
    user1, user2 = sys.argv[1:3]
    similar_tweeters(user1, user2)
| [
"pybites@projects.bobbelderbos.com"
] | pybites@projects.bobbelderbos.com |
0eb176292acf6b719ee3c1745b86634560ae1707 | 54da94dce244ab659c8036cafcdc1b326fbfe490 | /datoteke-s-predavanj/2016-17/02-rekurzija/rekurzija-b.py | aaf7cba7342ff8211217d6cb04f8b72ed61abd67 | [] | no_license | jakamrak/uvod-v-programiranje | 640b2738164e2026308d7e60f1478659df79cc40 | 3c05290f4f23b384ad9063880fffe208c08fc599 | refs/heads/master | 2022-07-17T16:50:18.563453 | 2020-05-18T13:54:13 | 2020-05-18T13:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | def fibonacci(n):
'''Vrne n-ti člen zaporedja 1, 1, 2, 3, 5, 8, ...'''
if n == 0:
return 1
elif n == 1:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
def splosni_fibonacci(n, a, b):
    """Return the n-th element of the generalised Fibonacci sequence
    a, b, a + b, a + 2b, 2a + 3b, ...
    """
    if n == 0:
        return a
    if n == 1:
        return b
    # Shift the window one step forward and recurse on n - 1.
    return splosni_fibonacci(n - 1, b, a + b)
def vsota_stevil(n):
    """Return the sum 1 + 2 + ... + n.

    Uses Gauss' closed form: the original recursed once per term, which
    is O(n) deep and raises RecursionError for n around 1000. (In the
    original file this definition is immediately shadowed by an
    identical one right below it.)
    """
    return n * (n + 1) // 2
def vsota_stevil(n):
    """Return the sum 1 + 2 + ... + n.

    Uses Gauss' closed form instead of the original one-call-per-term
    recursion, which was O(n) deep and raised RecursionError for
    n around 1000.
    """
    return n * (n + 1) // 2
def gcd(m, n):
    """Return the greatest common divisor of m and n (Euclid's algorithm)."""
    # Iterative Euclid: identical step sequence to the recursive form.
    while n != 0:
        m, n = n, m % n
    return m
def poisci_kvadratni_koren(n, a, b, eps):
    """Bisect [a, b] to find the square root of n to within eps."""
    # Loop invariant mirrors the recursion: halve the interval until it
    # is narrower than eps, then return its midpoint.
    while b - a >= eps:
        middle = (a + b) / 2
        if middle ** 2 < n:
            a = middle
        else:
            b = middle
    return (a + b) / 2
def poisci_koren(n, k, a, b, eps):
    """Bisect [a, b] to find the k-th root of n to within eps."""
    while b - a >= eps:
        middle = (a + b) / 2
        if middle ** k < n:
            a = middle
        else:
            b = middle
    return (a + b) / 2
def poisci_niclo(f, a, b, eps):
    """Bisect [a, b] to find a zero of f to within eps.

    Assumes f changes sign on [a, b], as in the recursive original.
    """
    while b - a >= eps:
        middle = (a + b) / 2
        # Same sign at a and the midpoint: the zero lies in [middle, b].
        if f(a) * f(middle) > 0:
            a = middle
        else:
            b = middle
    return (a + b) / 2
| [
"matija@pretnar.info"
] | matija@pretnar.info |
f655b27fab13fb11eed3d2d3b61c3cec82c6c715 | e1dd0997239951d4d459b1ba0229493512b0b331 | /mds_py/mds-env/lib/python3.11/site-packages/cleo/loaders/factory_command_loader.py | f47ae186ab1d41b16e0ae4a0c34e9357ad678bb2 | [] | no_license | alexmy21/Octopus | bd17777cf66654c1e7959654f63ca82b716865b5 | 7844ec616376ec6cd9c1a8b73dbcad9c729557ae | refs/heads/master | 2022-12-22T22:42:29.473433 | 2022-12-21T16:52:09 | 2022-12-21T16:52:09 | 61,543,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | from typing import Callable
from typing import Dict
from typing import List
from cleo.commands.command import Command
from ..exceptions import CommandNotFoundException
from .command_loader import CommandLoader
class FactoryCommandLoader(CommandLoader):
    """Command loader that instantiates commands lazily from a mapping
    of command name to zero-argument factory."""

    def __init__(self, factories: Dict[str, Callable]) -> None:
        self._factories = factories

    @property
    def names(self) -> List[str]:
        return [command_name for command_name in self._factories]

    def has(self, name: str) -> bool:
        return name in self._factories

    def get(self, name: str) -> Command:
        try:
            factory = self._factories[name]
        except KeyError:
            raise CommandNotFoundException(name)
        return factory()
| [
"alex.mylnikov@hitachivantara.com"
] | alex.mylnikov@hitachivantara.com |
eb20013d020a237284de499bd0d8625c37888b89 | bebba3fb1dfc13a2220f06997c4bc8da42ef8e87 | /smashlib/ipy3x/core/error.py | fdce2fe18b4821b059eb0b9aa031ff4c40673968 | [
"MIT"
] | permissive | mattvonrocketstein/smash | b48b93c3419637f615c7ac3386b04ae756e1fadc | 98acdc27ab72ca80d9a7f63a54c0d52f126a8009 | refs/heads/master | 2021-01-18T23:23:59.340206 | 2016-07-14T01:28:17 | 2016-07-14T01:28:17 | 2,813,958 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | # encoding: utf-8
"""
Global exception classes for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Min Ragan-Kelley
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Exception classes
#-----------------------------------------------------------------------------
class IPythonCoreError(Exception):
    """Base class for all exceptions raised by IPython.core."""
    pass
class TryNext(IPythonCoreError):
    """Try next hook exception.

    Raise this in your hook function to indicate that the next registered
    hook handler should be used to handle the operation instead.
    """
class UsageError(IPythonCoreError):
    """Error in magic function arguments, etc.

    Something that probably won't warrant a full traceback, but should
    nevertheless interrupt a macro / batch file.
    """
class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
    """raw_input was requested in a context where it is not supported.

    For use in IPython kernels, where only some frontends may support
    stdin requests.
    """
class InputRejected(Exception):
    """Input rejected by ast transformer.

    Raise this in your NodeTransformer to indicate that InteractiveShell
    should not execute the supplied input.
    """
| [
"matthewvonrocketstein@gmail-dot-com"
] | matthewvonrocketstein@gmail-dot-com |
966167169ec32aff8bf1faaf037b3ff555065e29 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /8oNKM4osgxYyrFtGL_19.py | 71e9c04b8fa2dd16ba98bba2536e6efa1c716407 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py |
def multiply(l):
n = []
ll = len(l)
for w in l:
i = []
ll2 = ll
while ll2 > 0:
i.append(w)
ll2 = ll2 - 1
n.append(i)
return n
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
14f0ad0a9d89f796416f432792c730a988d1b86d | 8acba76d7e7787c0b2c8270552ef5bc7eb0c7549 | /backend/todo/models.py | 10f3336c4b31594c2c1e77ee6f14a3858b46b33a | [] | no_license | harishtm/djreact | a99e91723fa92b52a5d467f01235ec0fc8eb02bb | 6bec0b89db10b255581a5a9dd1f440e40e8bf784 | refs/heads/master | 2023-01-29T04:44:28.276494 | 2020-03-13T06:54:49 | 2020-03-13T06:54:49 | 201,904,114 | 0 | 0 | null | 2023-01-04T06:57:14 | 2019-08-12T09:58:03 | JavaScript | UTF-8 | Python | false | false | 268 | py | from django.db import models
# Create your models here.
class Todo(models.Model):
    """A single to-do item with a title, description and completion flag."""
    title = models.CharField(max_length=255)
    description = models.TextField()
    # New items start out not completed.
    completed = models.BooleanField(default=False)
    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title
| [
"you@example.com"
] | you@example.com |
a30ee58e97e63845760962ab45ecbdd0b688678b | 8ff12c53e31f134b9f39f59b9a6f7d4f9142cea7 | /lvlist/teacherPython/import1/module1/main.py | 6e25b061ef9cc5b6be2e48bb786ac825ab6be1d1 | [] | no_license | quhuohuo/python | 5b0a80dbec7d22a0b274e4a32d269e85d254718c | 5732c5974519da8e8919dab42b36ab0ab2c99b37 | refs/heads/master | 2021-06-13T11:41:12.356329 | 2017-04-07T08:58:05 | 2017-04-07T08:58:05 | 75,054,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/python
from module1 import A,b
from module2 import _C
s = "main-->s..."
print s
b()
m1 = A()
m1.a()
m2 = _C()
| [
"813559081@qq.com"
] | 813559081@qq.com |
82b887d7cb1e3fd721d84204edba0434de3db89f | fdf1e1f4efc51bc024311d44a2fa4524f9b88bce | /girleffect/solutions/migrations/0042_add_slider_block.py | 83edf03fd57f04e2ff14736ae6830e7b7bb42afe | [] | no_license | girleffect/girleffect.org | 8327ffd6bbd1103033c92fbd4cbe5461aa1c7f03 | 55731b1c024f207211a161fd6d3ca796beea7a61 | refs/heads/master | 2023-04-07T21:40:43.910892 | 2022-06-14T11:50:21 | 2022-06-14T11:50:21 | 112,452,828 | 1 | 2 | null | 2023-04-01T12:05:55 | 2017-11-29T09:13:18 | Python | UTF-8 | Python | false | false | 13,694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-21 14:46
from __future__ import unicode_literals
from django.db import migrations
import girleffect.utils.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailsnippets.blocks
class Migration(migrations.Migration):
dependencies = [
('solutions', '0041_auto_20171220_1706'),
]
operations = [
migrations.AlterField(
model_name='solutionpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('body_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('large_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], label='Large Text', max_length=350, required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('extendable_body', wagtail.wagtailcore.blocks.StructBlock((('body_upper', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('extend_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the extend button', max_length=80, required=False)), ('collapse_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the collapse button', max_length=80, required=False)), ('body_lower', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', 
wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('quote', wagtail.wagtailcore.blocks.StructBlock((('quotes', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=True)), ('citation', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('link_block', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('drop_shadow_options', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))))), ('quote_mark_hex', wagtail.wagtailcore.blocks.CharBlock(label='Quote Mark Hex Code', max_length=7, required=False)))), icon='openquote', template='blocks/quote_block.html')), ('customisation', 
wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('video', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(max_length=30, required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=False)), ('youtube_embed', wagtail.wagtailembeds.blocks.EmbedBlock(help_text="Your YouTube URL goes here. Only YouTube video URLs will be accepted. The custom 'play' button will be created for valid YouTube URLs.", label='YouTube Video URL')), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Girl Effect YouTube Video')), ('carousel', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('label', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item small label, for example Our Reach', max_length=30)), ('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Carousel item large title', max_length=30)), ('text', 
wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], help_text='Carousel item text', max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))), icon='image', template='blocks/carousel_block.html')), ('slider', wagtail.wagtailcore.blocks.StructBlock((('slider_delay', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Enter the milliseconds of the delay between each slide', required=False)), ('slider_items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('overview_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item overview title', max_length=80, required=False)), ('overview_title_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('overview_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item overview text', max_length=255, required=False)), ('overview_text_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), 
('textbox_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item textbox title', max_length=30, required=False)), ('textbox_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item textbox text', max_length=75, required=False)), ('textbox_link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))))))))), ('media_text_overlay', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Appears above the module.', label='Title Text', max_length=25, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('logo', wagtail.wagtailimages.blocks.ImageChooserBlock(label='Title Logo', required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Full Width 
Media with Text Overlay')), ('list_block', wagtail.wagtailcore.blocks.StructBlock((('list_block', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', max_length=250, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))))), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('link_row', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False)))), icon='link', template='blocks/inline_link_block.html')), ('statistic', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, 
required=False)), ('statistics', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.Statistic))), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Statistic Block')), ('call_to_action', wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.CallToActionSnippet, template='blocks/call_to_action.html')))),
),
]
| [
"karl@kaed.uk"
] | karl@kaed.uk |
d63abbb48211e4004a4441ab8f36cb5cc3a048f2 | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Matriz_esferica_20200707115539.py | 2a05f90d9392c0a9967f9cb280e6bcd2b930b4d3 | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import numpy as np
import random
#regras de movimentacao
class Matriz_esferica():
def __init__(self, tamanho):
self.tamanho = tamanho
def get_vizinho_esquerda(self, ponto):
pass
def get_vizinho_direita(self, ponto):
pass
def get_vizinho_cima(self, ponto):
pass
def get_vizinho_baixo(self, ponto):
pass
def checa_limite_matriz(self, indice):
if indice < 0:
return self.tamanho - indice
elif indice > self.tamanho - 1:
return indice - self.tamanho - 1
else:
return indice
#devolve um ponto aleatório vizinho da matriz a partir do ponto especificado em uma das 8 direções
def passo(self, indice_x: int, indice_y: int, tamanho_max_passo: int):
passo_x = random.randint(-tamanho_max_passo, tamanho_max_passo)
passo_y = random.randint(-tamanho_max_passo, tamanho_max_passo)
return self.checa_limite_matriz(indice_x + passo_x), self.checa_limite_matriz(indice_y + passo_y)
def passo(self, indice_x: int, indice_y: int):
return self.passo(self, indice_x, indice_y, 1)
matriz = Matriz_esferica(10)
matriz.passo(0,0)
print(matriz.passo(0,0)) | [
"eduardo_dut@edu.unifor.br"
] | eduardo_dut@edu.unifor.br |
4171668175165d80aa71d270e8c75e6894a51269 | f654f5f07dd8109c0ee31ba89dd4804e6b288343 | /src/programy/clients/xmpp.py | 22f1b1c3347d45f3276d071306235db382006fa7 | [
"MIT"
] | permissive | sprinteroz/program-y | 3d1f5f28e4f3be770705d4bef15410b8b78f19da | 454c6bde225dce7c3fb01c549d46249248caf7b5 | refs/heads/master | 2021-01-19T16:05:25.636700 | 2017-08-22T03:56:33 | 2017-08-22T03:56:33 | 100,986,551 | 1 | 0 | null | 2017-08-21T19:43:43 | 2017-08-21T19:43:43 | null | UTF-8 | Python | false | false | 3,522 | py | """
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import sleekxmpp
from programy.clients.client import BotClient
from programy.config.sections.client.xmpp import XmppConfiguration
class XmppClient(sleekxmpp.ClientXMPP):
def __init__(self, bot_client, jid, password):
self.bot_client = bot_client
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.start)
self.add_event_handler("message", self.message)
def start(self, event):
self.send_presence()
self.get_roster()
def message(self, msg):
if msg['type'] in ('chat', 'normal'):
question = msg['body']
userid = msg['from']
response = self.bot_client.bot.ask_question(userid, question)
msg.reply(response).send()
class XmppBotClient(BotClient):
def __init__(self, argument_parser=None):
BotClient.__init__(self, argument_parser)
def set_environment(self, env='xmpp'):
self.bot.brain.properties.add_property("env", env)
def get_client_configuration(self):
return XmppConfiguration()
def run(self):
logging.debug("%s App Running.."%self.bot.brain.properties.predicate("env"))
username = self.bot.license_keys.get_key("XMPP_USERNAME")
password = self.bot.license_keys.get_key("XMPP_PASSWORD")
server = self.configuration.client_configuration.server
port = self.configuration.client_configuration.port
self._client = XmppClient(self, username, password)
if self.configuration.client_configuration.xep_0030 is True:
self._client.register_plugin('xep_0030')
if self.configuration.client_configuration.xep_0004 is True:
self._client.register_plugin('xep_0004')
if self.configuration.client_configuration.xep_0060 is True:
self._client.register_plugin('xep_0060')
if self.configuration.client_configuration.xep_0199 is True:
self._client.register_plugin('xep_0199')
if self._client.connect((server, port)):
print("Connected, running...")
self._client.process(block=True)
else:
print("Failed to connect, exiting...")
if __name__ == '__main__':
def run():
print("Loading Xmpp client, please wait. See log output for progress...")
xmpp_app = XmppBotClient()
xmpp_app.run()
run() | [
"keith@keithsterling.com"
] | keith@keithsterling.com |
183383359d833fb0a64220802c19ab444a1e24ba | d8e4ffa9daaeca95cf52d3a9e3ca1cff865357bf | /section 7/5. 동전분배하기/aa.py | 04a5b8df849fde24fe520978eff8954f916f87e2 | [] | no_license | mike-bskim/coding_test | ed4e3790b5b41ed62b922ba9d04149d1cd335c84 | 430445c3ad0ddb4435a5fd244ba3a86677cd00ff | refs/heads/master | 2023-05-06T00:04:24.090152 | 2021-05-13T19:33:01 | 2021-05-13T19:33:01 | 365,245,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import sys
# sys.stdin=open('in1.txt', 'rt')
# input=sys.stdin.readline
def DFS(L):
global min1
# if max(money)> total/2: # 한개의 숫자가 다른 숫자보다 월등히 크면 로직오류 발생.
# return
if L == n:
dif = max(money) - min(money)
if (dif) < min1:
tmp = set(money)
if len(tmp) == 3:
min1 = dif
# print(money, min1)
else:
for i in range(3):
money[i]+=coin[L]
DFS(L+1)
money[i]-=coin[L]
if __name__ == '__main__':
n = int(input())
coin=list()
money=[0]*3
min1=2147000000
x=list()
for _ in range(n):
coin.append(int(input()))
total=sum(coin)
# print(n, coin, total)
DFS(0)
print(min1)
| [
"mike-bskim@gmail.com"
] | mike-bskim@gmail.com |
e27a4dbde2694d7944a84c853f4374204a7e4703 | cc896a233121c4dc158210fae0588e7bdb63b9ff | /rtcbproj/rtcb/authentication/schema.py | b5af07989b4abed79691dda4ba9e29b3de4ded4c | [
"Apache-2.0"
] | permissive | arsenico13/rtcb-backend | 2b99a0fbb6a07bb0a9ef6652603c6e914ccd4525 | f097eae54a12ba4f3983869fef627ea1d55a37d1 | refs/heads/master | 2023-04-07T04:30:33.874569 | 2020-10-27T00:06:16 | 2020-10-27T00:06:16 | 151,688,197 | 0 | 0 | null | 2018-10-05T07:54:18 | 2018-10-05T07:54:17 | null | UTF-8 | Python | false | false | 554 | py | import graphene
from graphene_django import DjangoObjectType
from .models import User as UserModel
class ChoicesRoles(graphene.Enum):
Defender = 'D'
Striker = 'S'
class User(DjangoObjectType):
role = ChoicesRoles()
class Meta:
model = UserModel
interfaces = (graphene.Node, )
exclude_fields = ['password']
id_db = graphene.ID()
def resolve_id_db(self, info, **input):
""" Ritorna l'ID del db """
return self.id
def resolve_role(self, info, **kwargs):
return self.role
| [
"andrea.cecchi85@gmail.com"
] | andrea.cecchi85@gmail.com |
b501b9b296b86bcc20f10667d6104cff0e7b9bf8 | a1f2df675cfc595b15f1ca9390b7517989f2d4e0 | /testCase/performance/contract_rank/result_contract_rank_month_case2.py | 8640bf96de596e3ca582bfc88f340de7f1a4d6d7 | [] | no_license | GGGYB/crm | d4def2f1abc89451e1c4b11b89ef100a842ed745 | 61932466dd0ac299adc661383d506389d5f0f8e7 | refs/heads/master | 2022-04-15T21:29:48.539311 | 2020-04-14T10:23:41 | 2020-04-14T10:23:41 | 255,575,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,541 | py | import datetime
import os
import codecs
import operator
from commons.const import const
from testCase.roles import testGetRoles
from commons import filters
from testCase.login import testLogin as login
from testCase.performance import result
from testCase.contracts import testContract as contracts
from testCase.contracts import testSettingsContractsApproval as ContractsApproval
from testCase.contracts import testAddContract as AddContract
from testCase.contracts import testDeleteContract as DeleteContract
class result_contract_rank_month:
def __init__(self):
self.base_url = const.BASE_URL
self.base_url2 = const.SIGN_IN_BASE_URL
self.csrf = ''
self.cookie = ''
self.dimension1 = 'month'
# self.dimension2 = 'department'
# self.userinfo_super = {'username': '13701649175', 'password': 'Ik123456','role':'超管'}
self.userinfo_super = {'username': '15000249334 ', 'password': 'Ik123456','role':'超管'}
# 上月 1月 3月 上季度 本季度 下季度 本年 上半年 下半年 第三季度 第四季度
self.date_list = [['2018-12-01','2018-12-31'],['2019-01-01','2019-01-31'],['2019-03-01','2019-03-31'],['2018-10-01','2018-12-31'],['2019-01-01','2019-03-31'],['2019-04-01','2019-06-30'],['2019-01-1','2019-12-31'],['2019-01-01','2019-06-30'],['2019-07-01','2019-12-31'],['2019-04-01','2019-08-31'],['2019-07-01','2019-09-30'],['2019-10-01','2019-12-31']]
self.title = 'case0按用户1按月查看上月合同回款金额'
self.expected = ([['"设计部子部门-用户4"', '"产品部-用户3"', '"设计部-用户1"', '"主辅部门-用户6"', '"设计部-用户2"', '"销售部-用户5"'],
['', '', '', '', '', '']],
[['"设计部子部门-用户4"', '"产品部-用户3"', '"设计部-用户1"', '"主辅部门-用户6"', '"设计部-用户2"', '"销售部-用户5"'],
[['', '', '', '', '', '']],
['"设计部子部门-用户4"', '"产品部-用户3"', '"设计部-用户1"', '"主辅部门-用户6"', '"设计部-用户2"', '"销售部-用户5"'],
['', '', '', '', '', '']],
[['"设计部子部门-用户4"', '"产品部-用户3"', '"设计部-用户1"', '"主辅部门-用户6"', '"设计部-用户2"', '"销售部-用户5"'],
['', '', '', '', '', '']]
)
self.title1 = 'case1按用户1按月查看1月合同回款金额'
self.expected1 = ((['"2019一月"' ],
[ '11440.0']),
(['"2019一月"' ],
[ '7200.0']),
(['"2019一月"' ],
['560.0']),
(['"2019一月"' ],
['640.0'])
)
self.title2 = 'case2按用户1按月查看3月合同回款金额'
self.expected2 = ((['"2019三月"' ],
[ '22880.0']),
(['"2019三月"' ],
[ '14400.0']),
(['"2019三月"' ],
['1120.0']),
(['"2019三月"' ],
['1280.0'])
)
self.title3= 'case3按用户1按月查看上季度合同回款金额'
self.expected3 = (['"设计部子部门-用户4"', '"产品部-用户3"', '"设计部-用户1"', '"主辅部门-用户6"', '"设计部-用户2"', '"销售部-用户5"'],
['', '', '', '', '', ''])
self.title4 = 'case4按用户1按月查看1 2 3季度合同回款金额'
self.expected4 = ((['"2019一月"', '"2019三月"' ],
[ '11440.0','22880.0']),
(['"2019一月"', '"2019三月"' ],
[ '7200.0', '14400.0']),
(['"2019一月"', '"2019三月"' ],
['560.0', '1120.0']),
(['"2019一月"', '"2019三月"' ],
['640.0', '1280.0'])
)
self.title5 = 'case5按用户1按月查看4 5 6季度合同回款金额'
self.expected5 = ((['"2019五月"'],
['34320.0']),
(['"2019五月"'],
['21600.0']),
(['"2019五月"'],
['1680.0']),
(['"2019五月"'],
['1920.0'])
)
self.title6 = 'case6按用户1按月查看本年合同回款金额'
self.expected6 = ((['"2019一月"', '"2019三月"', '"2019五月"', '"2019七月"', '"2019九月"', '"2019十一月"'],
['11440.0', '22880.0', '34320.0', '45760.0', '57200.0', '68640.0']),
(['"2019一月"', '"2019三月"', '"2019五月"', '"2019七月"', '"2019九月"', '"2019十一月"'],
['7200.0', '14400.0', '21600.0', '28800.0', '36000.0', '43200.0']),
(['"2019一月"', '"2019三月"', '"2019五月"', '"2019七月"', '"2019九月"', '"2019十一月"'],
['560.0', '1120.0', '1680.0', '2240.0', '2800.0', '3360.0']),
(['"2019一月"', '"2019三月"', '"2019五月"', '"2019七月"', '"2019九月"', '"2019十一月"'],
['640.0', '1280.0', '1920.0', '2560.0', '3200.0', '3840.0'])
)
self.title7 = 'case7按用户1按月查看上半年合同回款金额'
self.expected7 = ((['"2019一月"', '"2019三月"', '"2019五月"'],
['11440.0', '22880.0', '34320.0']),
(['"2019一月"', '"2019三月"', '"2019五月"'],
['7200.0', '14400.0', '21600.0']),
(['"2019一月"', '"2019三月"', '"2019五月"'],
['560.0', '1120.0', '1680.0']),
(['"2019一月"', '"2019三月"', '"2019五月"'],
['640.0', '1280.0', '1920.0'])
)
self.title8 = 'case8按用户1查看下半年合同回款金额'
self.expected8 = ((['"2019七月"', '"2019九月"', '"2019十一月"'],
['45760.0', '57200.0', '68640.0']),
(['"2019七月"', '"2019九月"', '"2019十一月"'],
['28800.0', '36000.0', '43200.0']),
(['"2019七月"', '"2019九月"', '"2019十一月"'],
['2240.0', '2800.0', '3360.0']),
(['"2019七月"', '"2019九月"', '"2019十一月"'],
['2560.0', '3200.0', '3840.0'])
)
self.title9 = 'case9按用户1按月查看04--08合同回款金额'
self.expected9 = ((['"2019五月"', '"2019七月"'],
['34320.0', '45760.0']),
(['"2019五月"', '"2019七月"'],
['21600.0', '28800.0']),
(['"2019五月"', '"2019七月"'],
['1680.0', '2240.0']),
(['"2019五月"', '"2019七月"'],
['1920.0', '2560.0'])
)
self.title10 = 'case9用户按月1按月查看 7 8 9合同回款金额'
self.expected10 = ((['"2019七月"', '"2019九月"'],
['45760.0', '57200.0']),
(['"2019七月"', '"2019九月"'],
['28800.0', '36000.0']),
(['"2019七月"', '"2019九月"'],
['2240.0', '2800.0']),
(['"2019七月"', '"2019九月"'],
['2560.0', '3200.0'])
)
self.title11 = 'case9按用户1按月查看10 11 12合同回款金额'
self.expected11 = ((['"2019十一月"'],
['68640.0']),
(['"2019十一月"'],
['43200.0']),
(['"2019十一月"'],
['3360.0']),
(['"2019十一月"'],
['3840.0'])
)
pass
def case_1(self,title,expected,data1,data2,dimension1):
#按用户查看1月合同金额
self.current_case =title
_login = login.Login()
_login.login(self.userinfo_super['username'], self.userinfo_super['password'], self.userinfo_super['role'])
self.cookie = _login.cookie
self.csrf = _login.csrf
_Roles =testGetRoles.GetRole(self.cookie,self.csrf)
list=['organization_own','self_and_subordinate_department_own','self_department_own','self_own']
for i in range(4):
_Roles.ding_set_data_authority(list[i])
# print('==========')
# print (expected[i])
# print ('============')
_result=result.result(self.cookie,self.csrf)
Actual_result =_result.gole_quarter(data1,data2,type='contract_rank',dimension=dimension1)
print (Actual_result)
test_result=operator.eq(expected[i], Actual_result)
print (test_result)
if test_result== True:
f = codecs.open('contract_rank_month.txt', 'a', 'utf-8')
a = str(self.current_case + ': ' "is right "+str(datetime.datetime.now()))
f.write(a + '\n')
else:
f = codecs.open('contract_rank_month.txt', 'a', 'utf-8')
a = str(i)+str(self.current_case + ': ' "is wrong, expected_result:'\n'" + str(expected[i]) +"Actual_result:'\n'"+ str(Actual_result)+','+str(datetime.datetime.now()))
f.write(a + '\n')
if __name__ == '__main__':
_result_amount =result_contract_rank_month()
# os.remove('contract_rank_month.txt')
# 上月
# _result_amount.case_1(title=_result_amount.title,expected=_result_amount.expected,data1=_result_amount.date_list[0][0],data2=_result_amount.date_list[0][1],dimension1='user')
# 1月----
_result_amount.case_1(title=_result_amount.title1, expected=_result_amount.expected1,
data1=_result_amount.date_list[1][0], data2=_result_amount.date_list[1][1], dimension1=_result_amount.dimension1)
# # 3月
_result_amount.case_1(title=_result_amount.title2, expected=_result_amount.expected2,
data1=_result_amount.date_list[2][0], data2=_result_amount.date_list[2][1],
dimension1=_result_amount.dimension1)
# #
# # # 上季度
# # _result_amount.case_1(title=_result_amount.title3, expected=_result_amount.expected3,
# # data1=_result_amount.date_list[3][0], data2=_result_amount.date_list[3][1],
# # dimension1=_result_amount.dimension1)
# #
# # #1 2 3
_result_amount.case_1(title=_result_amount.title4, expected=_result_amount.expected4,
data1=_result_amount.date_list[4][0], data2=_result_amount.date_list[4][1],
dimension1=_result_amount.dimension1)
# #
# # # 4 5 6
_result_amount.case_1(title=_result_amount.title5, expected=_result_amount.expected5,
data1=_result_amount.date_list[5][0], data2=_result_amount.date_list[5][1],
dimension1=_result_amount.dimension1)
# #
# # #本年
_result_amount.case_1(title=_result_amount.title6, expected=_result_amount.expected6,
data1=_result_amount.date_list[6][0], data2=_result_amount.date_list[6][1],
dimension1=_result_amount.dimension1)
#
# # #上半年
_result_amount.case_1(title=_result_amount.title7, expected=_result_amount.expected7,
data1=_result_amount.date_list[7][0], data2=_result_amount.date_list[7][1],
dimension1=_result_amount.dimension1)
# #
# # # #下半年
_result_amount.case_1(title=_result_amount.title8, expected=_result_amount.expected8,
data1=_result_amount.date_list[8][0], data2=_result_amount.date_list[8][1],
dimension1=_result_amount.dimension1)
# # # 2019-04-01--2019-08-31
_result_amount.case_1(title=_result_amount.title9, expected=_result_amount.expected9,
data1=_result_amount.date_list[9][0], data2=_result_amount.date_list[9][1],
dimension1=_result_amount.dimension1)
# # # # #
# # # #7 8 9
_result_amount.case_1(title=_result_amount.title10, expected=_result_amount.expected10,
data1=_result_amount.date_list[10][0], data2=_result_amount.date_list[10][1],
dimension1=_result_amount.dimension1)
# # # # #
# # # #10 11 12
_result_amount.case_1(title=_result_amount.title11, expected=_result_amount.expected11,
data1=_result_amount.date_list[11][0], data2=_result_amount.date_list[11][1],
dimension1=_result_amount.dimension1)
# # # #
| [
"nlnongling@163.com"
] | nlnongling@163.com |
6c856cdbc50ea95865105c3aa077d96b3ea9ad24 | 56ab1f0aa30dc0357e84fa11640c9353fd5deeed | /set7/52/set7_pro52.py | 01d24fe9296d4e88110b36a1d56cb8993b0cb214 | [] | no_license | zjwzcn07/cryptopals | c9c11a7627222291e7f5c7a2cfffc4a55b548dc5 | d44c9e8b8f1527e244de6a5425b8d4f46ef98e49 | refs/heads/master | 2021-06-10T13:35:48.517737 | 2016-11-30T11:29:59 | 2016-11-30T11:29:59 | 74,258,995 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import hmac
import sys
def bad_hash_block(msg,iv):
mac = str2bin(hmac.new(iv,msg).hexdigest())[:16]
#print mac
return mac
def str2bin(string):
string_bin = ""
for x in string:
the_bin = bin(ord(x))[2:]
if((8 - len(the_bin))>0):
the_bin = '0'*(8 - len(the_bin)) + the_bin
string_bin = string_bin + the_bin
return string_bin
def padding(msg):
over_pad = len(msg)%16
msg = msg + chr(1) + chr(0)*((over_pad - 2)%16) + chr(len(msg))
return msg
def Merkle_Damgard(msg,iv):# iv:16 Byte blank_length:16 Byte
m_block = []
if(len(iv)<8):
iv = '0'*(8 - len(iv)) + iv
msg = padding(msg)
for m in (m_block.append(msg[x:x + 16]) for x in xrange(0,len(msg), 16)):
iv = bad_hash_block(m,iv)
print iv
return iv#mac
def find_a_collision(iv):
H = {}
collision_space1 = "abcdefghijklmnop"
collision_space2 = "ABCDEFGHIJKLMNOP"
for x in xrange(0,2**16):
choice_base = bin(x)[2:]
if(len(choice_base)<16):
choice_base = '0'*(16 - len(choice_base)) + choice_base
test_string = ''
for index,x in enumerate(choice_base):
if (x == '0'):
test_string = test_string + collision_space1[index]
else:
test_string = test_string + collision_space2[index]
h = bad_hash_block(test_string,iv)
if(H.has_key(h)):
#print (h,test_string,H[h])
return (h,test_string,H[h])
H[h] = test_string
print "cant find a collision"
sys.exit(0)
def extend_collision(iv,collision):
h,co1,co2 = find_a_collision(iv)
return h, [x + co for x in collision for co in [co1, co2]]
def Merkle_Damgard_collision(iv,num):
collision = []
iv,co1,co2 = find_a_collision(iv)
collision = [co1,co2]
print "The first collision:"
print collision
for i in xrange(0,num):
iv,collision = extend_collision(iv,collision)
print "The " + str(i + 1) + " times extend:"
print collision
print len(collision)
return collision
if __name__ == '__main__':
iv = "0000000000000000"
#Merkle_Damgard("aaaaaaa",iv)
collision = Merkle_Damgard_collision(iv,3)
for x in collision:
Merkle_Damgard(x,iv) | [
"1106911190@qq.com"
] | 1106911190@qq.com |
21ce8f7efc7d9750a693501c0a68db78036b44c3 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/_operations_mixin.py | 5631cad2bfb87a3b4ce5a3932a1c06cdb5d2fd42 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,410 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._serialization import Serializer, Deserializer
from io import IOBase
from typing import Any, IO, Optional, Union
from . import models as _models
class SubscriptionClientOperationsMixin(object):
def check_resource_name(
self,
resource_name_definition: Optional[Union[_models.ResourceName, IO]] = None,
**kwargs: Any
) -> _models.CheckResourceNameResult:
"""Checks resource name validity.
A resource name is valid if it is not a reserved word, does not contains a reserved word and
does not start with a reserved word.
:param resource_name_definition: Resource object with values for resource name and resource
type. Is either a ResourceName type or a IO type. Default value is None.
:type resource_name_definition:
~azure.mgmt.resource.subscriptions.v2022_12_01.models.ResourceName or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckResourceNameResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2022_12_01.models.CheckResourceNameResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('check_resource_name')
if api_version == '2016-06-01':
from .v2016_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
elif api_version == '2019-11-01':
from .v2019_11_01.operations import SubscriptionClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import SubscriptionClientOperationsMixin as OperationClass
elif api_version == '2022-12-01':
from .v2022_12_01.operations import SubscriptionClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'check_resource_name'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._config.api_version = api_version
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.check_resource_name(resource_name_definition, **kwargs)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
73d0d1b1c8b75b5f329b134be4f9f52dae01419b | bc5dd7be84a43ec53f8e4215761badb9b61a13ad | /kurs_2/vertualenv/Lib/site-packages/django/contrib/messages/storage/base.py | d1f9b274323aa72fc4ab49364e624f39becd2095 | [] | no_license | MaximMak/DL_Academy_Lessons | ef4758be02e43954748031ac95c970077f71cd7e | 427576859657e88fd81683494397af3df920c674 | refs/heads/master | 2023-01-29T19:53:11.650096 | 2020-12-13T21:40:58 | 2020-12-13T21:40:58 | 276,397,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,679 | py | from django.conf import settings
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message:
"""
Represent an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepare the message for serialization by forcing the ``message``
and ``extra_tags`` to str in case they are lazy translations.
"""
self.message = str(self.message)
self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None
def __eq__(self, other):
if not isinstance(other, Message):
return NotImplemented
return self.level == other.level and self.message == other.message
def __str__(self):
return str(self.message)
@property
def tags(self):
return ' '.join(tag for tag in [self.extra_tags, self.level_tag] if tag)
@property
def level_tag(self):
return LEVEL_TAGS.get(self.level, '')
class BaseStorage:
"""
This is the base profiles for temporary message storage.
This is not a complete class; to be a usable storage profiles, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super().__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Return a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieve a list of stored messages. Return a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the profiles was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')
def _store(self, messages, response, *args, **kwargs):
"""
Store a list of messages and return a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')
def _prepare_messages(self, messages):
"""
Prepare a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Store all unread messages.
If the profiles has yet to be iterated, store previously stored messages
again. Otherwise, only store messages added after the last iteration.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queue a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Return the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Set a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
| [
"54116778+MaximMak@users.noreply.github.com"
] | 54116778+MaximMak@users.noreply.github.com |
4e6c5b0a5c1fc42f8b453fe5a38fd77ed0d32ef8 | 1f8d05b774271e6c9321b23093baa1dda3dae650 | /utils_app/website_settings.py | 1ad5af2e18bf0beded6e8b31a84024909eaa29ea | [] | no_license | pockerman/python_lms | 5ca2f0d90692bf6c9bc7c00d0ffd59db084ead3f | 327dbbd9e5ad96c143c8abe60cf8a2cbc3d5dac0 | refs/heads/master | 2020-07-14T01:07:27.826127 | 2019-08-29T16:08:37 | 2019-08-29T16:08:37 | 205,197,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py |
site_index_title='Ustdy'
navigation_elements = ["Join","Services","Courses","Contact"]
main_page_buttons =["Courses Catalog","Request Info","Tests Catalog","Library","Learn More"]
nav_page_title=["Ustdy-Join","Ustdy-Services","Ustdy-Courses","Ustdy-Contact"]
| [
"a.giavaras@gmail.com"
] | a.giavaras@gmail.com |
6216ac8e2467c29bdefb4a44159e403691e89f88 | 9a5438bdb8e84d0167ddea5458a7f729fdd54121 | /MetaDataApi/tests/test_utils/test_json_utils.py | d364134e98feae48c1bf4083a71f2715fcc53c32 | [] | no_license | Grusinator/MetaDataApi | 740fd2be4cb97b670f827a071a0ac8c50f79f8ff | 081f881c735466ed1dbbd68646b821299c5168f8 | refs/heads/master | 2023-07-25T23:58:22.179717 | 2020-03-15T09:36:05 | 2020-03-15T09:36:05 | 149,087,967 | 5 | 1 | null | 2023-07-25T15:39:12 | 2018-09-17T07:45:09 | CSS | UTF-8 | Python | false | false | 366 | py | import django
from django.test import TransactionTestCase
class TestJsonUtils(TransactionTestCase):
# Django requires an explicit setup() when running tests in PTVS
@classmethod
def setUpClass(cls):
super(TestJsonUtils, cls).setUpClass()
django.setup()
def test_identify_json_data_sample(self):
self.assertEqual(1 + 1, 2)
| [
"grusinator@gmail.com"
] | grusinator@gmail.com |
c8b71e3b91a871f2dc99627693d550ff4be408fe | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2017_09_01/models/express_route_circuits_routes_table_summary_list_result.py | db62dab03a898b918438cd3d316dd87a3d2c32b5 | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 1,251 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
"""Response for ListRoutesTable associated with the Express Route Circuits
API.
:param value: A list of the routes table.
:type value: list of :class:`ExpressRouteCircuitRoutesTableSummary
<azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitRoutesTableSummary>`
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None, next_link=None):
self.value = value
self.next_link = next_link
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
1d5a9de4a037605a35872b62a9cd7204b3ab99ae | a12d46e439d81ba0a60776f46eb60b1a158b8ac1 | /pfrl/utils/__init__.py | d632886211326ad92635ea4bae4dc51d21237a3a | [
"MIT"
] | permissive | TMats/pfrl | 1b2a529aec210b0e3064182433797460a64f79f5 | 9c591657a5433b8f23c589e01751408ee5d1dde5 | refs/heads/master | 2022-12-21T01:27:07.625802 | 2020-10-07T02:32:20 | 2020-10-07T02:32:20 | 284,470,130 | 0 | 0 | MIT | 2020-08-02T13:46:29 | 2020-08-02T13:46:28 | null | UTF-8 | Python | false | false | 548 | py | from pfrl.utils.batch_states import batch_states # NOQA
from pfrl.utils.conjugate_gradient import conjugate_gradient # NOQA
from pfrl.utils import env_modifiers # NOQA
from pfrl.utils.is_return_code_zero import is_return_code_zero # NOQA
from pfrl.utils.random_seed import set_random_seed # NOQA
from pfrl.utils.pretrained_models import download_model # NOQA
from pfrl.utils.contexts import evaluating # NOQA
from pfrl.utils.stoppable_thread import StoppableThread # NOQA
from pfrl.utils.clip_l2_grad_norm import clip_l2_grad_norm_ # NOQA
| [
"muupan@gmail.com"
] | muupan@gmail.com |
ba176ed298b23be1213d8b59e36b441d5f964a2c | 235c0b75028efccd508f378032935374444593c6 | /allennlp_semparse/fields/knowledge_graph_field.py | 669edeabb01190bec2bf5999f39c7f822a14cd83 | [
"Apache-2.0"
] | permissive | pdasigi/allennlp-semparse | b1c2ab33ed16e3172d5b8cd7fbfc91683f2eae14 | 843c9e5a4d15f449c8f11e6c08940d3de3e2a8c7 | refs/heads/master | 2020-07-22T23:26:32.464901 | 2019-08-28T23:01:36 | 2019-08-28T23:01:36 | 207,366,708 | 0 | 0 | Apache-2.0 | 2019-09-09T17:34:27 | 2019-09-09T17:34:27 | null | UTF-8 | Python | false | false | 23,779 | py | """
``KnowledgeGraphField`` is a ``Field`` which stores a knowledge graph representation.
"""
from typing import Callable, Dict, List, Set
from collections import defaultdict
import editdistance
from overrides import overrides
import torch
from allennlp.common import util
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.field import Field
from allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util as nn_util
from allennlp_semparse.common.knowledge_graph import KnowledgeGraph
TokenList = List[TokenType] # pylint: disable=invalid-name
class KnowledgeGraphField(Field[Dict[str, torch.Tensor]]):
    """
    A ``KnowledgeGraphField`` represents a ``KnowledgeGraph`` as a ``Field`` that can be used in a
    ``Model``. For each entity in the graph, we output two things: a text representation of the
    entity, handled identically to a ``TextField``, and a list of linking features for each token
    in some input utterance.
    The output of this field is a dictionary::
        {
          "text": Dict[str, torch.Tensor],  # each tensor has shape (batch_size, num_entities, num_entity_tokens)
          "linking": torch.Tensor  # shape (batch_size, num_entities, num_utterance_tokens, num_features)
        }
    The ``text`` component of this dictionary is suitable to be passed into a
    ``TextFieldEmbedder`` (which handles the additional ``num_entities`` dimension without any
    issues). The ``linking`` component of the dictionary can be used however you want to decide
    which tokens in the utterance correspond to which entities in the knowledge graph.
    In order to create the ``text`` component, we use the same dictionary of ``TokenIndexers``
    that's used in a ``TextField`` (as we're just representing the text corresponding to each
    entity). For the ``linking`` component, we use a set of hard-coded feature extractors that
    operate between the text corresponding to each entity and each token in the utterance.
    Parameters
    ----------
    knowledge_graph : ``KnowledgeGraph``
        The knowledge graph that this field stores.
    utterance_tokens : ``List[Token]``
        The tokens in some utterance that is paired with the ``KnowledgeGraph``. We compute a set
        of features for linking tokens in the utterance to entities in the graph.
    tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
        We'll use this ``Tokenizer`` to tokenize the text representation of each entity.
    token_indexers : ``Dict[str, TokenIndexer]``
        Token indexers that convert entities into arrays, similar to how text tokens are treated in
        a ``TextField``. These might operate on the name of the entity itself, its type, its
        neighbors in the graph, etc.
    feature_extractors : ``List[str]``, optional
        Names of feature extractors to use for computing linking features. These must be
        attributes of this object, without the first underscore. The feature extraction functions
        are listed as the last methods in this class. For example, to use
        :func:`_exact_token_match`, you would pass the string ``exact_token_match``. We will add
        an underscore and look for a function matching that name. If this list is omitted, we will
        use all available feature functions.
    entity_tokens : ``List[List[Token]]``, optional
        If you have pre-computed the tokenization of the table text, you can pass it in here. The
        must be a list of the tokens in the entity text, for each entity in the knowledge graph, in
        the same order in which the knowledge graph returns entities.
    linking_features : ``List[List[List[float]]]``, optional
        If you have pre-computed the linking features between the utterance and the table text, you
        can pass it in here.
    include_in_vocab : ``bool``, optional (default=True)
        If this is ``False``, we will skip the ``count_vocab_items`` logic, leaving out all table
        entity text from the vocabulary computation. You might want to do this if you have a lot
        of rare entities in your tables, and you see the same table in multiple training instances,
        so your vocabulary counts get skewed and include too many rare entities.
    max_table_tokens : ``int``, optional
        If given, we will only keep this number of total table tokens. This bounds the memory
        usage of the table representations, truncating cells with really long text. We specify a
        total number of tokens, not a max cell text length, because the number of table entities
        varies.
    """
    def __init__(self,
                 knowledge_graph: KnowledgeGraph,
                 utterance_tokens: List[Token],
                 token_indexers: Dict[str, TokenIndexer],
                 tokenizer: Tokenizer = None,
                 feature_extractors: List[str] = None,
                 entity_tokens: List[List[Token]] = None,
                 linking_features: List[List[List[float]]] = None,
                 include_in_vocab: bool = True,
                 max_table_tokens: int = None) -> None:
        self.knowledge_graph = knowledge_graph
        self._tokenizer = tokenizer or WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=True))
        if not entity_tokens:
            entity_texts = [knowledge_graph.entity_text[entity].lower()
                            for entity in knowledge_graph.entities]
            # TODO(mattg): Because we do tagging on each of these entities in addition to just
            # tokenizations, this is quite slow, and about half of our data processing time just
            # goes to this (~15 minutes when there are 7k instances). The reason we do tagging is
            # so that we can add lemma features. If we can remove the need for lemma / other
            # hand-written features, like with a CNN, we can cut down our data processing time by a
            # factor of 2.
            self.entity_texts = self._tokenizer.batch_tokenize(entity_texts)
        else:
            self.entity_texts = entity_tokens
        self.utterance_tokens = utterance_tokens
        self._token_indexers: Dict[str, TokenIndexer] = token_indexers
        self._include_in_vocab = include_in_vocab
        self._indexed_entity_texts: Dict[str, TokenList] = None
        self._max_table_tokens = max_table_tokens
        # Default: use every feature extractor defined at the bottom of this class.
        feature_extractors = feature_extractors if feature_extractors is not None else [
                'number_token_match',
                'exact_token_match',
                'contains_exact_token_match',
                'lemma_match',
                'contains_lemma_match',
                'edit_distance',
                'related_column',
                'related_column_lemma',
                'span_overlap_fraction',
                'span_lemma_overlap_fraction',
                ]
        self._feature_extractors: List[Callable[[str, List[Token], Token, int, List[Token]], float]] = []
        for feature_extractor_name in feature_extractors:
            extractor = getattr(self, '_' + feature_extractor_name, None)
            if not extractor:
                raise ConfigurationError(f"Invalid feature extractor name: {feature_extractor_name}")
            self._feature_extractors.append(extractor)
        if not linking_features:
            # For quicker lookups in our feature functions, we'll additionally store some
            # dictionaries that map entity strings to useful information about the entity.
            self._entity_text_map: Dict[str, List[Token]] = {}
            for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
                self._entity_text_map[entity] = entity_text
            self._entity_text_exact_text: Dict[str, Set[str]] = {}
            for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
                self._entity_text_exact_text[entity] = set(e.text for e in entity_text)
            self._entity_text_lemmas: Dict[str, Set[str]] = {}
            for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
                self._entity_text_lemmas[entity] = set(e.lemma_ for e in entity_text)
            self.linking_features = self._compute_linking_features()
        else:
            self.linking_features = linking_features
    @overrides
    def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
        if self._include_in_vocab:
            for indexer in self._token_indexers.values():
                for entity_text in self.entity_texts:
                    for token in entity_text:
                        indexer.count_vocab_items(token, counter)
    @overrides
    def index(self, vocab: Vocabulary):
        # Index each entity's text with every indexer, grouping results by index name.
        self._indexed_entity_texts = {}
        for indexer_name, indexer in self._token_indexers.items():
            indexer_arrays: Dict[str, List] = defaultdict(list)
            for entity_text in self.entity_texts:
                for index_name, indexed in indexer.tokens_to_indices(entity_text, vocab, indexer_name).items():
                    indexer_arrays[index_name].append(indexed)
            self._indexed_entity_texts.update(indexer_arrays)
    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        num_entities = len(self.entity_texts)
        num_entity_tokens = max(len(entity_text) for entity_text in self.entity_texts)
        if self._max_table_tokens:
            # This truncates the number of entity tokens used, enabling larger tables (either in
            # the number of entities in the table, or the number of tokens per entity) to fit in
            # memory, particularly when using ELMo.
            if num_entities * num_entity_tokens > self._max_table_tokens:
                num_entity_tokens = int(self._max_table_tokens / num_entities)
        padding_lengths = {'num_entities': num_entities,
                           'num_utterance_tokens': len(self.utterance_tokens)}
        padding_lengths['num_entity_tokens'] = num_entity_tokens
        # Collect the per-token padding lengths reported by each indexer.
        lengths = []
        assert self._indexed_entity_texts is not None, ("This field is not indexed yet. Call "
                                                        ".index(vocab) before determining padding "
                                                        "lengths.")
        for indexer_name, indexer in self._token_indexers.items():
            indexer_lengths = {}
            # This is a list of dicts, one for each token in the field.
            entity_lengths = [indexer.get_padding_lengths(token)
                              for entity_text in self._indexed_entity_texts[indexer_name]
                              for token in entity_text]
            # Iterate over the keys in the first element of the list. This is fine as for a given
            # indexer, all entities will return the same keys, so we can just use the first one.
            for key in entity_lengths[0].keys():
                indexer_lengths[key] = max(x.get(key, 0) for x in entity_lengths)
            lengths.append(indexer_lengths)
        # Get all the keys which have been used for padding.
        padding_keys = {key for d in lengths for key in d.keys()}
        for padding_key in padding_keys:
            padding_lengths[padding_key] = max(x.get(padding_key, 0) for x in lengths)
        return padding_lengths
    @overrides
    def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
        tensors = {}
        desired_num_entities = padding_lengths['num_entities']
        desired_num_entity_tokens = padding_lengths['num_entity_tokens']
        desired_num_utterance_tokens = padding_lengths['num_utterance_tokens']
        for indexer_name, indexer in self._token_indexers.items():
            padded_entities = util.pad_sequence_to_length(self._indexed_entity_texts[indexer_name],
                                                          desired_num_entities,
                                                          default_value=lambda: [])
            padded_tensors = []
            for padded_entity in padded_entities:
                padded_tensor = indexer.as_padded_tensor({'key': padded_entity},
                                                         {'key': desired_num_entity_tokens},
                                                         padding_lengths)['key']
                padded_tensors.append(padded_tensor)
            tensor = torch.stack(padded_tensors)
            tensors[indexer_name] = tensor
        # Pad the linking features out to (num_entities, num_utterance_tokens, num_features).
        padded_linking_features = util.pad_sequence_to_length(self.linking_features,
                                                              desired_num_entities,
                                                              default_value=lambda: [])
        padded_linking_arrays = []
        default_feature_value = lambda: [0.0] * len(self._feature_extractors)
        for linking_features in padded_linking_features:
            padded_features = util.pad_sequence_to_length(linking_features,
                                                          desired_num_utterance_tokens,
                                                          default_value=default_feature_value)
            padded_linking_arrays.append(padded_features)
        linking_features_tensor = torch.FloatTensor(padded_linking_arrays)
        return {'text': tensors, 'linking': linking_features_tensor}
    def _compute_linking_features(self) -> List[List[List[float]]]:
        # Run every feature extractor over each (entity, utterance token) pair, giving a
        # (num_entities, num_utterance_tokens, num_features) nested list.
        linking_features = []
        for entity, entity_text in zip(self.knowledge_graph.entities, self.entity_texts):
            entity_features = []
            for token_index, token in enumerate(self.utterance_tokens):
                token_features = []
                for feature_extractor in self._feature_extractors:
                    token_features.append(feature_extractor(entity,
                                                            entity_text,
                                                            token,
                                                            token_index,
                                                            self.utterance_tokens))
                entity_features.append(token_features)
            linking_features.append(entity_features)
        return linking_features
    @overrides
    def empty_field(self) -> 'KnowledgeGraphField':
        return KnowledgeGraphField(KnowledgeGraph(set(), {}), [], self._token_indexers)
    @overrides
    def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        # pylint: disable=no-self-use
        batched_text = nn_util.batch_tensor_dicts(tensor['text'] for tensor in tensor_list)  # type: ignore
        batched_linking = torch.stack([tensor['linking'] for tensor in tensor_list])
        return {'text': batched_text, 'linking': batched_linking}
    # Below here we have feature extractor functions. To keep a consistent API for easy logic
    # above, some of these functions have unused arguments.
    # pylint: disable=unused-argument,no-self-use
    # These feature extractors are generally pretty specific to the logical form language and
    # problem setting in WikiTableQuestions. This whole notion of feature extraction should
    # eventually be made more general (or just removed, if we can replace it with CNN features...).
    # For the feature functions used in the original parser written in PNP, see here:
    # https://github.com/allenai/pnp/blob/wikitables2/src/main/scala/org/allenai/wikitables/SemanticParserFeatureGenerator.scala
    # One notable difference between how the features work here and how they worked in PNP is that
    # we're using the table text when computing string matches, while PNP used the _entity name_.
    # It turns out that the entity name is derived from the table text, so this should be roughly
    # equivalent, except in the case of some numbers. If there are cells with different text that
    # normalize to the same name, you could get `_2` or similar appended to the name, so the way we
    # do it here should just be better. But it's a possible minor source of variation from the
    # original parser.
    # Another difference between these features and the PNP features is that the span overlap used
    # a weighting scheme to downweight matches on frequent words (like "the"), and the lemma
    # overlap feature value was calculated a little differently. I'm guessing that doesn't make a
    # huge difference...
    def _number_token_match(self,
                            entity: str,
                            entity_text: List[Token],
                            token: Token,
                            token_index: int,
                            tokens: List[Token]) -> float:
        # PNP had a "spanFeatures" function that said whether an entity was a-priori known to link
        # to a token or set of tokens in the question. This was only used for numbers, and it's
        # not totally clear to me how this number feature overlapped with the token match features
        # in the original implementation (I think in most cases it was the same, except for things
        # like "four million", because the token match is derived from the entity name, which would
        # be 4000000, and wouldn't match "four million").
        #
        # Our implementation basically just adds a duplicate token match feature that's specific to
        # numbers. It'll break in some rare cases (e.g., "Which four had four million ..."), but
        # those shouldn't be a big deal.
        if ":" in entity:
            # This check works because numbers are the only entities that don't contain ":". All
            # others in both WikiTables languages do (e.g.: fb:row.row.column_name,
            # date_column:year, string:usl_a_league etc.).
            return 0.0
        return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
    def _exact_token_match(self,
                           entity: str,
                           entity_text: List[Token],
                           token: Token,
                           token_index: int,
                           tokens: List[Token]) -> float:
        # Only fires for single-token entities; multi-token entities are handled by
        # the "contains" variant below.
        if len(entity_text) != 1:
            return 0.0
        return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
    def _contains_exact_token_match(self,
                                    entity: str,
                                    entity_text: List[Token],
                                    token: Token,
                                    token_index: int,
                                    tokens: List[Token]) -> float:
        if token.text in self._entity_text_exact_text[entity]:
            return 1.0
        return 0.0
    def _lemma_match(self,
                     entity: str,
                     entity_text: List[Token],
                     token: Token,
                     token_index: int,
                     tokens: List[Token]) -> float:
        if len(entity_text) != 1:
            return 0.0
        return self._contains_lemma_match(entity, entity_text, token, token_index, tokens)
    def _contains_lemma_match(self,
                              entity: str,
                              entity_text: List[Token],
                              token: Token,
                              token_index: int,
                              tokens: List[Token]) -> float:
        # Exact text match also counts as a lemma match.
        if token.text in self._entity_text_exact_text[entity]:
            return 1.0
        if token.lemma_ in self._entity_text_lemmas[entity]:
            return 1.0
        return 0.0
    def _edit_distance(self,
                       entity: str,
                       entity_text: List[Token],
                       token: Token,
                       token_index: int,
                       tokens: List[Token]) -> float:
        edit_distance = float(editdistance.eval(' '.join(e.text for e in entity_text), token.text))
        # Normalized similarity; note this can go negative when the edit distance
        # exceeds the token length.
        return 1.0 - edit_distance / len(token.text)
    def _related_column(self,
                        entity: str,
                        entity_text: List[Token],
                        token: Token,
                        token_index: int,
                        tokens: List[Token]) -> float:
        # Check if the entity is a column name in one of the two WikiTables languages.
        if not entity.startswith('fb:row.row') and "_column:" not in entity:
            return 0.0
        for neighbor in self.knowledge_graph.neighbors[entity]:
            if token.text in self._entity_text_exact_text[neighbor]:
                return 1.0
        return 0.0
    def _related_column_lemma(self,
                              entity: str,
                              entity_text: List[Token],
                              token: Token,
                              token_index: int,
                              tokens: List[Token]) -> float:
        # Check if the entity is a column name in one of the two WikiTables languages.
        if not entity.startswith('fb:row.row') and '_column:' not in entity:
            return 0.0
        for neighbor in self.knowledge_graph.neighbors[entity]:
            if token.text in self._entity_text_exact_text[neighbor]:
                return 1.0
            if token.lemma_ in self._entity_text_lemmas[neighbor]:
                return 1.0
        return 0.0
    def _span_overlap_fraction(self,
                               entity: str,
                               entity_text: List[Token],
                               token: Token,
                               token_index: int,
                               tokens: List[Token]) -> float:
        # Fraction of the entity's words covered by the contiguous utterance span of
        # entity words around token_index (grown rightwards then leftwards).
        entity_words = set(entity_token.text for entity_token in entity_text)
        if not entity_words:
            # Some tables have empty cells.
            return 0
        seen_entity_words = set()
        token_index_left = token_index
        while token_index < len(tokens) and tokens[token_index].text in entity_words:
            seen_entity_words.add(tokens[token_index].text)
            token_index += 1
        while token_index_left >= 0 and tokens[token_index_left].text in entity_words:
            seen_entity_words.add(tokens[token_index_left].text)
            token_index_left -= 1
        return len(seen_entity_words) / len(entity_words)
    def _span_lemma_overlap_fraction(self,
                                     entity: str,
                                     entity_text: List[Token],
                                     token: Token,
                                     token_index: int,
                                     tokens: List[Token]) -> float:
        entity_lemmas = set(entity_token.lemma_ for entity_token in entity_text)
        if not entity_lemmas:
            # Some tables have empty cells.
            return 0
        seen_entity_lemmas = set()
        token_index_left = token_index
        while token_index < len(tokens) and tokens[token_index].lemma_ in entity_lemmas:
            seen_entity_lemmas.add(tokens[token_index].lemma_)
            token_index += 1
        while token_index_left >= 0 and tokens[token_index_left].lemma_ in entity_lemmas:
            seen_entity_lemmas.add(tokens[token_index_left].lemma_)
            token_index_left -= 1
        return len(seen_entity_lemmas) / len(entity_lemmas)
    # pylint: enable=unused-argument,no-self-use
| [
"mattg@allenai.org"
] | mattg@allenai.org |
96c96efe9ecb296cc2033b388891cae9edfce3eb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_stooges.py | 1d4b04a9490a2094070e8d4dab9ac1b03b79dfd3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._stooge import _STOOGE
#calss header
class _STOOGES(_STOOGE, ):
    """Plural noun entry "STOOGES", derived from the singular ``_STOOGE``."""
    def __init__(self,):
        # Let the singular form initialize shared state, then record this
        # entry's own identity fields.
        _STOOGE.__init__(self)
        self.jsondata = {}
        self.basic = "stooge"
        self.specie = 'nouns'
        self.name = "STOOGES"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ea00ac9cf0ba38ce3d14ec9351fb5ce1d8ba4246 | a72f39b82966cd6e2a3673851433ce7db550429a | /openchat/openchat/models/imagemodel.py | f1310a355504ba28f45bdb8db17b6ed4d232bf6c | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 2,534 | py | import torch
from openchat.models.base_model import BaseModel
import openchat.config as cfg
from openchat.utils.prepare_image import detect_objects_on_single_image
from openchat.utils.transforms import build_transforms
import cv2
import sys
sys.path.insert(0, '/home/datasets/mix_data/openchat/scene_graph_benchmark-main')
class LxmertBot(BaseModel):
    """
    Visual question-answering bot: runs an object detector over the image for
    ``image_id``, then scores candidate answers with a pre-trained LXMERT model.
    Registered with the base class under the model name 'imagemodel'.
    """
    def __init__(self, env, device, max_context_length):
        super().__init__('imagemodel', env)
        # self.model = MobileNetV2(num_classes=5)
        self.devices = device.lower()
        self.max_context_length = max_context_length
        # self.tokenizer = .from_pretrained()
        # NOTE(review): self.tokenizer is never assigned (the line above is
        # commented out), yet predict() calls self.tokenizer.encode — this will
        # raise AttributeError once a dialogue history exists. Confirm.
        self.eos = '</s><s>'
        # Both models are loaded from pre-serialized checkpoint paths in cfg.
        self.lxmert_model = torch.load(cfg.lxmert_weight_path)
        self.transforms = build_transforms()
        self.detect_model = torch.load(cfg.detect_weight_path)
        # self.model.to(device)
    @torch.no_grad()
    def predict(self, image_id: str, text: str) -> str:
        """Answer question ``text`` about the image at ``cfg.image_path + image_id``."""
        torch.cuda.empty_cache()
        input_ids_list: list = []
        num_of_stacked_tokens: int = 0
        print(text)
        # First time this image is seen: initialize its history slot.
        if image_id not in self.env.histories.keys():
            self.env.clear(image_id, text)
        user_histories = reversed(self.env.histories[image_id]['user'])
        bot_histories = reversed(self.env.histories[image_id]['bot'])
        # Stack recent dialogue turns (newest first) up to max_context_length tokens.
        # NOTE(review): input_ids_list is built here but never used below —
        # confirm whether the dialogue context was meant to be fed to the model.
        for user, bot in zip(user_histories, bot_histories):
            user_tokens = self.tokenizer.encode(user, return_tensors='pt')
            bot_tokens = self.tokenizer.encode(bot, return_tensors='pt')
            num_of_stacked_tokens += user_tokens.shape[-1] + bot_tokens.shape[-1]
            if num_of_stacked_tokens <= self.max_context_length:
                input_ids_list.append(bot_tokens)
                input_ids_list.append(user_tokens)
            else:
                break
        img_path = cfg.image_path + image_id
        img = cv2.imread(img_path)
        # One feature vector and one bounding box per detected object; a batch
        # dimension is added with unsqueeze for the model.
        dets = detect_objects_on_single_image(self.detect_model, self.transforms, img)
        data = {}
        data['feats'] = torch.stack([det['features'] for det in dets]).unsqueeze(dim=0)
        data['boxes'] = torch.stack([torch.tensor(det['rect'], dtype=torch.float32) for det in dets]).unsqueeze(dim=0)
        feats = data['feats'].to('cuda')
        boxes = data['boxes'].to('cuda')
        sent = [text]
        output_dict = self.lxmert_model.model(feats, boxes, sent)
        # Highest-scoring answer index, looked up in the fixed answer table.
        max_score = output_dict['scores'].argmax(dim=-1)
        print(max_score)
        ans = cfg.answer_table[max_score]
        return ans
| [
"hsslab.inspur@gmail.com"
] | hsslab.inspur@gmail.com |
0b25ae4c71c7ade397cad1698cadef5233b1b817 | ecb7e109a62f6a2a130e3320ed1fb580ba4fc2de | /reference-code/lambda/cm-premembers-backend/premembers/check/logic/check_asc/asc_item_16_logic.py | 8cec4f13cbcf037fbbe58d663cd87397596d2cb4 | [] | no_license | nisheeth84/prjs_sample | df732bc1eb58bc4fd4da6e76e6d59a2e81f53204 | 3fb10823ca4c0eb3cd92bcd2d5d4abc8d59436d9 | refs/heads/master | 2022-12-25T22:44:14.767803 | 2020-10-07T14:55:52 | 2020-10-07T14:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,861 | py | import inspect
from premembers.repository.const import CheckResult
from premembers.common import common_utils, S3Utils
from premembers.const.const import CommonConst
from premembers.exception.pm_exceptions import PmError
from premembers.common import FileUtils, date_utils, aws_common
from premembers.check.logic.check_asc import asc_item_common_logic
LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
def check_asc_item_16_01(trace_id, check_history_id, organization_id,
                         project_id, aws_account, session, result_json_path):
    """
    ASC check item 16-01: flag S3 buckets that neither grant the LogDelivery
    group permissions via ACL nor have server access logging enabled.
    Saves raw resource files and the result JSON to S3 and returns a
    ``CheckResult`` code.
    """
    cw_logger = common_utils.begin_cw_logger(trace_id, __name__,
                                             inspect.currentframe())
    check_results = []
    is_authorized = True
    # Create an S3 client using the retrieved credentials.
    try:
        s3_client = S3Utils.get_s3_client(trace_id, session, aws_account,
                                          is_cw_logger=True)
    except PmError as e:
        raise common_utils.write_log_pm_error(e, cw_logger)
    # Retrieve the list of S3 buckets.
    try:
        list_buckets = asc_item_common_logic.get_list_buckets(
            trace_id, check_history_id, organization_id, project_id, s3_client,
            aws_account)
    except PmError as e:
        return CheckResult.Error
    for bucket in list_buckets["Buckets"]:
        bucket_name = bucket['Name']
        region_name = None
        try:
            # Look up each bucket's region based on the retrieved bucket list.
            region_name = S3Utils.get_bucket_location(trace_id, s3_client,
                                                      bucket_name, aws_account,
                                                      is_cw_logger=True)
            if region_name is None:
                region_name = CommonConst.US_EAST_REGION
            # Fetch the bucket's access control list (cached on S3 if present).
            bucket_acl = get_bucket_acl(
                trace_id, check_history_id, organization_id, project_id,
                aws_account, region_name, bucket_name, s3_client)
            # Fetch the bucket's server access logging configuration.
            bucket_logging = S3Utils.get_bucket_logging(
                trace_id, aws_account, s3_client, bucket_name, region_name,
                is_cw_logger=True)
        except PmError as e:
            # Known permission/skip errors are recorded per bucket; anything
            # else aborts the whole check.
            if e.cause_error.response['Error'][
                    'Code'] in CommonConst.S3_SKIP_EXCEPTION:
                # NOTE(review): the trailing commas make error_operation and
                # error_code 1-tuples rather than scalars — confirm intended.
                error_operation = e.cause_error.operation_name,
                error_code = e.cause_error.response['Error']['Code'],
                error_message = e.cause_error.response['Error']['Message']
                if region_name is None:
                    region_name = CommonConst.ERROR
                check_results.append(
                    asc_item_common_logic.get_error_authorized_result(
                        region_name, bucket_name, error_operation, error_code,
                        error_message))
                is_authorized = False
                continue
            else:
                return CheckResult.Error
        # Save the retrieved logging configuration to S3 (resource info file).
        try:
            s3_file_name = CommonConst.PATH_CHECK_RAW.format(
                check_history_id, organization_id, project_id, aws_account,
                "ASC/S3_ClientLogging_" + region_name + "_" + bucket_name +
                ".json")
            FileUtils.upload_json(trace_id, "S3_CHECK_BUCKET", bucket_logging,
                                  s3_file_name, is_cw_logger=True)
        except PmError as e:
            cw_logger.error("[%s] S3バケットロギング情報の取得に失敗しました。(%s/%s)", aws_account,
                            region_name, bucket_name)
            return CheckResult.Error
        # Check logic.
        bucket_abnormity = True
        try:
            # Check-1. Does the bucket's ACL grant the LogDelivery group any permission?
            for grant in bucket_acl["Grants"]:
                if (common_utils.check_key("URI", grant['Grantee']) and
                        grant['Grantee']["URI"] == LOG_DELIVERY_URI):
                    bucket_abnormity = False
                    break
            # Check-2. Is server access logging disabled on the bucket?
            if bucket_abnormity is True and len(bucket_logging) == 0:
                result = {
                    'Region': region_name,
                    'Level': CommonConst.LEVEL_CODE_21,
                    'DetectionItem': {
                        'BucketName': bucket_name
                    }
                }
                check_results.append(result)
        except Exception as e:
            cw_logger.error("[%s] チェック処理中にエラーが発生しました。(%s/%s)", aws_account,
                            region_name, bucket_name)
            return CheckResult.Error
    # Export File CHECK_ASC_ITEM_16_01.json
    try:
        current_date = date_utils.get_current_date_by_format(
            date_utils.PATTERN_YYYYMMDDHHMMSS)
        check_asc_item_16_01 = {
            'AWSAccount': aws_account,
            'CheckResults': check_results,
            'DateTime': current_date
        }
        FileUtils.upload_s3(trace_id, check_asc_item_16_01, result_json_path,
                            format_json=True, is_cw_logger=True)
    except Exception as e:
        cw_logger.error("[%s] チェック結果JSONファイルの保存に失敗しました。", aws_account)
        return CheckResult.Error
    # Overall result: any authorization failure wins, then any detection.
    if is_authorized is False:
        return CheckResult.Error
    if len(check_results) > 0:
        return CheckResult.CriticalDefect
    return CheckResult.Normal
def get_bucket_acl(trace_id, check_history_id, organization_id, project_id,
                   aws_account, region_name, bucket_name, s3_client):
    """
    Return the ACL for ``bucket_name``, reusing a previously saved resource
    file on S3 when one exists; otherwise fetch it from the bucket and cache it.
    """
    pm_logger = common_utils.begin_logger(trace_id, __name__,
                                          inspect.currentframe())
    s3_file_name = CommonConst.PATH_CHECK_RAW.format(
        check_history_id, organization_id, project_id, aws_account,
        "ASC/S3_ACL_" + region_name + "_" + bucket_name + ".json")
    # Retrieve resource info: prefer the cached file on S3 if it exists.
    if (aws_common.check_exists_file_s3(trace_id, "S3_CHECK_BUCKET",
                                        s3_file_name,
                                        is_cw_logger=True)) is True:
        try:
            bucket_acl = FileUtils.read_json(trace_id, "S3_CHECK_BUCKET",
                                             s3_file_name, is_cw_logger=True)
        except PmError as e:
            raise common_utils.write_log_pm_error(e, pm_logger)
    else:
        try:
            bucket_acl = S3Utils.get_bucket_acl(
                trace_id, s3_client, bucket_name, aws_account, region_name,
                is_cw_logger=True)
        except PmError as e:
            raise common_utils.write_log_pm_error(e, pm_logger)
        # Save the retrieved bucket ACL to S3 (ACL resource info file).
        # A failed upload is only logged; the ACL is still returned.
        try:
            FileUtils.upload_json(trace_id, "S3_CHECK_BUCKET", bucket_acl,
                                  s3_file_name, is_cw_logger=True)
        except PmError as e:
            pm_logger.error("[%s] S3バケットACL情報のS3保存に失敗しました。(%s)/(%s)",
                            aws_account, region_name, bucket_name)
    return bucket_acl
| [
"phamkhachoabk@gmail.com"
] | phamkhachoabk@gmail.com |
82a59df3d48129e6e0810156db4eb099d6a7257b | ca8be92d186022018ce5157a56f365602eb70158 | /src/201401/elasticnodes_learn.py | 5491db6e1de7ca6752e7bf8183e2f7e825fe4005 | [] | no_license | songgz/toys | 05a8214cd86b747bb14b0870c1a12be4794299dd | f808e7c4ed1a76db4800c8e1ee6d163242df52cc | refs/heads/master | 2020-06-19T15:36:11.175281 | 2014-09-28T13:54:41 | 2014-09-28T13:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,659 | py | #coding=utf-8
import math
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class Edge(QGraphicsItem):
    """Graphics item drawing a bowed, arrow-tipped connection between two nodes."""
    Pi = math.pi
    TwoPi = 2.0 * Pi
    Type = QGraphicsItem.UserType + 2

    def __init__(self, sourceNode, destNode):
        super(Edge, self).__init__()
        # Edges are purely decorative: they never take mouse input.
        self.setAcceptedMouseButtons(QtCore.Qt.NoButton)
        self.arrowSize = 5.0
        self.sourcePoint = QPointF()
        self.destPoint = QPointF()
        self.source = sourceNode
        self.dest = destNode
        # Register with both endpoints so node movement can re-route this edge.
        self.source.addEdge(self)
        self.dest.addEdge(self)
        self.adjust()

    def type(self):
        """Identify this item type to the graphics-view framework."""
        return Edge.Type

    def sourceNode(self):
        return self.source

    def setSourceNode(self, node):
        self.source = node
        self.adjust()

    def destNode(self):
        return self.dest

    def setDestNode(self, node):
        self.dest = node
        self.adjust()

    def adjust(self):
        """Recompute the edge endpoints from the current node positions."""
        if not self.source or not self.dest:
            return
        line = QtCore.QLineF(self.mapFromItem(self.source, 0, 0),
                             self.mapFromItem(self.dest, 0, 0))
        dist = line.length()
        self.prepareGeometryChange()
        if dist <= 20.0:
            # Nodes (almost) overlap: collapse the edge onto a single point.
            self.sourcePoint = line.p1()
            self.destPoint = line.p1()
            return
        # Pull each endpoint back by the node radius (10 px) along the line.
        offset = QtCore.QPointF(line.dx() * 10 / dist, line.dy() * 10 / dist)
        self.sourcePoint = line.p1() + offset
        self.destPoint = line.p2() - offset

    def boundingRect(self):
        """Bounding box of the edge, padded for pen width and arrow head."""
        if not self.source or not self.dest:
            return QtCore.QRectF()
        pad = (1.0 + self.arrowSize) / 2.0  # half of (pen width + arrow size)
        span = QtCore.QSizeF(self.destPoint.x() - self.sourcePoint.x(),
                             self.destPoint.y() - self.sourcePoint.y())
        rect = QtCore.QRectF(self.sourcePoint, span).normalized()
        return rect.adjusted(-pad, -pad, pad, pad)

    def paint(self, painter, option, widget):
        """Draw the edge as a sampled cubic curve with an arrow at the destination."""
        if not self.source or not self.dest:
            return
        line = QtCore.QLineF(self.sourcePoint, self.destPoint)
        if line.length() == 0.0:
            return
        painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.SolidLine,
                                  QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
        # Bowed connection: a cubic Bezier whose control points sit 10 px off
        # the straight line, rendered as an 11-point polyline.
        curve = QPainterPath()
        curve.moveTo(self.sourcePoint)
        curve.cubicTo(self.sourcePoint + QPointF(10, -10),
                      self.destPoint - QPointF(10, -10),
                      self.destPoint)
        samples = [curve.pointAtPercent(step / 10.0) for step in range(11)]
        painter.drawPolyline(*samples)
        # Orient the arrow head along the straight source->dest direction.
        angle = math.acos(line.dx() / line.length())
        if line.dy() >= 0:
            angle = Edge.TwoPi - angle
        wing1 = self.destPoint + QtCore.QPointF(
            math.sin(angle - Edge.Pi / 3) * self.arrowSize,
            math.cos(angle - Edge.Pi / 3) * self.arrowSize)
        wing2 = self.destPoint + QtCore.QPointF(
            math.sin(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize,
            math.cos(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize)
        painter.setBrush(QtCore.Qt.black)
        # Only the destination end gets an arrow head (the source-end polygon
        # was disabled in the original implementation).
        painter.drawPolygon(QtGui.QPolygonF([line.p2(), wing1, wing2]))
class Node(QtGui.QGraphicsItem):
    """A draggable graph node that repels other nodes and is pulled along
    its edges (the classic Qt "Elastic Nodes" force layout)."""

    Type = QtGui.QGraphicsItem.UserType + 1

    def __init__(self, graphWidget):
        super(Node, self).__init__()

        self.graph = graphWidget   # owning GraphWidget, notified on moves
        self.edgeList = []         # edges attached to this node
        self.newPos = QtCore.QPointF()  # position computed by calculateForces()
        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
        self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
        self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
        self.setZValue(1)

    def type(self):
        """Item type tag used by Qt's qgraphicsitem_cast machinery."""
        return Node.Type

    def addEdge(self, edge):
        """Register an incident edge and let it recompute its endpoints."""
        self.edgeList.append(edge)
        edge.adjust()

    def edges(self):
        return self.edgeList

    def calculateForces(self):
        """Compute this node's next position into self.newPos.

        Repulsion from every other node plus attraction along incident
        edges; the node being dragged by the mouse is left alone.
        """
        if not self.scene() or self.scene().mouseGrabberItem() is self:
            self.newPos = self.pos()
            return

        # Sum up all forces pushing this item away.
        xvel = 0.0
        yvel = 0.0
        for item in self.scene().items():
            if not isinstance(item, Node):
                continue

            # Coulomb-like repulsion, falling off with squared distance.
            line = QtCore.QLineF(self.mapFromItem(item, 0, 0),
                                 QtCore.QPointF(0, 0))
            dx = line.dx()
            dy = line.dy()
            l = 2.0 * (dx * dx + dy * dy)
            if l > 0:
                xvel += (dx * 50.0) / l
                yvel += (dy * 50.0) / l

        # Now subtract all forces pulling items together.
        weight = (len(self.edgeList) + 1) * 10.0
        for edge in self.edgeList:
            if edge.sourceNode() is self:
                pos = self.mapFromItem(edge.destNode(), 0, 0)
            else:
                pos = self.mapFromItem(edge.sourceNode(), 0, 0)
            xvel += pos.x() / weight
            yvel += pos.y() / weight

        # Snap tiny velocities to zero so the simulation can settle.
        if QtCore.qAbs(xvel) < 0.1 and QtCore.qAbs(yvel) < 0.1:
            xvel = yvel = 0.0

        # Clamp the new position to stay 10px inside the scene rect.
        sceneRect = self.scene().sceneRect()
        self.newPos = self.pos() + QtCore.QPointF(xvel, yvel)
        self.newPos.setX(min(max(self.newPos.x(), sceneRect.left() + 10), sceneRect.right() - 10))
        self.newPos.setY(min(max(self.newPos.y(), sceneRect.top() + 10), sceneRect.bottom() - 10))

    def advance(self):
        """Apply the precomputed position; return True if the node moved."""
        if self.newPos == self.pos():
            return False

        self.setPos(self.newPos)
        return True

    def boundingRect(self):
        # 20x20 ellipse plus a 3px drop shadow, padded by 2px.
        adjust = 2.0
        return QtCore.QRectF(-10 - adjust, -10 - adjust, 23 + adjust,
                             23 + adjust)

    def shape(self):
        """Precise hit-testing shape: the ellipse only, not the shadow."""
        path = QtGui.QPainterPath()
        path.addEllipse(-10, -10, 20, 20)
        return path

    def paint(self, painter, option, widget):
        """Draw a shaded yellow ball with a dark drop shadow; the gradient
        is inverted while the node is pressed (State_Sunken)."""
        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(QtCore.Qt.darkGray)
        painter.drawEllipse(-7, -7, 20, 20)

        gradient = QtGui.QRadialGradient(-3, -3, 10)
        if option.state & QtGui.QStyle.State_Sunken:
            gradient.setCenter(3, 3)
            gradient.setFocalPoint(3, 3)
            gradient.setColorAt(1, QtGui.QColor(QtCore.Qt.yellow).light(120))
            gradient.setColorAt(0, QtGui.QColor(QtCore.Qt.darkYellow).light(120))
        else:
            gradient.setColorAt(0, QtCore.Qt.yellow)
            gradient.setColorAt(1, QtCore.Qt.darkYellow)

        painter.setBrush(QtGui.QBrush(gradient))
        painter.setPen(QtGui.QPen(QtCore.Qt.black, 0))
        painter.drawEllipse(-10, -10, 20, 20)

    def itemChange(self, change, value):
        # Keep incident edges attached and restart the layout timer
        # whenever this node moves.
        if change == QtGui.QGraphicsItem.ItemPositionHasChanged:
            for edge in self.edgeList:
                edge.adjust()
            self.graph.itemMoved()

        return super(Node, self).itemChange(change, value)

    def mousePressEvent(self, event):
        # Repaint so the sunken gradient appears.
        self.update()
        super(Node, self).mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        self.update()
        super(Node, self).mouseReleaseEvent(event)
class GraphWidget(QtGui.QGraphicsView):
    """View hosting the elastic-nodes scene: builds a small demo graph,
    drives the force simulation with a timer, and handles zoom/keys."""

    def __init__(self):
        super(GraphWidget, self).__init__()

        # 0 means "no layout timer running"; see itemMoved()/timerEvent().
        self.timerId = 0

        scene = QtGui.QGraphicsScene(self)
        scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
        scene.setSceneRect(-200, -200, 400, 400)
        self.setScene(scene)
        self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
        self.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
        self.setRenderHint(QtGui.QPainter.Antialiasing)
        self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)

        # Build the demo graph: a center node with two subtrees.
        # node1 = Node(self)
        node2 = Node(self)
        node3 = Node(self)
        node4 = Node(self)
        self.centerNode = Node(self)
        node6 = Node(self)
        node7 = Node(self)
        node8 = Node(self)
        #node9 = Node(self)
        # scene.addItem(node1)
        scene.addItem(node2)
        scene.addItem(node3)
        scene.addItem(node4)
        scene.addItem(node6)
        scene.addItem(node7)
        scene.addItem(node8)
        scene.addItem(self.centerNode)
        # scene.addItem(node9)
        scene.addItem(Edge(self.centerNode, node2))
        scene.addItem(Edge(self.centerNode, node3))
        scene.addItem(Edge(node2, node4))
        scene.addItem(Edge(node2, node6))
        scene.addItem(Edge(node3, node7))
        scene.addItem(Edge(node3, node8))
        # scene.addItem(Edge(node1, node2))
        # scene.addItem(Edge(node2, node3))
        # scene.addItem(Edge(node2, self.centerNode))
        # scene.addItem(Edge(node3, node6))
        # #scene.addItem(Edge(node4, node1))
        # #scene.addItem(Edge(node4, self.centerNode))
        # #scene.addItem(Edge(self.centerNode, node6))
        # #scene.addItem(Edge(self.centerNode, node8))
        # #scene.addItem(Edge(node6, node9))
        # #scene.addItem(Edge(node7, node4))
        # scene.addItem(Edge(node8, node7))
        # scene.addItem(Edge(node9, node8))

        # Initial diagonal placement; the force layout spreads them out.
        # node1.setPos(-50, -50)
        node2.setPos(10, 10)
        node3.setPos(20, 20)
        node4.setPos(30, 30)
        self.centerNode.setPos(40, 40)
        node6.setPos(50, 50)
        node7.setPos(60, 60)
        node8.setPos(70, 70)
        # node9.setPos(50, 50)

        self.scale(0.8, 0.8)
        self.setMinimumSize(400, 400)
        self.setWindowTitle("Elastic Nodes")

    def itemMoved(self):
        """(Re)start the 20 fps layout timer after any node moved."""
        if not self.timerId:
            self.timerId = self.startTimer(1000 / 20)

    def keyPressEvent(self, event):
        """Arrows move the center node, +/- zoom, Space/Enter scatters
        all nodes to random positions."""
        key = event.key()

        if key == QtCore.Qt.Key_Up:
            self.centerNode.moveBy(0, -20)
        elif key == QtCore.Qt.Key_Down:
            self.centerNode.moveBy(0, 20)
        elif key == QtCore.Qt.Key_Left:
            self.centerNode.moveBy(-20, 0)
        elif key == QtCore.Qt.Key_Right:
            self.centerNode.moveBy(20, 0)
        elif key == QtCore.Qt.Key_Plus:
            self.scaleView(1.2)
        elif key == QtCore.Qt.Key_Minus:
            self.scaleView(1 / 1.2)
        elif key == QtCore.Qt.Key_Space or key == QtCore.Qt.Key_Enter:
            for item in self.scene().items():
                if isinstance(item, Node):
                    item.setPos(-150 + QtCore.qrand() % 300, -150 + QtCore.qrand() % 300)
        else:
            super(GraphWidget, self).keyPressEvent(event)

    def timerEvent(self, event):
        """One simulation step; stop the timer once everything settles."""
        nodes = [item for item in self.scene().items() if isinstance(item, Node)]

        for node in nodes:
            node.calculateForces()

        itemsMoved = False
        for node in nodes:
            if node.advance():
                itemsMoved = True

        if not itemsMoved:
            self.killTimer(self.timerId)
            self.timerId = 0

    def wheelEvent(self, event):
        # 240 wheel units (two notches) double/halve the zoom.
        self.scaleView(math.pow(2.0, -event.delta() / 240.0))

    def drawBackground(self, painter, rect):
        """Paint the drop shadow, gradient fill, border and help text
        behind the scene."""
        # Shadow.
        sceneRect = self.sceneRect()
        rightShadow = QtCore.QRectF(sceneRect.right(), sceneRect.top() + 5, 5,
                                    sceneRect.height())
        bottomShadow = QtCore.QRectF(sceneRect.left() + 5, sceneRect.bottom(),
                                     sceneRect.width(), 5)
        if rightShadow.intersects(rect) or rightShadow.contains(rect):
            painter.fillRect(rightShadow, QtCore.Qt.darkGray)
        if bottomShadow.intersects(rect) or bottomShadow.contains(rect):
            painter.fillRect(bottomShadow, QtCore.Qt.darkGray)

        # Fill.
        gradient = QtGui.QLinearGradient(sceneRect.topLeft(),
                                         sceneRect.bottomRight())
        gradient.setColorAt(0, QtCore.Qt.white)
        gradient.setColorAt(1, QtCore.Qt.lightGray)
        painter.fillRect(rect.intersect(sceneRect), QtGui.QBrush(gradient))
        painter.setBrush(QtCore.Qt.NoBrush)
        painter.drawRect(sceneRect)

        # Text.
        textRect = QtCore.QRectF(sceneRect.left() + 4, sceneRect.top() + 4,
                                 sceneRect.width() - 4, sceneRect.height() - 4)
        message = "Click and drag the nodes around, and zoom with the " \
                  "mouse wheel or the '+' and '-' keys"

        font = painter.font()
        font.setBold(True)
        font.setPointSize(14)
        painter.setFont(font)
        # Light-gray copy offset by (2, 2) gives the text a shadow.
        painter.setPen(QtCore.Qt.lightGray)
        painter.drawText(textRect.translated(2, 2), message)
        painter.setPen(QtCore.Qt.black)
        painter.drawText(textRect, message)

    def scaleView(self, scaleFactor):
        """Zoom the view by scaleFactor, clamped to [0.07, 100]x overall."""
        factor = self.matrix().scale(scaleFactor, scaleFactor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()
        if factor < 0.07 or factor > 100:
            return

        self.scale(scaleFactor, scaleFactor)
if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    # Seed Qt's RNG, used by the Space/Enter "scatter nodes" action.
    QtCore.qsrand(QtCore.QTime(0, 0, 0).secsTo(QtCore.QTime.currentTime()))

    widget = GraphWidget()
    widget.show()

    sys.exit(app.exec_())
| [
"pingf0@gmail.com"
] | pingf0@gmail.com |
fad8cd1b400beabb441d442165b3a8bf0df6762a | cbf9f600374d7510988632d7dba145c8ff0cd1f0 | /virtual/XMLProContest/07/b.py | 35635e38d107de7b0178f53764dc410a73266496 | [] | no_license | sakakazu2468/AtCoder_py | d0945d03ad562474e40e413abcec39ded61e6855 | 34bdf39ee9647e7aee17e48c928ce5288a1bfaa5 | refs/heads/master | 2022-04-27T18:32:28.825004 | 2022-04-21T07:27:00 | 2022-04-21T07:27:00 | 225,844,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | s = input()
# ABC104 B "AcCepted" (s has been read from stdin just above):
# accept iff the string starts with 'A', contains exactly one 'C' between
# the 3rd character and the second-to-last one, and every remaining
# character is a lowercase ASCII letter.
if s[0] != "A":
    print("WA")
else:
    middle_c = s[2:len(s) - 1].count("C")
    if middle_c != 1:
        print("WA")
    else:
        # Besides the leading 'A' and the single middle 'C', everything
        # must be lowercase ASCII: exactly two non-lowercase characters.
        non_lower = sum(1 for ch in s if not (97 <= ord(ch) <= 122))
        print("AC" if non_lower == 2 else "WA")
| [
"sakakazu2468@icloud.com"
] | sakakazu2468@icloud.com |
a015ebe3fa675abc13b391e986aad5bcb118daa4 | 996bc055b89b5e103f345dd26b0883caffd1c9bc | /openstack/identity/v3/region.py | 1fdd1c20cda5780cde2af826cfa5caf2581b8f71 | [
"Apache-2.0"
] | permissive | starlingx-staging/stx-openstacksdk | be633cedf4740e8788b814eca68039df3de6fe4b | 93d4e95536c4824e2197b2a63a4438ff5c60b653 | refs/heads/master | 2020-03-18T03:00:29.254952 | 2018-11-23T18:27:59 | 2018-11-24T01:17:45 | 134,218,298 | 1 | 4 | Apache-2.0 | 2018-11-24T01:17:46 | 2018-05-21T04:34:40 | Python | UTF-8 | Python | false | false | 1,277 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource2 as resource
class Region(resource.Resource):
    """Identity v3 ``region`` resource (the ``/regions`` endpoint).

    Regions can be nested via ``parent_region_id``.
    """
    resource_key = 'region'
    resources_key = 'regions'
    base_path = '/regions'
    service = identity_service.IdentityService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True
    # Updates are sent with HTTP PATCH instead of PUT.
    patch_update = True

    # Properties
    #: User-facing description of the region. *Type: string*
    description = resource.Body('description')
    #: The links for the region resource.
    links = resource.Body('links')
    #: ID of parent region, if any. *Type: string*
    parent_region_id = resource.Body('parent_region_id')
| [
"tengqim@cn.ibm.com"
] | tengqim@cn.ibm.com |
563575c004cb97e9beef32766003f08b7e584b18 | da386754e12ed3e251d5fb9091d9416b9f97edc7 | /sfepy/discrete/common/extmods/cmesh.pxd | 120eceb44f387f87854279f1d9895b7f2a036891 | [
"BSD-3-Clause"
] | permissive | nasseralkmim/sfepy | 5b5642f084b62632c1ca48035e510f27728e25ab | 647f1754bcd4fd103cd19a03ed36cb10ebc8fd15 | refs/heads/master | 2020-04-06T04:57:21.589694 | 2016-08-03T12:38:31 | 2016-08-03T12:38:31 | 65,736,316 | 2 | 1 | null | 2016-08-15T13:58:01 | 2016-08-15T13:58:01 | null | UTF-8 | Python | false | false | 3,893 | pxd | # -*- Mode: Python -*-
"""
C Mesh data structures and functions.
"""
cimport numpy as np
from libc.stdio cimport FILE, stdout
from types cimport uint32, int32, float64, complex128
cdef extern from 'string.h':
void *memcpy(void *dest, void *src, size_t n)
cdef extern from 'common.h':
void *pyalloc(size_t size)
void pyfree(void *pp)
void mem_statistics(int lineNo, char *funName,
char *fileName, char *dirName)
size_t mem_get_cur_usage()
size_t mem_get_max_usage()
size_t mem_get_n_frags()
cdef extern from 'mesh.h':
ctypedef struct Indices:
uint32 *indices
uint32 num
ctypedef struct Mask:
char *mask
uint32 num
uint32 n_true
ctypedef struct MeshGeometry:
uint32 num
uint32 dim
float64 *coors
ctypedef struct MeshTopology:
uint32 max_dim
uint32 num[4]
uint32 *cell_types
uint32 *face_oris
uint32 *edge_oris
MeshConnectivity *conn[16]
ctypedef struct MeshConnectivity:
uint32 num
uint32 n_incident
uint32 *indices
uint32 *offsets
uint32 offset
ctypedef struct LocalEntities:
uint32 num
MeshConnectivity **edges
MeshConnectivity **faces
ctypedef struct Mesh:
MeshGeometry geometry[1]
MeshTopology topology[1]
LocalEntities entities[1]
cdef int32 mesh_init(Mesh *mesh)
cdef int32 mesh_free(Mesh *mesh)
cdef int32 mesh_print(Mesh *mesh, FILE *file, int32 header_only)
cdef int32 conn_alloc(MeshConnectivity *conn,
uint32 num, uint32 n_incident)
cdef int32 conn_free(MeshConnectivity *conn)
cdef int32 conn_print(MeshConnectivity *conn, FILE *file)
cdef int32 mesh_set_coors(Mesh *mesh, float64 *coors, int32 num, int32 dim,
int32 tdim)
cdef int32 mesh_setup_connectivity(Mesh *mesh, int32 d1, int32 d2)
cdef int32 mesh_free_connectivity(Mesh *mesh, int32 d1, int32 d2)
cdef uint32 mesh_count_incident(Mesh *mesh, int32 dim,
Indices *entities, int32 dent)
cdef int32 mesh_get_incident(Mesh *mesh,
MeshConnectivity *incident, int32 dim,
Indices *entities, int32 dent)
cdef int32 mesh_get_local_ids(Mesh *mesh, Indices *local_ids,
Indices *entities, int32 dent,
MeshConnectivity *incident, int32 dim)
cdef int32 mesh_select_complete(Mesh *mesh, Mask *mask, int32 dim,
Indices *entities, int32 dent)
cdef int32 mesh_get_centroids(Mesh *mesh, float64 *ccoors, int32 dim)
cdef int32 mesh_get_facet_normals(Mesh *mesh, float64 *normals,
int32 which)
cdef class CConnectivity:
    """
    Notes
    -----
    The memory is allocated/freed in C - this class just wraps NumPy arrays
    around that data without copying.
    """
    # Borrowed pointer to the underlying C connectivity struct.
    cdef MeshConnectivity *conn

    # NumPy views over the struct's indices/offsets arrays (no copy).
    cdef public np.ndarray indices
    cdef public np.ndarray offsets
    # Mirrors of the struct's scalar fields.
    cdef public int num, n_incident, offset

    # Attach an existing C struct to this wrapper (body in cmesh.pyx).
    cdef _set_conn(self, MeshConnectivity *conn)
cdef class CMesh:
    """Cython wrapper of the C ``Mesh`` struct declared in mesh.h."""
    cdef Mesh mesh[1]

    # Vertex coordinates and per-vertex group ids.
    cdef readonly np.ndarray coors
    cdef readonly np.ndarray vertex_groups
    cdef readonly np.ndarray cell_types
    cdef readonly np.ndarray cell_groups # ig for each cell.
    # conns: connectivity wrappers; entities: cached entity index arrays
    # - presumably keyed per dimension, confirm against cmesh.pyx.
    cdef readonly list conns
    cdef readonly dict entities
    cdef readonly int n_coor, dim, n_el, tdim
    cdef readonly np.ndarray num # Numbers of topological entities.
    cdef readonly np.ndarray face_oris # Allocated in C.
    cdef readonly np.ndarray edge_oris # Allocated in C.
    cdef readonly np.ndarray facet_oris # face_oris in 3D, edge_oris in 2D
    cdef readonly dict key_to_index
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
7d84de5d28cd4b40bb4268adbdf0cb11c4199569 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/analyzes/employment/models.py | 05c8ebbf391dc038ba5a93dbda7e187bdd3c6fba | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 1,074 | py | import datetime
from mongoengine import Document
from mongoengine import StringField
from mongoengine import DateTimeField
from mongoengine import ObjectIdField
from mongoengine import DecimalField
from mongoengine import ReferenceField
from modules.analyzes.employment.kat_hesap_tipleri.models import KatHesapTipleri
from modules.analyzes.employment.pursantaj_oranlari.models import PursantajOranlari
from modules.organization.models import Organization
class EmploymentAnalysis(Document):
    """MongoDB document holding one employment-analysis record.

    Underscore-prefixed fields are audit metadata (creation/modification
    timestamps and user ids); the remaining fields tie the analysis to an
    organization and its pricing parameters.
    """
    # Audit metadata.
    _created_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_created_user = ObjectIdField()
    _last_modified_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_last_modified_user = ObjectIdField()
    _key_owner_user = ObjectIdField()
    # Stored as a string, not a DateTimeField - format set by the caller.
    analysis_date = StringField(required=True)
    _key_organization = ReferenceField(Organization, required=True)
    # Turkish domain terms: 'kat hesap tipi' ~ floor account type,
    # 'pursantaj orani' ~ percentage rate, 'birim fiyat' ~ unit price
    # - confirm exact semantics with the domain owners.
    _key_kat_hesap_tipi = ReferenceField(KatHesapTipleri, required=True)
    _key_pursantaj_orani = ReferenceField(PursantajOranlari, required=True)
    birim_fiyat = DecimalField(required=True)
| [
"mutlu.erdem@soft-nec.com"
] | mutlu.erdem@soft-nec.com |
b8f079041c20484d4c2101a86ff619727d3c3297 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_2bo9_E.py | 52f46a62409f621b8e01543e5c7aa75bed7aa552 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2bo9.csv'
identifier = 'E'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
9c27e79f90e511122a091902cda35327f7f6d46b | 2a3de1d5fed001261102c33aed9367bec29aaff4 | /joplin_web/_joplin.py | 31d17aca3ed146428f56a6687b36be1c06e58392 | [
"BSD-3-Clause"
] | permissive | kuyper/joplin-web | 0b2a9d274910733cd233d28feaa717f9d21fff5a | 7a13b75cbb55741ddfb58767af34c7ad164fec11 | refs/heads/master | 2020-04-04T11:38:58.871560 | 2018-11-01T14:20:02 | 2018-11-01T14:20:02 | 155,899,112 | 0 | 1 | BSD-3-Clause | 2018-11-02T17:21:46 | 2018-11-02T17:21:45 | null | UTF-8 | Python | false | false | 9,210 | py | # coding: utf-8
# std lib
import json
from logging import getLogger
import shlex
from subprocess import Popen, PIPE
# django
from django.conf import settings
# create logger
logger = getLogger("joplin_web.command")
"""
Joplin wrapper for joplin terminal command
# run
# 1) joplin use notebook to point on the notebook where to store the note
# 2) joplin mknote will returned the note id of the created note
# 3) joplin set note_id body xxx to add the body to the note
example:
>>> joplin = Joplin()
>>> joplin.mkbook('My Book')
joplin --profile /home/foxmask/.config/joplin-desktop/ mkbook "My Book"
(b'', b'', 0)
>>> joplin.mknote('My Book', 'My title', 'My body')
joplin --profile /home/foxmask/.config/joplin-desktop/ use "My Book"
joplin --profile /home/foxmask/.config/joplin-desktop/ mknote "My title"
joplin --profile /home/foxmask/.config/joplin-desktop/ ls -n 1 -s created_time -t n -f json
set b7f4c body "My body"
joplin --profile /home/foxmask/.config/joplin-desktop/ set b7f4c body "My body"
(b'', b'', 0)
"""
class Joplin:
    """Thin wrapper around the ``joplin`` terminal client.

    Every public method builds one CLI invocation, runs it through
    :meth:`_run` with the configured profile and returns the subprocess'
    raw ``(stdout, stderr, exitcode)`` tuple.
    """

    profile_path = ''

    def __init__(self):
        """
        set the profile path of joplin
        """
        self.profile_path = settings.JOPLIN_PROFILE_PATH

    def _run(self, line):
        """
        Build the command to be running
        :param line: the command to run
        :return: message and exitcode
        """
        cmd = "joplin --profile {} {}".format(self.profile_path, line)
        logger.debug(cmd)
        args = shlex.split(cmd)
        proc = Popen(args, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        exitcode = proc.returncode
        logger.info("joplin %s %s %s" % (out, err, exitcode))
        return out, err, exitcode

    #########
    # Notes #
    #########

    def ls(self, type_object):
        """
        list notes, but extract __the line__ of the new created note
        :param type_object: n = note - t = task
        :return: message and exitcode
        """
        line = " ls -n 1 -s created_time -t {} -f json".format(type_object)
        return self._run(line)

    def setp(self, note_id, **kwargs):
        """
        set properties
        :param note_id: id on the created note
        :param kwargs: can contains body and some other additionals properties
        :return: message and exitcode
        """
        out = err = ''
        exitcode = 0
        line_start = 'set {note_id} '.format(note_id=note_id)
        for key in kwargs:
            line = line_start + ' {key} "{value}"'.format(key=key, value=kwargs.get(key))
            # NOTE: a stray debug print() was removed here; logger.debug
            # below already records the command being run.
            logger.debug("SET %s " % line)
            out, err, exitcode = self._run(line)
        return out, err, exitcode

    def mknote(self, notebook, title, body, type_object='n'):
        """
        Create a note
        :param notebook: notebook choosen to store the new note
        :param title: note title to create
        :param body: content of the note to add to the created note
        :param type_object: type of object to create : n = note, t = task
        :return: message and exitcode
        """
        kwargs = dict()
        out, err, exitcode = self._use(notebook)
        if exitcode == 0:
            line = 'mknote "{}"'.format(title)
            out, err, exitcode = self._run(line)
            if exitcode == 0:
                # grab the just-created note (most recent by created_time)
                out, err, exitcode = self.ls(type_object)
                if exitcode == 0:
                    # 3) set the body of the note
                    payload = json.loads(out)
                    note_id = payload[0]['id'][:5]
                    logger.debug("note id %s " % note_id)
                    logger.debug("titre %s " % payload[0]['title'])
                    logger.debug("body %s " % payload[0]['body'])
                    logger.debug("todo %s " % payload[0]['is_todo'])
                    kwargs['body'] = body
                    out, err, exitcode = self.setp(note_id, **kwargs)
        return out, err, exitcode

    def editnote(self, note_id, parent_id, title, body, is_todo):
        """
        Edit a note
        :param note_id: note id to edit
        :param parent_id: notebook choosen to store the note
        :param title: note title to update
        :param body: content of the note edit the note
        :param is_todo: boolean 1 = todo 0 = note
        :return: message and exitcode
        """
        kwargs = dict()
        kwargs['parent_id'] = parent_id
        kwargs['title'] = title
        kwargs['body'] = body
        kwargs['is_todo'] = is_todo
        return self.setp(note_id, **kwargs)

    def cp(self, note_id, parent_id=0):
        """
        Copy a note to a notebook
        :param note_id: id to copy
        :param parent_id: id of the target notebook, if id is 0 copy to the current folder
        :return: message and exitcode
        """
        line = 'cp "{}" '.format(note_id)
        if parent_id > 0:
            line += ' "{}"'.format(parent_id)
        logger.debug(line)
        return self._run(line)

    def mv(self, note_id, parent_id):
        """
        Move of a note to another folder
        :param note_id: id to move
        :param parent_id: id of the target notebook
        :return: message and exitcode
        """
        line = 'mv "{}" "{}"'.format(note_id, parent_id)
        logger.debug(line)
        return self._run(line)

    def rmnote(self, note):
        """
        Remove a note
        :param note: id to delete
        :return: message and exitcode
        """
        line = 'rmnote "{}"'.format(note)
        logger.debug(line)
        return self._run(line)

    ############
    # Noteooks #
    ############

    def mkbook(self, notebook):
        """
        Create a notebook
        :param notebook: to create
        :return: message and exitcode
        """
        line = 'mkbook "{}"'.format(notebook)
        logger.debug(line)
        return self._run(line)

    def rmbook(self, notebook):
        """
        Remove a notebook
        :param notebook: to delete
        :return: message and exitcode
        """
        # Bug fix: the original format string ('rmbook '.format(notebook))
        # had no placeholder, so the notebook id was silently dropped and
        # the CLI received a bare 'rmbook'. Quote the id like rmnote() does.
        line = 'rmbook "{}"'.format(notebook)
        logger.debug(line)
        return self._run(line)

    def _use(self, notebook):
        """
        point to the notebook to use
        :return: message and exitcode
        """
        line = 'use "{}"'.format(notebook)
        logger.debug(line)
        return self._run(line)

    #########
    # To-do #
    #########

    def toggle(self, note_id):
        """
        set a note as a (uncomplet) To-do
        then set a To-do as complet
        then set a completed To-do as a (uncompleted) To-do
        :return: message and exitcode
        """
        line = 'todo toggle "{}"'.format(note_id)
        logger.debug(line)
        return self._run(line)

    def clear(self, note_id):
        """
        set a To-do back as note
        :return: message and exitcode
        """
        line = 'todo clear "{}"'.format(note_id)
        logger.debug(line)
        return self._run(line)

    def done(self, note_id):
        """
        Marks a to-do as completed.
        :return: message and exitcode
        """
        line = 'done "{}"'.format(note_id)
        logger.debug(line)
        return self._run(line)

    def undone(self, note_id):
        """
        Marks a to-do as non-completed.
        :return: message and exitcode
        """
        line = 'undone "{}"'.format(note_id)
        logger.debug(line)
        return self._run(line)

    ######################
    # Note and Notebooks #
    ######################

    def ren(self, note_or_folder_id, name):
        """
        Rename a note or a folder
        :param note_or_folder_id: id to rename (note or folder)
        :param name: string to use for renaming
        :return: message and exitcode
        """
        line = 'ren "{}" "{}"'.format(note_or_folder_id, name)
        logger.debug(line)
        return self._run(line)

    #######
    # Tag #
    #######

    def tag(self, action, tag_id, note_id):
        """
        deal with tag for a note
        :param action: can be add/remove/list
        :param tag_id: tag to add or remove to the note
        :param note_id: id of the note where the tag is added or deleted
        :return: message and exitcode
        """
        line = "tag {action} {tag_id} {note_id}".format(action=action, tag_id=tag_id, note_id=note_id)
        return self._run(line)

    ###############
    # Joplin info #
    ###############

    def version(self):
        """
        Version of Joplin
        :return: message and exitcode
        """
        logger.debug('joplin version ?')
        return self._run('version')

    ############
    # Settings #
    ############

    def config(self, name, value):
        """
        apply a config for `name` to `value`
        see doc at https://joplin.cozic.net/terminal/ `config [name] [value]`
        :param name: config name
        :param value: value to apply
        :return: message and exitcode
        """
        line = "config {name} {value}".format(name=name, value=value)
        return self._run(line)
| [
"foxmaskhome@gmail.com"
] | foxmaskhome@gmail.com |
36e370f7a59eb4a0f324af58795cd112dc7ceb73 | 9a819fc91e17ef9a44e45cf68e76cf696381d06d | /cdk_examples/cdk.out/asset.50ce0df99f68b771fed9c79fa8d531712d8d56eadb6b19697b42dfa31931f8ff/chardet/cli/chardetect.py | 2f1156136c9af217a753fafdb0dd5b3a7cf52a9b | [] | no_license | Gautam3994/Dark-Knight | aef1d6383e0785130db75e80ed40f544a120579e | 327b2d58851a42da1b707addea73e40fac6a61cc | refs/heads/master | 2022-12-01T11:58:39.857379 | 2020-09-05T18:07:51 | 2020-09-05T18:07:55 | 203,866,327 | 0 | 1 | null | 2022-11-24T09:16:18 | 2019-08-22T20:14:43 | Python | UTF-8 | Python | false | false | 2,764 | py | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from functions.Canary.chardet import __version__
from functions.Canary.chardet import PY2
from functions.Canary.chardet import UniversalDetector
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for raw in lines:
        detector.feed(bytearray(raw))
        # Stop reading as soon as the detector is confident - particularly
        # useful if the stream starts with a BOM.
        if detector.done:
            break
    detector.close()

    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')

    encoding = detector.result['encoding']
    if encoding:
        return '{0}: {1} with confidence {2}'.format(
            name, encoding, detector.result['confidence'])
    return '{0}: no result'.format(name)
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
                     encodings")
    # Files are opened in binary mode; with no paths given, stdin is used
    # (the raw byte buffer on Python 3).
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. \
                              (default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            # Warn (on stderr, so it never pollutes the report) when the
            # user is typing input interactively.
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)

        print(description_of(f, f.name))


if __name__ == '__main__':
    main()
| [
"gautam3994@gmail.com"
] | gautam3994@gmail.com |
9a0dd6f10c652e2de3589f21c50e5edd3f286226 | 389569a591284a2adcdc38046114e7b1038afd94 | /python-script/trax/remove_duplicate.py | 01e34ae3b7890c0987c7795f288b8816e515b37c | [] | no_license | xytysingle/AnnotationTool | b797daf2fd472f602341b16f24fb1ed9b702aef1 | a217d4376ceee739e0d8c43515c403133982e86e | refs/heads/master | 2020-04-11T18:16:10.438919 | 2019-07-31T10:21:18 | 2019-07-31T10:21:18 | 161,992,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Kong Haiyang
@Date: 2018-05-21 09:40:31
"""
from __future__ import absolute_import, division, print_function
import hashlib
import os
from os import walk
import time
from PIL import Image
from tqdm import tqdm
path1 = '/Users/lingmou/Desktop/堆头项目/duitou2/'

tt = time.time()

# Collect every visible .jpg in the leaf directories under path1.
files = []
for dirpath, dirnames, filenames in walk(path1):
    if dirnames:
        # Only scan leaf directories, as before.
        continue
    # Use the filenames walk() already provides instead of re-listing the
    # directory with os.listdir() (same entries here, one less syscall).
    files.extend(os.path.join(dirpath, f) for f in filenames
                 if f.endswith('.jpg') and not f.startswith('.'))

# md5-of-pixel-data -> list of paths seen; any repeat hash is a duplicate
# image, and the later file is deleted.
md5_set = set()
files_dict = {}
for f in tqdm(files):
    # Context manager closes the image file promptly; the original
    # Image.open() without a close leaked one descriptor per image and
    # could exhaust the open-file limit on large folders.
    with Image.open(f) as im:
        md5 = hashlib.md5(im.tobytes()).hexdigest()
    files_dict.setdefault(md5, []).append(f)
    if md5 in md5_set:
        print('\n' + '=' * 20 + md5 + '=' * 20)
        for fd in files_dict[md5]:
            print(fd)
        files_dict[md5].remove(f)
        os.remove(f)
        print('Remove {}.'.format(f))
    else:
        md5_set.add(md5)

print(time.time() - tt)
| [
"2463072824@qq.com"
] | 2463072824@qq.com |
d3c4e601c45ab7daa7422417c26f7028a4043b80 | cd78d84441e69c1fc40b6a6e9e235e7cf6882454 | /python/110.balanced_binary_tree.py | e5b1cf1d1f013903993e66591250ea684ad21e5e | [] | no_license | buy/leetcode | 53a12d4e0298284a5a2034c88353d0dc195aa66c | da0e834e3f2e3016396fffc96ef943ab9ec58ea4 | refs/heads/master | 2021-01-13T01:48:01.176632 | 2015-06-14T06:17:17 | 2015-06-14T06:17:17 | 31,863,627 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # Given a binary tree, determine if it is height-balanced.
# For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 110 -- determine whether a binary tree is height-balanced.

    A tree is balanced when, at every node, the heights of the two
    subtrees differ by at most one.
    """
    # @param {TreeNode} root
    # @return {boolean}
    def isBalanced(self, root):
        """Return True if the tree rooted at ``root`` is height-balanced.

        Runs in O(n): each node is visited once.  The original recomputed
        subtree heights at every level (getHeight inside isBalanced),
        which is O(n^2) on skewed trees.
        """
        return self._check(root) >= 0

    def _check(self, node):
        """Return the height of ``node``, or -1 if its subtree is unbalanced."""
        if not node:
            return 0
        left = self._check(node.left)
        if left < 0:
            return -1
        right = self._check(node.right)
        if right < 0:
            return -1
        if abs(left - right) > 1:
            return -1
        return 1 + max(left, right)

    def getHeight(self, root):
        """Return the height of the tree rooted at ``root`` (0 for empty)."""
        if not root:
            return 0
        return 1 + max(self.getHeight(root.left), self.getHeight(root.right))
| [
"cliu@groupon.com"
] | cliu@groupon.com |
74fca21f256479a3865581a72ae58c5bc5d98d3b | 8268afb15bebb260a65c9b4c54ff3a6eb709c7c3 | /denoiser/lib_denoiser.py | 6d5d8e12ffdb9c77dea02f213f5c263bd84144f5 | [] | no_license | edublancas/yass-private | 2dc4b5b520c7effbece6f93bf74d39af918b5e29 | 33e9e83ccd6cc7a333b0199c11cdd81363ced846 | refs/heads/master | 2021-03-24T12:48:12.715117 | 2018-12-24T20:22:29 | 2018-12-24T20:22:29 | 108,217,478 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | import numpy as np
from yass.templates import TemplatesProcessor
from yass.templates import util as templates_util
class SpikeIndex:
    """Wraps spike index logic.

    Holds a 2-D array whose rows are (spike_time, channel_id) pairs --
    inferred from the ``arr[:, 0]`` / ``arr[:, 1]`` usage below.
    """
    def __init__(self, spike_index_array, channel_index=None, geometry=None,
                 neighbors=None):
        # (n_spikes, 2) array: column 0 = spike time, column 1 = channel id.
        self.arr = spike_index_array
        # channel_index: per-channel lookup of neighboring channel ids
        # (indexed as channel_index[channel] below) -- assumed, TODO confirm.
        self.channel_index = channel_index
        # geometry / neighbors are stored but never read inside this class.
        self.geometry = geometry
        self.neighbors = neighbors
    def get_spike_index_for_channel(self, channel):
        # Return a new SpikeIndex restricted to spikes on ``channel``.
        sub = self.arr[self.arr[:, 1] == channel]
        return SpikeIndex(sub, self.channel_index, self.geometry,
                          self.neighbors)
    def get_times_from_channel(self, channel, max_ptp=None, rec=None,
                               waveform_length=51):
        # Spike times on ``channel``.  When ``max_ptp`` is given, keep only
        # spikes whose waveform peak-to-peak (templates_util.ptps) is at
        # most ``max_ptp``; ``rec`` must then be the recording array.
        if max_ptp is None:
            return self.arr[self.arr[:, 1] == channel][:, 0]
        else:
            times = self.arr[self.arr[:, 1] == channel][:, 0]
            spikes = read_waveforms(rec, times, waveform_length,
                                    random_shift=False,
                                    add_offset=True)
            ptps = templates_util.ptps(spikes)
            return times[ptps <= max_ptp]
    def get_times(self):
        # All spike times, regardless of channel.
        return self.arr[:, 0]
    @property
    def shape(self):
        # Shape of the underlying (n_spikes, 2) array.
        return self.arr.shape
    @property
    def n_unique_channels(self):
        # Number of distinct channels that have at least one spike.
        return len(np.unique(self.arr[:, 1]))
    def count_spikes_per_channel(self):
        # Mapping channel id -> number of spikes on that channel.
        chs, counts = np.unique(self.arr[:, 1], return_counts=True)
        return {ch: count for ch, count in zip(chs, counts)}
    def read_waveforms_from_channel(self, rec, channel, waveform_length=51,
                                    random_shift=False,
                                    only_neighbors=True):
        """Read waveforms from rec using an array of spike times.

        With ``only_neighbors`` the snippet is restricted to the channels in
        ``self.channel_index[channel]`` -- hard-coded to 7 channels below,
        presumably the neighborhood size; TODO confirm.  ``random_shift``
        jitters each window by up to ``half`` samples.  NOTE(review):
        unlike ``read_waveforms`` below, windows near the recording edges
        are not bounds-checked here.
        """
        n_obs, n_channels = rec.shape
        if channel is not None:
            idxs = self.get_times_from_channel(channel)
            # print(idxs, idxs.shape)
        else:
            idxs = self.get_times()
            # print(idxs, idxs.shape)
        out = np.empty((len(idxs), waveform_length, 7 if only_neighbors else n_channels))
        half = int((waveform_length - 1)/2)
        for i, idx in enumerate(idxs):
            if random_shift:
                offset = np.random.randint(-half, half)
            else:
                offset = 0
            if only_neighbors:
                out[i, :] = rec[idx-half + offset:idx+half+1 + offset, self.channel_index[channel]]
            else:
                out[i, :] = rec[idx-half + offset:idx+half+1 + offset]
        return out
# FIXME: remove duplicated logic (overlaps with SpikeIndex.read_waveforms_from_channel)
def read_waveforms(rec, times, waveform_length, random_shift=False, add_offset=False):
    """Cut fixed-length waveform snippets out of ``rec`` around ``times``.

    Snippets that would fall outside the recording are dropped (with a
    console notice).  ``add_offset`` applies a fixed -20 sample shift;
    ``random_shift`` additionally jitters each window by a few samples.
    """
    n_obs, n_channels = rec.shape
    snippets = np.empty((len(times), waveform_length, n_channels))
    keep = np.ones(len(times))
    half = int((waveform_length - 1)/2)
    for pos, spike_time in enumerate(times):
        shift = -20 if add_offset else 0
        if random_shift:
            shift += np.random.randint(-3, 3)
        window = slice(spike_time - half + shift, spike_time + half + 1 + shift)
        if window.start >= 0 and window.stop <= n_obs:
            snippets[pos] = rec[window]
        else:
            keep[pos] = 0
            print('invalid time')
    return snippets[keep.astype(bool)]
"fkq8@blancas.io"
] | fkq8@blancas.io |
0130e5fb78d51a55447ac03623a3d816d1707af0 | c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc | /프로그래머스/체육복.py | 6418eba937f097af343006fbd58235dfaf80fd2e | [] | no_license | JeongHanJun/BOJ | ae6b1c64c5b3226deef2708ae447aa1225333a92 | a865624fb0a9291b68f99af8535f708554fa0b41 | refs/heads/master | 2023-03-31T02:22:58.974437 | 2021-04-02T02:43:57 | 2021-04-02T02:43:57 | 258,809,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | # 체육복
'''
# testcase #11 실패 코드
def solution(n, lost, reserve):
answer = 0
students = [1] * (n+2)
for i in reserve:
students[i] += 1
for i in lost:
students[i] -= 1
for i in range(1, n+1):
if students[i] == 2:
if students[i-1] == 0:
students[i] -= 1
students[i-1] += 1
if students[i+1] == 0:
students[i] -= 1
students[i+1] += 1
answer = n - students.count(0)
return answer
'''
'''
# 성공코드 1
def solution(n, lost, reserve):
for i in reserve:
if i in lost:
lost.remove(i)
reserve.remove(i)# 여유분이 있는 학생이 체육복을 잃어버렸을 경우의 중복을 제외
answer = n - len(lost)# answer 에는 중복없이 전체 학생중에 체육복을 잃어 버리지 않은 학생수가 저장되있음
for i in lost:
if i in reserve:
answer += 1
reserve.remove(i)
elif i-1 in reserve:# 남는 학생이 자기 앞사람에게
answer += 1
reserve.remove(i-1)
elif i+1 in reserve:# 남는 학생이 자기 뒷사람에게
answer += 1
reserve.remove(i+1)
return answer
'''
# Successful approach: pure set arithmetic (fast).
# Resolves, in order: students who lost a uniform but brought their own
# spare, then borrowing from seat i+1, then borrowing from seat i-1 --
# the same resolution order as shifting the lost-set by [0, 1, -2].
def solution(n, lost, reserve):
    spare = set(reserve)
    need = set(lost)
    # A student who lost their uniform but brought a spare wears their own.
    overlap = spare & need
    spare -= overlap
    need -= overlap
    # Borrow from the spare-holder sitting right after (i + 1).
    matched = {student for student in need if student + 1 in spare}
    need -= matched
    spare -= {student + 1 for student in matched}
    # Borrow from the spare-holder sitting right before (i - 1).
    need -= {student for student in need if student - 1 in spare}
    return n - len(need)
# Sample cases from the problem statement: (n, lost, reserve) triples.
n1 = 5
l1 = [2, 4]
r1 = [1, 3, 5]
n2 = 5
l2 = [2, 4]
r2 = [3]
n3 = 3
l3 = [3]
r3 = [1]
print(solution(n1, l1, r1))  # expected 5
print(solution(n2, l2, r2))  # expected 4
print(solution(n3, l3, r3))  # expected 2
"noreply@github.com"
] | JeongHanJun.noreply@github.com |
1a11ad3ef2340b83cae095ecd1d26ec0b98f3411 | c18781b5d4440cccbe8c417b277b342579c9ac71 | /hhtest_18111/urls.py | c863e4463544e7b92af7261c50383a90703e9fe1 | [] | no_license | crowdbotics-apps/hhtest-18111 | a21d7f45345b94698951a0758d3dbde72e338d37 | 1a9a352061667c9a68010321d6c73e863702ff67 | refs/heads/master | 2022-10-12T05:07:43.157133 | 2020-06-15T03:40:16 | 2020-06-15T03:40:16 | 272,335,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | """hhtest_18111 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Root URL configuration: home app, allauth account pages, the v1 REST
# API, the Django admin, user pages and rest-auth endpoints.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "HHTest"
admin.site.site_title = "HHTest Admin Portal"
admin.site.index_title = "HHTest Admin"
# swagger
# drf-yasg schema view backing the interactive API docs at /api-docs/;
# restricted to authenticated users via permission_classes.
schema_view = get_schema_view(
    openapi.Info(
        title="HHTest API",
        default_version="v1",
        description="API documentation for HHTest App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4d012607d045af72d09fba80cfd1625c1e627d90 | 3b7474148c07df7f4755106a3d0ada9b2de5efdc | /training/c26_api_requests/client/lab05_HTTPBIN_GETJSON.py | 1e47277097b0a36464989ac7362dd529aaf4bb11 | [] | no_license | juancsosap/pythontraining | 7f67466846138f32d55361d64de81e74a946b484 | 1441d6fc9544042bc404d5c7efffd119fce33aa7 | refs/heads/master | 2021-08-26T05:37:15.851025 | 2021-08-11T22:35:23 | 2021-08-11T22:35:23 | 129,974,006 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | import requests
def main():
    """Fetch http://httpbin.org/get and print this client's public IP.

    httpbin echoes request metadata as JSON; the "origin" field is the
    caller's IP address as seen by the server.  Nothing is printed when
    the response status is not 200.
    """
    url = 'http://httpbin.org/get'
    # A timeout keeps the script from hanging forever if httpbin is
    # unreachable; requests waits indefinitely by default.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        response_json = response.json()  # Parsed JSON body as a dict
        origin = response_json['origin']
        print(origin)


if __name__ == '__main__':
    main()
| [
"user.nuage@gmail.com"
] | user.nuage@gmail.com |
69443fd6ce76067dbc1afcbe80e1509d0d32777d | 411a2fc51902aef288a7382aa3d4950132c2ba7d | /prob_015.py | 679c21e047a18f465bc60e6850e0f504ab847378 | [] | no_license | bdjackson/ProjectEuler | c73a1f55a9e7995f8674b02dc486bbd433cb6ea7 | 3517c39ee5ae3e90499ced2ca47b94bef3f2fb8f | refs/heads/master | 2021-01-01T19:47:17.902466 | 2013-11-03T19:07:39 | 2013-11-03T19:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | #!/usr/bin/env python
# ============================================================================
import sys
# ===========================================================================
# = http://projecteuler.net/problem=15 =
# = - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - =
# = How paths are there in a 20x20 grid (21x21 nodes) without backtracking =
# ===========================================================================
# ----------------------------------------------------------------------------
def dumpGrid(grid):
    """Print ``grid``'s repr, then each row with cells right-aligned in
    4-character columns.

    Uses print() calls: the original Python-2-only print statements fail
    to parse under Python 3.
    """
    print(grid)
    for r in range(len(grid)):
        line = ""
        for c in range(len(grid[r])):
            line += '%4s' % grid[r][c]
        print(line)
# ----------------------------------------------------------------------------
def getNumPaths(rows, cols):
    """Count lattice paths through a rows x cols grid (right/down moves only).

    Dynamic programming over the (rows+1) x (cols+1) grid of nodes: each
    node holds the number of paths from it to the bottom-right corner.
    Prints the node grid and the result (does not return it).

    Fixes: Python-2-only print statements replaced with print() so the
    script runs under Python 3; the pointless ``[]*(rows+1)`` initialiser
    (list repetition of an empty list is just ``[]``) simplified.
    """
    grid = []
    for r in range(rows + 1):
        grid.append([0] * (cols + 1))
    # Fill from the bottom-right corner back to the origin.
    for r in range(rows, -1, -1):
        for c in range(cols, -1, -1):
            print('%d - %d' % (r, c))
            right = 0 if c + 1 >= cols + 1 else grid[r][c + 1]
            down = 0 if r + 1 >= rows + 1 else grid[r + 1][c]
            grid[r][c] = right + down
            if grid[r][c] == 0:
                # Only the bottom-right corner sums to 0; it seeds the
                # recurrence with a single (empty) path.
                grid[r][c] = 1
    dumpGrid(grid)
    print('A %dx%d grid has %d paths without backtracking' % (rows,
                                                              cols,
                                                              grid[0][0]))
# ============================================================================
def main():
    """Run getNumPaths on growing grids, ending with the 20x20 puzzle case.

    Converted to print() calls so the script runs under Python 3 (the
    original used Python-2-only print statements).
    """
    getNumPaths(1, 1)
    print('=====================================================')
    getNumPaths(2, 2)
    print('=====================================================')
    getNumPaths(3, 3)
    print('=====================================================')
    getNumPaths(4, 4)
    print('=====================================================')
    getNumPaths(20, 20)
# ============================================================================
if __name__ == "__main__":
    # main() returns None, so this exits with status 0.
    sys.exit( main() )
# ============================================================================
# ============
# = solution =
# ============
# 137846528820
| [
"bjack3@gmail.com"
] | bjack3@gmail.com |
45de4b48e69e78beab1276afde0493a05412c0fc | de0bf3ddc4fedc14a24e085007d4babe598d2e66 | /generate_col_num.py | a30edd1329a0d56d907ba9f3750e6b63255e1cf6 | [] | no_license | Scandium627/usseful_data_fuction | aa5531d71b4f163a3580a9b9302bf55dc5298095 | 476671e9a5117b587d3dfc6f605e483841c7db1c | refs/heads/master | 2022-08-15T08:11:34.602656 | 2020-05-24T08:09:05 | 2020-05-24T08:09:05 | 208,982,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import os
import pandas as pd
import csv
def get_files(file_dir):
    """Recursively collect files under ``file_dir`` whose names end in one
    of the source-code suffixes (py, css, html, js, vue, sh).

    Note: suffixes are matched without a leading dot, exactly like the
    original chain of endswith() calls -- so e.g. 'happy' also matches
    the 'py' suffix.
    """
    # str.endswith accepts a tuple of suffixes: one call replaces six
    # or-ed endswith() calls with identical matching behavior.
    suffixes = ('py', 'css', 'html', 'js', 'vue', 'sh')
    list_fife = []
    for root, dirs, files in os.walk(file_dir):
        for name in files:
            if name.endswith(suffixes):
                list_fife.append(os.path.join(root, name))
    return list_fife
def open_file(file_name):
    """Return the file's lines as a list, with a trailing '\\n' entry.

    First tries ``pd.read_table`` and takes the first column of values.
    NOTE(review): pandas treats the file's first line as the header, so
    that line is dropped, and only single-column content survives the
    ``.T.tolist()[0]`` -- presumably acceptable for this script; confirm.
    On any failure it falls back to a plain UTF-8 line-by-line read (then
    a '\\n' entry is prepended as well).  If both attempts fail, the file
    is reported and [] returned so the batch run keeps going.
    """
    #if file_name.endswith('py') or file_name.endswith('css') or file_name.endswith('html'):
    try:
        data = pd.read_table(file_name)
        list_out = data.values.T.tolist()[0]
        #list_out.insert(0,file_name.replace(project_dir , ''))
        list_out.append('\n')
        return list_out
    except:
        # Fallback: plain text read (also reached for empty or odd files).
        try:
            list_out = []
            with open(file_name,"r", encoding= 'utf-8') as file1:
                #list_out.append(file_name.replace(project_dir, '') + '\n')
                list_out.append('\n')
                for row in file1.readlines():
                    list_out.append(row)
                list_out.append('\n')
            return list_out
        except:
            # Bare except keeps the batch alive; the file is skipped.
            print('camnt',file_name)
            return []
def build_input_col_num(input_num, pad_width=8):
    """Left-justify ``input_num`` in a fixed-width column of spaces.

    Fix: the original sliced a pad string that was shorter than most line
    numbers (``output[rep:]``), so numbers were emitted with little or no
    trailing padding and the generated line-number column did not align.
    ``str.ljust`` pads to ``pad_width`` and never truncates, so numbers
    longer than the column are still emitted in full.
    """
    return str(input_num).ljust(pad_width)
if __name__ == '__main__':
    # max_num is presumably the total line count of the combined output:
    # the "last" file is numbered backwards so its final line lands on
    # max_num -- TODO confirm against how the two outputs are merged.
    max_num = 178388
    front_file = 'front_file.txt'
    last_file = 'last_file.txt'
    # Number the "front" file from 1 upwards, prefixing each line.
    # gb18030 covers the full range of Chinese characters in the content.
    with open(r'{a}_front_40_col_num.txt'.format(a=front_file.split('.')[0]),'w',newline= '', encoding='gb18030') as file_witre:
        num_front = 1
        list_for_write = open_file(front_file)
        for write_line in list_for_write:
            file_witre.write(build_input_col_num(num_front) +str(write_line)+ '\n')#+ '\n'
            num_front += 1
        print(front_file,num_front)
    # Number the "last" file so that its numbering ends at max_num.
    with open(r'{a}_last_40_col_num.txt'.format(a=last_file.split('.')[0] ),'w', newline= '', encoding='gb18030') as file_witre:
        list_for_write = open_file(last_file)
        len_list = len(list_for_write)
        num_last = max_num - len_list + 1
        for write_line in list_for_write:
            file_witre.write(build_input_col_num(num_last) +str(write_line)+ '\n')
            num_last += 1
        print(last_file,num_last)
| [
"360134299@qq.com"
] | 360134299@qq.com |
44c46ba93a07fded493503c407a780d5ac75ad25 | 11d8e5c2ea583b837469491b25c5f186368e0b78 | /test/test.py | d4e137565c58df5475b8473dd7a041ca2d254395 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sloria/pattern | dcd77a9e2de9d31677c28208d4c5a0b7dbae55c8 | d1d719540f195f3028e24f4d2e536d73e9fef210 | refs/heads/master | 2021-01-18T08:48:02.400336 | 2013-09-22T14:56:25 | 2013-09-22T14:56:25 | 11,158,672 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import os, sys; sys.path.insert(0, os.path.join(".."))
import unittest
import test_metrics
import test_web
import test_db
import test_de
import test_en
import test_es
import test_fr
import test_it
import test_nl
import test_text
import test_search
import test_vector
import test_graph
#---------------------------------------------------------------------------------------------------
# Run all tests.
# pattern.db tests require a valid username and password for MySQL.
# pattern.web tests require a working internet connection
# and API license keys (see pattern.web.api.py) for Google and Yahoo API's.
def suite():
    """Assemble the full pattern test suite.

    Aggregates the per-module suites (web, db, six languages, text,
    search, vector, graph).  The db suite needs valid MySQL credentials
    and the web suite a working internet connection plus API keys.
    """
    all_tests = unittest.TestSuite()
    for module_suite in (
        test_metrics.suite(),
        test_web.suite(),
        test_db.suite(host="localhost", port=3306, username="root", password="root"),
        test_de.suite(),
        test_en.suite(),
        test_es.suite(),
        test_fr.suite(),
        test_it.suite(),
        test_nl.suite(),
        test_text.suite(),
        test_search.suite(),
        test_vector.suite(),
        test_graph.suite(),
    ):
        all_tests.addTest(module_suite)
    return all_tests

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=1).run(suite())
"tom@organisms.be"
] | tom@organisms.be |
b415842158a3d373c9b8c0d3a087ea55ea60a8a9 | f4bf81d4e80468331a09401dbaeef12465aca853 | /lib/python/helpers/profiler/prof_util.py | b529b5ece1393da4c6c1318d21f61e7e9cfb71d6 | [] | no_license | nottyo/intellibot | 45c41d673608a0a1291c6387f9d33ef449f18837 | 0547d987deaad90260abe33db5284eae9704eb9b | refs/heads/master | 2020-12-30T23:59:29.795725 | 2017-04-10T07:53:59 | 2017-04-10T07:53:59 | 86,574,980 | 1 | 0 | null | 2017-03-29T11:37:54 | 2017-03-29T11:37:53 | null | UTF-8 | Python | false | false | 3,343 | py | __author__ = 'traff'
import threading
import os
import sys
import tempfile
from _prof_imports import Stats, FuncStat, Function
# Python 2/3 compatibility shim: keep the builtin ``execfile`` on Python 2
# and provide an equivalent implementation on Python 3, where it was removed.
try:
    execfile=execfile #Not in Py3k
except NameError:
    #We must redefine it in Py3k if it's not already there
    def execfile(file, glob=None, loc=None):
        """Execute ``file`` in the given globals/locals (Py3 execfile)."""
        if glob is None:
            import sys
            # Default to the *caller's* globals, like the Py2 builtin did.
            glob = sys._getframe().f_back.f_globals
        if loc is None:
            loc = glob
        # It seems that the best way is using tokenize.open(): http://code.activestate.com/lists/python-dev/131251/
        # (tokenize.open honors the PEP 263 encoding declaration.)
        import tokenize
        stream = tokenize.open(file)  # @UndefinedVariable
        try:
            contents = stream.read()
        finally:
            stream.close()
        #execute the script (note: it's important to compile first to have the filename set in debug mode)
        exec(compile(contents+"\n", file, 'exec'), glob, loc)
def save_main_module(file, module_name):
    """Re-register the running ``__main__`` module under ``module_name``
    and install a fresh, empty ``__main__`` in its place.

    Returns the new ``__main__`` module with ``__file__`` set to ``file``
    (and ``__loader__`` copied over when present) so a profiled script
    can be executed inside it without clobbering the profiler's own
    ``__main__`` globals.
    """
    sys.modules[module_name] = sys.modules['__main__']
    sys.modules[module_name].__name__ = module_name
    from imp import new_module
    m = new_module('__main__')
    sys.modules['__main__'] = m
    if hasattr(sys.modules[module_name], '__loader__'):
        setattr(m, '__loader__', getattr(sys.modules[module_name], '__loader__'))
    m.__file__ = file
    return m
class ProfDaemonThread(threading.Thread):
    """Base class for profiler background threads.

    Marked as a daemon so a lingering profiler thread never prevents
    interpreter shutdown.  Subclasses override ``OnRun``; ``run`` simply
    delegates to it.  ``killReceived`` is a flag subclasses may poll to
    stop their loop.
    """
    def __init__(self):
        super(ProfDaemonThread, self).__init__()
        # Assigning ``daemon`` replaces setDaemon(), which is deprecated
        # since Python 3.10; behavior is identical.
        self.daemon = True
        self.killReceived = False

    def run(self):
        self.OnRun()

    def OnRun(self):
        """Hook for subclasses; the default implementation does nothing."""
        pass
def generate_snapshot_filepath(basepath, local_temp_dir=False, extension='.pstat'):
    """Pick a snapshot file path that does not collide with an existing file.

    Tries ``<base><ext>`` first, then ``<base>1<ext>``, ``<base>2<ext>``,
    and so on until an unused name is found.
    """
    base = get_snapshot_basepath(basepath, local_temp_dir)
    counter = 0
    candidate = base + extension
    while os.path.exists(candidate):
        counter += 1
        candidate = base + str(counter) + extension
    return candidate
def get_snapshot_basepath(basepath, local_temp_dir):
    """Resolve the base path (without extension) for a profiler snapshot.

    ``None`` falls back to the name 'snapshot'.  When ``local_temp_dir``
    is true, only the basename is kept and it is placed inside the system
    temp directory; backslashes are normalized first so Windows-style
    paths split correctly on any platform.
    """
    resolved = 'snapshot' if basepath is None else basepath
    if local_temp_dir:
        filename = os.path.basename(resolved.replace('\\', '/'))
        resolved = os.path.join(tempfile.gettempdir(), filename)
    return resolved
def stats_to_response(stats, m):
    """Convert profiler ``stats`` into the protocol objects attached to ``m``.

    ``stats`` maps (path, line, func_name) -> (cc, nc, tt, ct, callers),
    matching the cProfile/pstats tuple convention (primitive calls, total
    calls, own time, cumulative time, caller dict).  Builds a ``Stats``
    container of ``Function``/``FuncStat`` entries and attaches it as
    ``m.ystats``.  No-op when ``stats`` is None.
    """
    if stats is None:
        return
    ystats = Stats()
    ystats.func_stats = []
    m.ystats = ystats
    for func, stat in stats.items():
        path, line, func_name = func
        cc, nc, tt, ct, callers = stat
        func = Function()
        func_stat = FuncStat()
        func.func_stat = func_stat
        ystats.func_stats.append(func)
        func_stat.file = path
        func_stat.line = line
        func_stat.func_name = func_name
        # nc = total call count, ct = cumulative time, tt = own time.
        func_stat.calls_count = nc
        func_stat.total_time = ct
        func_stat.own_time = tt
        func.callers = []
        for f, s in callers.items():
            caller_stat = FuncStat()
            func.callers.append(caller_stat)
            path, line, func_name = f
            cc, nc, tt, ct = s
            caller_stat.file = path
            caller_stat.line = line
            caller_stat.func_name = func_name
            # Caller entries report cc (primitive calls), unlike above.
            caller_stat.calls_count = cc
            caller_stat.total_time = ct
            caller_stat.own_time = tt
    # m.validate()
| [
"traitanit.hua@ascendcorp.com"
] | traitanit.hua@ascendcorp.com |
ce54e8f4e1cf1e79e32ead00677082d80c671d90 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/utr.py | c49bc1d2bc8027dc3b30a202a0820fc69fe61ea5 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by a pair of standalone '"' tokens.

    The caller splits lines on whitespace, so the quote characters are
    expected as their own list elements.  A quoted run of tokens prints
    space-joined; an empty pair of quotes prints a blank line; anything
    else prints nothing.  NOTE(review): an empty list raises IndexError.

    Fix: converted the Python-2-only print statements to print() calls so
    the script runs under Python 3.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
    """Process ``fileName`` line by line.

    Each line must start with the literal token 'uTR'; the remaining
    tokens are handed to printFunction.  The first non-conforming line
    prints ERROR and stops processing.
    NOTE(review): a blank line raises IndexError on ``data[0]``.

    Fix: converted the Python-2-only print statement to print() so the
    script runs under Python 3.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'uTR':
                printFunction(data[1:])
            else:
                print('ERROR')
                return


if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
b542ddb755239f61ca7234b388116278a6242a35 | c163e238d0a0818a613fe107471e7d824febd280 | /users/migrations/0001_initial.py | bfb13135d37b1cdf7a5aa6a7e2761d46ee7e1063 | [] | no_license | Thxpatoxx/brigido | c6302bee8e353e0b68c6f98b4b96f899b8de9d98 | 58a5c3bdda15ce2f5acc6400251faadef3552d8a | refs/heads/master | 2020-09-12T22:27:40.537696 | 2019-11-19T02:54:40 | 2019-11-19T02:54:40 | 222,579,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,464 | py | # Generated by Django 2.2.7 on 2019-11-19 00:19
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the users app (auto-generated by Django 2.2.7).

    Creates the store's domain models -- Cliente, Proveedor, Venta,
    Producto, Pedido, Credito -- and the project's custom user model
    (CustomUser, a full re-declaration of Django's AbstractUser fields).
    Generated migrations are conventionally left unedited except for
    comments.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # Customers: identity and contact details.
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rut', models.CharField(max_length=20)),
                ('nombre', models.CharField(max_length=20)),
                ('apellido', models.CharField(max_length=20)),
                ('direccion', models.CharField(max_length=50)),
                ('telefono', models.IntegerField()),
                ('correo_electronico', models.CharField(max_length=20)),
            ],
        ),
        # Suppliers: identity, contact person and line of business.
        migrations.CreateModel(
            name='Proveedor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=20)),
                ('rut', models.CharField(max_length=20)),
                ('persona_contacto', models.CharField(max_length=20)),
                ('telefono', models.IntegerField()),
                ('direccion', models.CharField(max_length=50)),
                ('rubro', models.CharField(max_length=30)),
            ],
        ),
        # Sales: one customer per sale, free-text detail.
        migrations.CreateModel(
            name='Venta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('monto_pago', models.IntegerField()),
                ('detalle_venta', models.TextField()),
                ('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Cliente')),
            ],
        ),
        # Products: stock and expiry, each tied to a supplier.
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descripcion', models.CharField(max_length=20)),
                ('precio', models.IntegerField()),
                ('marca', models.CharField(max_length=20)),
                ('existencia_actual', models.IntegerField()),
                ('cod_familia', models.IntegerField()),
                ('fecha_vencimiento', models.IntegerField()),
                ('proveedor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Proveedor')),
            ],
        ),
        # Purchase orders placed with a supplier.
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('detalle_pedido', models.TextField()),
                ('proveedor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Proveedor')),
            ],
        ),
        # Credit sales.  NOTE(review): the default 'DISPONIBLE' is not one
        # of the declared choices (CANCELADA / PENDIENTE) -- confirm intent.
        migrations.CreateModel(
            name='Credito',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('monto_pago', models.IntegerField()),
                ('estado', models.CharField(choices=[('CANCELADA', 'CANCELADA'), ('PENDIENTE', 'PENDIENTE')], default='DISPONIBLE', max_length=80)),
                ('detalle_venta', models.TextField()),
                ('deudor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Cliente')),
            ],
        ),
        # Custom user model mirroring django.contrib.auth's AbstractUser.
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"tu@ejemplo.com"
] | tu@ejemplo.com |
7a542dffb43bea50b4904ad35af6bd28f8eb9309 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/read_pyexcal_20201111165203.py | 37ff68e7a0fcf5143221bdcfbab09e0d81af48b8 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from openpyxl import load_workbook
# Earlier experiments, kept for reference:
# wb = load_workbook(filename = 'empty_book.xlsx')
# sheet_ranges = wb['range names']
# print(sheet_ranges['D18'].value)
# wb = load_workbook(filename= "empty_book.xlsx")
# sheet_ranges = wb['range names']
# print(sheet_ranges["D12"].value)
# Open the workbook and print one cell from the 'Test3' sheet.
wb = load_workbook(filename = "empty_book.xlsx")
sheet_ranges = wb['Test3']
print(sheet_ranges['F1'].value)
for i in range(1,9):
print(sheet_ranges.cell(column=i,row=i).value | [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
313a803ee823c95518610dc7129331ac6979b773 | f200708b8e5a67074f6c805a736311e9b1637532 | /django_base/mysite3/bookstore/urls.py | 76d264928b1d3498fb92d6293e248ff485af607f | [] | no_license | vivid-ZLL/tedu | 48b78951eae07f5f0433ba85f7cc4e07cd76011d | 319daf56d88e92f69ee467e0ccf83c01367ed137 | refs/heads/master | 2021-04-23T22:40:11.391574 | 2020-01-25T11:26:56 | 2020-01-25T11:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.conf.urls import url
from . import views
# Route the add-book and add-author pages to their views.  NOTE(review):
# the patterns have no trailing '$' anchor, so any URL *starting* with
# "add_book" / "add_auth" matches.
urlpatterns = [
    url(r"^add_book", views.add_view),
    url(r"^add_auth",views.auth_view),
]
| [
"283438692@qq.com"
] | 283438692@qq.com |
ee45c47184689072aa55d84bb11c3d5e6ca2b91b | 40a1ca8ddbdcd96a58703913f98b29b435a42745 | /antipodes average.py | 99638ba6aad58bc25581fe72ccc0789c1d462ff9 | [] | no_license | GaganDureja/Algorithm-practice | 3eaca2cfc03fcee3671b87b5efda1f950fd36212 | d40e08287754594d016801a093becc3f69f4bcc1 | refs/heads/master | 2023-05-06T11:58:35.471799 | 2021-06-01T03:49:58 | 2021-06-01T03:49:58 | 292,361,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #Link: https://edabit.com/challenge/oF8T7Apf7jfagC4fD
def antipodes_average(lst):
    """Average each element with its mirror-image partner.

    Pairs lst[0] with lst[-1], lst[1] with lst[-2], and so on; for an
    odd-length list the unpaired middle element is dropped.
    """
    half = len(lst) // 2
    return [(front + back) / 2 for front, back in zip(lst[:half], reversed(lst))]
print(antipodes_average([1, 2, 3, 4]))
"gagandureja675@gmail.com"
] | gagandureja675@gmail.com |
4463a41f0e2d05814c9a3f223ac97c262569bd68 | f8dd8d046100f1223713e047074f30c7ce5a59cd | /testing/epilogue/utils/cache_utils.py | 144e4fb20c3dfeaab976cb3fcc6260d51ccc9cdd | [] | no_license | dotslash227/98fitcortex | 57aed99270799eff68fdff62db0b8c1d9aabd4a2 | bd4002151e5def00c3dea1f5a1abfb06ba3e809a | refs/heads/master | 2022-12-17T00:51:20.302948 | 2019-02-27T13:54:22 | 2019-02-27T13:54:22 | 197,362,824 | 0 | 0 | null | 2022-12-08T00:02:42 | 2019-07-17T09:55:14 | HTML | UTF-8 | Python | false | false | 3,107 | py | import functools
import datetime
import types
from django.utils import timezone
from django.core.cache import cache
KEY_TEMPLATE = "%d_%s" #Template for cache "userid_key"
# Namespace of module names used as cache-key suffixes.  The *_LOGS /
# *_AGGREGATE / *_PLAN entries name individual caches, while SLEEP /
# ACTIVITY / DIET name whole module groups (see get_*_cache_keys below).
modules = types.SimpleNamespace()
modules.SLEEP_LOGS = "sleep_logs"
modules.SLEEP_AGGREGATE = "sleep_aggregate"
modules.SLEEP = "sleep"
modules.ACTIVITY_LOGS = "activity_logs"
modules.ACTIVITY_AGGREGATE = "activity_aggregate"
modules.ACTIVITY = "activity"
modules.DIET_DASHBOARD_STRING = "diet_dashboard_string"
modules.DIET_PLAN = "diet_plan"
modules.DIET = "diet"
@functools.lru_cache()
def get_cache_key(user, module):
    '''
    Get Cache key for the user for the module specified.

    ``module`` must be one of the ``modules.*`` names mapped below; an
    unknown module makes ``data.get(module)`` return None and the call
    ``None(user)`` raise TypeError.  NOTE(review): ``modules.ACTIVITY``
    and ``modules.ACTIVITY`` group keys have no entry here even though
    the name exists -- confirm whether that is intentional.  lru_cache
    requires ``user`` to be hashable and keeps a reference to every user
    passed in for the lifetime of the process.
    '''
    # Dispatch table: module name -> key builder (returns str or list).
    data = {
        modules.SLEEP_LOGS : get_sleep_logs_cache_key,
        modules.SLEEP_AGGREGATE : get_sleep_aggregate_cache_key,
        modules.ACTIVITY_LOGS : get_activity_logs_cache_key,
        modules.ACTIVITY_AGGREGATE : get_activity_aggregate_cache_key,
        modules.DIET_DASHBOARD_STRING : get_diet_dashboard_string_cache_key,
        modules.SLEEP : get_sleep_cache_keys,
        modules.DIET_PLAN : get_dietplan_cache_key,
        modules.DIET : get_diet_cache_keys
    }
    return data.get(module)(user)
def _user_key(user, suffix):
    """Build the canonical "<user.id>_<suffix>" cache key.

    Shared private helper: the eight public getters below were eight
    copies of the same KEY_TEMPLATE interpolation; behavior and returned
    strings are unchanged.  NOTE(review): the lru_cache on each getter
    keeps every user object passed in alive for the process lifetime.
    """
    return KEY_TEMPLATE % (user.id, suffix)
@functools.lru_cache()
def get_sleep_logs_cache_key(user):
    """Cache key for the user's sleep logs."""
    return _user_key(user, modules.SLEEP_LOGS)
@functools.lru_cache()
def get_sleep_aggregate_cache_key(user):
    """Cache key for the user's sleep aggregate."""
    return _user_key(user, modules.SLEEP_AGGREGATE)
@functools.lru_cache()
def get_activity_logs_cache_key(user):
    """Cache key for the user's activity logs."""
    return _user_key(user, modules.ACTIVITY_LOGS)
@functools.lru_cache()
def get_activity_aggregate_cache_key(user):
    """Cache key for the user's activity aggregate."""
    return _user_key(user, modules.ACTIVITY_AGGREGATE)
@functools.lru_cache()
def get_diet_dashboard_string_cache_key(user):
    """Cache key for the user's diet dashboard string."""
    return _user_key(user, modules.DIET_DASHBOARD_STRING)
@functools.lru_cache()
def get_dietplan_cache_key(user):
    """Cache key for the user's diet plan."""
    return _user_key(user, modules.DIET_PLAN)
@functools.lru_cache()
def get_sleep_cache_keys(user):
    '''
    Return all the cache keys for a user belonging to the sleep module.
    '''
    return [_user_key(user, e) for e in (modules.SLEEP_LOGS, modules.SLEEP_AGGREGATE)]
@functools.lru_cache()
def get_diet_cache_keys(user):
    '''
    Return all the keys for a user belonging to the diet module.
    '''
    return [_user_key(user, e) for e in (modules.DIET_DASHBOARD_STRING, modules.DIET_PLAN)]
def get_time_to_midnight(time = None):
    """Return the number of seconds from *time* until the coming midnight.

    Defaults to the current time in the active Django timezone when
    *time* is not supplied.
    """
    moment = time or datetime.datetime.now(tz=timezone.get_current_timezone())
    midnight = moment.replace(hour=0, minute=0, second=0)
    elapsed = (moment - midnight).seconds
    return 86400 - elapsed
def invalidate_cache(user, module):
    """Drop every cached entry for *user* under *module*.

    ``get_cache_key`` hands back either a single key (str) or several
    (list); dispatch to ``cache.delete`` / ``cache.delete_many``
    accordingly, returning None for any other shape.
    """
    key = get_cache_key(user, module)
    if isinstance(key, str):
        return cache.delete(key)
    if isinstance(key, list):
        return cache.delete_many(key)
    return None
| [
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
25c41d6ca45c55f09b718638c22bb3d7861e20cd | 66a530b297725b1a2d1c95f95883145c04614ae1 | /0x08-python-more_classes/6-rectangle.py | fa82234524d1f9de24bfbba4566ad040c31042c0 | [] | no_license | Yagomfh/holbertonschool-higher_level_programming | 4e6f28186eae18eaba60017fe49ac446a02cbdc5 | 1d15597a6040a8ee15b08447c478d0a2e79b5854 | refs/heads/main | 2023-04-23T18:23:28.096644 | 2021-05-18T08:12:27 | 2021-05-18T08:12:27 | 319,253,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | #!/usr/bin/python3
"""This module defines a rectangle"""
class Rectangle():
    """Rectangle with validated, non-negative integer width and height.

    The class keeps a running count of live instances in
    ``number_of_instances`` (incremented on creation, decremented on
    deletion).
    """

    # Count of Rectangle instances currently alive.
    number_of_instances = 0

    def __init__(self, width=0, height=0):
        """Initialise a rectangle.

        Args:
            width (int): width of the rectangle (must be >= 0)
            height (int): height of the rectangle (must be >= 0)

        Raises:
            TypeError: if width/height is not an integer
            ValueError: if width/height is negative
        """
        self.width = width    # routed through the validating setter
        self.height = height  # routed through the validating setter
        Rectangle.number_of_instances += 1

    def __str__(self):
        """Return the rectangle drawn with '#' characters (end-user view).

        str.join replaces the original per-character ``+=`` loop, which
        was quadratic in the number of cells.
        """
        return '\n'.join('#' * self.width for _ in range(self.height))

    def __repr__(self):
        """Return an eval-able representation, e.g. ``Rectangle(2, 3)``."""
        return 'Rectangle(%d, %d)' % (self.width, self.height)

    def __del__(self):
        """Decrement the live-instance counter and say goodbye."""
        Rectangle.number_of_instances -= 1
        print("Bye rectangle...")

    def area(self):
        """Return the area of the rectangle."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, or 0 if either dimension is 0."""
        if self.width == 0 or self.height == 0:
            return 0
        return (self.width * 2) + (self.height * 2)

    @property
    def width(self):
        """Get/set the width of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        # ``not isinstance`` is the idiomatic form of the original
        # ``isinstance(...) is False`` comparison.
        if not isinstance(value, int):
            raise TypeError('width must be an integer')
        if value < 0:
            raise ValueError('width must be >= 0')
        self.__width = value

    @property
    def height(self):
        """Get/set the height of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError('height must be an integer')
        if value < 0:
            raise ValueError('height must be >= 0')
        self.__height = value
| [
"yagomfh@gmail.com"
] | yagomfh@gmail.com |
01302c0d283cbbef5b6f0169d78465af038bf961 | 27044bb88c709e7ffa5278afc7c81f37e0b6e9e4 | /venv/lib/python3.10/site-packages/markdown_it/ruler.py | c71264120d1f324a87b4d9d9255195460684a7f9 | [] | no_license | mesaye85/organika_Inventory_project-with-React- | 48c93efb6aba64d5e9310c57e4c5a06d3f2cc502 | 6aa3d29e8be3e22b8dc9163d558cdcc8c9122fd1 | refs/heads/main | 2023-02-19T10:11:47.880754 | 2023-02-14T01:55:43 | 2023-02-14T01:55:43 | 298,101,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/7b/f1/76/7fc82ff783abb8aa49b96dfce8dee15abcd460b56b76733317d2895dad | [
"67935330+mesaye85@users.noreply.github.com"
] | 67935330+mesaye85@users.noreply.github.com |
e26cb78ff75c0518fef95e5020ff201bdd5bfd96 | afd7207ec79198ed8b515c66a4ff951692fc5756 | /Backend/users/views.py | 005de0eee1fc2083a3fa4f7aa1f26b3fb1605619 | [] | no_license | mdarifulislamroni21/Backend-project | 469e58ee1c8395a56f45434efc238eccd2adea77 | 4a999c7cb520c811fb0a051015822944f5d8479d | refs/heads/master | 2023-06-24T19:55:57.562157 | 2021-07-23T08:39:37 | 2021-07-23T08:39:37 | 388,731,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | from django.shortcuts import render
from form.models import main_user,buyproduct
from form.form import main_users,buyproducts
# Create your views here.
def s_user(request):
load_main_user=main_user.objects.order_by('user_name')
diction={'main_user_data':load_main_user}
return render(request,'pages/main_page/view_user.html',context=diction)
def view_user(request,user_id):
load_main_user=main_user.objects.get(pk=user_id)
load_buyproduct=buyproduct.objects.filter(user_name=user_id)
diction={'main_user_data':load_main_user,'buyproduct_data':load_buyproduct}
return render(request,'pages/argument/view_user.html',context=diction)
def edid_user(request,user_ids):
load_main_user=main_user.objects.get(pk=user_ids)
load_main_form=main_users(instance=load_main_user)
diction={'edid_user_form':load_main_form}
if request.method == 'POST':
submit_data=main_users(request.POST,instance=load_main_user)
if submit_data.is_valid():
submit_data.save(commit=True)
diction.update({'success':'Thank You! Your User Data Changed Successfully'})
return render(request,'pages/argument/edid_user.html',context=diction)
#######
def edid_product(request,product_id):
load_product=buyproduct.objects.get(pk=product_id)
load_product_form=buyproducts(instance=load_product)
diction={'edid_product':load_product_form}
if request.method == 'POST':
load_product_data=buyproducts(request.POST,instance=load_product)
if load_product_data.is_valid():
load_product_data.save(commit=True)
diction.update({'success':'Thank You! Your Product Data Changed Successfully'})
return render(request,'pages/argument/edid_product.html',context=diction)
| [
"mdarifulislamroni21@gmail.com"
] | mdarifulislamroni21@gmail.com |
3ab3b1e47de5a98b1b9f8d73aaef599497dfd142 | 3f01eb21ce140e6e8d6e9f6c037a0ed3acfd0e1b | /Project_Test_GEOJSON.py | 2c5c2aa244467c86920f0fcf229c09a9a7a1789c | [
"MIT"
] | permissive | manushah17/Capstone_2019 | 01d45e3d8f925dac88c1911d853ec1b8762d5b1f | 381094fc778906810e13d7611bfdb2c74cac326e | refs/heads/master | 2022-12-16T21:08:29.385969 | 2019-09-07T15:08:42 | 2019-09-07T15:08:42 | 206,984,224 | 0 | 0 | MIT | 2022-12-08T01:22:56 | 2019-09-07T15:04:22 | HTML | UTF-8 | Python | false | false | 6,594 | py | import pandas as pd
import googlemaps
import json
import datetime
from pandas import DataFrame
import logging
import os
from shapely.geometry import shape, Point
import psycopg2
from django.conf import settings
from googlemaps import Client
#TODO - MAP THE DATABASE CREDENTIALS USING ENV VARIABLES
#Get lat long details of all US counties in json format
GOOGLE_MAPS_API_KEY = os.environ.get('GOOGLE_MAPS_API_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_ACCESS_KEY_ID=os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY=os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
dirname = os.path.dirname(__file__)
county_file = os.path.join(dirname,'home/static/GEOJSON/USCounties_final.geojson')
district_file = os.path.join(dirname,'home/static/GEOJSON/ID2.geojson')
output_filename = os.path.join(dirname,'home/static/GEOJSON/Project.geojson') #The file will be saved under static/GEOJSON
currentDT = datetime.datetime.now()
#TODO - MAP THE DATABASE CREDENTIALS USING ENV VARIABLES
#Get lat long details of all US counties in json format
# conn = psycopg2.connect("dbname=postgres user=postgres password=admin")
conn = psycopg2.connect(user="fhhzsyefbuyjdp",
password="e13f9084680555f19d5c0d2d48dd59d4b8b7a2fcbd695b47911335b514369304",
host="ec2-75-101-131-79.compute-1.amazonaws.com",
port="5432",
database="dal99elrltiq5q",
sslmode="require")
#Another way of querying the database
df = pd.read_sql_query("SELECT project_name,pe.name as engagement_type, pa.name as activity_type, pro.description,ay.academic_year, semester, total_uno_students, total_uno_hours, total_k12_students, total_k12_hours, total_other_community_members, total_uno_faculty,total_economic_impact, other_details, outcomes, pc.name as community_partner, p.name as campus_partner, hm.mission_name as mission ,pp.mission_type as mission_type, ps.name as status, pro.address_line1 as Address_Line1, pro.address_line2, pro.city as City, pro.state as State, pro.zip as Zip, part_comm.community_type , uc.college_name FROM projects_project pro left join projects_projectcommunitypartner proCommPartnerLink on pro.id = proCommPartnerLink.project_name_id inner join partners_communitypartner pc on proCommPartnerLink.community_partner_id = pc.id left join projects_projectcampuspartner proCampPartnerLink on pro.id=proCampPartnerLink.project_name_id inner join partners_campuspartner p on proCampPartnerLink.campus_partner_id = p.id left join projects_projectmission pp on pro.id = pp.project_name_id inner join home_missionarea hm on pp.mission_id = hm.id left join projects_engagementtype pe on pro.engagement_type_id = pe.id left join projects_activitytype pa on pro.activity_type_id = pa.id left join projects_academicyear ay on pro.academic_year_id = ay.id left join projects_status ps on pro.status_id = ps.id inner join university_college uc on p.college_name_id = uc.id inner join partners_communitytype part_comm on pc.community_type_id = part_comm.id", con=conn)
conn.close()
gmaps = Client(key=GOOGLE_MAPS_API_KEY)
collection = {'type': 'FeatureCollection', 'features': []}
df['fulladdress'] = df[["address_line1", "city","state"]].apply(lambda x: ' '.join(x.astype(str)), axis=1)
with open(district_file) as f:
geojson = json.load(f)
district = geojson["features"]
#
def feature_from_row(Projectname, Engagement, Activity, Description, Year, College, Campus, Community, Mission,CommunityType, Address, City, State, Zip):
feature = {'type': 'Feature', 'properties': {'Project Name': '', 'Engagement Type': '', 'Activity Type': '',
'Description': '', 'Academic Year': '',
'Legislative District Number':'','College Name': '',
'Campus Partner': '', 'Community Partner':'', 'Mission Area':'','Community Partner Type':'',
'Address Line1':'', 'City':'', 'State':'', 'Zip':''},
'geometry': {'type': 'Point', 'coordinates': []}
}
if (Address != "N/A"):
geocode_result = gmaps.geocode(Address)
if (geocode_result[0]):
latitude = geocode_result[0]['geometry']['location']['lat']
longitude = geocode_result[0]['geometry']['location']['lng']
feature['geometry']['coordinates'] = [longitude, latitude]
coord = Point([longitude, latitude])
for i in range(len(district)): # iterate through a list of district polygons
property = district[i]
polygon = shape(property['geometry']) # get the polygons
if polygon.contains(coord): # check if a partner is in a polygon
feature['properties']['Legislative District Number'] = property["properties"][
"id"] # assign the district number to a partner
feature['properties']['Project Name'] = Projectname
feature['properties']['Engagement Type'] = Engagement
feature['properties']['Activity Type'] = Activity
feature['properties']['Description'] = Description
feature['properties']['Academic Year'] = Year
feature['properties']['College Name'] = College
feature['properties']['Campus Partner'] = Campus
feature['properties']['Community Partner'] = Community
feature['properties']['Mission Area'] = Mission
feature['properties']['Community Partner Type']=CommunityType
feature['properties']['Address Line1'] = Address
feature['properties']['City'] = City
feature['properties']['State'] = State
feature['properties']['Zip'] = Zip
collection['features'].append(feature)
return feature
geojson_series = df.apply(lambda x: feature_from_row(x['project_name'], x['engagement_type'], x['activity_type'], x['description'],x['academic_year'], x['college_name'], x['campus_partner'], x['community_partner'],x['mission'],x['community_type'], str(x['address_line1']), str(x['city']), str(x['state']), str(x['zip'])), axis=1)
jsonstring = pd.io.json.dumps(collection)
print("Project GeoJSON "+ repr(len(df)) + " records are generated at "+ str(currentDT))
with open(output_filename, 'w') as output_file:
output_file.write(format(jsonstring)) | [
"manushah@unomaha.edu"
] | manushah@unomaha.edu |
e8dd6d093db04e14c283f3fd1568fca70c9c39bb | b8b26feac86b66b0b534996cf9c3fbf7ec660240 | /codejam/18/0q/b-trouble-sort.py | 1eb82f601040fa07b503be04eeaee11988becb31 | [
"MIT"
] | permissive | neizod/problems | 775fffe32166c5b124d0e4c973b8d0aba7f3900b | 180aaf7d0ecfc3d0dd5f1d4345a7a4d83b1b884a | refs/heads/master | 2021-07-08T12:30:31.100320 | 2021-05-26T09:34:19 | 2021-05-26T09:34:19 | 6,245,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python3
from itertools import count
def trouble_sort(ls):
ls[0::2] = sorted(ls[0::2])
ls[1::2] = sorted(ls[1::2])
return ls
def check_trouble_sort(ls):
ls = trouble_sort(ls)
it = iter(ls); next(it)
for index, a, b in zip(count(), ls, it):
if a > b:
return index
return -1
def main():
for case in range(int(input())):
input()
ls = [int(n) for n in input().split()]
answer = check_trouble_sort(ls)
if answer == -1:
answer = 'OK'
print('Case #{}: {}'.format(case+1, answer))
if __name__ == '__main__':
main()
| [
"neizod@gmail.com"
] | neizod@gmail.com |
ba38f0f9fe91ce95584b1cdebbee4b4300ddff82 | 31c75012d7b1e86bc12c0b3bcc811e6b6acb5e6f | /tests/test_utils_pcap_utils.py | e1c9248d6c474a433d0df011161937ba1ffd3d10 | [
"Apache-2.0"
] | permissive | lostminty/NetML-arff | 46b3228ef233e00141894f34245acbb896a4f9fa | 11e933f8772f8502f5b45acf226eaab08abfb090 | refs/heads/master | 2023-04-07T10:42:49.202946 | 2020-01-06T21:42:49 | 2020-01-06T21:42:49 | 228,263,765 | 0 | 0 | Apache-2.0 | 2023-03-31T14:49:53 | 2019-12-15T22:45:41 | Python | UTF-8 | Python | false | false | 2,268 | py | from networkml.parsers.pcap.pcap_utils import extract_macs
from networkml.parsers.pcap.pcap_utils import extract_protocol
from networkml.parsers.pcap.pcap_utils import extract_session_size
from networkml.parsers.pcap.pcap_utils import is_external
from networkml.parsers.pcap.pcap_utils import is_private
from networkml.parsers.pcap.pcap_utils import is_protocol
from networkml.parsers.pcap.pcap_utils import packet_size
def test_extract_macs():
source, dest = extract_macs('123456789ABCDEF123456780')
assert dest == '12:34:56:78:9A:BC'
assert source == 'DE:F1:23:45:67:80'
def test_is_private():
private = is_private('10.10.10.10')
assert private == True
private = is_private('1.2.3.4')
assert private == False
private = is_private('192.168.1.1')
assert private == True
private = is_private('192.169.1.1')
assert private == False
private = is_private('172.16.4.4')
assert private == True
private = is_private('172.15.1.3')
assert private == False
private = is_private('172.32.3.1')
assert private == False
private = is_private('2014::1')
assert private == False
private = is_private('fe80::1')
assert private == True
private = is_private('fd13::13')
assert private == True
private = is_private('asdf')
assert private == False
def test_packet_size():
packet = ['0', '1234567890123456789012345678901234567890']
size = packet_size(packet)
assert size == 13398
def test_extract_session_size():
session = [['0', '1234567890123456789012345678901234567890']]
session_size = extract_session_size(session)
assert session_size == 13398
def test_extract_protocol():
session = [['0', '12345678901234567890123456789012345678901234567890']]
protocol = extract_protocol(session)
assert protocol == '78'
def test_is_external():
external = is_external('10.10.10.10', '192.168.0.1')
assert external == False
external = is_external('10.10.10.10', '1.2.3.4')
assert external == True
def test_is_protocol():
session = [['0', '12345678901234567890123456789012345678901234567890']]
protocol = is_protocol(session, '78')
assert protocol == True
protocol = is_protocol(session, 78)
assert protocol == False
| [
"clewis@iqt.org"
] | clewis@iqt.org |
23c31ae42243e1603913f6239aa8310c2ae362a8 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Twitter/Tweets/StatusesDestroy.py | 07e9c00de2b583db36cbd869f08bd139b4004dfe | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 4,450 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# StatusesDestroy
# Deletes a specified status.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class StatusesDestroy(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the StatusesDestroy Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(StatusesDestroy, self).__init__(temboo_session, '/Library/Twitter/Tweets/StatusesDestroy')
def new_input_set(self):
return StatusesDestroyInputSet()
def _make_result_set(self, result, path):
return StatusesDestroyResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return StatusesDestroyChoreographyExecution(session, exec_id, path)
class StatusesDestroyInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the StatusesDestroy
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(StatusesDestroyInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(StatusesDestroyInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(StatusesDestroyInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(StatusesDestroyInputSet, self)._set_input('ConsumerSecret', value)
def set_ID(self, value):
"""
Set the value of the ID input for this Choreo. ((required, string) The numerical ID of the status to delete.)
"""
super(StatusesDestroyInputSet, self)._set_input('ID', value)
def set_TrimUser(self, value):
"""
Set the value of the TrimUser input for this Choreo. ((optional, boolean) When set to true, each tweet returned in a timeline will include a user object including only the status authors numerical ID.)
"""
super(StatusesDestroyInputSet, self)._set_input('TrimUser', value)
class StatusesDestroyResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the StatusesDestroy Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
class StatusesDestroyChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return StatusesDestroyResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
d3fe008be694769fe5816841e56399d04c7fc6fe | 8f70ad12af7eba07efa52eb29b8f99ed3900dbb9 | /AGTGA data/AGTGA/LifeHack/LifeHack 2/TestSuite/TestSuite/TestCase06.py | 19733c6a1b7ac5041c28934fdfc987459aa9461d | [] | no_license | Georgesarkis/AGTGARowData | 768952dc03dc342bcbe0902bf2fb1720853d0e14 | e1faa7dc820b051a73b0844eac545e597a97da16 | refs/heads/master | 2022-10-01T17:06:04.758751 | 2020-06-05T07:25:41 | 2020-06-05T07:25:41 | 267,772,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from TestSuite.TestSuiteHelper import ElementFinder
port = 'http://localhost:4723/wd/hub'
driver = webdriver.Remote(command_executor=port, desired_capabilities={'automationName' : 'UiAutomator2','deviceName': 'Moto G (5)','platformName': 'Android', 'app': 'C:/Users/ze0396/Desktop/AGTGA/APKS/LifeHack.apk' , 'autoGrantPermissions' : 'true', 'appWaitActivity' : '*.*','fullreset' : 'false','noReset' : 'true' } )
time.sleep(2)
time.sleep(2)
el = ElementFinder(driver, 66,1260)
el.click()
time.sleep(2)
el = ElementFinder(driver, 198,370)
el.click()
time.sleep(2)
el = ElementFinder(driver, 918,87)
el.click()
driver.press_keycode(3)
driver.close_app()
driver.quit()
print('TestCase finished successfully') | [
"32592901+Georgesarkis@users.noreply.github.com"
] | 32592901+Georgesarkis@users.noreply.github.com |
2d8d20e898416a657352704a147c29c99f0df41e | 18508cea9458b2879017b44e6f18520cd8cf4f6c | /UCMDBPython/src/netutils.py | 0617df9d5148b86b64c6df36f77062f8a832eb45 | [] | no_license | kvt11/dd-git | 7d4935962e06d835ad0023c4abb185876a5a9e77 | 49aafa7081b861c5f6d0e1753b425e78948116d0 | refs/heads/master | 2022-11-23T19:03:19.763423 | 2016-04-04T14:54:18 | 2016-04-04T14:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,069 | py | #coding=utf-8
"""
This library can be used to retrieve network and TCP information, such as getting the Operating
system name, checking if a MAC Address is valid, check if an IP Address is valid etc.
"""
from __future__ import with_statement
from contextlib import contextmanager
from java.net import InetAddress, URL
from java.net import Socket
from java.net import InetSocketAddress
from java.net import SocketTimeoutException
from java.lang import Exception as JException
from java.io import IOException
from java.util import Properties
from appilog.common.utils import IPv4
from org.apache.commons.httpclient.methods import GetMethod, HeadMethod
from org.apache.commons.httpclient import HttpClient
from com.hp.ucmdb.discovery.library.communication.downloader.cfgfiles import (
KnownPortsConfigFile, PortInfo)
from com.hp.ucmdb.discovery.library.clients import ClientsConsts
from com.ziclix.python.sql import PyConnection
from java.lang import Class, Thread
from java.net import UnknownHostException
from com.hp.ucmdb.discovery.library.communication.downloader import ConfigFilesManagerImpl
from com.hp.ucmdb.discovery.library.common import CollectorsParameters
from appilog.common.system.types import ObjectStateHolder
from appilog.common.system.types.vectors import StringVector
from appilog.common.utils import RangeType
import logger
import re
import sys
import string
import ip_addr
import dns_resolver
import shellutils
def getHostAddress(dnsName, defaultValue=None):
"""
@deprecated: Use SocketDnsResolver().resolve_ips(dnsName)
Resolves the host name to the IP Address in DNS table.
On failure to resolve the name, the default value parameter is returned.
@param dnsName: host name to resolve
@type dnsName: string
@param defaultValue: the value to return if the host name cannot be resolved
@return: IP address of the resolved host name
@rtype: string
"""
if dnsName and dnsName.strip():
try:
return str(InetAddress.getByName(dnsName).getHostAddress())
except:
logger.debugException('Could not resolve DNS name: ', dnsName)
return defaultValue
def getHostName(ipAddress, defaultValue=None):
"""
@deprecated: Use SocketDnsResolver().resolve_hostnames(ip_address)
Resolves the IP address to the host name in the DNS table.
On failure to resolve the address the , default value parameter is returned.
@param ipAddress: IP address to resolve
@type ipAddress: string
@param defaultValue: the value to return if the address cannot be resolved
@return: host name of the resolved IP address
@rtype: string
"""
if isValidIp(ipAddress):
try:
dnsName = str(InetAddress.getByName(ipAddress).getHostName())
if dnsName == ipAddress:
return defaultValue
else:
return dnsName
except:
defultHostName = defaultValue
if defultHostName == None:
defultHostName = 'null'
logger.debug('Could not resolve DNS name for : ', ipAddress,
',returning default value:', defultHostName,
';reason:', str(sys.exc_info()[1]))
return defaultValue
def isValidIp(ipAddr, filter_client_ip=None):
"""
@deprecated: this method only supports IPv4,
use ip_addr.isValidIpAddress if you need IPv6 support
Checks whether the given IP address is a valid IPv4 address.
@param ipAddr: IP address to check
@type ipAddr: string
@return: true if the IP is valid IPv4 address, else false
@rtype: Boolean
"""
if ipAddr and filter_client_ip and DOMAIN_SCOPE_MANAGER.isClientIp(ipAddr):
return None
# in some cases windows machines report such IP address for DHCP Server
# because of misconfiguration
if ipAddr and ipAddr.strip() == '255.255.255.255':
return None
if ipAddr and ipAddr.strip() == '0.0.0.0':
return None
return IPv4.isValidIp(ipAddr)
def convertAsciiCodesToHex(asciiSeq):
"""
Converts chars in given string from hex codes to ascii symbols
e.g. '4A415641' -> 'JAVA'
@param asciiSeq: hex string for conversion
@rtype: string
@note: Don't be frustrated by the name of function!
'4A415641' - is ascii codes; returned value 'JAVA' - is a hex string
"""
seq = list(asciiSeq)
return ''.join([chr(int(''.join(l), 16)) for l in zip(seq[0::2], seq[1::2])])
def parseMac(origmac):
"""
Parses the given macAddress and converts it to the system format I{XXXXXXXXXXXX},
where X is an uppercase hexadecimal digit.
@param origmac: raw or formated MAC address
@type origmac: string
@return: parsed MAC address in the converted format
@rtype: string
@raise ValueError: If address has invalid format
"""
if origmac.isdigit() and len(origmac) > 12:
origmac = convertAsciiCodesToHex(origmac)
# Clean up any whitespace
# Make all the characters upper case
mac = origmac.strip().upper()
# remove all leading 0x in ocets
mac = mac.replace('0X', '')
macRe = r'\A([0-9A-F]{1,2}[-]?){4,8}\Z|\A([0-9A-F]{1,2}[:]?){4,8}\Z'
m = re.search(macRe, mac)
if m:
compactedParts = re.findall('[0-9A-F]{1,2}', mac)
compactedMac = ''
for part in compactedParts:
if len(part) == 1:
part = '0%s' % part
compactedMac += part
if (len(compactedMac) in (8, 12, 16)):
return compactedMac
raise ValueError('Failed parsing MAC address: ' + origmac)
_IGNORED_INTERFACE_MACS = ("204153594EFF", "33506F453030", "505054503030", "444553540000", "444553544200", "001986002B48", "020054554E01", "7A7700000001",\
"BAD0BEEFFACE", "00F1D000F1D0", "80000404FE80", "000613195800", "7A8020000200", "FEFFFFFFFFFF", "028037EC0200", "8000600FE800",\
"0200CAFEBABE", "020054746872", "000FE207F2E0", "020255061358", "080058000001", "000419000001", "002637BD3942", "025041000001",\
"009876543210", "582C80139263", "00ADE1AC1C1A", "02004C4F4F50", "444553547777")
_IGNORED_INTERFACE_MAC_PATTERNS = ('^0{6}', '0{6}$', 'F{12}', '^00FF[\dA-F]{8}', '^0250F200000[\dA-F]', '^000AFA020[\dA-F]00', '^00E00900000[\dA-F]',\
'^02BF[\dA-F]{8}', '^5455434452[\dA-F]{2}', '^020000000[\dA-F]{3}', '^00A0D5FFFF[\dA-F]{2}', '^005056C0000[\dA-F]',\
'^00059A3C7[\dA-F]00', '^001C4200000[\dA-F]')
def isVirtualMac(mac):
"""
Checks whether the specified MAC address is in the list of virtual , ignored by default macs.
@param mac: MAC address to check
@type mac: string
@return: 1 if MAC is virtual, 0 otherwise
@rtype: integer
"""
try:
mac = parseMac(mac)
if mac in _IGNORED_INTERFACE_MACS:
logger.warn('Interface with MAC: %s is virtual as it\'s in the ignored list' % mac)
return 1
for ignorePattern in _IGNORED_INTERFACE_MAC_PATTERNS:
if re.search(ignorePattern, mac):
logger.warn('Interface with MAC: %s is virtual as it\'s in the ignored pattern list' % mac)
return 1
return 0
except:
logger.warn('Mac %s is invalid.' % mac)
return 0
def isValidMac(mac, ifType='ethernetCsmacd'):
"""
Checks whether the specified MAC address is valid.
@param mac: MAC address to check
@type mac: string
@return: 1 if MAC is valid, 0 otherwise
@rtype: integer
"""
if ifType != 'ethernetCsmacd':
return 0
try:
mac = parseMac(mac)
# "204153594EFF", "33506F453030", "505054503030" - are virtual
# and must be ignored since they create lots of noise in the system
if mac in _IGNORED_INTERFACE_MACS:
logger.warn('Interface with MAC: %s is ignored as it\'s in the ignored list' % mac)
return 0
for ignorePattern in _IGNORED_INTERFACE_MAC_PATTERNS:
if re.search(ignorePattern, mac):
return 0
return 1
except:
return 0
def parseNetMask(origmask):
"""
Parses the supplied network mask into the common format of %d.%d.%d.%d
@param origmask: NetMask address to parse
@type origmask: string
@return: parsed NetMask address
@rtype: string
"""
# Clean up any spaces on the ends
mask = string.strip(origmask)
# Make all the characters upper case
mask = string.upper(mask)
# Is the mask in hex? convert it to the traditional ip format
if(re.match('[0-9A-F]{8}\s*', mask)):
nmask = ''
m = re.match('(..)(..)(..)(..)', mask)
for i in range(4):
x = m.group(i + 1)
nmask = nmask + str(int(x, 16))
if(i != 3):
nmask = nmask + '.'
mask = nmask
if(re.match('\d+\.\d+\.\d+\.\d+', mask)):
return mask
else:
raise Exception('Failed to parse invalid network mask: ' + origmask)
def getOSName(client, cmd):
#~~~ What's cmd?
"""
Retrieves the operating system name.
@param client: pre-connected shell client
@type client: shellClinet
@return: operating system name
@rtype: string
"""
r = client.executeCmd(cmd)
if r == None:
return ''
osname = ''
if(re.search('Microsoft Windows', r)):
osname = 'Windows'
else:
lines = r.split('\n')
uname = ''
for line in lines:
if(not re.search('uname', line)):
uname = line
break
if(len(uname) < 3):
osname = ''
else:
token = string.split(uname)
os_name = token[0]
if(re.search('SunOS', os_name)):
osname = 'SunOS'
elif(re.search('Linux', os_name)):
osname = 'Linux'
elif(re.search('FreeBSD', os_name)):
osname = 'FreeBSD'
elif(re.search('HP-UX', os_name)):
osname = 'HP-UX'
elif(re.search('AIX', os_name)):
osname = 'AIX'
elif re.search('VMkernel', os_name):
osname = 'VMkernel'
elif re.search('Darwin', os_name):
osname = 'MacOs'
elif re.search('OpenBSD', os_name):
osname = 'OpenBSD'
else:
logger.debug('unknown OS: ' + os_name)
osname = ''
if osname == '':
if r.find('SunOS') != -1:
osname = 'SunOS'
elif r.find('Linux') != -1:
osname = 'Linux'
elif r.find('FreeBSD') != -1:
osname = 'FreeBSD'
elif r.find('HP-UX') != -1:
osname = 'HP-UX'
elif r.find('AIX') != -1:
osname = 'AIX'
elif r.find('VMkernel') != -1:
osname = 'VMkernel'
elif r.find('Darwin') != -1:
osname = 'MacOs'
elif r.find('OpenBSD') != -1:
osname = 'OpenBSD'
else:
logger.debug('unknown OS: ' + r)
return osname
def isLoopbackIp(ip):
"""
@deprecated: use ip_addr.IPAddress(ip).is_loopback
Checks whether the specified IP is loopback (assumes the given IP is *valid*).
Loopback IPs are any IP which starts with '127.'
@param ip: IP address to check
@type ip: String
@return: 1 if the IP is loopback, else 0
@rtype: int
"""
return ip.startswith('127.')
def isRoutableIp(ipObj):
"""
ip_addr.IPAddress -> bool
"""
return not (ipObj.is_loopback or ipObj.is_unspecified or
ipObj.get_is_link_local())
def isLocalIp(ip):
"""
@deprecated: Use "(ip.is_loopback or ip.is_unspecified)"
where ip is ip_addr.IPAddress
Checks whether the specified IP is local (assumes the given IP is *valid*).
Local IPs are any IP which starts with '127.' or '0.'
@param ip: IP address to check
@type ip: String or ip_addr
@return: 1 if the IP is local, else 0
@rtype: int
"""
return ip.startswith('0.') or ip.startswith('127.')
def isPrivateIp(ip):
"""
@deprecated: Use ip.is_private where ip is ip_addr.IPAddress
Checks is the specified IP belongs to private network (assumes the given IP is *valid*).
Private IP addresses described in RFC 1918 (Private Address Space section)
They are:
10.0.0.0 - 10.255.255.255 (10/8 prefix)
172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
@param ip: IP address to check
@type ip: String
@return: 1 if the IP belongs to private network, else 0
@rtype: int
"""
if ip.startswith('10.') or ip.startswith('192.168.'):
return 1
low_172_ip = convertIpToInt('172.16.0.0')
high_172_ip = convertIpToInt('172.31.255.255')
int_ip = convertIpToInt(ip)
return low_172_ip <= int_ip <= high_172_ip
def getAvailableProtocols(Framework, PROT_TYPE, IP, DOMAIN=None):
"""
Returns available protocols of desired type defined in domain scope
document for concrete IP
"""
protocols = Framework.getAvailableProtocols(IP, PROT_TYPE)
preferredCredId = Framework.loadState()
if preferredCredId is not None and preferredCredId in protocols:
tmp = [preferredCredId]
for protocol in protocols:
if protocol != preferredCredId:
tmp.append(protocol)
protocols = tmp
return protocols
def resolveFQDN(shell, ip):
    '''
    @deprecated: use
    dns_resolver.NsLookupDnsResolver and
    dns_resolver.SocketDnsResolver
    Resolves fqdn of a host by ip.
    NsLookup is tried first; the local socket resolver is the fallback.
    @types: ip -> str?
    '''
    fqdn = None
    if isValidIp(ip):
        resolved = dns_resolver.NsLookupDnsResolver(shell).resolve_hostnames(ip)
        fqdn = resolved
        if fqdn:
            fqdn = fqdn[0]
        else:
            try:
                hostnames = dns_resolver.SocketDnsResolver().resolve_hostnames(ip)
                if hostnames:
                    fqdn = hostnames[0]
            except:
                logger.warn('Failed to resolve host IP through socket')
    return fqdn
def resolveIP(shell, hostName):
    '''
    Resolves ip address of a host by its name(dns)
    NsLookup is used first and then destinations' hosts file on fallback;
    a plain address lookup is the last resort.
    @types: Shell, str -> str?
    '''
    resolver = DNSResolver(shell)
    resolvedIp = None
    try:
        candidates = resolver.resolveIpByNsLookup(hostName)
        resolvedIp = (candidates and candidates[0]
                      or resolver.resolveHostIpByHostsFile(hostName))
    except:
        logger.warn('Failed to resolve host ip throught nslookup')
    return resolvedIp or getHostAddress(hostName)
@contextmanager
def _create_http_client_wrapper():
    # Yields a fresh ApacheHttpClientWrapper and guarantees it is closed
    # when the with-block exits, even on error.
    from com.hp.ucmdb.discovery.library.clients.http import ApacheHttpClientWrapper as HttpClientWrapper
    client = HttpClientWrapper()
    try:
        yield client
    finally:
        client.close()
def isUrlAvailable(url, acceptedStatusCodeRange, timeout=10000):
    '''
    Checks whether url is available, i.e. a GET on it returns a status
    code contained in acceptedStatusCodeRange.
    str, list(int), int -> bool
    @param timeout: socket timeout in milliseconds
    '''
    from com.hp.ucmdb.discovery.library.clients import SSLContextManager
    from com.hp.ucmdb.discovery.library.clients.http import ApacheHttpClientWrapper as HttpClientWrapper
    if not url or not acceptedStatusCodeRange:
        return 0
    with _create_http_client_wrapper() as client:
        client.setSocketTimeout(timeout)
        try:
            jurl = URL(url)
            if jurl.getProtocol() == 'https':
                # register an accept-all SSL context for the https port
                port = jurl.getPort() or HttpClientWrapper.DEFAULT_HTTPS_PORT
                context = SSLContextManager.getAutoAcceptSSLContext()
                client.registerProtocol(context, port)
        except:
            # BUG FIX: original used "'Failed parsing url % ' % url" - an
            # incomplete %-format that itself raised ValueError here
            logger.warn('Failed parsing url %s' % url)
        try:
            httpResult = client.get(url)
            return httpResult.statusCode in acceptedStatusCodeRange
        except:
            logger.warn('Get Failed: %s' % logger.prepareJavaStackTrace())
            return 0
def doHttpGet(url, timeout=20000, requestedData='body', headerName=None):
    """
    Performs HTTP(S) Connection to the specified URL
    Returns data according to the requestedData flag:
    'body': Full Response Body as String
    'header': Returns the response header with the specified headerName,
    or all headers concatenated when headerName is not given
    """
    if requestedData == 'header':
        # HEAD is enough when only headers are wanted
        method = HeadMethod(url)
    else:
        method = GetMethod(url)
    client = HttpClient()
    connectionParams = client.getHttpConnectionManager().getParams()
    connectionParams.setConnectionTimeout(timeout)
    connectionParams.setSoTimeout(timeout)
    client.executeMethod(method)
    if requestedData == 'body':
        return method.getResponseBodyAsString()
    if requestedData == 'header':
        if headerName:
            return method.getResponseHeader(headerName)
        headers = method.getResponseHeaders()
        return ''.join([header.toString() for header in headers])
    raise ValueError('Response part %s in not supported' % requestedData)
def checkTcpConnectivity(ipAddress, portNumber, timeout):
"""
Checks the TCP connection to the given ipAddress on the given port.
@param ipAddress: IP address of the remote computer to check
@type ipAddress: String
@param portNumber: The port number to check
@type portNumber: int
@param timeout: connection timeout in millisecondes
@type timeout: int
@return 1 if the TCP connection succeeded, else
"""
socket = None
try:
socket = Socket()
socket.connect(InetSocketAddress(ipAddress, portNumber), timeout)
logger.debug('Connected to port:', portNumber, ' on host by ip:',
ipAddress)
#check that the object is not null
if (socket != None):
try:
#try to close the socket
socket.close()
except:
#we failed to close the socket - let's log it...
logger.debug('Failed to close socket')
return 1
except IOException, e:
logger.debug('Failed to connect to port:', portNumber,
' on host by ip:', ipAddress,
' IOException(', e.getMessage(), ')')
return 0
except SocketTimeoutException, e:
logger.debug('Failed to connect to port:', portNumber,
' on host by ip:', ipAddress,
' SocketTimeoutException(', e.getMessage(), ')')
return 0
def pingIp(Framework, ipAddress, timeout):
    """
    Ping the specified device
    @param ipAddress: the IP address to ping
    @type ipAddress: string
    @param timeout: ping timeout in milliseconds
    @type timeout: int
    @return: 1 if the machine is pingable, else 0
    """
    properties = Properties()
    properties.setProperty('timeoutDiscover', timeout)
    properties.setProperty('retryDiscover', '1')
    client = Framework.createClient(ClientsConsts.ICMP_PROTOCOL_NAME, properties)
    pingResults = client.executePing([ipAddress])
    if pingResults is None or len(pingResults) == 0:
        # no answer - host unreachable
        return 0
    return 1
def isIpBroadcast(ipAddress, netmask):
    """
    Checks whether the given IP is the broadcast address of its network
    @param ipAddress: IP address to check
    @type ipAddress: string
    @param netmask: corresponding IP network mask
    @type netmask: string
    @return: 1 when the IP is the network's broadcast address, else None
    """
    if not (ipAddress and netmask and isValidIp(ipAddress)):
        return None
    netMask = parseNetMask(netmask)
    parsedIp = IPv4(ipAddress, netMask)
    if netMask in ("255.255.255.255", "255.255.255.254"):
        # host-only masks have no distinct broadcast address
        return None
    if parsedIp == parsedIp.getLastIp():
        return 1
    return None
def isIpAnycast(ipAddress, interfaceName):
    """
    Checks whether the given IP is anycast
    Anycast is a network addressing and routing scheme whereby data is routed
    to the "nearest" or "best" destination as viewed by the routing topology
    Detected here as: a non-local IP bound to a loop-back interface
    @param ipAddress: IP address to check
    @type ipAddress: string
    @param interfaceName: name of the network interface
    @type interfaceName: string
    """
    isLoopbackIface = (interfaceName
                       and (re.match('lo.*', interfaceName, re.IGNORECASE)
                            or re.search('.*Loopback.*', interfaceName)))
    return (ipAddress
            and isLoopbackIface
            and not isLocalIp(ipAddress))
def getPortDescription(portNumber, portType):
    """
    Return port name for the given port number and type.
    @param portNumber: The port number
    @param portType: The port type (TCP / UDP)
    @return: String the description
    """
    # look the name up in the collector-side port-number-to-port-name
    # configuration file shipped with the probe
    portConfig = ConfigFilesManagerImpl.getInstance().getConfigFile(CollectorsParameters.KEY_COLLECTORS_SERVERDATA_PORTNUMBERTOPORTNAME)
    return portConfig.getPortNameByNumberAndType(int(portNumber), portType)
def getDBConnection(userName, password, driverName, connectionURL):
    """
    Return PyConnection to the database (see Jython's zxJDBC description)
    @param userName: the username to connect to DB with
    @param password: the password to connect to DB with
    @param driverName: name of the driver to connect through
    @param connectionURL: JDBC connection URL
    @return: com.ziclix.python.sql.PyConnection
    """
    # load the JDBC driver class through the current thread's context
    # classloader and instantiate it directly (bypasses DriverManager)
    jdbcDriver = Class.forName(driverName, 1,
                    Thread.currentThread().getContextClassLoader()).newInstance()
    props = Properties()
    props.put('user', userName)
    props.put('password', password)
    return PyConnection(jdbcDriver.connect(connectionURL, props))
def getLeadingOnesCount(number):
    """
    Returns the count of 1 in binary representation of given number
    (population count; non-positive input yields 0)
    @type number: integer
    @rtype: integer
    """
    bitCount = 0
    while number > 0:
        bitCount += number & 1
        number >>= 1
    return bitCount
def getShortMask(netmask):
    """
    Returns the count of set bits in given network mask
    e.g. for mask '255.255.255.0' return value will be 24
    @type netmask: string
    @rtype: integer
    """
    if not IPv4.isValidIp(netmask):
        return 0
    return sum(getLeadingOnesCount(int(octet))
               for octet in netmask.split('.'))
def decodeSubnetMask(routingPrefix, isIpV4=1):
    '''
    @precondition: IpV6 is not supported
    Decode routing prefix to IpV4 (dotDecimal) mask
    @param routingPrefix: routing prefix - natural numbers (0..32)
    @param isIpV4: if true dot-decimal mask will be used instead of IpV6
    @return: subnet mask such as '255.255.255.0'
    @rtype: string
    '''
    if not routingPrefix in range(0, 33):
        raise ValueError('routingPrefix should be in 0..32 range for ipV4')
    if not isIpV4:
        raise Exception('IpV6 mask decoding is not implemented')
    prefix = int(routingPrefix)
    # place `prefix` one-bits at the top of a 32-bit word
    mask = ((1 << prefix) - 1) << (32 - prefix)
    octets = [(mask >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return '%s.%s.%s.%s' % tuple(octets)
def obtainDotDecimalTuple(cidrNotationBlock):
    '''
    @precondition: only IPv4 is supported
    Obtain the (ip, subnet mask) pair from CIDR notation.
    @param cidrNotationBlock: address string in CIDR notation (xxx.xxx.xxx.xxx/xx)
    @return: (ip, mask in dot-decimal notation), or None when the input is
             empty, has no slash, or its prefix part is not numeric
    @rtype: tuple or None
    '''
    if not (cidrNotationBlock and cidrNotationBlock.count('/')):
        return None
    # split on the first slash only: the original two-value unpacking raised
    # ValueError for malformed input containing several slashes
    (ip, prefix) = cidrNotationBlock.split('/', 1)
    if not prefix.isdigit():
        return None
    return (ip, decodeSubnetMask(int(prefix)))
def convertIpToInt(ipString):
    """
    Transforms IP address from string representation to numeric
    @param ipString: given IP or network mask to transform
    @type ipString: string
    @rtype: integer
    """
    # FIX: the original used the py2-only long() builtin and reduce();
    # py2 ints auto-promote past 2**31, so int() is numerically identical
    # and this form also runs under py3.
    value = 0
    for token in ipString.split('.'):
        value = value * 256 + int(token)
    return value
def getLowestIp(listIp):
    """
    Getting minimal non-local IP from list of IPs
    @note: if the list contains only local IPs the last local one is returned
    @param listIp: list of IPs from which need to find minimal
    @type listIp: list(str)
    @rtype: str
    @raise ValueError: list of IPs is empty or contains no valid IPs
    """
    if not listIp:
        raise ValueError('Passed empty list of IPs')
    smallIp = None
    intSmallIp = convertIpToInt('255.255.255.255')
    # BUG FIX: this was re-initialized to None on every loop iteration, so
    # "return the latest local IP" only worked when the local IP happened
    # to be the last list element
    latestLocalIp = None
    for ip in filter(None, listIp):
        ip = ip.strip()
        if isLocalIp(ip):
            # remember the most recent local address as the fallback result
            latestLocalIp = ip
        elif convertIpToInt(ip) < intSmallIp:
            smallIp = ip
            intSmallIp = convertIpToInt(smallIp)
    result = smallIp or latestLocalIp
    if not result:
        # FIX: the old check `result == '255.255.255.255'` could never be
        # true (only strictly smaller IPs were assigned), so the documented
        # ValueError was never raised and None leaked out instead
        raise ValueError('Passed list does not contain valid IPs')
    return result
def negateNetMask(mask):
    """
    Basing on integer representation of network mask returns MAXIMAL count of
    IPs which can be addressed in given network, i.e. 2**hostBits - 1
    @param mask: network mask in integer representation
    @type mask: integer
    @see: convertIpToInt
    @rtype: integer
    """
    # closed form of the original bit-shift loop; also drops the py2-only
    # long() call (py2 ints auto-promote, so the value is unchanged)
    hostBits = 32 - getLeadingOnesCount(mask)
    return (1 << hostBits) - 1
def getNetworkClassByNetworkMask(networkMask):
    '''Map a dot-decimal network mask to its legacy class letter, or None.'''
    if networkMask is None:
        return None
    prefixLength = getShortMask(networkMask)
    if prefixLength is None:
        return None
    return getNetworkClassByNetworkPrefix(prefixLength)
def getNetworkClassByNetworkPrefix(networkPrefix):
    '''Map a routing prefix length to its legacy network class letter.
    >= 24 -> "C", >= 16 -> "B", >= 8 -> "A", otherwise None.'''
    if networkPrefix is None:
        return None
    for threshold, classLetter in ((24, "C"), (16, "B"), (8, "A")):
        if networkPrefix >= threshold:
            return classLetter
    return None
class ProtocolType:
    # transport-layer protocol codes, mirrored from the Java PortInfo constants
    TCP_PROTOCOL = PortInfo.TCP_PROTOCOL
    UDP_PROTOCOL = PortInfo.UDP_PROTOCOL
    def values(self):
        # all protocol codes supported by this module
        return (ProtocolType.TCP_PROTOCOL, ProtocolType.UDP_PROTOCOL)
class _PortType:
'Identify port type in application layer of OSI'
def __init__(self, name):
self.__name = name
def getName(self):
r'@types: -> str'
return self.__name
def __eq__(self, other):
r'@types: _PortType -> bool'
return (other and isinstance(other, _PortType)
and self.getName() == other.getName())
def __ne__(self, other):
r'@types: _PortType -> bool'
return not self.__eq__(other)
def __str__(self):
return self.__name
def __repr__(self):
return '_PortType(%s)' % self.__name
def __hash__(self):
return hash(self.__name)
class _PortTypeEnum:
    def __init__(self, **portTypes):
        # validate every provided value up-front
        for value in portTypes.values():
            if not isinstance(value, _PortType):
                raise ValueError("Value of wrong type specified")
        self.__portTypeByName = portTypes

    def __getattr__(self, name):
        # expose enum members as attributes, e.g. PortTypeEnum.HTTP
        portType = self.__portTypeByName.get(name)
        if portType:
            return portType
        raise AttributeError

    def contains(self, portType):
        r'@types: _PortType -> bool'
        return portType in self.__portTypeByName.values()

    def merge(self, otherEnum):
        r'''Build a new enum holding this enum's entries updated with
        otherEnum's entries.
        @types: _PortTypeEnum -> _PortTypeEnum'''
        if not isinstance(otherEnum, _PortTypeEnum):
            raise ValueError("Wrong enum type")
        combined = self.items()
        combined.update(otherEnum.items())
        return _PortTypeEnum(**combined)

    def items(self):
        # defensive copy of the internal mapping
        return self.__portTypeByName.copy()

    def values(self):
        r'@types: -> list[_PortType]'
        return self.__portTypeByName.values()

    def findByName(self, signature):
        r''' Find port type by name (case-insensitive, whitespace ignored)
        @types: str -> _PortType or None'''
        if signature:
            wanted = signature.strip().lower()
            for portType in self.values():
                if wanted == portType.getName():
                    return portType
# well-known application-layer port types; extend via _PortTypeEnum.merge
PortTypeEnum = _PortTypeEnum(
                    HTTP=_PortType('http'),
                    HTTPS=_PortType('https'),
                    SNMP=_PortType('snmp'),
                    SMTP=_PortType('smtp'))
class Endpoint:
    def __init__(self, port, protocol, address, isListen=0,
                 portType=None):
        r'''@types: number, ProtocolType, str, bool, _PortType
        @raise ValueError: Port is not specified
        @raise ValueError: Protocol is incorrect
        @raise ValueError: Address is not specified
        If both portType and portTypes are specified, portTypes takes prevalence
        '''
        # int() raises for a missing or non-numeric port
        self._port = int(port)
        if protocol not in ProtocolType().values():
            raise ValueError("Protocol is incorrect")
        self._protocol = protocol
        if not (address and str(address).strip()):
            raise ValueError("Address is not specified")
        self._address = address
        self._isListen = isListen
        self._portType = portType

    def getPort(self):
        r'''@types: -> int'''
        return self._port

    def getAddress(self):
        r'@types: -> str'
        return self._address

    def isListen(self):
        r'@types: -> bool'
        return self._isListen

    def getPortType(self):
        r'@types: -> _PortType'
        return self._portType

    def getProtocolType(self):
        r'@types: -> int'
        return self._protocol

    def __repr__(self):
        return "Endpoint('%s', %s)" % (self.getAddress(), self.getPort())

    def __eq__(self, other):
        # endpoints compare by (port, address) only
        return (isinstance(other, Endpoint)
                and self.getPort() == other.getPort()
                and self.getAddress() == other.getAddress())

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.getPort(), self.getAddress()))
def createTcpEndpoint(address, port, portType=None):
    r'''Convenience factory for a TCP endpoint.
    @types: str, number, _PortType -> Endpoint'''
    return Endpoint(port=port, protocol=ProtocolType.TCP_PROTOCOL,
                    address=address, portType=portType)
def updateEndpointAddress(endpoint, address):
    r'''Return a copy of *endpoint* with its address replaced.
    @types: Endpoint, str -> Endpoint'''
    # BUG FIX: Endpoint's 4th positional parameter is isListen, not
    # portType - the original passed the port type as the listen flag and
    # silently dropped both the real port type and the listen state.
    return Endpoint(endpoint.getPort(), endpoint.getProtocolType(), address,
                    isListen=endpoint.isListen(),
                    portType=endpoint.getPortType())
class ConnectivityEndpoint:
    def __init__(self, key, endPointList):
        r'''Pair a key object with a non-empty list of endpoints.
        @types: object, list[netutils.Endpoint]'''
        if key is None:
            raise ValueError("key is empty or None")
        if not endPointList:
            raise ValueError("endPointList is None")
        self.__key = key
        self.__endPointList = endPointList

    def getKey(self):
        return self.__key

    def getEndpoints(self):
        return self.__endPointList
class BaseEndpointBuilder:
    r'Base builder for endpoint as we have two types: URI and IP service endpoint'
    def visitEndpoint(self, endpoint):
        r'''Build an ObjectStateHolder for the endpoint; subclasses implement it.
        @types: netutils.Endpoint -> ObjectStateHolder'''
        raise NotImplementedError()
class UriEndpointBuilder(BaseEndpointBuilder):
    def visitEndpoint(self, endpoint):
        r'''Build a uri_endpoint OSH with "address:port" as its uri.
        @types: netutils.Endpoint -> ObjectStateHolder'''
        uriOsh = ObjectStateHolder('uri_endpoint')
        uriOsh.setAttribute('uri',
                            "%s:%s" % (endpoint.getAddress(), endpoint.getPort()))
        return uriOsh
class ServiceEndpointBuilder(BaseEndpointBuilder):
    @staticmethod
    def updateServiceNames(osh, service_names):
        # overwrite the multi-valued service_names attribute on an existing OSH
        if not service_names:
            raise ValueError('Invalid service_names')
        osh.setAttribute('service_names', StringVector(service_names))
        return osh
    def visitEndpoint(self, endpoint):
        r'''
        @types: netutils.Endpoint -> ObjectStateHolder
        @raise ValueError: Not supported protocol type
        @raise ValueError: Invalid IP address
        '''
        address = endpoint.getAddress()
        # normalize to an ip_addr address object; the constructor raises
        # for an invalid IP string
        if not isinstance(address, (ip_addr.IPv4Address, ip_addr.IPv6Address)):
            address = ip_addr.IPAddress(address)
        ipServerOSH = ObjectStateHolder('ip_service_endpoint')
        uri = "%s:%s" % (address, endpoint.getPort())
        ipServerOSH.setAttribute('ipserver_address', uri)
        ipServerOSH.setAttribute('network_port_number', endpoint.getPort())
        # (name, enum-ordinal) pairs expected by the CMDB class model
        if endpoint.getProtocolType() == ProtocolType.TCP_PROTOCOL:
            portType = ('tcp', 1)
        elif endpoint.getProtocolType() == ProtocolType.UDP_PROTOCOL:
            portType = ('udp', 2)
        else:
            raise ValueError("Not supported protocol type")
        ipServerOSH.setAttribute('port_type', portType[0])
        ipServerOSH.setEnumAttribute('ipport_type', portType[1])
        ipServerOSH.setAttribute('bound_to_ip_address', str(address))
        self.__setServiceNames(ipServerOSH, endpoint)
        return ipServerOSH
    def __setServiceNames(self, ipServerOSH, endpoint):
        # mirror the port type into both the single-valued and the
        # multi-valued service-name attributes
        if endpoint.getPortType():
            serviceName = str(endpoint.getPortType())
            ipServerOSH.setStringAttribute('ip_service_name', serviceName)
            serviceNamesList = StringVector((serviceName,))
            ipServerOSH.setAttribute('service_names', serviceNamesList)
    def setNameAttr(self, ipEndpointOSH, value):
        ipEndpointOSH.setAttribute('name', value)
class EndpointReporter:
    def __init__(self, builder):
        r'''@types: BaseEndpointBuilder
        @raise ValueError: Endpoint builder is not specified'''
        if not builder:
            raise ValueError("Endpoint builder is not specified")
        self.__builder = builder

    def reportEndpoint(self, endpoint, containerOsh):
        r'''Build an OSH for *endpoint* and anchor it to *containerOsh*.
        @types: Endpoint, ObjectStateHolder -> ObjectStateHolder
        @raise ValueError: Endpoint is not specified
        @raise ValueError: Container is not specified
        '''
        if not endpoint:
            raise ValueError("Endpoint is not specified")
        if not containerOsh:
            raise ValueError("Container is not specified")
        osh = self.__builder.visitEndpoint(endpoint)
        osh.setContainer(containerOsh)
        return osh

    def reportHostFromEndpoint(self, endpoint):
        r'''Build a host OSH from the endpoint's address.
        @types: Endpoint -> ObjectStateHolder
        @raise ValueError: Endpoint is not specified
        @raise ValueError: Invalid IP address
        '''
        if not endpoint:
            raise ValueError("Endpoint is not specified")
        if not ip_addr.isValidIpAddressNotZero(endpoint.getAddress()):
            raise ValueError("Invalid IP address")
        # FIX: the original used exec("import modeling"), which hid the
        # (deferred, cycle-avoiding) dependency from readers and tooling;
        # a plain function-level import is equivalent.
        import modeling
        return modeling.createHostOSH(str(endpoint.getAddress()))
# Default locations of the static hosts file per platform.
# FIX: use a raw string - the original relied on '\s', '\d', '\e' being
# unrecognized escape sequences that Python happens to leave untouched
# (a DeprecationWarning since Python 3.6).
WINDOWS_HOSTS_CONFIG = r'%SystemRoot%\system32\drivers\etc\hosts'
UNIX_HOSTS_CONFIG = '/etc/hosts'
def __convertIpsToStrings(ipObjectsList):
    # stringify each ip_addr object
    # NOTE(review): this double-underscore name cannot be referenced from
    # inside a class body - private name mangling would rewrite the call
    return [str(ipObj) for ipObj in ipObjectsList]
def __filterOutIpv6(ipAddressList):
    '''
    leaves only IPv4 addresses
    '''
    return [ip for ip in ipAddressList if isValidIp(ip)]
class DNSResolver:
    """
    Class responsible for getting nslookup results on a client's machine
    """
    def __init__(self, shell):
        self.shell = shell

    def resolveIpByNsLookup(self, dnsName, dnsServer=''):
        """
        @deprecated:
        Use dns_resolver.NsLookupDnsResolver(shell, dns_server=dnsServer).resolve_ips(dnsName)
        Resolves (or not) IP addresses by given machine name
        @param dnsName: the machine name to resolve IPs
        @type dnsName: string
        @param dnsServer: the dns server used
        @rtype: list
        """
        resolver = dns_resolver.NsLookupDnsResolver(self.shell, dns_server=dnsServer)
        # BUG FIX: the original called the module-level helpers
        # __convertIpsToStrings/__filterOutIpv6 from inside this class body;
        # private name mangling rewrote those calls to
        # _DNSResolver__filterOutIpv6(...) and raised NameError at runtime.
        # The equivalent stringify + IPv4-only filter is inlined instead.
        ipStrings = [str(ipObj) for ipObj in resolver.resolve_ips(dnsName)]
        return [ip for ip in ipStrings if isValidIp(ip)]

    def resolveHostIp(self, hostName, dnsServer=''):
        """
        @deprecated: This method is broken, you should always expect
        multiple addresses, use dns_resolver.NsLookupDnsResolver
        Resolves (or not) IP addresses by given machine name
        Returns the lowest ip amongst resolved or None
        @param hostName: the machine name to resolve IPs
        @type hostName: string
        @rtype: string
        """
        resolvedIp = None
        resolvedIps = self.resolveIpByNsLookup(hostName, dnsServer=dnsServer)
        if resolvedIps:
            try:
                resolvedIp = getLowestIp(resolvedIps)
            except:
                logger.warnException('Failed to find a minimal IP in the %s' % resolvedIps)
        return resolvedIp

    def resolveFQDNByNsLookup(self, dnsName):
        """
        @deprecated: Use dns_resolver.NsLookupDnsResolver
        Resolves (or not) FQDN by given machine name
        @param dnsName: the machine name to resolve FQDN
        @type dnsName: string
        @rtype: string
        """
        resolver = dns_resolver.NsLookupDnsResolver(self.shell)
        return resolver.resolve_fqdn(dnsName)

    def resolveDnsNameByNslookup(self, ipAddr, dnsServer=''):
        """
        @deprecated: Use dns_resolver.NsLookupDnsResolver
        Resolves (or not) machine DNS name by given IP
        @param ipAddr: the IP to resolve a DNS name for
        @type ipAddr: string
        @rtype: string
        @return: DNS name if resolved; None if not resolved
        """
        resolver = dns_resolver.NsLookupDnsResolver(self.shell)
        dnsNames = resolver.resolve_hostnames(ipAddr)
        if dnsNames:
            return dnsNames[0]

    def resolveHostIpByHostsFile(self, dnsName):
        """
        @deprecated: Use HostsFileDnsResolver, expect multiple addresses
        Resolves (or not) a host IP using the system's "hosts" file
        @param dnsName: the machine name to resolve IPs
        @type dnsName: string
        @rtype: string
        @return: IP address if resolved; None if not resolved
        """
        resolver = dns_resolver.HostsFileDnsResolver(self.shell)
        # same mangling bug fix as in resolveIpByNsLookup: inline the
        # stringify + IPv4 filter instead of calling the mangled helpers
        ipStrings = [str(ipObj) for ipObj in resolver.resolve_ips(dnsName)]
        ips = [ip for ip in ipStrings if isValidIp(ip)]
        if ips:
            return ips[0]
class IpResolver:
    """
    @deprecated: use dns_resolver.SocketDNSResolver or dns_resolver.NSLookup
    and handle multiple addresses
    Class responsible for resolving IP addresses on probe machine's side
    """
    def __init__(self, remoteDnsAddress, framework):
        # DNS server (name or IP) used for the remote-nslookup fallback
        self.remoteDnsAddress = remoteDnsAddress
        self.framework = framework
        # local shell client is created lazily by getLocalShell()
        self.localShell = None
    def resolveHostIpWithLocalDns(self, hostName):
        """
        Resolves (or not) IP address by given machine name
        using the probe's own DNS configuration
        @param hostName: the machine name to resolve IPs
        @type hostName: string
        @rtype: string
        """
        try:
            return InetAddress.getByName(hostName).getHostAddress()
        except UnknownHostException:
            # unresolved - fall through and return None
            pass
    def resolveHostIpWithRemoteDns(self, hostName, remoteDns):
        """
        Resolves (or not) IP address by given machine name using nslookup command on probe machine
        @param hostName: the machine name to resolve IP
        @type hostName: string
        @param remoteDns: the remate DNS name (or IP) to resolve host IP
        @type remoteDns: string
        @rtype: string
        """
        if not self.localShell:
            self.localShell = shellutils.ShellUtils(self.getLocalShell())
        if self.localShell:
            resolver = DNSResolver(self.localShell)
            return resolver.resolveHostIp(hostName, dnsServer=remoteDns)
    def resolveHostIp(self, hostName):
        """
        Tries to resolve host IP using resolveHostIpWithLocalDns and resolveHostIpWithRemoteDns
        methods (in fall-back order)
        @param hostName: the machine name to resolve IP
        @type hostName: string
        @rtype: string
        """
        resultIp = self.resolveHostIpWithLocalDns(hostName)
        if not resultIp:
            resultIp = self.resolveHostIpWithRemoteDns(hostName, self.remoteDnsAddress)
        if not resultIp:
            logger.debug("Failed to resolve IP for host '%s'" % hostName)
        return resultIp
    def getLocalShell(self):
        """
        Creates and caches local shell client.
        Must not be used outside of class.
        """
        try:
            return self.framework.createClient(ClientsConsts.LOCAL_SHELL_PROTOCOL_NAME)
        except:
            logger.errorException('Failed to create LocalShell client')
    def close(self):
        """
        Closes local shell client.
        Have to be called after usage of IpResolver
        """
        if self.localShell is not None:
            try:
                self.localShell.close()
                self.localShell = None
            except:
                # best-effort close; failure is deliberately ignored
                pass
# module-level alias so callers can catch netutils.ResolveException directly
ResolveException = dns_resolver.ResolveException
class BaseDnsResolver:
    'Base class for DNS resolvers'
    # shared ResolveException instances raised by the concrete resolvers
    _HOSTNAME_RESOLVE_EXCEPTION = dns_resolver._HOSTNAME_RESOLVE_EXCEPTION
    _IP_RESOLVE_EXCEPTION = dns_resolver._IP_RESOLVE_EXCEPTION
    def resolveHostnamesByIp(self, ip):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve hostname
        '''
        raise NotImplementedError()
    def resolveIpsByHostname(self, hostname):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve IP
        '''
        raise NotImplementedError()
class FallbackResolver(BaseDnsResolver):
'''
Implementation of DNS resolving using fallback approach against different resolvers
'''
def __init__(self, resolvers):
self.__resolvers = resolvers
def resolveHostnamesByIp(self, ip):
'''
Call for each resolver resolveHostnamesByIp and if it was failed with ResolveException,
call next resolver
@types: method, *args -> list(str)
@param: method - method wich will be call for each resolver
@param: *args - arguments for the method
'''
for resolver in self.__resolvers:
try:
return resolver.resolveHostnamesByIp(ip)
except ResolveException, re:
logger.warn(str(re))
raise self._HOSTNAME_RESOLVE_EXCEPTION
def resolveIpsByHostname(self, hostname):
'''
Call for each resolver method and if it was failed with ResolveException,
call next resolver
@types: method, *args -> None
method - method wich will be call for each resolver
*args - arguments for the method
'''
for resolver in self.__resolvers:
try:
return resolver.resolveIpsByHostname(hostname)
except ResolveException, re:
logger.warn(str(re))
raise self._IP_RESOLVE_EXCEPTION
def createDefaultFallbackResolver(shell=None):
    '''Build the default resolver chain: Java resolver first, the
    shell-based resolver second (only when a shell is given).'''
    chain = [JavaDnsResolver()]
    if shell is not None:
        chain.append(DnsResolverByShell(shell))
    return FallbackResolver(chain)
class DnsResolverByShell(BaseDnsResolver):
    # Resolver that shells out (nslookup / hosts file) on the destination.
    def __init__(self, shell, dnsServerAddress=None):
        '@types: Shell, str, str'
        self.__shell = shell
        self.__dnsResolver = DNSResolver(shell)
        ipResolver = IpResolver(dnsServerAddress, None)
        self.__ipResolver = ipResolver
        # next two lines is temporary solution for functionality reuse of
        # IpResolver which creates local shell client, so to prevent such
        # behaviour we work with some sort of shell - it can be or local shell
        # or remote shell
        ipResolver.localShell = shell
        # monkey-patch: IpResolver's code path expects executeCmd(); alias
        # it to execCmd when only the latter exists on the given shell
        if hasattr(shell, 'execCmd'):
            shell.executeCmd = shell.execCmd
    def resolveHostnamesByIp(self, ip):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve hostname
        @raise ValueError: IP is not specified
        '''
        if not ip:
            raise ValueError("IP is not specified")
        dnsName = None
        try:
            dnsName = self.__dnsResolver.resolveDnsNameByNslookup(ip)
        except Exception, ex:
            logger.debugException(str(ex))
            raise self._HOSTNAME_RESOLVE_EXCEPTION
        if not dnsName:
            raise self._HOSTNAME_RESOLVE_EXCEPTION
        return [dnsName]
    def resolveIpsByHostname(self, hostname):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve IPs
        @note: When resolved IP is local (loopback) it will be replaced with
            destination IP address if such was specified
            while initializing this DNS resolver
        '''
        if not hostname:
            raise ValueError("Hostname is not specified")
        try:
            ip = self.__ipResolver.resolveHostIp(hostname)
        except Exception, ex:
            logger.debugException(str(ex))
            raise self._IP_RESOLVE_EXCEPTION
        if not ip:
            raise self._IP_RESOLVE_EXCEPTION
        return [ip]
def createDnsResolverByShell(shell, dnsServerAddress=None):
    r''' Factory method to create DNS resolver
    @types: Shell -> BaseDnsResolver'''
    resolver = DnsResolverByShell(shell, dnsServerAddress)
    return resolver
class JavaDnsResolver(BaseDnsResolver):
    '''
    DNS Resolver that uses java API - InetAddress
    @deprecated: use dns_resolver.LocalDnsResolver
    '''
    def resolveHostnamesByIp(self, ip):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve hostnames
        @raise ValueError: Ip is not specified
        '''
        if not ip:
            raise ValueError("Ip is not specified")
        dnsName = None
        try:
            dnsName = str(InetAddress.getByName(ip).getHostName())
        except JException, je:
            logger.debug(str(je))
            raise self._HOSTNAME_RESOLVE_EXCEPTION
        # getHostName() echoes the IP back when reverse lookup failed
        if not dnsName or dnsName == ip:
            raise self._HOSTNAME_RESOLVE_EXCEPTION
        return [dnsName]
    def resolveIpsByHostname(self, hostname):
        '''@types: str -> list[str]
        @raise ResolveException: Failed to resolve IPs
        @note: When resolved IP is local (loopback) it will be replaced
            with destination IP address if such was specified
            while initializing this DNS resolver
        '''
        if not hostname:
            raise ValueError("hostname is not specified")
        ip = None
        try:
            ip = str(InetAddress.getByName(hostname).getHostAddress())
        except JException, ex:
            logger.debug(str(ex))
            raise self._IP_RESOLVE_EXCEPTION
        if not ip:
            raise self._IP_RESOLVE_EXCEPTION
        return [ip]
class __IPProtocols:
    # thin wrapper around the Java IPProtocols configuration accessor
    def __init__(self, ipProtocols):
        self.__ipProtocols = ipProtocols
    def getProtocolCode(self, protocol):
        '''@types: str -> int'''
        return self.__ipProtocols.getProtocolCode(protocol)
from com.hp.ucmdb.discovery.library.communication.downloader.cfgfiles import IPProtocols as JIPProtocols
# module-level singleton wrapping the Java IPProtocols accessor
IPPROTOCOLS = __IPProtocols(JIPProtocols)
class __DomainScopeManager:
    # thin wrapper around the Java DomainScopeManager
    def __init__(self, domainScopeManager):
        self.__domainScopeManager = domainScopeManager
    def isIpOutOfScope(self, ipAddress):
        '''@types: str -> bool '''
        return self.__domainScopeManager.isIpOutOfScope(ipAddress)
    def isClientIp(self, ipAddress):
        '''Truthy when the IP falls into a range of type CLIENT.
        @types: str -> bool (returns the falsy range-type value when the
        IP matches no range)'''
        ipType = self.__domainScopeManager.getRangeTypeByIp(ipAddress)
        return ipType and ipType.equals(RangeType.CLIENT)
from com.hp.ucmdb.discovery.library.scope import DomainScopeManager as JDomainScopeManager
# module-level singleton wrapping the Java DomainScopeManager
DOMAIN_SCOPE_MANAGER = __DomainScopeManager(JDomainScopeManager)
| [
"bluesteelkc@gmail.com"
] | bluesteelkc@gmail.com |
5d868440f1095331ef8563a58ada332644b08b8f | 684a9016bf00e132eab3c9cf4534639ae096cfc5 | /Main/dlnaupnpserver/sqlalchemy/orm/state.py | 76fddc6212122041936936f6953254fc5271f81d | [
"BSD-3-Clause",
"MIT"
] | permissive | pszafer/dlna_upnp_invention | d97a0c641d1d7b170378f0fad5b978d8e5576966 | 497d173a9e3883412dbbb17cafa826a0394ff849 | refs/heads/master | 2021-01-02T09:35:04.628203 | 2012-07-10T15:02:53 | 2012-07-10T15:02:53 | 1,980,638 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,589 | py | # orm/state.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
from sqlalchemy.util import EMPTY_SET
import weakref
from sqlalchemy import util
from sqlalchemy.orm import exc as orm_exc, attributes, interfaces,\
util as orm_util
from sqlalchemy.orm.attributes import PASSIVE_OFF, PASSIVE_NO_RESULT, \
PASSIVE_NO_FETCH, NEVER_SET, ATTR_WAS_SET, NO_VALUE
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
import sys
class InstanceState(object):
    """tracks state information at the instance level."""
    # -- class-level defaults, overridden per instance as state changes --
    session_id = None
    # identity key; bool(key) drives `has_identity`
    key = None
    runid = None
    load_options = EMPTY_SET
    load_path = ()
    insert_order = None
    mutable_dict = None
    # strong reference to the instance, set while it is marked modified
    # (see __setstate__); keeps a dirty object alive until flushed
    _strong_obj = None
    modified = False
    expired = False
    deleted = False
    def __init__(self, obj, manager):
        # keep only a weakref to the instance; _cleanup fires when it is
        # garbage collected
        self.class_ = obj.__class__
        self.manager = manager
        self.obj = weakref.ref(obj, self._cleanup)
        self.callables = {}
        self.committed_state = {}
    @util.memoized_property
    def parents(self):
        # lazily-created dict; populated elsewhere for parent tracking
        return {}
    @util.memoized_property
    def pending(self):
        # lazily-created dict of pending collections keyed by attribute name
        # (see get_pending)
        return {}
    @property
    def has_identity(self):
        # True once the instance has an identity key
        return bool(self.key)
    def detach(self):
        # disassociate this state from its owning session
        self.session_id = None
    def dispose(self):
        self.detach()
        del self.obj
    def _cleanup(self, ref):
        # weakref callback: the tracked instance was garbage collected
        instance_dict = self._instance_dict()
        if instance_dict:
            instance_dict.discard(self)
        self.callables = {}
        self.session_id = None
        del self.obj
    def obj(self):
        # class-level fallback; replaced on the instance by a weakref.ref
        # in __init__ (and deleted by dispose/_cleanup, re-exposing this)
        return None
    @property
    def dict(self):
        # the instance's attribute dictionary, or {} if it was collected
        o = self.obj()
        if o is not None:
            return attributes.instance_dict(o)
        else:
            return {}
    def initialize_instance(*mixed, **kwargs):
        # NOTE: deliberately declared with *mixed - called as
        # (state, instance, *constructor args); forwards construction to the
        # manager's original __init__ after firing the init event
        self, instance, args = mixed[0], mixed[1], mixed[2:]
        manager = self.manager
        manager.dispatch.init(self, args, kwargs)
        #if manager.mutable_attributes:
        #    assert self.__class__ is MutableAttrInstanceState
        try:
            return manager.original_init(*mixed[1:], **kwargs)
        except:
            # notify listeners that construction failed, then re-raise
            manager.dispatch.init_failure(self, args, kwargs)
            raise
    def get_history(self, key, passive):
        # delegate history tracking to the attribute implementation
        return self.manager[key].impl.get_history(self, self.dict, passive)
    def get_impl(self, key):
        # the AttributeImpl handling the named attribute
        return self.manager[key].impl
    def get_pending(self, key):
        # create the PendingCollection for `key` on first access
        if key not in self.pending:
            self.pending[key] = PendingCollection()
        return self.pending[key]
    def value_as_iterable(self, dict_, key, passive=PASSIVE_OFF):
        """Return a list of tuples (state, obj) for the given
        key.
        returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
        """
        impl = self.manager[key].impl
        x = impl.get(self, dict_, passive=passive)
        if x is PASSIVE_NO_RESULT or x is None:
            return []
        elif hasattr(impl, 'get_collection'):
            # collection-valued attribute: pair each member with its state
            return [
                (attributes.instance_state(o), o) for o in
                impl.get_collection(self, dict_, x, passive=passive)
            ]
        else:
            # scalar reference: single (state, obj) pair
            return [(attributes.instance_state(x), x)]
def __getstate__(self):
d = {'instance':self.obj()}
d.update(
(k, self.__dict__[k]) for k in (
'committed_state', 'pending', 'parents', 'modified', 'expired',
'callables', 'key', 'load_options', 'mutable_dict'
) if k in self.__dict__
)
if self.load_path:
d['load_path'] = interfaces.serialize_path(self.load_path)
self.manager.dispatch.pickle(self, d)
return d
def __setstate__(self, state):
from sqlalchemy.orm import instrumentation
self.obj = weakref.ref(state['instance'], self._cleanup)
self.class_ = state['instance'].__class__
self.manager = manager = instrumentation.manager_of_class(self.class_)
if manager is None:
raise orm_exc.UnmappedInstanceError(
state['instance'],
"Cannot deserialize object of type %r - no mapper() has"
" been configured for this class within the current Python process!" %
self.class_)
elif manager.is_mapped and not manager.mapper.configured:
mapperlib.configure_mappers()
self.committed_state = state.get('committed_state', {})
self.pending = state.get('pending', {})
self.parents = state.get('parents', {})
self.modified = state.get('modified', False)
self.expired = state.get('expired', False)
self.callables = state.get('callables', {})
if self.modified:
self._strong_obj = state['instance']
self.__dict__.update([
(k, state[k]) for k in (
'key', 'load_options', 'mutable_dict'
) if k in state
])
if 'load_path' in state:
self.load_path = interfaces.deserialize_path(state['load_path'])
manager.dispatch.unpickle(self, state)
def initialize(self, key):
"""Set this attribute to an empty value or collection,
based on the AttributeImpl in use."""
self.manager.get_impl(key).initialize(self, self.dict)
def reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
dict_.pop(key, None)
self.callables.pop(key, None)
def expire_attribute_pre_commit(self, dict_, key):
"""a fast expire that can be called by column loaders during a load.
The additional bookkeeping is finished up in commit_all().
This method is actually called a lot with joined-table
loading, when the second table isn't present in the result.
"""
dict_.pop(key, None)
self.callables[key] = self
def set_callable(self, dict_, key, callable_):
"""Remove the given attribute and set the given callable
as a loader."""
dict_.pop(key, None)
self.callables[key] = callable_
def expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.modified = False
pending = self.__dict__.get('pending', None)
mutable_dict = self.mutable_dict
self.committed_state.clear()
if mutable_dict:
mutable_dict.clear()
if pending:
pending.clear()
for key in self.manager:
impl = self.manager[key].impl
if impl.accepts_scalar_loader and \
(impl.expire_missing or key in dict_):
self.callables[key] = self
dict_.pop(key, None)
self.manager.dispatch.expire(self, None)
def expire_attributes(self, dict_, attribute_names):
pending = self.__dict__.get('pending', None)
mutable_dict = self.mutable_dict
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
self.callables[key] = self
dict_.pop(key, None)
self.committed_state.pop(key, None)
if mutable_dict:
mutable_dict.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def __call__(self, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if passive is PASSIVE_NO_FETCH:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.\
intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
for k in toload.intersection(self.callables):
del self.callables[k]
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def expired_attributes(self):
"""Return the set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs.
"""
return set([k for k, v in self.callables.items() if v is self])
def _instance_dict(self):
return None
def _is_really_none(self):
return self.obj()
def modified_event(self, dict_, attr, previous, collection=False):
if attr.key not in self.committed_state:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
# the "or not self.modified" is defensive at
# this point. The assertion below is expected
# to be True:
# assert self._strong_obj is None or self.modified
if self._strong_obj is None or not self.modified:
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
self._strong_obj = self.obj()
if self._strong_obj is None:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (
self.manager[attr.key],
orm_util.state_class_str(self)
))
self.modified = True
def commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
class_manager = self.manager
if class_manager.mutable_attributes:
for key in keys:
if key in dict_ and key in class_manager.mutable_attributes:
self.committed_state[key] = self.manager[key].impl.copy(dict_[key])
else:
self.committed_state.pop(key, None)
else:
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
for key in set(self.callables).\
intersection(keys).\
intersection(dict_):
del self.callables[key]
def commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers/callables for attributes loaded are removed.
Attributes marked as "expired" can potentially remain "expired" after this step
if a value was not populated in state.dict.
"""
self.committed_state.clear()
self.__dict__.pop('pending', None)
callables = self.callables
for key in list(callables):
if key in dict_ and callables[key] is self:
del callables[key]
for key in self.manager.mutable_attributes:
if key in dict_:
self.committed_state[key] = self.manager[key].impl.copy(dict_[key])
if instance_dict and self.modified:
instance_dict._modified.discard(self)
self.modified = self.expired = False
self._strong_obj = None
class MutableAttrInstanceState(InstanceState):
"""InstanceState implementation for objects that reference 'mutable'
attributes.
Has a more involved "cleanup" handler that checks mutable attributes
for changes upon dereference, resurrecting if needed.
"""
@util.memoized_property
def mutable_dict(self):
return {}
def _get_modified(self, dict_=None):
if self.__dict__.get('modified', False):
return True
else:
if dict_ is None:
dict_ = self.dict
for key in self.manager.mutable_attributes:
if self.manager[key].impl.check_mutable_modified(self, dict_):
return True
else:
return False
def _set_modified(self, value):
self.__dict__['modified'] = value
modified = property(_get_modified, _set_modified)
@property
def unmodified(self):
"""a set of keys which have no uncommitted changes"""
dict_ = self.dict
return set([
key for key in self.manager
if (key not in self.committed_state or
(key in self.manager.mutable_attributes and
not self.manager[key].impl.check_mutable_modified(self, dict_)))])
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
dict_ = self.dict
return set([
key for key in keys
if (key not in self.committed_state or
(key in self.manager.mutable_attributes and
not self.manager[key].impl.check_mutable_modified(self, dict_)))])
def _is_really_none(self):
"""do a check modified/resurrect.
This would be called in the extremely rare
race condition that the weakref returned None but
the cleanup handler had not yet established the
__resurrect callable as its replacement.
"""
if self.modified:
self.obj = self.__resurrect
return self.obj()
else:
return None
def reset(self, dict_, key):
self.mutable_dict.pop(key, None)
InstanceState.reset(self, dict_, key)
def _cleanup(self, ref):
"""weakref callback.
This method may be called by an asynchronous
gc.
If the state shows pending changes, the weakref
is replaced by the __resurrect callable which will
re-establish an object reference on next access,
else removes this InstanceState from the owning
identity map, if any.
"""
if self._get_modified(self.mutable_dict):
self.obj = self.__resurrect
else:
instance_dict = self._instance_dict()
if instance_dict:
instance_dict.discard(self)
self.dispose()
def __resurrect(self):
"""A substitute for the obj() weakref function which resurrects."""
# store strong ref'ed version of the object; will revert
# to weakref when changes are persisted
obj = self.manager.new_instance(state=self)
self.obj = weakref.ref(obj, self._cleanup)
self._strong_obj = obj
obj.__dict__.update(self.mutable_dict)
# re-establishes identity attributes from the key
self.manager.dispatch.resurrect(self)
return obj
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
| [
"pszafer@gmail.com"
] | pszafer@gmail.com |
7f55ae48c78a49d9999ea1b01e0ad976109446fb | 61be48226158a3f4f24a7e465f511397e98ce7a9 | /05 Spider/爬虫案列/taobao.py | fa6eae77d89ba8b965f382a29d0951aef8b56dde | [] | no_license | yangzongwu/mynote | 9f9e1eddf9c589d3063c8ba1f07e81e269997ddc | ee38af9b63652b917dd78a7d253d6efd41276f10 | refs/heads/master | 2023-02-18T01:52:22.338777 | 2021-01-20T06:35:12 | 2021-01-20T06:35:12 | 255,619,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,547 | py | import requests
from lxml import etree
import json
import time
def tbSearch(input='孤独星球',n=10):
cookie='thw=cn; UM_distinctid=17226824d596e0-0575a27ff0dfc5-d373666-100200-17226824d5a672; cna=B6scF9WLgnACAXANP9/aEdRn; _m_h5_tk=ce77126788aaaaf8c8f5f1d0e5357a30_1596783335113; _m_h5_tk_enc=428da61375cb4c464acdbc54c144dca3; sgcookie=EaRK2eZCRrSldICbEfYxl; uc3=lg2=V32FPkk%2Fw0dUvg%3D%3D&vt3=F8dBxG2jilDlMHlh1Nw%3D&nk2=GgoRmsJ6omland7Uu0E%3D&id2=UoWzW98vT5ar; lgc=yangzongwu1987; uc4=id4=0%40UO2kHgAg6TsN%2BKWISlPbWO6j3ug%3D&nk4=0%40GIauBIP1iExONbAq4CWbxfFxW35jq3iNHA%3D%3D; tracknick=yangzongwu1987; _cc_=V32FPkk%2Fhw%3D%3D; enc=1JhRCSWddtrcbgjSin4A4aEnQ1XzfBWYr5F6oHdh6JR7OYOL0jgtw9CZvimv9YfphJrN3W%2FMzAfhghlJ6iGK2g%3D%3D; mt=ci=9_1; hng=CN%7Czh-CN%7CCNY%7C156; v=0; cookie2=140665a3d151b870531e63bc455ce885; uc1=cookie14=UoTV6hxPRuJMwA%3D%3D; t=266326f334fa6d8c6fe398635c1ba24f; _tb_token_=eb3b5e37e4ebe; tfstk=clNCBFADWHxC2-ByTy_ZY5c8r2l5ZP8jP9i3RRl0uCGRhvaCiDRqcP-7E3dtHV1..; l=eBOSyk2qOrNIYM7kBOfZnurza77TpIRVguPzaNbMiOCPOV1p5VCPWZopNkY9CnGVnsgvR35e4jETB08UhPatCc1l1fc2cMpTdd8h.; isg=BLu7RzuKYO5Qt13gIkDsL41PSp8lEM8SOkx8ga14y7rRDNjuNeGGYO4GJqxCLCcK'
url = 'https://s.taobao.com/search?q='+input+'&ie=utf8&sort=sale-desc'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36','cookie':cookie}
page_text = requests.get(url,headers=headers).text
tree = etree.HTML(page_text)
good_json=tree.xpath('/html/head/script[8]//text()')[0].strip()
good_json=good_json.split('\n')[0][16:-1]
good_dic = json.loads(good_json)
good_list=good_dic['mods']['itemlist']['data']['auctions']
taobao_result=[]
for good in good_list[:n]:
try:
time.sleep(1)
good_name = good['raw_title']
good_pic = good['pic_url'] + '_.webp'
good_url = 'https:' + good['detail_url']
good_price = good['view_price']
comment_count = good['comment_count']
good_comment_rate = (good['shopcard']['description'][0] + good['shopcard']['service'][0] +
good['shopcard']['delivery'][0]) / 3
detail_page_text = requests.get(good_url, headers=headers).text
detail_page_tree = etree.HTML(detail_page_text)
try:
good_ISBN = detail_page_tree.xpath('//*[@id="J_AttrUL"]/li[2]/text()')[0]
except:
good_ISBN = None
if good_ISBN and good_ISBN[0:4] != 'ISBN':
try:
good_ISBN = detail_page_tree.xpath('//*[@id="J_AttrUL"]/li[3]/text()')[0]
except:
good_ISBN = None
if good_ISBN[0:4] == 'ISBN':
good_ISBN = good_ISBN[8:]
else:
good_ISBN = None
good_comment_rate=str(round(good_comment_rate/5,1))+'%'
res = {
'good_ISBN': good_ISBN,
'good_name': good_name,
'good_pic': good_pic,
'good_price': good_price,
'good_price_unit': good_price,
'good_from': 'Taobao',
'good_url': good_url,
'good_comment_rate': good_comment_rate,
'good_num_comment': comment_count,
}
taobao_result.append(res)
except:
continue
print(taobao_result)
return taobao_result
def tbSearch_example():
example=""
return example | [
"yangzong5@gmail.com"
] | yangzong5@gmail.com |
28c55f04edb78696643ed582fe5f5af8380b09c7 | c68b99bf1671d1fb5a1a5a0d6df7bb164dd1d20d | /Medium/394-DecodeString.py | 4af7e17cc5a884ed7770314baf35d36aaface6e3 | [] | no_license | r50206v/Leetcode-Practice | 8db9333e2e3d2a335f439d7e9e57e8c36b69ae6d | f9302e93c441f06cc14949605da20978c4289202 | refs/heads/master | 2022-05-17T18:09:48.857263 | 2022-04-27T01:02:12 | 2022-04-27T01:02:12 | 192,258,017 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | '''
Iteration + 2 Stack
Time: O(N)
Space: O(m + n)
m: the number of letters in s
n: the number of numbers in s
'''
class Solution:
def decodeString(self, s: str) -> str:
result = ""
strStack = []
countStack = []
base = ""
for index in range(len(s)):
if s[index].isdigit():
base += s[index]
elif s[index] == '[':
strStack.append(result)
result = ""
countStack.append(int(base))
base = ""
elif s[index] == ']':
print(strStack, countStack, result)
# very important !!
result = strStack.pop() + (result * countStack.pop())
else:
result += s[index]
return result
| [
"r50206v@gmail.com"
] | r50206v@gmail.com |
863f4da890f4e1a30367db93383f73c09e951f1c | 9ac99a99dc8f79f52fbbe3e8a5b311b518fe45d9 | /apps/lms/api/services/LMS_SurveyTemplateService.py | 6c4bd27a38f6d95a6418cab80b7915b8834b249a | [] | no_license | nttlong/quicky-01 | eb61620e01f04909d564244c46a03ca2b69dfecc | 0f5610aa7027429bdd9ca9b45899a472c372c6cc | refs/heads/master | 2020-03-25T17:45:31.633347 | 2018-11-27T15:02:30 | 2018-11-27T15:02:30 | 143,994,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | from bson import ObjectId
from .. import models
import logging
import threading
from .. import common
from ..views.views import SYS_VW_ValueList
import qmongo
def get_list(args):
searchText = args['data'].get('search', '')
pageSize = args['data'].get('pageSize', 0)
pageIndex = args['data'].get('pageIndex', 20)
sort = args['data'].get('sort', 20)
where = args['data'].get('where')
pageIndex = (lambda pIndex: pIndex if pIndex != None else 0)(pageIndex)
pageSize = (lambda pSize: pSize if pSize != None else 20)(pageSize)
ret = qmongo.models.LMS_SurveyTemplate.aggregate
if where.has_key("survey_type") and where["survey_type"] != None:
ret.match("survey_type == {0}", where["survey_type"])
ret.left_join(qmongo.models.auth_user_info, "created_by", "username", "uc")
ret.left_join(qmongo.models.auth_user_info, "modified_by", "username", "um")
ret.left_join(qmongo.views.SYS_VW_ValueList, "survey_type", "value", "val")
ret.match("val.list_name == {0}", "LMS_LSurveyType")
if (sort != None):
ret.sort(sort)
ret.project(
survey_id = 1,
survey_type = 1,
temp_survey_1 = 1,
temp_survey_2 = 1,
order = 1,
created_by="uc.login_account",
created_on="created_on",
modified_on="switch(case(modified_on!='',modified_on),'')",
modified_by="switch(case(modified_by!='',um.login_account),'')",
active=1,
question_list=1,
survey_type_name="val.caption"
)
data = ret.get_page(pageIndex, pageSize)
return data
def insert_survey_template(data):
result = qmongo.models.LMS_SurveyTemplate.insert(data)
return result
def update_survey_template(data):
code = data['survey_id']
del data['survey_id']
result = qmongo.models.LMS_SurveyTemplate.update(
data,
"survey_id == {0}",
code
)
return result
def delete_survey_template(data):
result = qmongo.models.LMS_SurveyTemplate.delete(
"survey_id in @id",
id = [x['survey_id'] for x in data]
)
return result
def get_list_value_and_caption(data):
result =qmongo.models.LMS_SurveyTemplate.aggregate()\
.project(
_id = 0,
survey_id = 1,
temp_survey_1 = 1
).get_list()
return result | [
"zugeliang2000@gmail.com"
] | zugeliang2000@gmail.com |
63aa115716ff7fe5f2eaf64fe7b6afc4667e0365 | d4280eca1a9badb0a4ad2aa22598616eedece373 | /Tools/repleace.py | 04ff63a750e4afd2bcff0e396159483b20cd2d02 | [] | no_license | Little-Captain/py | 77ec12bb2aaafe9f709a70831266335b03f63663 | 74ba3c3449e7b234a77500a17433e141e68169f7 | refs/heads/master | 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | #!/usr/bin/env python
import os
folders = filter(os.path.isdir, os.listdir())
for folder in folders:
os.chdir(folder)
try:
process = os.popen('git remote -v')
output = process.read()
output = output.split()
output = output[1]
output = output.replace('172.31.103.221', 'www.qxxlgogs.com')
os.system('git remote remove origin')
os.system('git remote add origin %s'%output)
except:
pass
os.chdir('..')
| [
"littlecaptain@foxmail.com"
] | littlecaptain@foxmail.com |
34720c2c7fb2d14e18209377a6429e1ffb825f7b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_getting.py | 12500bcd909462b932ec388267fef20736df99a6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py |
from xai.brain.wordbase.verbs._get import _GET
#calss header
class _GETTING(_GET, ):
def __init__(self,):
_GET.__init__(self)
self.name = "GETTING"
self.specie = 'verbs'
self.basic = "get"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0d5933e334cdf1f8a9aa2c79d39b3949cd95af95 | db303c68682dfd18965a04026ff14e15c1ba6120 | /ch09/ans89.py | 48bb019536f5610904e0a23682919dd0e224fdf9 | [] | no_license | Toshiyana/nlp100v2020 | 1a89f164de0c720da6d42c19b3fa60f8013d662c | 37d4d208d5d527d163356793b630f36eb7595779 | refs/heads/master | 2023-07-15T15:01:28.454515 | 2021-08-21T13:20:03 | 2021-08-21T13:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | from tqdm import tqdm
import torch
from torch import optim
from torchtext import data
from transformers import BertForSequenceClassification
def eval_net(model, data_loader, device='cpu'):
model.eval()
ys = []
ypreds = []
for x, y, _ in data_loader:
with torch.no_grad():
loss, logit = model(input_ids=x, labels=y)
_, y_pred = torch.max(logit, 1)
ys.append(y)
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
print(f'test acc: {(ys == ypreds).sum().item() / len(ys)}')
return
TEXT = data.Field(sequential=True, lower=True, batch_first=True)
LABELS = data.Field(sequential=False, batch_first=True, use_vocab=False)
train, val, test = data.TabularDataset.splits(
path='ch06', train='train2.txt',
validation='valid2.txt', test='test2.txt', format='tsv',
fields=[('TEXT', TEXT), ('LABEL', LABELS)])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
train_iter, val_iter, test_iter = data.Iterator.splits(
(train, val, test), batch_sizes=(64, 64, 64), device=device, repeat=False, sort=False)
TEXT.build_vocab(train, min_freq=2)
LABELS.build_vocab(train)
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4)
model = model.to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)
for epoch in tqdm(range(10)):
losses = []
model.train()
for batch in train_iter:
x, y = batch.TEXT, batch.LABEL
loss, logit = model(input_ids=x, labels=y)
model.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
_, y_pred_train = torch.max(logit, 1)
eval_net(model, test_iter, device)
| [
"upura0@gmail.com"
] | upura0@gmail.com |
ef72c65accec37d5eb0fb33a531bfa3395a4a130 | 74e8b6ad9fa20b6caa61f7ced4825442f671f506 | /curator/__init__.py | a9b39d35f54f3dab18f6bc3ed85d95b68be2e251 | [
"Apache-2.0"
] | permissive | agent001/curator | 2297191ea05903cdca5f2510b6423cf333961ef5 | 86fd4af224bbb1eb7993b754e47cd67e32f10d7a | refs/heads/master | 2021-01-18T23:06:19.334462 | 2016-07-14T16:23:34 | 2016-07-14T16:23:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from .exceptions import *
from .settings import *
from .logtools import *
from .utils import *
from .indexlist import IndexList
from .snapshotlist import SnapshotList
from .actions import *
from .cli import *
from .repomgrcli import *
| [
"aaron@mildensteins.com"
] | aaron@mildensteins.com |
12e4bdeb469eebeac17b5a318bd159f07f6041d2 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_6404600001200128_1/Python/DaniTunes/p1.py | cffa3f03a45fe53db87b1c794440401ea7e6dd94 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!/bin/env python
# google code jam 2015 problem 1
def solve(ms):
a = 0
n = len(ms)
md = 0
for i in range(1, n):
d = ms[i-1] - ms[i]
md = max(d, md)
if d > 0:
a += d
b = 0
for i in range(0, n-1):
b += min(ms[i], md)
return a, b
tests = int(raw_input())
for k in range(tests):
n = int(raw_input())
ms = [int(x) for x in raw_input().split()]
#print n, len(ms), ms
a, b = solve(ms)
print "Case #%d: %d %d" % (k+1, a, b)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
043b8011270462a6381d0d82af17f50853f783bf | ad38b9a924911b3249b9ffec01d78a2b1048fa0d | /动态调试/Immunity Debugger v1.73/Lib/test/test_codecs.py | 8fa88f2f004f4d8c0464c32276983eb47d5b05a9 | [] | no_license | h3len/HackerToolBox | 77c5a45553784d20104db21ac5fe8f840ca519a6 | 4397b0c25cfd0eb3f92484f396745cc664af2531 | refs/heads/master | 2020-04-04T22:57:47.376773 | 2018-10-10T15:43:06 | 2018-10-10T15:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,219 | py | from test import test_support
import unittest
import codecs
import sys, StringIO, _testcapi
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self):
self._buffer = ""
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = ""
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class ReadTest(unittest.TestCase):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read every available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue()
r = codecs.getreader(self.encoding)(q)
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(c)
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), u"")
self.assertEqual(r.bytebuffer, "")
self.assertEqual(r.charbuffer, u"")
# do the check again, this time using a incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# Check whether the rest method works properly
d.reset()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
u"".join(codecs.iterdecode(encoded, self.encoding))
)
def test_readline(self):
def getreader(input):
stream = StringIO.StringIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = u"foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
vw.append((i*200)*u"\3042" + lineend)
vwo.append((i*200)*u"\3042")
self.assertEqual(readalllines("".join(vw), True), "".join(vw))
self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in xrange(80):
for lineend in u"\n \r\n \r \u2028".split():
s = 10*(size*u"a" + lineend + u"xxx\n")
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=True),
size*u"a" + lineend,
)
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=False),
size*u"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = StringIO.StringIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue()
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=False), u"foo")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=False), u"")
self.assertEqual(reader.readline(keepends=False), u"bar")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=False), u"baz")
self.assertEqual(reader.readline(keepends=False), u"")
# Lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=True), u"foo\r")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=True), u"\n")
self.assertEqual(reader.readline(keepends=True), u"bar\r")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=True), u"baz")
self.assertEqual(reader.readline(keepends=True), u"")
writer.write(u"foo\r\n")
self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
def test_bug1098990_a(self):
s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = u"next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), u"")
def test_bug1098990_b(self):
s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = u"stillokay:bbbbxx\r\n"
s4 = u"broken!!!!badbad\r\n"
s5 = u"againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), u"")
class UTF16Test(ReadTest):
    """Tests for the BOM-autodetecting 'utf-16' codec."""
    encoding = "utf-16"
    # u"spamspam" encoded with a little-endian resp. big-endian BOM prefix.
    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
    def test_only_one_bom(self):
        """A stream writer must emit the BOM exactly once, before the first chunk."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assert_(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEquals(f.read(), u"spamspam")
    def test_badbom(self):
        """Input that is nothing but 0xFFFF pseudo-BOMs must raise UnicodeError."""
        s = StringIO.StringIO("\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO("\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        """Feeding one byte at a time must decode incrementally (BOM bytes first)."""
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read => byteorder known
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_errors(self):
        """A truncated (single-byte) input in strict mode must fail to decode."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)
class UTF16LETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-16-le' codec (no BOM handling)."""
    encoding = "utf-16-le"
    def test_partial(self):
        """Each code unit is two bytes; output appears only on even byte counts."""
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_errors(self):
        """A truncated (single-byte) input in strict mode must fail to decode."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
class UTF16BETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-16-be' codec (no BOM handling)."""
    encoding = "utf-16-be"
    def test_partial(self):
        """Each code unit is two bytes; output appears only on even byte counts."""
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )
    def test_errors(self):
        """A truncated (single-byte) input in strict mode must fail to decode."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
class UTF8Test(ReadTest):
    """Tests for the 'utf-8' codec (variable 1-3 byte sequences here)."""
    encoding = "utf-8"
    def test_partial(self):
        """Output appears only once a complete multi-byte sequence has arrived."""
        self.check_partial(
            u"\x00\xff\u07ff\u0800\uffff",
            [
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800\uffff",
            ]
        )
class UTF7Test(ReadTest):
    """Runs the inherited ReadTest suite against 'utf-7'."""
    encoding = "utf-7"
    # No test_partial() yet, because UTF-7 doesn't support it.
class UTF16ExTest(unittest.TestCase):
    """Tests for the extended codecs.utf_16_ex_decode entry point."""
    def test_errors(self):
        """A truncated input in strict mode must fail to decode."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)
    def test_bad_args(self):
        """Calling with no arguments at all is a TypeError."""
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode (buffer-protocol input)."""
    def test_array(self):
        """Any read-buffer object (here an array of chars) is accepted."""
        import array
        self.assertEqual(
            codecs.readbuffer_encode(array.array("c", "spam")),
            ("spam", 4)
        )
    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), ("", 0))
    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class CharBufferTest(unittest.TestCase):
    """Tests for codecs.charbuffer_encode (character-buffer input)."""
    def test_string(self):
        self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))
    def test_empty(self):
        self.assertEqual(codecs.charbuffer_encode(""), ("", 0))
    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.charbuffer_encode)
        self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
    """Tests for 'utf-8-sig': like utf-8 but skips one leading BOM on decode."""
    encoding = "utf-8-sig"
    def test_partial(self):
        """Only the first BOM is swallowed; a second one is emitted as U+FEFF."""
        self.check_partial(
            u"\ufeff\x00\xff\u07ff\u0800\uffff",
            [
                u"",
                u"",
                u"", # First BOM has been read and skipped
                u"",
                u"",
                u"\ufeff", # Second BOM has been read and emitted
                u"\ufeff\x00", # "\x00" read and emitted
                u"\ufeff\x00", # First byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
            ]
        )
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode."""
    def test_empty(self):
        self.assertEquals(codecs.escape_decode(""), ("", 0))
class RecodingTest(unittest.TestCase):
    """Smoke test for codecs.EncodedFile recoding between two encodings."""
    def test_recoding(self):
        f = StringIO.StringIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        f2.write(u"a")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
(u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn"),
# B Chinese (simplified):
(u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye"),
# C Chinese (traditional):
(u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb"),
# D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
(u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
u"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a"),
# E Hebrew:
(u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
u"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b"),
# F Hindi (Devanagari):
(u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
u"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
#(G) Japanese (kanji and hiragana):
(u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
(u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
(u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
u"\u0438",
"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
(u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
u"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
(u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
u"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
#(L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
u"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
(u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
(u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
(u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
u"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
(u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
(u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp"),
# (S) -> $1.00 <-
(u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
u"\u003C\u002D",
"-> $1.00 <--")
]
# Import-time sanity check: every punycode testcase must be a
# (unicode, punycode-encoded) pair; dump anything malformed for inspection.
for i in punycode_testcases:
    if len(i)!=2:
        print repr(i)
class PunycodeTest(unittest.TestCase):
    """Round-trips the RFC 3492 sample vectors through the 'punycode' codec."""
    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            self.assertEquals(uni.encode("punycode").lower(), puny.lower())
    def test_decode(self):
        for uni, puny in punycode_testcases:
            self.assertEquals(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
    """Tests for the 'unicode_internal' codec; all three tests are UCS-4 only."""
    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds.
        if sys.maxunicode > 0xffff:
            ok = [
                ("\x00\x10\xff\xff", u"\U0010ffff"),
                ("\x00\x00\x01\x01", u"\U00000101"),
                ("", u""),
            ]
            not_ok = [
                "\x7f\xff\xff\xff",
                "\x80\x00\x00\x00",
                "\x81\x00\x00\x00",
                "\x00",
                "\x00\x00\x00\x00\x00",
            ]
            # Test bytes are written big-endian above; flip them on LE hosts.
            for internal, uni in ok:
                if sys.byteorder == "little":
                    internal = "".join(reversed(internal))
                self.assertEquals(uni, internal.decode("unicode_internal"))
            for internal in not_ok:
                if sys.byteorder == "little":
                    internal = "".join(reversed(internal))
                self.assertRaises(UnicodeDecodeError, internal.decode,
                    "unicode_internal")
    def test_decode_error_attributes(self):
        """The UnicodeDecodeError raised must carry encoding/object/start/end."""
        if sys.maxunicode > 0xffff:
            try:
                "\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
            except UnicodeDecodeError, ex:
                self.assertEquals("unicode_internal", ex.encoding)
                self.assertEquals("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
                self.assertEquals(4, ex.start)
                self.assertEquals(8, ex.end)
            else:
                self.fail()
    def test_decode_callback(self):
        """A registered 'ignore' error handler must skip the bad 4-byte unit."""
        if sys.maxunicode > 0xffff:
            codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
            decoder = codecs.getdecoder("unicode_internal")
            ab = u"ab".encode("unicode_internal")
            ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                "UnicodeInternalTest")
            self.assertEquals((u"ab", 12), ignored)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
'\xb8\x8f\xef\xbb\xbf',
'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
('CAFE',
'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
('\xc3\x9f',
'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
('\xc4\xb0',
'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
('\xc5\x83\xcd\xba',
'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
('j\xcc\x8c\xc2\xa0\xc2\xaa',
'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
('\xe1\xbe\xb7',
'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
('\xc7\xb0',
'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
('\xce\x90',
'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
('\xce\xb0',
'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
('\xe1\xba\x96',
'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
('\xe1\xbd\x96',
'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(' ',
' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
('\xc2\xa0',
' '),
# 3.16 Non-ASCII multibyte space character U+1680.
('\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
('\xe2\x80\x80',
' '),
# 3.18 Zero Width Space U+200b.
('\xe2\x80\x8b',
''),
# 3.19 Non-ASCII multibyte space character U+3000.
('\xe3\x80\x80',
' '),
# 3.20 ASCII control characters U+0010 U+007F.
('\x10\x7f',
'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
('\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
('\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
('\xef\xbb\xbf',
''),
# 3.24 Non-ASCII control character U+1D175.
('\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
('\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
('\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
('\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
('\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
('\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
('\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
('\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
('\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
('\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
('\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
('foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
('foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
('foo\xef\xb9\xb6bar',
'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
('\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
('\xd8\xa71\xd8\xa8',
'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#('\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
'\xaa\xce\xb0\xe2\x80\x80',
'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
'\x80',
'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
    """Runs the libidn nameprep vectors against encodings.idna.nameprep."""
    def test_nameprep(self):
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped
                continue
            # The Unicode strings are given in UTF-8
            orig = unicode(orig, "utf-8")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = unicode(prepped, "utf-8")
                try:
                    self.assertEquals(nameprep(orig), prepped)
                except Exception,e:
                    raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
    """Tests for the 'idna' codec: whole-string, stream and incremental APIs.

    The incremental tests check that output is produced label-by-label, i.e.
    only once a dot (or the final flush) completes a label.
    """
    def test_builtin_decode(self):
        self.assertEquals(unicode("python.org", "idna"), u"python.org")
        self.assertEquals(unicode("python.org.", "idna"), u"python.org.")
        self.assertEquals(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
        self.assertEquals(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
    def test_builtin_encode(self):
        self.assertEquals(u"python.org".encode("idna"), "python.org")
        self.assertEquals("python.org.".encode("idna"), "python.org.")
        self.assertEquals(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
        self.assertEquals(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
    def test_stream(self):
        import StringIO
        r = codecs.getreader("idna")(StringIO.StringIO("abc"))
        r.read(3)
        self.assertEquals(r.read(), u"")
    def test_incremental_decode(self):
        self.assertEquals(
            "".join(codecs.iterdecode("python.org", "idna")),
            u"python.org"
        )
        self.assertEquals(
            "".join(codecs.iterdecode("python.org.", "idna")),
            u"python.org."
        )
        self.assertEquals(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )
        self.assertEquals(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEquals(decoder.decode("xn--xam", ), u"")
        self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEquals(decoder.decode(u"rg"), u"")
        self.assertEquals(decoder.decode(u"", True), u"org")
        decoder.reset()
        self.assertEquals(decoder.decode("xn--xam", ), u"")
        self.assertEquals(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEquals(decoder.decode("rg."), u"org.")
        self.assertEquals(decoder.decode("", True), u"")
    def test_incremental_encode(self):
        self.assertEquals(
            "".join(codecs.iterencode(u"python.org", "idna")),
            "python.org"
        )
        self.assertEquals(
            "".join(codecs.iterencode(u"python.org.", "idna")),
            "python.org."
        )
        self.assertEquals(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )
        self.assertEquals(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEquals(encoder.encode(u"\xe4x"), "")
        self.assertEquals(encoder.encode(u"ample.org"), "xn--xample-9ta.")
        self.assertEquals(encoder.encode(u"", True), "org")
        encoder.reset()
        self.assertEquals(encoder.encode(u"\xe4x"), "")
        self.assertEquals(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
        self.assertEquals(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
    """Argument-validation tests for the top-level codecs module functions."""
    def test_decode(self):
        self.assertEquals(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
                          u'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        self.assertEquals(codecs.decode('abc'), u'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
    def test_encode(self):
        self.assertEquals(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
                          '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        self.assertEquals(codecs.encode(u'abc'), 'abc')
        # NOTE(review): u'\xffff' is u'\xff' followed by the literal "ff"
        # (probably u'\uffff' was intended); either value is unencodable
        # in ASCII, so the assertion still holds.
        self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)
    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")
    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")
    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")
    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")
class StreamReaderTest(unittest.TestCase):
    """Tests StreamReader.readlines() over a UTF-8 byte stream."""
    def setUp(self):
        self.reader = codecs.getreader('utf-8')
        self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
    def test_readlines(self):
        f = self.reader(self.stream)
        self.assertEquals(f.readlines(), [u'\ud55c\n', u'\uae00'])
class Str2StrTest(unittest.TestCase):
    """str-to-str codecs (base64_codec) must yield str, not unicode, from readers."""
    def test_read(self):
        sin = "\x80".encode("base64_codec")
        reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
        sout = reader.read()
        self.assertEqual(sout, "\x80")
        self.assert_(isinstance(sout, str))
    def test_readline(self):
        sin = "\x80".encode("base64_codec")
        reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
        sout = reader.readline()
        self.assertEqual(sout, "\x80")
        self.assert_(isinstance(sout, str))
all_unicode_encodings = [
"ascii",
"base64_codec",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hex_codec",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"rot_13",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encodings work only with str, not unicode
all_string_encodings = [
"quopri_codec",
"string_escape",
"uu_codec",
]
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"base64_codec",
"hex_codec",
"punycode",
"unicode_internal"
]
try:
import bz2
except ImportError:
pass
else:
all_unicode_encodings.append("bz2_codec")
broken_unicode_with_streams.append("bz2_codec")
try:
import zlib
except ImportError:
pass
else:
all_unicode_encodings.append("zlib_codec")
broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
    """Cross-codec smoke tests: every codec in all_unicode_encodings must
    round-trip a simple string through its stateless, stream, incremental
    (Python- and C-level) and iterencode/iterdecode interfaces."""
    def test_basics(self):
        s = u"abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            (bytes, size) = codecs.getencoder(encoding)(s)
            if encoding != "unicode_internal":
                self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
            (chars, size) = codecs.getdecoder(encoding)(bytes)
            self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
            if encoding not in broken_unicode_with_streams:
                # check stream reader/writer
                q = Queue()
                writer = codecs.getwriter(encoding)(q)
                encodedresult = ""
                for c in s:
                    writer.write(c)
                encodedresult += q.read()
                q = Queue()
                reader = codecs.getreader(encoding)(q)
                decodedresult = u""
                for c in encodedresult:
                    q.write(c)
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                # check incremental decoder/encoder (fetched via the Python
                # and C API) and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = ""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode(u"", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += decoder.decode(c)
                    decodedresult += decoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                    # check C API
                    encodedresult = ""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode(u"", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(c)
                    decodedresult += cdecoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
                    # check iterencode()/iterdecode()
                    result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
                    # check iterencode()/iterdecode() with empty string
                    result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
                    self.assertEqual(result, u"")
    def test_seek(self):
        # all codecs should be able to encode these
        s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_streams:
                continue
            reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
            for t in xrange(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                line = reader.readline()
                self.assertEqual(s[:len(line)], line)
    def test_bad_decode_args(self):
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)
    def test_bad_encode_args(self):
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)
    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
    """Round-trip smoke tests for str-only codecs in all_string_encodings."""
    def test_basics(self):
        s = "abc123"
        for encoding in all_string_encodings:
            (bytes, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s))
            (chars, size) = codecs.getdecoder(encoding)(bytes)
            self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
    """Tests charmap_decode with a unicode string used as the mapping table
    (byte value indexes the string; out-of-range/U+FFFE entries are unmapped)."""
    def test_decode_with_string_map(self):
        self.assertEquals(
            codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
            (u"abc", 3)
        )
        self.assertEquals(
            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
            (u"ab\ufffd", 3)
        )
        self.assertEquals(
            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
            (u"ab\ufffd", 3)
        )
        self.assertEquals(
            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
            (u"ab", 3)
        )
        self.assertEquals(
            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
            (u"ab", 3)
        )
        allbytes = "".join(chr(i) for i in xrange(256))
        self.assertEquals(
            codecs.charmap_decode(allbytes, "ignore", u""),
            (u"", len(allbytes))
        )
def test_main():
    """Entry point for regrtest: run every TestCase defined in this module."""
    test_cases = (
        UTF16Test,
        UTF16LETest,
        UTF16BETest,
        UTF8Test,
        UTF8SigTest,
        UTF7Test,
        UTF16ExTest,
        ReadBufferTest,
        CharBufferTest,
        EscapeDecodeTest,
        RecodingTest,
        PunycodeTest,
        UnicodeInternalTest,
        NameprepTest,
        IDNACodecTest,
        CodecsModuleTest,
        StreamReaderTest,
        Str2StrTest,
        BasicUnicodeTest,
        BasicStrTest,
        CharmapTest,
    )
    test_support.run_unittest(*test_cases)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| [
"redleavessun@gmail.com"
] | redleavessun@gmail.com |
23953d581f20bd506f4a54c433e69cc12562b56e | 5864e86954a221d52d4fa83a607c71bacf201c5a | /uthread/__init__.py | 0baa971480f6994ce969c32ebb529f9c2f5428d3 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,798 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\uthread\__init__.py
__version__ = '0.1'
__license__ = 'Python Microthread Library version 0.1\nCopyright (C)2000 Will Ware, Christian Tismer\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the names of the authors not be\nused in advertising or publicity pertaining to distribution of the\nsoftware without specific, written prior permission.\n\nWILL WARE AND CHRISTIAN TISMER DISCLAIM ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS. IN NO EVENT SHALL WILL WARE OR CHRISTIAN TISMER BE LIABLE FOR\nANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.'
import stackless
import sys
import blue
import bluepy
import weakref
import collections
import contextlib
import logmodule as log
import blue.heapq as heapq
import locks
import threading
from stacklesslib.util import atomic
SEC = 10000000L
MIN = 60 * SEC
tasks = []
schedule = stackless.schedule
stackless.globaltrace = True
ctxtfilter = bluepy.ctxtfilter
def new(func, *args, **kw):
    """Spawn *func* as a new microthread and return the created tasklet.

    Thin wrapper over bluepy.CreateTaskletExt; *args/**kw are forwarded.
    """
    return bluepy.CreateTaskletExt(func, *args, **kw)
class TaskletExtTimeoutable(bluepy.TaskletExt):
    """TaskletExt that can be joined with a timeout.

    doneChannel: channel the wrapper sends (result, exc_info) on at exit.
    isTimedOut: set True by waitForJoinable so the wrapper skips the send.
    """
    __slots__ = bluepy.TaskletExt.__slots__ + ['doneChannel', 'isTimedOut']
def newJoinable(func, *args, **kw):
    """Spawn *func* in a TaskletExtTimeoutable that can be joined.

    The tasklet runs *func* and, unless the joiner already timed out,
    sends (result, exc_info-or-None) on its doneChannel for
    waitForJoinable() to pick up. Returns the tasklet.
    """
    def wrapper(selfTasklet, *args, **kw):
        exce = None
        result = None
        try:
            result = func(*args, **kw)
        except Exception as e:
            exce = sys.exc_info()
        try:
            if not selfTasklet.isTimedOut:
                # Hand the outcome to the joiner; called unbound so the
                # channel attribute itself is the first argument.
                stackless.channel.send(selfTasklet.doneChannel, (result, exce))
            else:
                log.general.Log("newJoinable timed out on function '" + str(func) + "' with args '" + strx(args)[:2048] + "'", log.LGWARN)
        finally:
            # Drop the exc_info triple to avoid keeping a traceback cycle alive.
            exce = None
        return result
    # Build a profiling context name: strip the object address from repr(func)
    # and prefix the current timer context.
    ctx = ctxtfilter.sub('at (snip)', repr(func))
    ctx = blue.pyos.taskletTimer.GetCurrent().split('^')[-1] + '^' + ctx
    t = TaskletExtTimeoutable(ctx, wrapper)
    t.isTimedOut = False
    t.doneChannel = stackless.channel()
    t(t, *args, **kw)
    return t
def waitForJoinable(t, timeout):
    """Join tasklet *t* (created by newJoinable), waiting at most *timeout*.

    Returns the tasklet's result, re-raises the exception it died with, or
    raises TaskletWaitTimeout after marking the tasklet timed out.
    NOTE(review): ChannelWait is presumably uthread.ChannelWait returning
    (ok, value) -- confirm; its timeout unit is not visible here.
    """
    resultOk, data = ChannelWait(t.doneChannel, timeout=timeout)
    if resultOk:
        result, exce = data
        if exce is not None:
            try:
                raise exce[0], exce[1], exce[2]
            finally:
                # Clear the exc_info triple to break the traceback cycle.
                exce = None
        return result
    # Timed out: tell the tasklet not to send on doneChannel (nobody listens).
    t.isTimedOut = True
    raise TaskletWaitTimeout()
class TaskletWaitTimeout(Exception):
    """Raised by waitForJoinable when the joined tasklet does not finish in time."""
    pass
idIndex = 0
def uniqueId():
    """Return the next value of the module-global monotonically increasing id."""
    global idIndex
    value = idIndex
    idIndex = value + 1
    return value
def irandom(n):
    """Return a pseudo-random integer uniformly chosen from [0, n)."""
    import random
    return random.randrange(0, n)
semaphores = weakref.WeakKeyDictionary()
def GetSemaphores():
return semaphores
class Semaphore:
    """Counting semaphore for stackless tasklets.

    Waiters block on a stackless channel; in *strict* mode a tasklet that
    already holds the semaphore may not re-acquire it while it is exhausted
    (deadlock guard), and only a holder may release.
    """
    __guid__ = 'uthread.Semaphore'
    def __init__(self, semaphoreName = None, maxcount = 1, strict = True):
        self.semaphoreName = semaphoreName
        self.maxcount = maxcount
        # Number of acquisitions still available.
        self.count = maxcount
        self.waiting = stackless.channel()
        self.n_waiting = 0
        # preference 0: neither sender nor receiver is scheduled preferentially.
        self.waiting.preference = 0
        # Tasklets currently holding the semaphore.
        self.threads = []
        # Wallclock time of the most recent acquisition, None when free.
        self.lockedWhen = None
        self.strict = strict
        locks.Register(self)
    def IsCool(self):
        """True when fully released and nobody is waiting."""
        return self.count == self.maxcount and not self.n_waiting
    def __repr__(self):
        return '<Semaphore %r, t:%f at %#x>' % (self.semaphoreName, self.LockedFor(), id(self))
    def __del__(self):
        if not self.IsCool():
            log.general.Log("Semaphore '" + str(self.semaphoreName) + "' is being destroyed in a locked or waiting state", 4, 0)
    def acquire(self):
        """Block until a slot is free, then take it (atomic w.r.t. tasklets)."""
        with atomic():
            if self.strict:
                if self.count <= 0 and stackless.getcurrent() in self.threads:
                    raise RuntimeError, 'tasklet deadlock, acquiring tasklet holds strict semaphore'
            while self.count == 0:
                self.n_waiting += 1
                try:
                    self.waiting.receive()
                except:
                    # Interrupted while waiting: wake another waiter if a slot
                    # opened up, then propagate.
                    self._safe_pump()
                    raise
                finally:
                    self.n_waiting -= 1
            self.count -= 1
            self.lockedWhen = blue.os.GetWallclockTime()
            self.threads.append(stackless.getcurrent())
    claim = acquire
    def try_acquire(self):
        """Take a slot without blocking; return True on success."""
        with atomic():
            if self.strict:
                if self.count <= 0 and stackless.getcurrent() in self.threads:
                    raise RuntimeError, 'tasklet deadlock, acquiring tasklet holds strict semaphore'
            if self.count > 0:
                self.count -= 1
                self.lockedWhen = blue.os.GetWallclockTime()
                self.threads.append(stackless.getcurrent())
                return True
            return False
    def release(self, override = False):
        """Release one slot and wake waiters; strict mode requires the caller
        to be a holder unless *override* is set."""
        with atomic():
            if self.strict and not override:
                if stackless.getcurrent() not in self.threads:
                    raise RuntimeError, 'wrong tasklet releasing strict semaphore'
            self.count += 1
            self.threads.remove(stackless.getcurrent())
            self.lockedWhen = None
            self._pump()
    def _safe_pump(self):
        try:
            self._pump()
        except Exception:
            pass
    def _pump(self):
        # Wake one waiter per available slot (channel balance is negative
        # when receivers are blocked).
        for i in range(min(self.count, -self.waiting.balance)):
            self.waiting.send(None)
    def __enter__(self):
        self.acquire()
    def __exit__(self, exc, val, tb):
        self.release()
    def WaitingTasklets(self):
        """Snapshot of tasklets blocked on the waiting channel."""
        r = []
        first = next = self.waiting.queue
        while next:
            r.append(next)
            next = next.next
            if next == first:
                break
        return r
    def HoldingTasklets(self):
        return self.threads[:]
    def LockedAt(self):
        return self.lockedWhen
    def LockedFor(self):
        """Seconds since last acquisition, or -1.0 when free."""
        if self.lockedWhen:
            return (blue.os.GetWallclockTime() - self.lockedWhen) * 1e-07
        else:
            return -1.0
class CriticalSection(Semaphore):
    """Reentrant mutex built on Semaphore: the holding tasklet (or one that
    inherits its locks via locks.Inherits) may re-enter without deadlocking."""
    __guid__ = 'uthread.CriticalSection'
    def __init__(self, semaphoreName = None, strict = True):
        # NOTE(review): the *strict* argument is accepted but never forwarded
        # to Semaphore.__init__ - confirm whether that is intentional.
        Semaphore.__init__(self, semaphoreName)
        self.__reentrantRefs = 0
    def __repr__(self):
        return '<CriticalSection %r, t:%f at %#x>' % (self.semaphoreName, self.LockedFor(), id(self))
    def _owns(self):
        # True when the current tasklet holds the lock, directly or through
        # lock-ownership inheritance.
        if stackless.getcurrent() in self.threads:
            return True
        for t in self.threads:
            if locks.Inherits(t):
                return True
        return False
    def acquire(self):
        if self.count <= 0 and self._owns():
            # Re-entry by the owner: just bump the recursion counter.
            self.__reentrantRefs += 1
        else:
            Semaphore.acquire(self)
    def try_acquire(self):
        if self.count <= 0 and self._owns():
            self.__reentrantRefs += 1
            return True
        else:
            return Semaphore.try_acquire(self)
    def release(self):
        # Unwind nested re-entries first; only the outermost release actually
        # frees the underlying semaphore.
        if self.__reentrantRefs:
            if not self._owns():
                raise RuntimeError, 'wrong tasklet releasing reentrant CriticalSection'
            self.__reentrantRefs -= 1
        else:
            Semaphore.release(self)
def FNext(f):
    """Starting from frame *f*, follow the scheduled tasklets to the deepest
    frame currently executing on top of it and return that frame.

    Used by the lock diagnostics to find what a blocked call chain is doing.
    """
    first = stackless.getcurrent()
    try:
        cursor = first.next
        while cursor != first:
            if cursor.frame.f_back == f:
                # Another tasklet's frame sits directly on top of f: recurse.
                return FNext(cursor.frame)
            cursor = cursor.next
        return f
    finally:
        # Drop the tasklet references so this helper does not pin them alive.
        first = None
        cursor = None
class RWLock(object):
    """Reader/writer lock for stackless tasklets.

    ``state`` > 0 counts active readers, < 0 counts the (reentrant) writer's
    nesting depth, 0 means free.  Blocked readers/writers wait on ``rchan`` /
    ``wchan``; writers are preferred when the lock is pumped.
    """
    __guid__ = 'uthread.RWLock'
    def __init__(self, lockName = ''):
        self.name = lockName
        self.rchan = stackless.channel()
        self.wchan = stackless.channel()
        self.rchan.preference = self.wchan.preference = 0
        self.state = 0
        self.tasklets = []
        self.lockedWhen = None
        locks.Register(self)
    def __repr__(self):
        return '<RWLock %r, state:%d, rdwait:%d, wrwait:%d, t:%f at %#x>' % (self.name,
         self.state,
         -self.rchan.balance,
         -self.wchan.balance,
         self.LockedFor(),
         id(self))
    def RDLock(self):
        """Acquire a read lock, blocking while a writer holds or waits."""
        if not self.TryRDLock():
            self.rchan.receive()
    def TryRDLock(self):
        if self.state >= 0:
            # Let new readers in unless a writer is queued (writer priority),
            # except that an existing holder may always add a read lock.
            if self.wchan.balance == 0 or stackless.getcurrent() in self.tasklets:
                self.state += 1
                self._AddTasklet()
                return True
            return False
        else:
            # Write-locked: only succeeds as a reentrant write acquisition.
            return self.TryWRLock()
    def WRLock(self):
        """Acquire the write lock, blocking until exclusive access is granted."""
        if not self.TryWRLock():
            if stackless.getcurrent() in self.tasklets:
                raise RuntimeError('Deadlock. Trying to WRLock while holding a RDLock')
            self.wchan.receive()
    def TryWRLock(self):
        # Free, or already write-locked by this very tasklet (reentrant).
        if self.state == 0 or self.state < 0 and stackless.getcurrent() == self.tasklets[0]:
            self.state -= 1
            self._AddTasklet()
            return True
        return False
    def Unlock(self, tasklet = None):
        """Release one read or write hold owned by *tasklet* (default: current)."""
        if tasklet is None:
            tasklet = stackless.getcurrent()
        try:
            self.tasklets.remove(tasklet)
        except ValueError:
            raise RuntimeError('Trying to release a rwlock without a matching lock!')
        if self.state > 0:
            self.state -= 1
        else:
            self.state += 1
        if self.state == 0:
            self.lockedWhen = None
            self._Pump()
    def _AddTasklet(self, tasklet = None):
        if not tasklet:
            tasklet = stackless.getcurrent()
        self.tasklets.append(tasklet)
        if not self.lockedWhen:
            self.lockedWhen = blue.os.GetWallclockTime()
    def _Pump(self):
        # Hand the lock over: one waiting writer wins outright; otherwise wake
        # every waiting reader that can be admitted.
        while True:
            chan = self.wchan
            if chan.balance:
                if self.state == 0:
                    self.state = -1
                    self._AddTasklet(chan.queue)
                    chan.send(None)
                return
            else:
                chan = self.rchan
                if chan.balance and self.state >= 0:
                    self.state += 1
                    self._AddTasklet(chan.queue)
                    chan.send(None)
                    continue
                return
    def __enter__(self):
        # `with rwlock:` takes a READ lock; use WRLocked() for writes.
        self.RDLock()
    def __exit__(self, e, v, tb):
        self.Unlock()
    class WRCtxt(object):
        # Context-manager wrapper that takes the write lock instead.
        def __init__(self, lock):
            self.lock = lock
        def __enter__(self):
            self.lock.WRLock()
        def __exit__(self, e, v, tb):
            self.lock.Unlock()
    def WRLocked(self):
        """Return a context manager acquiring this lock for writing."""
        return self.WRCtxt(self)
    def RDLocked(self):
        """Return a context manager acquiring this lock for reading."""
        return self
    def IsCool(self):
        # Free when no readers and no writer are active.
        return self.state == 0
    @property
    def thread(self):
        # First holding tasklet, or None when free.
        if self.tasklets:
            return self.tasklets[0]
    def WaitingTasklets(self):
        """Snapshot of every tasklet blocked on either channel."""
        r = []
        for chan in (self.rchan, self.wchan):
            first = t = chan.queue
            while t:
                r.append(t)
                t = t.next
                if t is first:
                    break
        return r
    def HoldingTasklets(self):
        return list(self.tasklets)
    def LockedAt(self):
        return self.lockedWhen
    def IsWRLocked(self):
        return self.state < 0
    def IsRDLocked(self):
        return self.state > 0
    def LockedFor(self):
        # Seconds held so far (wallclock ticks are 100ns), or -1.0 when free.
        if self.lockedWhen:
            return (blue.os.GetWallclockTime() - self.lockedWhen) * 1e-07
        else:
            return -1.0
# Registry of live Channel instances, keyed weakly so entries disappear as
# soon as the channel itself is garbage-collected.
channels = weakref.WeakKeyDictionary()
def GetChannels():
    """Return the module-wide weak registry of Channel instances."""
    return channels
class Channel(stackless.channel):
    """stackless.channel that carries a name and registers itself (weakly)
    in the module-level ``channels`` registry for diagnostics."""
    __guid__ = 'uthread.Channel'
    def __new__(cls, channelName = None):
        # channelName is consumed by __init__; __new__ must still accept it
        # because both receive the constructor arguments.
        return stackless.channel.__new__(cls)
    def __init__(self, channelName = None):
        stackless.channel.__init__(self)
        self.channelName = channelName
        channels[self] = 1
class FIFO(object):
    """Amortised O(1) first-in-first-out queue built from two stacks.

    ``data[1]`` is the inbox collecting pushed items; ``data[0]`` is the
    outbox holding older items in reversed order, so the queue front sits at
    its tail and can be popped in O(1).
    """
    __slots__ = ('data',)
    def __init__(self):
        self.data = [[], []]
    def push(self, v):
        """Append *v* to the back of the queue."""
        self.data[1].append(v)
    def pop(self):
        """Remove and return the front item (IndexError when empty)."""
        stacks = self.data
        if not stacks[0]:
            # Outbox drained: promote the inbox, reversed so the oldest item
            # ends up last.
            stacks.reverse()
            stacks[0].reverse()
        return stacks[0].pop()
    def front(self):
        """Return the front item without removing it (IndexError when empty)."""
        stacks = self.data
        return stacks[0][-1] if stacks[0] else stacks[1][0]
    def __contains__(self, o):
        return any(o in stack for stack in self.data)
    def __len__(self):
        return sum(len(stack) for stack in self.data)
    def clear(self):
        """Drop every queued item."""
        self.data = [[], []]
    def remove(self, o):
        """Remove the earliest occurrence of *o* (ValueError when absent)."""
        stacks = self.data
        if stacks[0]:
            # Fold the outbox back into the inbox in FIFO order so a single
            # list.remove() deletes the earliest occurrence.
            stacks[0].reverse()
            stacks[1] = stacks[0] + stacks[1]
            stacks[0] = []
        stacks[1].remove(o)
class Queue(FIFO):
    """FIFO whose get() blocks the calling tasklet on a channel when empty."""
    __guid__ = 'uthread.Queue'
    __slots__ = 'chan'
    def __init__(self, preference = 0):
        FIFO.__init__(self)
        self.chan = stackless.channel()
        self.chan.preference = preference
    def put(self, x):
        # Hand the item straight to a blocked getter when one is waiting
        # (negative channel balance); otherwise buffer it in the FIFO.
        if self.chan.balance < 0:
            self.chan.send(x)
        else:
            self.push(x)
    non_blocking_put = put
    def get(self):
        """Return the next item, blocking the calling tasklet while empty."""
        if len(self):
            return self.pop()
        else:
            return self.chan.receive()
def LockCheck():
    """Watchdog loop: roughly once a minute, report locks that have been held
    for over a minute while other tasklets are waiting on them."""
    while 1:
        each = None
        blue.pyos.synchro.SleepWallclock(60660)
        now = blue.os.GetWallclockTime()
        try:
            for each in locks.GetLocks():
                if each.LockedAt() and each.WaitingTasklets():
                    problem = now - each.LockedAt()
                    if problem >= 1 * MIN:
                        break
            else:
                # No contended long-held lock found this round.
                problem = 0
            if not problem:
                continue
            with log.general.open(log.LGERR) as s:
                print >> s, 'Locks have been held for a long time (%ss). Locking conflict log' % (problem / SEC)
                foundCycles = locks.LockCycleReport(out=s, timeLimit=60)
                if not foundCycles:
                    print >> s, 'logical analysis found no cycles.'
                if not foundCycles:
                    # Fall back to a raw dump when no deadlock cycle was proven.
                    print >> s, 'Full dump of locks with waiting tasklets:'
                    locks.OldLockReport(None, out=s)
                print >> s, 'End of locking conflict log'
        except StandardError:
            # The watchdog must never die; log and keep looping.
            log.LogException()
            sys.exc_clear()
def PoolWorker(ctx, func, *args, **keywords):
    """Spawn func(*args, **keywords) on a tasklet with cleared local storage."""
    return PoolWithoutTheStars(ctx, func, args, keywords, True)
def PoolWorkerWithoutTheStars(ctx, func, args, keywords):
    """As PoolWorker, but taking args/keywords as explicit containers."""
    return PoolWithoutTheStars(ctx, func, args, keywords, True)
def PoolWithoutTheStars(ctx, func, args, kw, notls = False):
    """Create and start a bluepy tasklet running func(*args, **kw).

    The context string is nested under the current tasklet-timer context so
    profiling output aggregates correctly; *notls* clears inherited
    tasklet-local storage first.  Returns the started tasklet.
    """
    if not ctx:
        # Derive a context from the function's repr, snipping the address.
        ctx = ctxtfilter.sub('at (snip)', repr(func))
    if ctx[0] != '^':
        prefix = blue.pyos.taskletTimer.GetCurrent().split('^')[-1]
        ctx = prefix + '^' + ctx
    tasklet = bluepy.TaskletExt(ctx, func)
    if notls:
        tasklet.localStorage.clear()
    tasklet(*args, **kw)
    return tasklet
def Pool(ctx, func, *args, **keywords):
    """Spawn func on a pooled tasklet, keeping inherited local storage."""
    return PoolWithoutTheStars(ctx, func, args, keywords)
def ParallelHelper(ch, idx, what):
    """Worker for Parallel(): run one job tuple and report the outcome.

    Puts (1, (idx, result)) on success or (0, sys.exc_info()) on failure
    onto the shared queue, inheriting the parent tasklet's lock ownership
    for the duration of the call.
    """
    queue, parentTasklet = ch
    try:
        with locks.Inheritance(parentTasklet):
            # A job is (func, args) or (func, args, kwargs).
            if len(what) > 2:
                result = what[0](*what[1], **what[2])
            else:
                result = what[0](*what[1])
        ret = (1, (idx, result))
    except:
        ret = (0, sys.exc_info())
    queue.put(ret)
def Parallel(funcs, exceptionHandler = None, maxcount = 30, contextSuffix = None, funcContextSuffixes = None):
    """Run every (func, args[, kwargs]) tuple in *funcs* on worker tasklets.

    At most *maxcount* workers run concurrently; a new job starts whenever one
    finishes.  Results come back in input order.  The last captured error is
    re-raised with its original traceback after all jobs report, unless
    *exceptionHandler* is given to consume it instead.
    """
    if not funcs:
        return
    context = blue.pyos.taskletTimer.GetCurrent()
    if contextSuffix:
        context += '::' + contextSuffix
    # Shared (result queue, parent tasklet) handed to every worker.
    ch = (Queue(preference=-1), stackless.getcurrent())
    n = len(funcs)
    ret = [None] * n
    if n > maxcount:
        n = maxcount
    for i in xrange(n):
        funcContext = context
        if funcContextSuffixes is not None:
            funcContext = funcContext + '::' + funcContextSuffixes[i]
        Pool(funcContext, ParallelHelper, ch, i, funcs[i])
    error = None
    try:
        for i in xrange(len(funcs)):
            ok, bunch = ch[0].get()
            if ok:
                idx, val = bunch
                ret[idx] = val
            else:
                error = bunch
            if n < len(funcs):
                # A worker slot freed up: launch the next pending job.
                # NOTE(review): the suffix is indexed by i (the completion
                # counter) while the job launched is funcs[n] - confirm.
                funcContext = context
                if funcContextSuffixes is not None:
                    funcContext = funcContext + '::' + funcContextSuffixes[i]
                Pool(funcContext, ParallelHelper, ch, n, funcs[n])
                n += 1
        if error:
            if exceptionHandler:
                exceptionHandler(error[1])
            else:
                raise error[0], error[1], error[2]
        return ret
    finally:
        # Break the reference to the captured exc_info (frames/traceback).
        del error
class TaskletSequencer(object):
    """Gate that lets cooperating tasklets through Pass() in strictly
    increasing sequence-number order, parking early arrivals on a heap."""
    def __init__(self, expected = None):
        self.queue = []
        self.expected = expected
        self.lastThrough = None
        self.closed = False
    def State(self):
        # Debug snapshot: [next expected number, last number passed, closed?].
        return [self.expected, self.lastThrough, self.closed]
    def IsClosed(self):
        return self.closed
    def close(self):
        """Stop sequencing and reschedule every parked tasklet."""
        self.closed = True
        while self.queue:
            heapq.heappop(self.queue)[1].insert()
    def Assert(self, expression):
        if not expression:
            raise AssertionError(expression)
    def Pass(self, sequenceNo):
        """Block until it is *sequenceNo*'s turn, then advance the gate and
        wake any directly consecutive numbers already parked."""
        if self.closed:
            return
        if self.expected is None:
            # First caller establishes the starting sequence number.
            self.expected = sequenceNo
        if sequenceNo < self.expected:
            # Late arrival: its slot already went by; let it straight through.
            return
        while sequenceNo > self.expected:
            me = (sequenceNo, stackless.getcurrent())
            heapq.heappush(self.queue, me)
            self.OnSleep(sequenceNo)
            stackless.schedule_remove()
            self.OnWakeUp(sequenceNo)
            if self.closed:
                return
        self.Assert(self.expected == sequenceNo)
        self.expected += 1
        expected = self.expected
        while self.queue and self.queue[0][0] == expected:
            # Reschedule the consecutive run of successors that are parked.
            self.OnWakingUp(sequenceNo, expected)
            expected += 1
            other = heapq.heappop(self.queue)
            other[1].insert()
        if self.lastThrough is not None:
            self.Assert(self.lastThrough + 1 == sequenceNo)
        self.lastThrough = sequenceNo
    def OnSleep(self, sequenceNo):
        # Instrumentation hook: called just before a tasklet parks.
        pass
    def OnWakeUp(self, sequenceNo):
        # Instrumentation hook: called when a parked tasklet resumes.
        pass
    def OnWakingUp(self, sequenceNo, target):
        # Instrumentation hook: called by the passer for each woken successor.
        pass
def CallOnThread(cmd, args = (), kwds = {}):
    """Run cmd(*args, **kwds) on a real OS thread and block the calling
    tasklet on a channel until the result (or raised exception) comes back.

    NOTE: the mutable default kwds={} is safe here because it is only ever
    read (unpacked), never mutated.
    """
    chan = stackless.channel()
    def Helper():
        try:
            r = cmd(*args, **kwds)
            chan.send(r)
        except:
            e, v = sys.exc_info()[:2]
            chan.send_exception(e, v)
        finally:
            # Nudge the scheduler so the blocked receiver runs promptly.
            blue.pyos.NextScheduledEvent(0)
    thread = threading.Thread(target=Helper)
    thread.start()
    return chan.receive()
# Registry of on-demand locks keyed by (id(object), extra args); each entry
# is a mutable pair [lock instance or None, reference count].
namedlocks = collections.defaultdict(lambda : [None, 0])
def Lock(object, *args):
    """Acquire the ad-hoc non-strict semaphore keyed by (id(object), args).

    NOTE: the parameter deliberately shadows the builtin ``object`` - the
    name is part of the public API and kept for keyword compatibility.
    """
    t = (id(object), args)
    l = namedlocks[t]
    if not l[0]:
        l[0] = Semaphore(t, strict=False)
    l[1] += 1
    l[0].acquire()
def TryLock(object, *args):
    """Non-blocking Lock(); return True when the keyed lock was taken."""
    t = (id(object), args)
    l = namedlocks[t]
    if not l[0]:
        l[0] = Semaphore(t, strict=False)
    l[1] += 1
    if not l[0].try_acquire():
        l[1] -= 1
        return False
    return True
def ReentrantLock(object, *args):
    """Like Lock(), but backed by a reentrant CriticalSection."""
    t = (id(object), args)
    l = namedlocks[t]
    if not l[0]:
        l[0] = CriticalSection(t)
    l[1] += 1
    l[0].acquire()
def UnLock(object, *args):
    """Release the keyed lock, dropping its registry entry at refcount zero."""
    t = (id(object), args)
    l = namedlocks[t]
    l[0].release()
    l[1] -= 1
    if not l[1]:
        del namedlocks[t]
def CheckLock(object, *args):
    """Report whether the keyed lock is in use, purging the empty entry that
    this defaultdict lookup itself may have just created."""
    t = (id(object), args)
    l = namedlocks[t]
    if not l[1]:
        del namedlocks[t]
        return False
    return True
@contextlib.contextmanager
def Locked(object, *args):
    """Context-manager form of the Lock()/UnLock() keyed-lock registry."""
    Lock(object, *args)
    try:
        yield
    finally:
        UnLock(object, *args)
class BlockTrapSection(object):
    """Context manager that forbids channel blocking for the current tasklet
    inside the ``with`` body, restoring the previous block_trap flag on exit.
    """
    __guid__ = 'uthread.BlockTrapSection'
    def __init__(self):
        # BUGFIX: was misspelled ``self.oldBlocktrap`` (lowercase t), so the
        # default never backed the attribute read in __exit__; use the same
        # casing everywhere.
        self.oldBlockTrap = False
    def __enter__(self):
        # Remember the caller's flag so nested sections restore correctly.
        self.oldBlockTrap = stackless.getcurrent().block_trap
        stackless.getcurrent().block_trap = True
    def __exit__(self, exc, val, tb):
        stackless.getcurrent().block_trap = self.oldBlockTrap
def ChannelWait(chan, timeout = None):
    """Receive from *chan*, optionally giving up after *timeout* seconds.

    Returns (True, value) on success or (False, None) on timeout.  A helper
    tasklet sleeps for the timeout and raises _TimeoutError into the waiter
    if it is still blocked; conversely, the waiter raises _TimeoutError into
    the sleeping helper when the receive wins the race.
    """
    if timeout is None:
        return (True, chan.receive())
    waiting_tasklet = stackless.getcurrent()
    def break_wait():
        try:
            blue.pyos.synchro.SleepWallclock(int(timeout * 1000))
        except _TimeoutError:
            # Cancelled by the waiter after a successful receive.
            return
        with atomic():
            if waiting_tasklet and waiting_tasklet.blocked:
                waiting_tasklet.raise_exception(_TimeoutError)
    with atomic():
        breaker = new(break_wait)
        try:
            result = chan.receive()
            if breaker.blocked:
                # Receive won: cancel the still-sleeping breaker tasklet.
                breaker.raise_exception(_TimeoutError)
            return (True, result)
        except _TimeoutError:
            return (False, None)
        finally:
            # Clear the closure reference so a late breaker cannot target us.
            waiting_tasklet = None
class _TimeoutError(Exception):
pass
# Legacy lowercase aliases kept for old call sites.
parallel = Parallel
worker = PoolWorker
workerWithoutTheStars = PoolWorkerWithoutTheStars
pool = Pool
# Names published under the 'uthread.' namespace by the service loader.
exports = {'uthread.parallel': parallel,
 'uthread.worker': worker,
 'uthread.workerWithoutTheStars': workerWithoutTheStars,
 'uthread.new': new,
 'uthread.pool': Pool,
 'uthread.irandom': irandom,
 'uthread.uniqueId': uniqueId,
 'uthread.schedule': schedule,
 'uthread.GetChannels': GetChannels,
 'uthread.GetSemaphores': GetSemaphores,
 'uthread.FNext': FNext,
 'uthread.Lock': Lock,
 'uthread.TryLock': TryLock,
 'uthread.ReentrantLock': ReentrantLock,
 'uthread.UnLock': UnLock,
 'uthread.TaskletSequencer': TaskletSequencer,
 'uthread.CallOnThread': CallOnThread,
 'uthread.CheckLock': CheckLock,
 'uthread.BlockTrapSection': BlockTrapSection,
 'uthread.ChannelWait': ChannelWait,
 'uthread.newJoinable': newJoinable,
 'uthread.waitForJoinable': waitForJoinable,
 'uthread.TaskletWaitTimeout': TaskletWaitTimeout}
# Module import side effects: start the lock watchdog tasklet and hand the
# tasklet spawner to the lock registry.
new(LockCheck).context = '^uthread::LockCheck'
locks.Startup(new)
| [
"le02005@163.com"
] | le02005@163.com |
273b566fe8b621e0c00df877720cd345330a2152 | 2f1adce65fbec7509782be2bc50a5025c45e8dc0 | /backend/tiktok_lite_27297/urls.py | ceb8168dbf6d08c9fdf3dd1110a16ad81f532e2b | [] | no_license | crowdbotics-apps/tiktok-lite-27297 | 041a6ef444ed33d8ee342a797171e6ecbc3bde6d | 2c1796662d3f4cb0c9fa452520621ade2f3f957f | refs/heads/master | 2023-05-04T09:48:23.399812 | 2021-05-23T21:19:50 | 2021-05-23T21:19:50 | 370,159,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | """tiktok_lite_27297 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Core routing: app pages, allauth account flows, project modules, the REST
# API, the Django admin, and rest_auth endpoints.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "TikTok Lite"
admin.site.site_title = "TikTok Lite Admin Portal"
admin.site.index_title = "TikTok Lite Admin"
# swagger
# drf-yasg schema view; docs require an authenticated session.
api_info = openapi.Info(
    title="TikTok Lite API",
    default_version="v1",
    description="API documentation for TikTok Lite App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA shell for the root and any otherwise unmatched route, so
# client-side routing handles deep links.  Must stay last: the catch-all
# regex matches everything.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
315294feedef65535c8cb675b0557e0718d5e388 | 1eee2c9c105148904d0fb47cee227cfd20241b76 | /cutils/setup.py | 4b8f01285208f2e111388993010a95b9ade689a3 | [] | no_license | fred-hz/zeta | be9f6f466b75767cc1a45a4004d1c84e5d559b6b | e7b631447fff6e58928d6ac15702338b7cc8e3e7 | refs/heads/master | 2021-09-05T01:03:31.387379 | 2018-01-23T04:15:58 | 2018-01-23T04:15:58 | 118,187,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from distutils.core import setup, Extension
import numpy
from Cython.Distutils import build_ext
# Build the Cython extension "rank": compiles _rank.pyx together with the
# C++ implementation in rank.cpp, with NumPy headers on the include path so
# the .pyx can use the NumPy C API.
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=[Extension("rank",
                           sources=["_rank.pyx", "rank.cpp"],
                           language="c++",
                           include_dirs=[numpy.get_include()])],
)
| [
"zibo_zhao@berkeley.edu"
] | zibo_zhao@berkeley.edu |
def raiz_quadrada(x):
    """Compute the square root of *x* by successive subtraction of odd numbers.

    Every perfect square n**2 equals the sum of the first n odd numbers, so
    we subtract 1, 3, 5, ... until nothing remains; the number of
    subtractions is the root.  For a perfect square the root is printed (as
    the original intended) and returned; otherwise False is returned.

    Fixes the original, which never terminated: ``while x >= 0`` tested a
    value that was never modified, ``num == x - i`` was a no-op comparison
    instead of an assignment, and ``raiz = num / i`` was the wrong formula.
    """
    impar = 1   # current odd number to subtract
    resto = x   # what is left of x after the subtractions so far
    raiz = 0    # how many odd numbers have been subtracted
    while resto > 0:
        resto -= impar
        impar += 2
        raiz += 1
    if resto == 0:
        print(raiz)
        return raiz
    return False
| [
"you@example.com"
] | you@example.com |
c07b661a1ea80740df76e137477763e6ae83bce8 | 27c12a37b58548c1b3ace6184d452cdcf4128168 | /src/neurounits/visitors/__init__.py | 7af18da334fdbb01db0248858e6ceb7c81b0cbb6 | [] | no_license | stevemarsh/NeuroUnits | 48ca63b42ee74bf3a3d3107aa86e50d1c9aa4bc2 | 350663588f7d6f4cf85c1472debc1d2312b877e8 | refs/heads/master | 2020-12-25T16:13:56.983904 | 2013-03-29T12:19:32 | 2013-03-29T12:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
from .bases.base_visitor import ASTVisitorBase
from .bases.base_actioner_default import ASTActionerDefault
from .bases.base_actioner import SingleVisitPredicate
| [
"mikehulluk@googlemail.com"
] | mikehulluk@googlemail.com |
a43b24f70964cf35080f97edde3f21e4610f166d | 21a341a52b0305c70def05a141243cc46160cd0d | /esp/main.py | 7bd5f8cf81395f548c29257f1a9a6c5b5ce8ff85 | [
"MIT"
] | permissive | BPI-STEAM/mpy-flasher | 980c9084fec9108a316f70a965c7c8ee032da385 | 03a1be35ae2e0aaafb71ea6fb7adcfce823e7717 | refs/heads/master | 2020-05-25T03:38:08.464466 | 2019-05-28T05:14:02 | 2019-05-28T05:14:02 | 187,608,277 | 5 | 4 | MIT | 2019-11-02T21:20:18 | 2019-05-20T09:19:44 | Python | UTF-8 | Python | false | false | 4,127 | py | #!/usr/bin/env python
# coding: utf-8
'''
@File :erase.py
@Author :youxinweizhi
@Date :2019/3/28
@Github :https://github.com/youxinweizhi
'''
import esp.control
import sys
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from esp.mainWindow import Ui_Form
from PyQt5.QtCore import QTimer
import threading
mutex = threading.Lock()
class MyWindow(QMainWindow, Ui_Form):
    """Main window of the MicroPython firmware flasher.

    Wires the generated Ui_Form widgets to esp.control, refreshes the serial
    port list on a timer, and runs erase/flash jobs on background threads so
    the GUI stays responsive.
    """
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.pushButton.clicked.connect(self.main)
        self.checkBox_3.stateChanged.connect(self.disable_op)
        # self.comboBox.highlighted.connect(self.get_com)
        # self.comboBox.activated.connect(self.get_com)
        self.setFixedSize(self.width(), self.height())  # fix the window size
        # self.setWindowIcon(QIcon('./image/icon.ico'))
        self.statusBar().showMessage("Put the firmware in the same directory can be burning")
        self.list_serial = []  # last seen serial-port list (for change detection)
        self.get_com()
        self.get_bin()
        self.get_borad()
        self.get_config()
        # Re-scan serial ports every 3 seconds to pick up (un)plugged boards.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.get_com)
        self.timer.start(3000)
    def get_config(self):
        """Restore saved checkbox options and auto-start when configured."""
        import esp.config
        config = esp.config.get_confg_dict()
        if 'erase' in config:
            # BUGFIX: the original used setCheckState(bool); True maps to the
            # Qt.CheckState value 1 == PartiallyChecked, not Checked.
            # setChecked() is the correct boolean API.
            self.checkBox.setChecked(config['erase'] == 'True')
        if 'advanced' in config:
            self.checkBox_3.setChecked(config['advanced'] == 'True')
        if 'auto' in config and config['auto'] == 'True':
            self.main()
    def disable_op(self):
        """Grey out the normal-flash widgets while advanced mode is ticked."""
        if self.checkBox_3.isChecked():
            self.comboBox_2.setDisabled(True)
            self.comboBox_3.setDisabled(True)
            self.checkBox.setDisabled(True)
        else:
            self.comboBox_2.setDisabled(False)
            self.comboBox_3.setDisabled(False)
            self.checkBox.setDisabled(False)
    def get_com(self):
        """Refresh the serial-port combo box when the port list changes."""
        tmp = esp.control.list_serial()
        # print(tmp)
        # NOTE(review): only the list length is compared, so a same-count
        # change of ports goes unnoticed until the count changes - confirm.
        if len(tmp) != len(self.list_serial):
            self.list_serial = tmp
            self.comboBox.clear()
            self.comboBox.addItems(tmp)
            # print(self.comboBox.count())
    def check_com(self):
        """Return True when a plausible serial port is currently selected."""
        result = len(self.com) > 1  # and open(com) test
        if result is False:
            self.statusBar().showMessage('The selected serial port is not exist')
        return result
    def get_bin(self):
        """Populate the firmware combo box from the working directory."""
        self.comboBox_2.addItems(esp.control.list_bin())
    def get_borad(self):
        # (sic) "borad": the misspelled name is kept - it is called elsewhere.
        self.comboBox_3.addItems(esp.control.list_board())
    def erase(self):
        """Erase the whole flash, then continue with a normal flash."""
        self.statusBar().showMessage('Start to erase firmware...')
        self.statusBar().showMessage(esp.control.flash_erase(self.com))
        time.sleep(1)
        self.flash()
    def flash(self):
        """Flash the selected firmware; always re-enable the start button."""
        self.statusBar().showMessage('Start to flash firmware...')
        try:
            self.statusBar().showMessage(esp.control.flash_bin(self.board, self.com, self.firmware))
        finally:
            self.pushButton.setDisabled(False)
            self.statusBar().showMessage('Ready To GO')
    def flash_adv(self):
        """Run the advanced flashing routine from esp.advanced."""
        self.statusBar().showMessage('Start to advanced flash...')
        try:
            import esp.advanced
            self.statusBar().showMessage(esp.advanced.flash_bin(self.com))
        finally:
            self.pushButton.setDisabled(False)
            self.statusBar().showMessage('Ready To GO')
    def main(self):
        """Start the selected job (advanced / erase+flash / flash) on a thread."""
        self.com = self.comboBox.currentText().split(" - ", 1)[0]
        self.firmware = self.comboBox_2.currentText()
        if self.check_com():
            self.board = self.comboBox_3.currentText()
            print(self.com, self.firmware, self.board)
            self.pushButton.setDisabled(True)
            # Serialize job selection/startup across concurrent triggers.
            with mutex:
                task = self.flash_adv if self.checkBox_3.isChecked() else self.erase if self.checkBox.isChecked() else self.flash
                threading.Thread(target=task).start()
def run():
app = QApplication(sys.argv)
myWin = MyWindow()
myWin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run() | [
"junhuanchen@qq.com"
] | junhuanchen@qq.com |
9777359834936fdc8271ec94988ad65e6584a4ce | 031dbb2a3ea47a0483db310db9f98796cc83c500 | /658_Find K Closest Elements.py | f6a1201e4d36fb08d5c4bbdcd397c0ad6874f1d6 | [] | no_license | Shwan-Yu/Data_Structures_and_Algorithms | 429fb127983e32931f2168f44ef1484c1cc4c87f | 9126c2089e41d4d7fd3a204115eba2b5074076ad | refs/heads/master | 2020-03-27T11:46:59.947303 | 2019-08-23T15:15:21 | 2019-08-23T15:15:21 | 146,507,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
# O(lg(n-k)) keep track of the interval
l, r = 0, len(arr)-k
while l < r:
mid = (l+r)//2
if x-arr[mid] > arr[mid+k]-x:
l = mid+1
else:
r = mid
return arr[l:l+k]
# passed, write myself, O(lg(n)+k)
# def bs(arr, x):
# l, r = 0, len(arr)-1
# while l <= r:
# mid = (l+r)//2
# val = arr[mid]
# if val == x: return mid
# elif val < x: l = mid+1
# else: r = mid-1
# return l
# pos = bs(arr, x)
# l, r = pos-1, pos
# while k > 0:
# if l < 0:
# r += k
# break
# elif r > len(arr)-1:
# l -= k
# break
# if abs(arr[l]-x) <= abs(arr[r]-x): l-=1
# else: r += 1
# k -= 1
# return arr[l+1:r]
| [
"noreply@github.com"
] | Shwan-Yu.noreply@github.com |
a0cb2913c3bfaa4b5be6c7b4e94e17a951198076 | 9ef548f1e5457a18fe56f5c38d84cb835f98c7c3 | /main/seq2seq.py | 073d6fac5e16077011a0df330a49fa70b2ae6a9c | [] | no_license | sarikayamehmet/txt-summarization | 00c799535a5e1513fa35ac476eb094e8fbb16042 | 132340e2ba75497793686841b819abb35c47d76d | refs/heads/master | 2020-11-25T09:17:41.299564 | 2019-05-27T20:53:16 | 2019-05-27T20:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,767 | py | import torch.nn as nn
import torch.nn.functional as f
from main.encoder import Encoder
from main.reduce_encoder import ReduceEncoder
from main.decoder import Decoder
from main.encoder_attention import *
from main.decoder_attention import DecoderAttention
from main.common.vocab import *
from main.common.common import *
class Seq2Seq(nn.Module):
    """Pointer-generator sequence-to-sequence summarizer with intra-temporal
    encoder attention and intra-decoder attention.

    Shape legend used below: B = batch, L = source length, EH/DH = encoder /
    decoder hidden size, V = vocab size, OOV = per-batch out-of-vocabulary
    word count, C = a plain scalar.
    """
    def __init__(self, vocab: Vocab, embedding=None):
        super(Seq2Seq, self).__init__()
        self.emb_size = conf('emb-size')
        self.enc_hidden_size = conf('enc-hidden-size')
        self.dec_hidden_size = conf('dec-hidden-size')
        self.vocab_size = conf('vocab-size')
        self.max_dec_steps = conf('max-dec-steps')
        self.share_dec_weight = conf('share-dec-weight')
        self.pointer_generator = conf('pointer-generator')
        self.vocab = vocab
        self.embedding = embedding
        if self.embedding is None:
            self.embedding = nn.Embedding(self.vocab.size(), self.emb_size, padding_idx=TK_PADDING['id'])
        self.encoder = Encoder()
        self.reduce_encoder = ReduceEncoder()
        self.decoder = Decoder()
        self.enc_att = EncoderAttention()
        self.dec_att = DecoderAttention()
        # Decoder state + encoder context + decoder context, concatenated.
        combined_hidden_size = self.dec_hidden_size + 2 * self.enc_hidden_size + self.dec_hidden_size
        if self.pointer_generator is True:
            self.ptr_gen = nn.Linear(combined_hidden_size, 1)
        # sharing decoder weight
        if self.share_dec_weight is True:
            proj_layer = nn.Linear(combined_hidden_size, self.emb_size)
            output_layer = nn.Linear(self.emb_size, self.vocab_size)
            output_layer.weight.data = self.embedding.weight.data # sharing weight with embedding
            self.vocab_gen = nn.Sequential(
                proj_layer,
                output_layer
            )
        else:
            self.vocab_gen = nn.Linear(combined_hidden_size, self.vocab_size)
    '''
    :params
        x               : B, L
        x_len           : B
        extend_vocab_x  : B, L
        max_oov_len     : C
    :returns
        y               : B, L
        att             : B, L
    '''
    def forward(self, x, x_len, extend_vocab_x, max_oov_len):
        batch_size = len(x)
        x = self.embedding(x)
        enc_outputs, (enc_hidden_n, enc_cell_n) = self.encoder(x, x_len)
        enc_hidden_n, enc_cell_n = self.reduce_encoder(enc_hidden_n, enc_cell_n)
        # initial decoder hidden
        dec_hidden = enc_hidden_n
        # initial decoder cell
        dec_cell = cuda(t.zeros(batch_size, self.dec_hidden_size))
        # initial decoder input
        dec_input = cuda(t.tensor([TK_START['id']] * batch_size))
        # encoder padding mask
        enc_padding_mask = t.zeros(batch_size, max(x_len))
        for i in range(batch_size):
            enc_padding_mask[i, :x_len[i]] = t.ones(1, x_len[i])
        enc_padding_mask = cuda(enc_padding_mask)
        # stop decoding mask
        stop_dec_mask = cuda(t.zeros(batch_size))
        # initial encoder context vector
        enc_ctx_vector = cuda(t.zeros(batch_size, 2 * self.enc_hidden_size))
        enc_attention = None
        enc_temporal_score = None
        pre_dec_hiddens = None
        y = None
        # Greedy decoding loop: one token per step, up to max_dec_steps.
        for i in range(self.max_dec_steps):
            # decoding
            vocab_dist, dec_hidden, dec_cell, enc_ctx_vector, enc_att, enc_temporal_score, _, _ = self.decode(
                dec_input,
                dec_hidden,
                dec_cell,
                pre_dec_hiddens,
                enc_outputs,
                enc_padding_mask,
                enc_temporal_score,
                enc_ctx_vector,
                extend_vocab_x,
                max_oov_len)
            # Accumulate the per-step encoder attention (detached) for return.
            enc_attention = enc_att.unsqueeze(1).detach() if enc_attention is None else t.cat([enc_attention, enc_att.unsqueeze(1).detach()], dim=1)
            ## output
            dec_output = t.max(vocab_dist, dim=1)[1].detach()
            y = dec_output.unsqueeze(1) if y is None else t.cat([y, dec_output.unsqueeze(1)], dim=1)
            ## stop decoding mask
            stop_dec_mask[dec_output == TK_STOP['id']] = 1
            # Stop once every sequence in the batch has emitted STOP.
            if len(stop_dec_mask[stop_dec_mask == 1]) == len(stop_dec_mask):
                break
            pre_dec_hiddens = dec_hidden.unsqueeze(1) if pre_dec_hiddens is None else t.cat([pre_dec_hiddens, dec_hidden.unsqueeze(1)], dim=1)
            dec_input = dec_output
        return y, enc_attention
    '''
    :params
        dec_input           : B
        dec_hidden          : B, DH
        dec_cell            : B, DH
        pre_dec_hiddens     : B, T, DH
        enc_hiddens         : B, L, EH
        enc_padding_mask    : B, L
        enc_temporal_score  : B, L
        enc_ctx_vector      : B, 2EH
        extend_vocab_x      : B, L
        max_oov_len         : C
    :returns
        vocab_dist          : B, V + OOV
        dec_hidden          : B, DH
        dec_cell            : B, DH
        enc_ctx_vector      : B, 2EH
        enc_attention       : B, L
        enc_temporal_score  : B, L
        dec_ctx_vector      : B, DH
        dec_attention       : B, L
    '''
    def decode(self, dec_input,
               dec_hidden,
               dec_cell,
               pre_dec_hiddens,
               enc_hiddens,
               enc_padding_mask,
               enc_temporal_score,
               enc_ctx_vector,
               extend_vocab_x,
               max_oov_len):
        dec_input = self.embedding(dec_input)
        dec_hidden, dec_cell = self.decoder(dec_input, dec_hidden, dec_cell, enc_ctx_vector)
        # intra-temporal encoder attention
        enc_ctx_vector, enc_att, enc_temporal_score = self.enc_att(dec_hidden, enc_hiddens, enc_padding_mask, enc_temporal_score)
        # intra-decoder attention
        dec_ctx_vector, dec_att = self.dec_att(dec_hidden, pre_dec_hiddens)
        # vocab distribution
        combined_input = t.cat([dec_hidden, enc_ctx_vector, dec_ctx_vector], dim=1)
        vocab_dist = f.softmax(self.vocab_gen(combined_input), dim=1)
        # pointer-generator
        if self.pointer_generator is True:
            # Mix generation and copying: ptr_prob weights copying from the
            # source (via encoder attention scattered into the extended
            # vocabulary), (1 - ptr_prob) weights generating from the vocab.
            ptr_prob = t.sigmoid(self.ptr_gen(combined_input))
            ptr_dist = ptr_prob * enc_att
            vocab_dist = (1 - ptr_prob) * vocab_dist
            final_vocab_dist = cuda(t.zeros(len(dec_input), self.vocab.size() + max_oov_len))
            final_vocab_dist[:, :self.vocab.size()] = vocab_dist
            final_vocab_dist.scatter_add(1, extend_vocab_x, ptr_dist)
        else:
            final_vocab_dist = vocab_dist
        return final_vocab_dist, dec_hidden, dec_cell, enc_ctx_vector, enc_att, enc_temporal_score, dec_ctx_vector, dec_att
    '''
    :params
        x   : article
    :returns
        y   : summary
        att : attention
    '''
    def evaluate(self, x):
        self.eval()
        words = x.split()
        x = cuda(t.tensor(self.vocab.words2ids(words) + [TK_STOP['id']]).unsqueeze(0))
        x_len = cuda(t.tensor([len(words) + 1]))
        extend_vocab_x, oov = self.vocab.extend_words2ids(words)
        extend_vocab_x = extend_vocab_x + [TK_STOP['id']]
        extend_vocab_x = cuda(t.tensor(extend_vocab_x).unsqueeze(0))
        max_oov_len = len(oov)
        y, att = self.forward(x, x_len, extend_vocab_x, max_oov_len)
        return ' '.join(self.vocab.ids2words(y[0].tolist(), oov)), att[0]
| [
"leangsotheara@gmail.com"
] | leangsotheara@gmail.com |
a7c89e3ba731b9cccd6ce9f60d3b144001fa296b | bc02e2c69f425e03b609f466b0a2d52a455765dc | /1212/1239. Maximum Length of a Concatenated String with Unique Characters.py | 662a56e27f9c558011a42f71d223602f521d25f4 | [] | no_license | gaberani/AlgorithmStudy | d795f449fe185c3993df90173f27b7eb74e02366 | 6d9d20ac29446d22f2e0ef7037f131c4a2f48762 | refs/heads/master | 2023-02-03T03:24:45.039238 | 2020-12-22T12:53:52 | 2020-12-22T12:53:52 | 287,101,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | # Given an array of strings arr.
# String s is a concatenation of a sub-sequence of arr which have unique characters.
# Return the maximum possible length of s.
# 1 <= arr.length <= 16
# 1 <= arr[i].length <= 26
# arr[i] contains only lower case English letters.
class Solution(object):
    """LeetCode 1239: longest concatenation of a subsequence of arr whose
    characters are all unique (n <= 16, lowercase letters only)."""
    def maxLength2(self, arr):
        """First attempt, kept for reference: considers only pairs arr[i]+arr[j].

        NOTE(review): this does NOT solve the full problem (it ignores both
        single strings and concatenations of three or more); use maxLength().
        """
        if len(arr) == 1:
            return len(arr[0])
        answer = 0
        n = len(arr)
        for i in range(n - 1):
            for j in range(i + 1, n):
                candidate = arr[i] + arr[j]
                if len(candidate) > answer and len(set(candidate)) == len(candidate):
                    answer = len(candidate)
        return answer
    def maxLength(self, arr):
        """Return the maximum length of a duplicate-free concatenation.

        Grows a pool of valid concatenations: each input string is appended
        to every concatenation built so far and kept when all characters stay
        unique.  Cleanup: the leftover debug print() calls (one of which
        dumped the whole, potentially exponential, pool on every append)
        have been removed.
        """
        combos = ['']
        answer = 0
        for word in arr:
            # range(len(combos)) snapshots the pool size, so combos appended
            # in this round are not re-extended with the same word.
            for k in range(len(combos)):
                candidate = combos[k] + word
                if len(candidate) == len(set(candidate)):
                    combos.append(candidate)
                    answer = max(answer, len(candidate))
        return answer
# Ad-hoc smoke tests that run at import time; expected values are recorded
# in the trailing comments.
test = Solution()
print(test.maxLength(["un","iq","ue"])) # 4
print(test.maxLength(["cha","r","act","ers"])) # 6
print(test.maxLength(["abcdefghijklmnopqrstuvwxyz"])) # 26 | [
"khs0783@naver.com"
] | khs0783@naver.com |
bfe2190a8c6124aa6bad4f012e9a3873003340da | 83e21dcd88961e01d7b6d76c1e7d3e0c405bb7a2 | /tests/components/climate/common.py | 4ac6f553091470acdb1d8908a859bc38e18e9aee | [
"Apache-2.0"
] | permissive | skalavala/home-assistant | 0a61886a8e399d6c46bf791927a69557edfdebb3 | 66d6db7934db1af0c560ccffd92cf4a114ef5841 | refs/heads/dev | 2020-04-04T11:35:24.377362 | 2018-11-02T17:40:05 | 2018-11-02T17:40:05 | 155,896,654 | 3 | 1 | Apache-2.0 | 2018-11-02T17:00:10 | 2018-11-02T17:00:09 | null | UTF-8 | Python | false | false | 3,373 | py | """Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.climate import (
_LOGGER, ATTR_AUX_HEAT, ATTR_AWAY_MODE, ATTR_FAN_MODE, ATTR_HOLD_MODE,
ATTR_HUMIDITY, ATTR_OPERATION_MODE, ATTR_SWING_MODE, ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW, DOMAIN, SERVICE_SET_AWAY_MODE, SERVICE_SET_HOLD_MODE,
SERVICE_SET_AUX_HEAT, SERVICE_SET_TEMPERATURE, SERVICE_SET_HUMIDITY,
SERVICE_SET_FAN_MODE, SERVICE_SET_OPERATION_MODE, SERVICE_SET_SWING_MODE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE)
from homeassistant.loader import bind_hass
@bind_hass
def set_away_mode(hass, away_mode, entity_id=None):
    """Switch away mode on/off for every climate device, or just one.

    Legacy helper kept for old components; prefer calling the service
    directly.
    """
    service_data = {ATTR_AWAY_MODE: away_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AWAY_MODE, service_data)
@bind_hass
def set_hold_mode(hass, hold_mode, entity_id=None):
    """Apply a new hold mode to every climate device, or just one.

    Legacy helper kept for old components; prefer calling the service
    directly.
    """
    service_data = {ATTR_HOLD_MODE: hold_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HOLD_MODE, service_data)
@bind_hass
def set_aux_heat(hass, aux_heat, entity_id=None):
    """Toggle the auxiliary heater on every climate device, or just one.

    Legacy helper kept for old components; prefer calling the service
    directly.
    """
    service_data = {ATTR_AUX_HEAT: aux_heat}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AUX_HEAT, service_data)
@bind_hass
def set_temperature(hass, temperature=None, entity_id=None,
                    target_temp_high=None, target_temp_low=None,
                    operation_mode=None):
    """Ask all (or one) climate device to adopt new temperature targets.

    Only the arguments that were actually supplied (i.e. are not ``None``)
    are forwarded as service data.
    """
    candidates = {
        ATTR_TEMPERATURE: temperature,
        ATTR_TARGET_TEMP_HIGH: target_temp_high,
        ATTR_TARGET_TEMP_LOW: target_temp_low,
        ATTR_ENTITY_ID: entity_id,
        ATTR_OPERATION_MODE: operation_mode,
    }
    kwargs = {}
    for key, value in candidates.items():
        if value is not None:
            kwargs[key] = value
    _LOGGER.debug("set_temperature start data=%s", kwargs)
    hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, kwargs)
@bind_hass
def set_humidity(hass, humidity, entity_id=None):
    """Apply a new target humidity to every climate device, or just one."""
    if entity_id is not None:
        service_data = {ATTR_HUMIDITY: humidity, ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_HUMIDITY: humidity}
    hass.services.call(DOMAIN, SERVICE_SET_HUMIDITY, service_data)
@bind_hass
def set_fan_mode(hass, fan, entity_id=None):
    """Switch the fan mode on every climate device, or just one."""
    if entity_id:
        service_data = {ATTR_FAN_MODE: fan, ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_FAN_MODE: fan}
    hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, service_data)
@bind_hass
def set_operation_mode(hass, operation_mode, entity_id=None):
    """Apply a new operation mode to every climate device, or just one."""
    service_data = {ATTR_OPERATION_MODE: operation_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_OPERATION_MODE, service_data)
@bind_hass
def set_swing_mode(hass, swing_mode, entity_id=None):
    """Apply a new swing mode to every climate device, or just one."""
    service_data = {ATTR_SWING_MODE: swing_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_SWING_MODE, service_data)
| [
"paulus@home-assistant.io"
] | paulus@home-assistant.io |
91aecef65480009d7dbbe337141cf68bbe514f8a | 6f56da8db171d4a6c006b5d944437bf061069faf | /XCat.v.0.0.1/source/healpy/query_disc_func.py | 860b9d7fb4bfb7b36c397658f7cf006776b6d060 | [] | no_license | afarahi/XCat | 16819bef7087e994907c413dd6331cdebde72ffb | 498602eb7f61696d169f071185115345c68bcf86 | refs/heads/master | 2021-01-21T01:59:36.907059 | 2013-05-03T05:12:07 | 2013-05-03T05:12:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,163 | py | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""This module provides the function query_disc and its helper functions.
Written by B. Crill.
"""
def ring_num(nside, z):
    """Return the index of the iso-latitude ring containing z = cos(theta).

    Input:
     - nside: a power of 2
     - z : a float within [-1, 1]
    Return:
        the ring number, in the valid range 1 .. 4*nside-1
    """
    from numpy import sqrt
    twothird = 2.0 / 3.0
    shift = 0.5
    # Equatorial-belt estimate, valid for |z| <= 2/3.
    iring = int(nside * (2.0 - 1.5 * z) + shift)
    if z > twothird:
        # North polar cap.
        iring = int(nside * sqrt(3.0 * (1.0 - z)) + shift)
        if iring < 1:
            iring = 1
    if z < -twothird:
        # South polar cap: mirror image of the north-cap formula.
        iring = int(nside * sqrt(3.0 * (1.0 + z)) + shift)
        if iring < 1:
            # BUG FIX: the original clamped a copy of this value but then
            # mirrored the *unclamped* one, yielding ring 4*nside (out of
            # range) for z close to -1.  Clamp before mirroring.
            iring = 1
        iring = 4 * nside - iring
    return iring
def ring2z(nside, iz):
    """Return the z component (cos theta) of the centre of ring ``iz``.

    Input:
     - nside: a power of 2
     - iz: a ring number (1 .. 4*nside-1)
    Return:
        the z component of the ring
    """
    dth1 = 1.0 / (3.0 * float(nside) ** 2)
    dth2 = 2.0 / (3.0 * float(nside))
    if iz <= (nside - 1):
        # North polar cap.
        zring = 1.0 - float(iz) ** 2 * dth1
    elif iz <= 3 * nside:
        # Equatorial belt: z varies linearly with the ring index.
        zring = float(2 * nside - iz) * dth2
    else:
        # South polar cap.
        # BUG FIX: was ``4L*nside`` -- the Python 2 long-literal suffix is a
        # SyntaxError on Python 3; plain ``4`` behaves identically.
        zring = -1.0 + float(4 * nside - iz) ** 2 * dth1
    return zring
def in_ring(nside,iz,phi0,dphi,nest=False):
    """Return the pixels of ring ``iz`` whose longitude lies in
    [phi0 - dphi, phi0 + dphi].

    Input:
     - nside: a power of 2
     - iz: ring number (1 .. 4*nside-1)
     - phi0: central longitude of the interval, in radians
     - dphi: half-width of the longitude interval, in radians
    Keyword:
     - nest: if True, return pixel numbers in the NESTED scheme.
       Default: False (RING)
    Return:
     - array of pixel numbers (RING-scheme indices unless ``nest``)

    Ported from the HEALPix Fortran routine of the same name; the index
    arithmetic below mirrors that code closely.
    """
    from numpy import pi,arange,concatenate,hstack,fabs,round
    from pixelfunc import nside2npix,ring2nest
    npix = nside2npix(nside)
    take_all = 0
    to_top = 0
    twopi = 2.0 * pi
    # Number of pixels in the north polar cap (rings 1 .. nside-1).
    ncap = 2*nside*(nside-1)
    listir = -1
    nir = 0
    # Interval endpoints, wrapped into [0, 2*pi).  The ``< 0`` fix-ups are a
    # leftover from the Fortran port: Python's ``%`` already returns a
    # non-negative result here, so they never fire.
    phi_low = (phi0 - dphi) % twopi
    if (phi_low < 0): phi_low = phi_low + twopi
    phi_hi = (phi0 + dphi) % twopi
    if (phi_hi < 0): phi_hi = phi_hi + twopi
    # A half-width of ~pi means the whole ring is requested.
    if (fabs(dphi-pi) < 1e-6): take_all = 1
    # equatorial region
    if ((iz >= nside) & (iz <= 3*nside)):
        ir = iz - nside + 1
        # First and last RING index of this equatorial ring (4*nside pixels).
        ipix1 = ncap + 4*nside*(ir-1)
        ipix2 = ipix1 + 4*nside - 1
        # Alternate rings are shifted by half a pixel in phi.
        kshift = ir % 2
        nr = nside*4
    else:
        if (iz < nside):
            # North polar cap: ring ir has 4*ir pixels.
            ir = iz
            ipix1 = 2*ir*(ir-1)
            ipix2 = ipix1 + 4*ir - 1
        else:
            # South polar cap, counted from the south pole.
            ir = 4*nside - iz
            ipix1 = npix - 2*ir*(ir+1)
            ipix2 = ipix1 + 4*ir - 1
        nr = ir*4
        kshift = 1
    if (take_all == 1):
        # Whole ring requested: return every pixel index in one go.
        nir = ipix2 - ipix1 + 1
        listir = arange(ipix1,ipix2+1,1)
    if (take_all == 0):
        # Convert the phi bounds to in-ring pixel offsets (0 .. nr-1).
        shift = kshift * .5
        ip_low = int(round (nr * phi_low / twopi - shift))
        ip_hi = int(round(nr * phi_hi / twopi - shift))
        ip_low = ip_low % nr
        ip_hi = ip_hi % nr
        # The interval straddles phi = 0: it wraps around the ring.
        if (ip_low > ip_hi): to_top = 1
        ip_low = ip_low + ipix1
        ip_hi = ip_hi + ipix1
        if (to_top == 1):
            # Two segments: [ip_low, ipix2] and [ipix1, ip_hi].
            nir1 = ipix2 - ip_low + 1
            nir2 = ip_hi - ipix1 + 1
            nir = nir1 + nir2
            if ((nir1 > 0) & (nir2 > 0)):
                #listir = concatenate(arange(0,nir2,1)+ipix1, arange(0,nir1,1)+ip_low)
                list1 = arange(0,nir1,1)+ip_low
                list2 = arange(0,nir2,1)+ipix1
                listir = concatenate((list1,list2))
            else:
                # One of the two segments is empty; keep the other.
                if (nir1 == 0) : listir = arange(0,nir2,1)+ipix1
                if (nir2 == 0) : listir = arange(0,nir1,1)+ip_low
        else:
            # Single contiguous segment.
            nir = ip_hi - ip_low + 1
            listir = arange(0,nir,1)+ip_low
    # Optionally convert the RING indices to the NESTED scheme.
    if (nest): listir = ring2nest(nside,listir)
    return listir
def query_disc(nside,v0,radius,nest=False,deg=True):
    """Return the pixels whose ring overlaps the disc of opening angle
    ``radius`` around direction ``v0``.

    Input:
     - nside: a power of 2
     - v0: three-vector giving the disc centre (need not be normalised)
     - radius: the opening angle of the disc
    Keywords:
     - nest: if True, pixels are returned in the NESTED scheme.
       Default: False (RING)
     - deg: if True (default) ``radius`` is in degrees, else in radians.
    Return:
     - numpy array of pixel numbers
    """
    # Unused names (array, empty, hstack, vec2pix) and the dead ``npix``
    # local from the original have been dropped.
    from numpy import sqrt,sin,cos,pi,fabs,arccos,arcsin,size,concatenate,arctan2,asarray
    ang_conv = 1.0
    if (deg): ang_conv = pi/180.0
    cosang = cos(radius*ang_conv)
    # radius in radians
    radius_eff = radius * ang_conv
    # Normalise the centre direction.
    v0 = asarray(v0)
    v0 /= sqrt((v0**2).sum())
    x0,y0,z0 = v0
    a = x0*x0 + y0*y0
    phi0 = 0.0
    if ((x0!= 0.0)|(y0!=0.0)): phi0 = arctan2(y0, x0)
    cosphi0 = cos(phi0)
    # Latitude band [rlat2, rlat1] that can intersect the disc, converted
    # to a (clamped) range of ring indices.
    rlat0 = arcsin(z0)
    rlat1 = rlat0 + radius_eff
    rlat2 = rlat0 - radius_eff
    if (rlat1 >= pi/2.0): zmax = 1.0
    else : zmax = sin(rlat1)
    irmin = max(ring_num(nside,zmax) - 1,1)
    if (rlat2 <= -pi/2.0): zmin = -1.0
    else : zmin = sin(rlat2)
    irmax = min(ring_num(nside,zmin) + 1,4*nside-1)
    work = [[]]
    # BUG FIX: was ``xrange`` (Python 2 only); ``range`` behaves the same
    # here on both major versions.
    for iz in range(irmin,irmax+1):
        skip = 0
        z = ring2z(nside,iz)
        # Half-width dphi of the ring segment inside the disc:
        # cos(dphi) = (cos(radius) - z*z0) / (sin(theta_ring)*sin(theta_0)).
        b = cosang - z*z0
        c = 1.0 - z*z
        cosdphi = b/sqrt(a*c)
        if ((x0 == 0)&(y0==0)):
            # Disc centred on a pole: take the full ring.
            cosdphi = -1.0
            dphi = pi
        if (fabs(cosdphi) <= 1):
            dphi = arccos(cosdphi)
        else:
            if (cosphi0 < cosdphi):
                # NOTE(review): when fabs(cosdphi) > 1 and this test is
                # false, ``dphi`` keeps its value from the previous ring --
                # looks unintended; confirm against upstream healpy.
                skip = 1
                dphi = pi
        if (skip == 0):
            listir = in_ring(nside, iz, phi0, dphi,nest=nest)
            nir = size(listir)
            if (nir>0):
                work.append(listir)
    if len(work) > 1:
        work = concatenate(work[1:])
    else:
        work = asarray([], dtype=int)
    return work
| [
"aryaf66@gmail.com"
] | aryaf66@gmail.com |
b3c8d616e489ff6dc141e07a0074a2878e1459bc | 48e9d0e84238daf0de290551e3588e9ff3f49549 | /api/apiget.py | e4a87a5f3719acb9b9a59313730984215342957e | [] | no_license | celord/PythonGreencore | 9606af569738703b66d80bce6e423c9a313fa539 | 259aadcc346203f8092f6c6d286e3fca2e9fc550 | refs/heads/master | 2020-05-30T23:18:15.542876 | 2019-06-19T14:39:59 | 2019-06-19T14:39:59 | 190,014,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import requests
import json
token = "55d87efda345913f136102ce791c622ffe7341c74dcb4790c9d0e463edc831d1"
headers = {"Authorization": token}
r =requests.get('https://api.ciscopark.com/v1/people/me', headers = headers)
r.status_code | [
"celord@gmail.com"
] | celord@gmail.com |
928a56ae707e69d076cea2ac6ee12b17377f62d2 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-5059.py | accd3455111575581c05047eebbcc728bcb55a1a | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,292 | py | # Binary-search trees
# Node of an unbalanced binary search tree (generated ChocoPy benchmark case).
# insert() returns True iff a new node was added (duplicates are rejected).
class TreeNode(object):
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode(x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode(x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Generated variant of TreeNode with duplicated fields/methods as benchmark
# padding; the extra xN parameters and *2 members are never used.
class TreeNode2(object):
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    def insert(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Generated variant of TreeNode with triplicated fields/methods as benchmark
# padding; the extra xN parameters and *2/*3 members are never used.
class TreeNode3(object):
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Generated variant of TreeNode with quadruplicated fields/methods as
# benchmark padding; the extra xN parameters and *2..*4 members are unused.
class TreeNode4(object):
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None
    def insert(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Generated variant of TreeNode with quintuplicated fields/methods as
# benchmark padding; the extra xN parameters and *2..*5 members are unused.
class TreeNode5(object):
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Binary-search-tree wrapper: tracks the number of distinct values inserted.
class Tree(object):
    root:TreeNode = None
    size:int = 0
    def insert(self:"Tree", x:int) -> object:
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        else:
            if self.root.insert(x):
                # Only count genuinely new values (duplicates return False).
                self.size = self.size + 1
    def contains(self:"Tree", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
# Generated variant of Tree with duplicated members as benchmark padding;
# the extra xN parameters and *2 members are never used.
class Tree2(object):
    root:TreeNode2 = None
    root2:TreeNode2 = None
    # BUG FIX: was ``size:int = $INT`` -- an unsubstituted generator template
    # placeholder (not valid syntax).  Every sibling class initialises its
    # size fields to 0, so 0 is used here as well.
    size:int = 0
    size2:int = 0
    def insert(self:"Tree2", x:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree2", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree2", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree2", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
# Generated variant of Tree with triplicated members as benchmark padding;
# the extra xN parameters and *2/*3 members are never used.
class Tree3(object):
    root:TreeNode3 = None
    root2:TreeNode3 = None
    root3:TreeNode3 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    def insert(self:"Tree3", x:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree3", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree3", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree3", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
# Generated variant of Tree with quadruplicated members as benchmark padding;
# the extra xN parameters and *2..*4 members are never used.
class Tree4(object):
    root:TreeNode4 = None
    root2:TreeNode4 = None
    root3:TreeNode4 = None
    root4:TreeNode4 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    def insert(self:"Tree4", x:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree4", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree4", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree4", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
# Generated variant of Tree with quintuplicated members as benchmark padding;
# the extra xN parameters and *2..*5 members are never used.
class Tree5(object):
    root:TreeNode5 = None
    root2:TreeNode5 = None
    root3:TreeNode5 = None
    root4:TreeNode5 = None
    root5:TreeNode5 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    size5:int = 0
    def insert(self:"Tree5", x:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree5", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree5", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree5", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
# Factory: build a leaf TreeNode holding value x.
def makeNode(x: int) -> TreeNode:
    b:TreeNode = None
    b = TreeNode()
    b.value = x
    return b
# Factory: build a leaf TreeNode2 holding value x.
# x2 and the local b2 are generated benchmark padding and are never used.
def makeNode2(x: int, x2: int) -> TreeNode2:
    b:TreeNode2 = None
    b2:TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
# Factory: build a leaf TreeNode3 holding value x.
# x2/x3 and the locals b2/b3 are generated benchmark padding, never used.
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    b:TreeNode3 = None
    b2:TreeNode3 = None
    b3:TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
# Factory: build a leaf TreeNode4 holding value x.
# x2..x4 and the locals b2..b4 are generated benchmark padding, never used.
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    b:TreeNode4 = None
    b2:TreeNode4 = None
    b3:TreeNode4 = None
    b4:TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
# Factory: build a leaf TreeNode5 holding value x.
# x2..x5 and the locals b2..b5 are generated benchmark padding, never used.
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    b:TreeNode5 = None
    b2:TreeNode5 = None
    b3:TreeNode5 = None
    b4:TreeNode5 = None
    b5:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# Only n and c drive the benchmark; the *2..*5 copies are generated padding.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
# Insert n pseudo-random values (multiplicative congruential sequence mod
# 37831) plus most loop indices, then report the tree size and probe a few
# fixed keys.
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
1a8cd084b7d6fe1ca312a37929a9ebd93b0edb00 | 2337351b228818e41be3002bd38f68f77c2aa074 | /services/datastream/models/prefix.py | 0d10a5ac433def71fd723d7eb22131d738db29e8 | [
"BSD-3-Clause"
] | permissive | nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | Python | UTF-8 | Python | false | false | 1,077 | py | # ----------------------------------------------------------------------
# prefix datastream model
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
# Third-party modules
from pydantic import BaseModel, Field
# NOC modules
from .utils import StateItem, ProjectItem
class PrefixProfileItem(BaseModel):
id: str
name: str
class VRFItem(BaseModel):
id: str
name: str
class ASItem(BaseModel):
id: str
name: str
asf: str = Field(alias="as")
class PrefixDataStreamItem(BaseModel):
id: str
name: str
change_id: str
prefix: str
afi: str
source: str
state: StateItem
profile: PrefixProfileItem
description: Optional[str]
labels: Optional[List[str]]
tags: Optional[List[str]]
project: Optional[ProjectItem]
vrf: Optional[VRFItem]
asf: Optional[ASItem] = Field(None, alias="as")
| [
"dvolodin7@gmail.com"
] | dvolodin7@gmail.com |
6c7421dce123b13a65b46ab1af4a596e22aae589 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/required_field_1.py | 277122f083ebb95072706186a5e5838369bac68f | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 650 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.required_field_name_1 import RequiredFieldName1
__NAMESPACE__ = "http://www.travelport.com/schema/common_v52_0"
@dataclass
class RequiredField1:
"""
Parameters
----------
name
The name of the required field
"""
class Meta:
name = "RequiredField"
namespace = "http://www.travelport.com/schema/common_v52_0"
name: None | RequiredFieldName1 = field(
default=None,
metadata={
"name": "Name",
"type": "Attribute",
"required": True,
}
)
| [
"chris@komposta.net"
] | chris@komposta.net |
6f436c757d9095dc9ac7eb488a5fbd3eba77d8c0 | 990e7410c3debec7332ba09aa4c6504127ba2638 | /examples/opengl/opengl_core.py | 7dd09a10fd535cba1b54058f2e5fed71c6e3bdd8 | [
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause"
] | permissive | nachogon1/pyglet | 2b2699703b16118cb33dd009857e3cccccccf8ab | d6a85579f24a48e5f8e3e9f59f7bd9d5ebf2b049 | refs/heads/master | 2023-08-23T16:43:13.609153 | 2021-10-28T00:46:52 | 2021-10-28T00:46:52 | 422,730,754 | 0 | 0 | BSD-3-Clause | 2021-10-29T22:45:17 | 2021-10-29T22:45:17 | null | UTF-8 | Python | false | false | 3,702 | py | import pyglet
from pyglet.gl import *
# pyglet.options['debug_gl_shaders'] = True
window = pyglet.window.Window(width=540, height=540, resizable=True)
batch = pyglet.graphics.Batch()
print("OpenGL Context: {}".format(window.context.get_info().version))
##########################################################
# TESTS !
##########################################################
label = pyglet.text.Label("This is a test", x=0, y=180, dpi=200, color=(255, 25, 255, 150), batch=batch)
vertex_list = pyglet.graphics.vertex_list(3, ('position3f', (100, 300, 0, 200, 250, 0, 200, 350, 0)),
('colors4f', (1, 0, 0, 1, 0, 1, 0, 1, 0.3, 0.3, 1, 1)))
def create_quad_vertex_list(x, y, z, width, height):
return x, y, z, x + width, y, z, x + width, y + height, z, x, y + height, z
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', create_quad_vertex_list(480, 270, -11, 50, 50)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', (400, 400, 0, 400+50, 400, 0, 400+50, 400+50, 0, 400, 400+50, 0)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
img = pyglet.image.load("pyglet.png")
img.anchor_x = img.width // 2
img.anchor_y = img.height // 2
red = pyglet.image.SolidColorImagePattern((255, 0, 0, 255)).create_image(50, 50)
green = pyglet.image.SolidColorImagePattern((0, 255, 0, 255)).create_image(50, 50)
blue = pyglet.image.SolidColorImagePattern((0, 0, 255, 255)).create_image(50, 50)
white = pyglet.image.SolidColorImagePattern((255, 255, 255, 255)).create_image(50, 50)
sprites = [pyglet.sprite.Sprite(img=img, x=60, y=80, batch=batch),
pyglet.sprite.Sprite(img=img, x=110, y=90, batch=batch),
pyglet.sprite.Sprite(img=img, x=160, y=100, batch=batch),
pyglet.sprite.Sprite(img=img, x=210, y=110, batch=batch)]
for sprite in sprites:
sprite.opacity = 220
sprite2 = pyglet.sprite.Sprite(img=red, x=200, y=400, batch=batch)
sprite3 = pyglet.sprite.Sprite(img=green, x=300, y=300, batch=batch)
sprite4 = pyglet.sprite.Sprite(img=blue, x=400, y=200, batch=batch)
sprite5 = pyglet.sprite.Sprite(img=white, x=500, y=100, batch=batch)
standalone_sprite = pyglet.sprite.Sprite(img=white, x=600, y=0)
##########################################################
# Modify the sprite scale value by scrolling the mouse
##########################################################
@window.event
def on_mouse_scroll(x, y, mouse, direction):
    # Scroll up (+1) grows the batched sprites, scroll down (-1) shrinks them;
    # each notch changes the scale by 0.1.
    for spr in sprites:
        spr.scale += direction / 10
###########################################################
#
###########################################################
@window.event
def on_draw():
    # Redraw everything each frame: clear, then the standalone vertex list,
    # the shared batch, and finally the sprite that is not attached to the batch.
    window.clear()
    # pyglet.graphics.draw(3, GL_TRIANGLES, ('position3f', (100, 100, 0, 200, 100, 0, 150, 200, 0)),
    #                      ('colors3f', (1, 0.5, 0.2, 1, 0.5, 0.2, 1, 0.5, 0.2)))
    #
    # pyglet.graphics.draw_indexed(4, GL_TRIANGLES, [0, 1, 2, 0, 2, 3],
    #                              ('position2i', (225, 300, 250, 300, 250, 325, 225, 325)),
    #                              ('colors3f', (0.5, 1, 0.2, 0.5, 0.2, 1, 0.2, 0.5, 1, 1, 0.5, 0.2)))
    vertex_list.draw(GL_TRIANGLES)
    batch.draw()
    standalone_sprite.draw()
def update(dt):
    """Per-frame clock tick: spin each batched sprite at 100 degrees/second.

    ``dt`` is the elapsed time in seconds since the previous tick.  The angle
    is wrapped into [0, 360).  Note the original expression
    ``sprite.rotation += 100 * dt % 360`` parsed as ``100 * (dt % 360)``
    (``%`` binds tighter than ``*``'s left operand would suggest here), so no
    wrapping ever happened and ``rotation`` grew without bound, slowly losing
    float precision over long runs.
    """
    for sprite in sprites:
        sprite.rotation = (sprite.rotation + 100 * dt) % 360
if __name__ == "__main__":
    # Dark teal clear color, ~60 FPS update schedule, then enter the event loop.
    pyglet.gl.glClearColor(0.2, 0.3, 0.3, 1)
    pyglet.clock.schedule_interval(update, 1/60)
    pyglet.app.run()
| [
"benmoran@protonmail.com"
] | benmoran@protonmail.com |
77416c87f4746c644da799b5cdbf2cdc286c0c90 | afde521f50b6be4be9e5c3071ed6459419fb5edb | /env/lib/python3.6/site-packages/pyecharts/engine.py | ce7dc39427d98ca1b9afb790031a3c753cbd7f7a | [] | no_license | guhongcheng/myblog | ddef4aa0888dedfb70933b34bfd0c5da5bb5d5cd | b11f5ee26125b9551b1f27814b96a845dd4e6a76 | refs/heads/master | 2022-12-18T20:26:46.596014 | 2018-07-26T02:46:07 | 2018-07-26T02:46:07 | 134,683,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,886 | py | # coding=utf-8
from __future__ import unicode_literals
from jinja2 import Environment, FileSystemLoader, environmentfunction, Markup
from lml.plugin import PluginManager, PluginInfo
import pyecharts.conf as conf
import pyecharts.constants as constants
import pyecharts.utils as utils
from pyecharts_javascripthon.api import TRANSLATOR
from pyecharts_javascripthon.api import FUNCTION_TRANSLATOR
# <script> tag referencing an external javascript file.
LINK_SCRIPT_FORMATTER = '<script type="text/javascript" src="{}"></script>'
# <script> tag with the javascript source embedded inline.
EMBED_SCRIPT_FORMATTER = '<script type="text/javascript">\n{}\n</script>'
# Placeholder <div> element that echarts renders a chart into.
CHART_DIV_FORMATTER = '<div id="{chart_id}" style="width:{width};height:{height};"></div>'  # flake8: noqa
# Javascript that initialises one chart instance and applies its options.
CHART_CONFIG_FORMATTER = """
var myChart_{chart_id} = echarts.init(document.getElementById('{chart_id}'), '{theme}', {{renderer: '{renderer}'}});
{custom_function}
var option_{chart_id} = {options};
myChart_{chart_id}.setOption(option_{chart_id});
"""
# Javascript that wires one event handler onto a chart instance.
CHART_EVENT_FORMATTER = """
myChart_{chart_id}.on("{event_name}", {handler_name});
"""
@environmentfunction
def echarts_js_dependencies(env, *args):
    """Render <script> nodes for the js dependencies of the given charts.

    Depending on the active config, the scripts are either embedded inline
    (``js_embed``) or referenced through external links.
    """
    config = env.pyecharts_config
    required_files = utils.merge_js_dependencies(*args)
    if config.js_embed:
        scripts = [
            EMBED_SCRIPT_FORMATTER.format(content)
            for content in config.read_file_contents_from_local(required_files)
        ]
    else:
        scripts = [
            LINK_SCRIPT_FORMATTER.format(link)
            for link in config.generate_js_link(required_files)
        ]
    return Markup("\n".join(scripts))
@environmentfunction
def echarts_js_dependencies_embed(env, *args):
    """Render embedded <script> nodes; only local files are supported."""
    config = env.pyecharts_config
    required_files = utils.merge_js_dependencies(*args)
    rendered = [
        EMBED_SCRIPT_FORMATTER.format(content)
        for content in config.read_file_contents_from_local(required_files)
    ]
    return Markup("\n".join(rendered))
@environmentfunction
def echarts_container(env, chart):
    """Render the placeholder <div> element for a chart.

    :param env: the jinja2 environment (unused, required by the signature)
    :param chart: A pyecharts.base.Base object
    """
    div_markup = CHART_DIV_FORMATTER.format(
        chart_id=chart.chart_id,
        width=utils.to_css_length(chart.width),
        height=utils.to_css_length(chart.height),
    )
    return Markup(div_markup)
def generate_js_content(*charts):
    """Build the echarts initialisation javascript for the given charts.

    For every chart, event handlers are first fed to the function translator,
    the chart options are translated to javascript, and the per-chart init
    snippet plus any event bindings are emitted.
    """
    snippets = []
    for chart in charts:
        FUNCTION_TRANSLATOR.reset()
        for handler in chart.event_handlers.values():
            FUNCTION_TRANSLATOR.feed(handler)
        translated = TRANSLATOR.translate(chart.options)
        snippet = CHART_CONFIG_FORMATTER.format(
            chart_id=chart.chart_id,
            renderer=chart.renderer,
            theme=chart.theme,
            custom_function=translated.function_snippet,
            options=translated.option_snippet,
        )
        # Handlers were already translated by the feed() calls above.
        for event_name, handler in chart.event_handlers.items():
            snippet += CHART_EVENT_FORMATTER.format(
                event_name=event_name,
                chart_id=chart.chart_id,
                handler_name=handler.__name__,
            )
        snippets.append(snippet)
    return "\n".join(snippets)
@environmentfunction
def echarts_js_content(env, *charts):
    """Render a <script> node wrapping the echarts initialisation code."""
    initial_code = generate_js_content(*charts)
    return Markup(EMBED_SCRIPT_FORMATTER.format(initial_code))
@environmentfunction
def echarts_js_content_wrap(env, *charts):
    """Render the raw echarts initialisation code, without a <script> wrapper."""
    return Markup(generate_js_content(*charts))
# Public API, see document for more detail.
# Note: the historical misspelling "ECHAERTS" is kept because the name is part
# of the public API.
ECHAERTS_TEMPLATE_FUNCTIONS = {
    "echarts_js_dependencies": echarts_js_dependencies,
    "echarts_js_dependencies_embed": echarts_js_dependencies_embed,
    "echarts_container": echarts_container,
    "echarts_js_content": echarts_js_content,
    "echarts_js_content_wrap": echarts_js_content_wrap,
}
class BaseEnvironment(Environment):
    """
    Add config and echarts template functions to a Environment object.
    """

    def __init__(self, *args, **kwargs):
        # ``pyecharts_config`` is mandatory; it controls embed/link rendering.
        self.pyecharts_config = kwargs.pop("pyecharts_config", None)
        if self.pyecharts_config is None:
            raise TypeError(
                "no pyecharts_config for this environment specified"
            )
        super(BaseEnvironment, self).__init__(*args, **kwargs)
        # Expose the echarts helpers to every template rendered by this env.
        self.globals.update(ECHAERTS_TEMPLATE_FUNCTIONS)
@PluginInfo(constants.ENVIRONMENT_PLUGIN_TYPE, tags=[constants.DEFAULT_HTML])
class EchartsEnvironment(BaseEnvironment):
    """
    Built-in jinja2 template engine for pyecharts

    This class provides some shortcut methods for rendering charts.
    """

    def __init__(self, pyecharts_config=None, *args, **kwargs):
        pyecharts_config = pyecharts_config or conf.PyEchartsConfig()
        # Default to loading templates from the configured template directory.
        loader = kwargs.pop("loader", None)
        if loader is None:
            loader = FileSystemLoader(pyecharts_config.echarts_template_dir)
        super(EchartsEnvironment, self).__init__(
            pyecharts_config=pyecharts_config,
            keep_trailing_newline=True,
            trim_blocks=True,
            lstrip_blocks=True,
            loader=loader,
            *args,
            **kwargs
        )

    def render_container_and_echarts_code(self, chart):
        """
        Render <div> and <script> code fragment for a chart.
        """
        tpl_string = """
        {{ echarts_container(chart) }}
        {{ echarts_js_content(chart) }}
        """
        tpl = self.from_string(tpl_string)
        return tpl.render(chart=chart)

    def render_chart_to_file(
        self,
        chart,
        object_name="chart",
        path="render.html",
        template_name="simple_chart.html",
        **kwargs
    ):
        """
        Render a chart or page to local html files.

        :param chart: A Chart or Page object
        :param object_name: Variable name for chart/page used in template
        :param path: The destination file which the html code write to
        :param template_name: The name of template file.
        :param extra_context: A dictionary containing extra data.
        :return: None
        """
        # Bind the chart under the requested variable name for the template.
        kwargs[object_name] = chart
        tpl = self.get_template(template_name)
        html = tpl.render(**kwargs)
        utils.write_utf8_html_file(path, html)

    def render_chart_to_notebook(self, **context):
        """
        Return html string for rendering a chart/page to a notebook cell.

        :param context: A dictionary containing data.
        :return: A unicode string that will be displayed in notebook cell.
        """
        tpl = self.get_template("notebook.html")
        return tpl.render(**context)
class EnvironmentManager(PluginManager):
    """
    Extend the rendering capability of pyecharts by having
    loosely coupled environments
    """

    def __init__(self):
        """
        Register with lml that this class manages 'pyecharts_environment'
        extension
        """
        super(EnvironmentManager, self).__init__(
            constants.ENVIRONMENT_PLUGIN_TYPE
        )

    def get_a_environment(self, file_type, **kwargs):
        """
        Factory method to choose the default html rendering EchartsEnvironment
        or image rendering SnapshotEnvironment from pyecharts-snapshot

        :param file_type: 'html', 'svg', 'png', 'jpeg', 'gif' or 'pdf'
        :param kwargs: the initialization parameters for Environment
        """
        # Plugin lookup is keyed by the desired output file type.
        _a_echarts_env_cls = super(EnvironmentManager, self).load_me_now(
            key=file_type
        )
        return _a_echarts_env_cls(**kwargs)
# Module-level singleton manager used by the factory function below.
ENV_MANAGER = EnvironmentManager()


def create_default_environment(file_type):
    """
    Create environment object with pyecharts default single PyEchartsConfig.

    :return: A new EchartsEnvironment object.
    """
    default_template_dir = utils.get_resource_dir("templates")
    config = conf.CURRENT_CONFIG
    # Look up user-configured templates first, then fall back to the built-ins.
    echarts_env = ENV_MANAGER.get_a_environment(
        file_type,
        pyecharts_config=config,
        loader=FileSystemLoader(
            [config.echarts_template_dir, default_template_dir]
        ),
    )
    return echarts_env
| [
"1051712303@qq.com"
] | 1051712303@qq.com |
b3943be483dc16b387eb116bc05a852c68759105 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /simulation_research/tf_risk/run_replication_experiment.py | 3bd791c79eb4c0c8b6f9bf723fd6e1026baa1856 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 8,615 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an experiment of the synthesis of a basket call with mc methods.
Consider we are selling an European basket call option which in general cannot
be priced or hedged analytically and cannot be replicated statically even
under a simple Black-Scholes model for the dynamics of the underlyings.
For simplicity, bid-ask spread and fees are not taken into account.
We simulate correlated stock price variations under the historical probability
and hedge at each time period with a delta computed by a Monte Carlo method
(path-wise differentiation) under the risk neutral probability.
Tensorflow's automated differentiation capabilities make such a Monte Carlo
simulation simple to differentiate to extract the delta and easy to accelerate
on GPU/TPU.
See http://www.cmap.polytechnique.fr/~touzi/Poly-MAP552.pdf p99 7.4.2.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import scipy.linalg
import tensorflow.compat.v1 as tf
from simulation_research.tf_risk import controllers
from simulation_research.tf_risk import monte_carlo_manager
from simulation_research.tf_risk.dynamics import gbm_log_euler_step_nd
from simulation_research.tf_risk.payoffs import basket_call_payoff
# Command-line configuration: basket dimension, Monte Carlo budget, market
# dynamics parameters, and the option contract terms.
flags.DEFINE_integer("num_dims", 100,
                     "Number of assets in basket option.")
flags.DEFINE_integer("num_batch_samples", 1000,
                     "Number of Monte Carlo per batch.")
flags.DEFINE_integer("num_batches", 10,
                     "Number of Monte Carlo batches.")
flags.DEFINE_float("initial_price", 100.0, "Initial price.")
flags.DEFINE_float("drift", 0.0, "Stock drift.")
flags.DEFINE_float("volatility", 0.2,
                   "Volatilty of each stock (assumed equal across stock).")
flags.DEFINE_float("correlation", 0.5,
                   "Pairwise correlation between stocks "
                   "(assumed equal across pairs).")
flags.DEFINE_float("strike", 100.0, "Basket strike.")
flags.DEFINE_float("maturity", 1.0, "European option maturity (in years).")
flags.DEFINE_float("delta_t_historical", 0.1,
                   "Time step of historical stock price simulation.")
flags.DEFINE_float("delta_t_monte_carlo", 0.1,
                   "Time step of monte carlo risk neutral price simulation.")

FLAGS = flags.FLAGS
def hist_log_euler_step(hist_state, hist_drift, hist_vol_matrix, dt, rng=None):
  """One Euler step of price movements under the historical probability.

  Simulates S_{t+dt} = max(S_t * (1 + mu*dt + dW_t @ Sigma), 1e-7), where dW_t
  is an i.i.d. Gaussian increment with standard deviation sqrt(dt).  The floor
  at 1e-7 keeps prices strictly positive.

  Args:
    hist_state: 1-D numpy array of current asset prices, shape [num_dims].
    hist_drift: scalar drift under the historical probability.
    hist_vol_matrix: [num_dims, num_dims] volatility matrix.
    dt: time step length (in years).
    rng: optional numpy random generator/state (e.g. np.random.RandomState)
      used to draw the Gaussian increment; defaults to the global np.random
      module, preserving the original behavior.  Passing an explicit rng makes
      the simulation reproducible.

  Returns:
    1-D numpy array of simulated prices at t + dt, shape [num_dims].
  """
  random_state = np.random if rng is None else rng
  num_dims = hist_state.shape[0]
  hist_dw_t = random_state.normal(size=[num_dims]) * np.sqrt(dt)
  return np.maximum(
      hist_state * (1.0 + hist_drift * dt + np.matmul(hist_dw_t, hist_vol_matrix)),
      1e-7)
def main(_):
  """Runs the daily delta-hedging replication experiment end to end.

  Builds a TF1 graph that prices and differentiates a basket call by Monte
  Carlo, then simulates the underlying under the historical probability,
  re-hedging at each time step, and reports the final PnL (which should be
  close to zero for a good replication).
  """
  num_dims = FLAGS.num_dims
  num_batches = FLAGS.num_batches
  hist_drift = FLAGS.drift
  hist_vol = FLAGS.volatility
  hist_cor = FLAGS.correlation
  # Equicorrelated correlation matrix: hist_cor off-diagonal, 1.0 on-diagonal.
  hist_cor_matrix = (hist_cor * np.ones((num_dims, num_dims))
                     + (1.0 - hist_cor) * np.eye(num_dims))
  hist_price = FLAGS.initial_price * np.ones(num_dims)
  # Volatility matrix is the (matrix) square root of the correlation matrix,
  # scaled by the common per-stock volatility.
  hist_vol_matrix = hist_vol * np.real(scipy.linalg.sqrtm(hist_cor_matrix))
  hist_dt = FLAGS.delta_t_historical
  sim_dt = FLAGS.delta_t_monte_carlo
  strike = FLAGS.strike
  maturity = FLAGS.maturity

  # Placeholders for tensorflow-based simulator's arguments.
  sim_price = tf.placeholder(shape=[num_dims], dtype=tf.float32)
  sim_drift = tf.placeholder(shape=(), dtype=tf.float32)
  sim_vol_matrix = tf.constant(hist_vol_matrix, dtype=tf.float32)
  sim_maturity = tf.placeholder(shape=(), dtype=tf.float32)

  # Transition operation between t and t + dt with price in log scale.
  def _dynamics_op(log_s, t, dt):
    return gbm_log_euler_step_nd(
        log_s, sim_drift, sim_vol_matrix, t, dt)

  # Terminal payoff function (with price in log scale).
  def _payoff_fn(log_s):
    return basket_call_payoff(tf.exp(log_s), strike)

  # Call's price and delta estimates (sensitivity to current underlying price).
  # Monte Carlo estimation under the risk neutral probability is used.
  # The reason why we employ the risk neutral probability is that the position
  # is hedged each day depending on the value of the underlying.
  # See http://www.cmap.polytechnique.fr/~touzi/Poly-MAP552.pdf for a complete
  # explanation.
  price_estimate, _, _ = monte_carlo_manager.non_callable_price_mc(
      initial_state=tf.log(sim_price),
      dynamics_op=_dynamics_op,
      payoff_fn=_payoff_fn,
      maturity=sim_maturity,
      num_samples=FLAGS.num_batch_samples,
      dt=sim_dt)
  # Path-wise delta via automated differentiation of the price estimate.
  delta_estimate = monte_carlo_manager.sensitivity_autodiff(
      price_estimate, sim_price)

  # Start the hedging experiment.
  session = tf.Session()
  hist_price_profile = []
  cash_profile = []
  underlying_profile = []
  wall_times = []
  t = 0
  cash_owned = 0.0
  underlying_owned = np.zeros(num_dims)
  while t <= maturity:
    # Each day, a new stock price is observed.
    cash_eval = 0.0
    delta_eval = 0.0
    for _ in range(num_batches):
      if t == 0.0:
        # The first day a derivative price is computed to decide how much cash
        # is initially needed to replicate the derivative's payoff at maturity.
        cash_eval_batch = controllers.price_derivative(
            price_estimate,
            session,
            params={
                sim_drift: 0.0,
                sim_price: hist_price,
                sim_maturity: maturity - t
            })
      # Each day the delta of the derivative is computed to decide how many
      # shares of the underlying should be owned to replicate the derivative's
      # payoff at maturity.
      start_time = time.time()
      delta_eval_batch = controllers.hedge_derivative(
          delta_estimate,
          session,
          params={
              sim_drift: 0.0,
              sim_price: hist_price,
              sim_maturity: maturity - t
          })
      wall_times.append(time.time() - start_time)
      delta_eval += delta_eval_batch / num_batches
      # NOTE(review): for t > 0 this accumulates the stale `cash_eval_batch`
      # from the t == 0 pass; harmless because `cash_eval` is only read when
      # t == 0, but worth confirming with the authors.
      cash_eval += cash_eval_batch / num_batches
    if t == 0.0:
      logging.info("Initial price estimate: %.2f", cash_eval)
    # Self-financing portfolio dynamics, held cash is used to buy the underlying
    # or increases when the underlying is sold.
    if t == 0.0:
      cash_owned = cash_eval - np.sum(delta_eval * hist_price)
      underlying_owned = delta_eval
    else:
      cash_owned -= np.sum((delta_eval - underlying_owned) * hist_price)
      underlying_owned = delta_eval
    logging.info("Cash at t=%.2f: %.2f", t, cash_owned)
    logging.info("Mean delta at t=%.2f: %.4f", t, np.mean(delta_eval))
    logging.info("Mean underlying at t=%.2f: %.2f ", t, np.mean(hist_price))
    hist_price_profile.append(hist_price)
    cash_profile.append(cash_owned)
    underlying_profile.append(underlying_owned)
    # Simulation of price movements under the historical probability (i.e. what
    # is actually happening in the stock market).
    hist_price = hist_log_euler_step(
        hist_price, hist_drift, hist_vol_matrix, hist_dt)
    t += hist_dt
  session.close()

  # At maturity, the value of the replicating portfolio should be exactly
  # the opposite of the payoff of the option being sold.
  # The reason why the match is not exact here is two-fold: we only hedge once
  # a day and we use noisy Monte Carlo estimates to do so.
  underlying_owned_value = np.sum(underlying_owned * hist_price)
  profit = np.sum(underlying_owned * hist_price) + cash_owned
  loss = (np.mean(hist_price) - strike) * (np.mean(hist_price) > strike)
  logging.info("Cash owned at maturity %.3f.", cash_owned)
  logging.info("Value of underlying owned at maturity %.3f.",
               underlying_owned_value)
  logging.info("Profit (value held) = %.3f.", profit)
  logging.info("Loss (payoff sold, 0 if price is below strike) = %.3f.", loss)
  logging.info("PnL (should be close to 0) = %.3f.", profit - loss)
if __name__ == "__main__":
  # absl entry point: parses the flags defined above, then calls main().
  app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f3832061ad2374d819506a18348aa03d27a95d26 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_slipped.py | b9ab1c3a9d8d932d06c99f0ba3cd3a162dee6b49 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.nouns._slip import _SLIP
# class header
class _SLIPPED(_SLIP, ):
  """Auto-generated vocabulary entry for "slipped", an inflected form of "slip"."""

  def __init__(self,):
    _SLIP.__init__(self)
    self.name = "SLIPPED"   # surface form (upper-cased)
    self.specie = 'nouns'   # word-category bucket this entry belongs to
    self.basic = "slip"     # lemma / base form
    self.jsondata = {}      # extra metadata, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d41b809d954f96b6e5eed80c4f4f724e850412aa | d7bc683cc14198ba4b1ae9193f7feaec1808e9f3 | /python练习题/day01/Check variable type.py | 2ad3cf67da0abeab24045e029c5e21b4fb137f79 | [] | no_license | yuanshaohui/python-learn | 8bd4e6a0a5cc0ed622e32bdf0c9e833460ee3b90 | 8c9c6ecccb4219e29fec7593e9bc4b45e218da3e | refs/heads/master | 2021-03-14T09:44:50.142244 | 2020-03-21T13:48:18 | 2020-03-21T13:48:18 | 246,756,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | """
检查变量类型
Author:袁亮亮
Date:2019-11-19
"""
a = 100
b = 100.1
c = "字符串"
d = 10 + 5j
e = True
print(type(a))
print(type(b))
print(type(c))
print(type(d))
print(type(e)) | [
"123456@qq.com"
] | 123456@qq.com |
93356333e90c6fe3292e1d0888ed82cba15329c5 | 28510f9d380272777be446665be6bdbf784b07d1 | /tensorflow_federated/python/learning/personalization_eval_test.py | 5088671af269fb35b2381cca9250e37c1e3f349d | [
"Apache-2.0"
] | permissive | 00mjk/federated | b6d5d1988dc999e60c2e3478aaae37a402e0e8c6 | 11caa7569ed00a8fa590f25761747d0ee09504a5 | refs/heads/master | 2023-04-09T07:21:09.469878 | 2021-04-22T20:47:52 | 2021-04-22T20:59:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,153 | py | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model_examples
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning import personalization_eval as p13n_eval
from tensorflow_federated.python.learning.framework import dataset_reduce
# TODO(b/160896627): Switch to `dataset.reduce` once multi-GPU supports it.
# Simulation-mode reduce loop shared by the evaluation/personalization fns below.
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(simulation_flag=True)
@tf.function
def _evaluate_fn(model, dataset, batch_size=1):
  """Evaluates a `tff.learning.Model` on the given dataset.

  Args:
    model: A `tff.learning.Model`.
    dataset: An unbatched `tf.data.Dataset`.
    batch_size: Batch size used when iterating the dataset.

  Returns:
    An `OrderedDict` mapping metric names to scalar values.
  """
  # Reset the local variables so that the returned metrics are computed using
  # the given data. Similar to the `reset_states` method of `tf.metrics.Metric`.
  for var in model.local_variables:
    if var.initial_value is not None:
      var.assign(var.initial_value)
    else:
      var.assign(tf.zeros_like(var))

  def eval_fn(whimsy_state, batch):
    """Evaluates the model on a batch."""
    # The forward pass updates the model's local metric variables as a side
    # effect; the reduce state itself is unused.
    model.forward_pass(batch, training=False)
    return whimsy_state

  # Evaluate on the dataset.
  batched_dataset = dataset.batch(batch_size)
  dataset_reduce_fn(
      reduce_fn=eval_fn,
      dataset=batched_dataset,
      initial_state_fn=lambda: tf.constant(0))

  # Obtain the metrics.
  results = collections.OrderedDict()
  local_outputs = model.report_local_outputs()
  for name, metric in local_outputs.items():
    if isinstance(metric, list) and (len(metric) == 2):
      # Some metrics returned by `report_local_outputs()` can have two scalars:
      # one represents `sum`, and the other represents `count`. Ideally, we want
      # to return a single scalar for each metric.
      results[name] = metric[0] / metric[1]
    else:
      results[name] = metric[0] if isinstance(metric, list) else metric
  return results
def _build_personalize_fn(optimizer_fn, train_batch_size, test_batch_size):
  """Builds a personalization function given an optimizer constructor.

  Args:
    optimizer_fn: A no-arg callable returning a `tf.keras.optimizers.Optimizer`.
    train_batch_size: Batch size used when training on the client's train data.
    test_batch_size: Batch size used when evaluating on the client's test data.

  Returns:
    A `tf.function` that trains the model on `train_data`, evaluates it on
    `test_data`, and returns an `OrderedDict` of metrics.
  """
  optimizer = optimizer_fn()

  @tf.function
  def personalize_fn(model, train_data, test_data, context=None):

    def train_fn(num_examples_sum, batch):
      """Runs gradient descent on a batch."""
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch)
      grads = tape.gradient(output.loss, model.trainable_variables)
      optimizer.apply_gradients(
          zip(
              tf.nest.flatten(grads),
              tf.nest.flatten(model.trainable_variables)))
      return num_examples_sum + output.num_examples

    # Train a personalized model.
    batched_train_data = train_data.batch(train_batch_size)
    num_examples_sum = dataset_reduce_fn(
        reduce_fn=train_fn,
        dataset=batched_train_data,
        initial_state_fn=lambda: tf.constant(0))

    # For test coverage, this example uses an optional `int32` as `context`.
    if context is not None:
      num_examples_sum = num_examples_sum + context

    results = collections.OrderedDict()
    results['num_examples'] = num_examples_sum
    results['test_outputs'] = _evaluate_fn(model, test_data, test_batch_size)
    return results

  return personalize_fn
def _create_p13n_fn_dict(learning_rate):
  """Creates a dictionary containing two personalization strategies.

  The two strategies share the same SGD optimizer constructor and differ only
  in the training batch size (1 vs. 2); both evaluate with batch size 3.
  """
  sgd_fn = lambda: tf.keras.optimizers.SGD(learning_rate=learning_rate)
  return collections.OrderedDict(
      batch_size_1=lambda: _build_personalize_fn(sgd_fn, 1, 3),
      batch_size_2=lambda: _build_personalize_fn(sgd_fn, 2, 3),
  )
def _create_dataset(scale):
  """Constructs a dataset with three datapoints.

  Features are 2-D and labels 1-D; all values are multiplied by `scale` so two
  clients can share the same structure but different magnitudes.
  """
  x = np.array([[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]]) * scale
  y = np.array([[1.0], [1.0], [1.0]]) * scale
  ds = collections.OrderedDict(x=x.astype(np.float32), y=y.astype(np.float32))
  # Note: batching is not needed here as the preprocessing of dataset is done
  # inside the personalization function.
  return tf.data.Dataset.from_tensor_slices(ds)
def _create_client_input(train_scale, test_scale, context=None):
  """Constructs client datasets (and optional context) for personalization."""
  client_input = collections.OrderedDict(
      train_data=_create_dataset(train_scale),
      test_data=_create_dataset(test_scale),
  )
  if context is not None:
    client_input['context'] = context
  return client_input
def _create_zero_model_weights(model_fn):
  """Creates the model weights with all zeros."""
  # A throwaway model instance is only used to obtain the weight structure.
  whimsy_model = model_utils.enhance(model_fn())
  return tf.nest.map_structure(tf.zeros_like, whimsy_model.weights)
class PersonalizationEvalTest(test_case.TestCase, parameterized.TestCase):
  def test_failure_with_invalid_model_fn(self):
    """`model_fn` must be a callable returning a `tff.learning.Model`."""
    p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)

    with self.assertRaises(TypeError):
      # `model_fn` should be a callable.
      bad_model_fn = 6
      p13n_eval.build_personalization_eval(bad_model_fn, p13n_fn_dict,
                                           _evaluate_fn)

    with self.assertRaises(TypeError):
      # `model_fn` should be a callable that returns a `tff.learning.Model`.
      bad_model_fn = lambda: 6
      p13n_eval.build_personalization_eval(bad_model_fn, p13n_fn_dict,
                                           _evaluate_fn)
  def test_failure_with_invalid_p13n_fns(self):
    """`personalize_fn_dict` must be an OrderedDict of strategy builders."""

    def model_fn():
      return model_examples.LinearRegression(feature_dim=2)

    with self.assertRaises(TypeError):
      # `personalize_fn_dict` should be a `OrderedDict`.
      bad_p13n_fn_dict = {'a': 6}
      p13n_eval.build_personalization_eval(model_fn, bad_p13n_fn_dict,
                                           _evaluate_fn)

    with self.assertRaises(TypeError):
      # `personalize_fn_dict` should be a `OrderedDict` that maps a `string` to
      # a `callable`.
      bad_p13n_fn_dict = collections.OrderedDict(a=6)
      p13n_eval.build_personalization_eval(model_fn, bad_p13n_fn_dict,
                                           _evaluate_fn)

    with self.assertRaises(TypeError):
      # `personalize_fn_dict` should be a `OrderedDict` that maps a `string` to
      # a `callable` that when called, gives another `callable`.
      bad_p13n_fn_dict = collections.OrderedDict(x=lambda: 2)
      p13n_eval.build_personalization_eval(model_fn, bad_p13n_fn_dict,
                                           _evaluate_fn)

    with self.assertRaises(ValueError):
      # `personalize_fn_dict` should not use `baseline_metrics` as a key.
      bad_p13n_fn_dict = collections.OrderedDict(baseline_metrics=lambda: 2)
      p13n_eval.build_personalization_eval(model_fn, bad_p13n_fn_dict,
                                           _evaluate_fn)
  def test_failure_with_invalid_baseline_eval_fn(self):
    """`baseline_evaluate_fn` must be a callable."""

    def model_fn():
      return model_examples.LinearRegression(feature_dim=2)

    p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)

    with self.assertRaises(TypeError):
      # `baseline_evaluate_fn` should be a callable.
      bad_baseline_evaluate_fn = 6
      p13n_eval.build_personalization_eval(model_fn, p13n_fn_dict,
                                           bad_baseline_evaluate_fn)
  def test_success_with_directly_constructed_model(self):
    """End-to-end run with two clients and a `LinearRegression` model."""

    def model_fn():
      return model_examples.LinearRegression(feature_dim=2)

    zero_model_weights = _create_zero_model_weights(model_fn)
    p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)

    federated_p13n_eval = p13n_eval.build_personalization_eval(
        model_fn, p13n_fn_dict, _evaluate_fn)

    # Perform p13n eval on two clients: their train data are equivalent, but the
    # test data have different scales.
    results = federated_p13n_eval(zero_model_weights, [
        _create_client_input(train_scale=1.0, test_scale=1.0),
        _create_client_input(train_scale=1.0, test_scale=2.0)
    ])

    # Check if the baseline metrics are correct.
    baseline_metrics = results['baseline_metrics']
    # Number of test examples is 3 for both clients.
    self.assertAllEqual(baseline_metrics['num_examples'], [3, 3])
    # Number of test batches is 3 for both clients, because the function that
    # evaluates the baseline metrics `_evaluate_fn` uses a default batch size 1.
    self.assertAllEqual(sorted(baseline_metrics['num_batches']), [3, 3])
    # The initial weights are all zeros. The average loss can be computed as:
    # Client 1, 0.5*(1 + 1 + 1)/3 = 0.5; Client 2, 0.5*(4 + 4 + 4)/3 = 2.0.
    # Note: the order is not preserved due to `federated_sample`.
    self.assertAllEqual(sorted(baseline_metrics['loss']), [0.5, 2.0])
    # Recover which result index belongs to which client from the loss values.
    if baseline_metrics['loss'][0] == 0.5:
      client_1_idx, client_2_idx = 0, 1
    else:
      client_1_idx, client_2_idx = 1, 0

    # Check if the metrics of `batch_size_1` are correct.
    bs1_metrics = results['batch_size_1']
    # Number of training examples is 3 for both clients.
    self.assertAllEqual(bs1_metrics['num_examples'], [3, 3])
    bs1_test_outputs = bs1_metrics['test_outputs']
    # Number of test examples is also 3 for both clients.
    self.assertAllEqual(bs1_test_outputs['num_examples'], [3, 3])
    # Number of test batches is 1 for both clients since test batch size is 3.
    self.assertAllEqual(bs1_test_outputs['num_batches'], [1, 1])
    # Both clients's weights become [-3, -3, -1] after training, which gives an
    # average loss 24 for Client 1 and 88.5 for Client 2.
    self.assertAlmostEqual(bs1_test_outputs['loss'][client_1_idx], 24.0)
    self.assertAlmostEqual(bs1_test_outputs['loss'][client_2_idx], 88.5)

    # Check if the metrics of `batch_size_2` are correct.
    bs2_metrics = results['batch_size_2']
    # Number of training examples is 3 for both clients.
    self.assertAllEqual(bs2_metrics['num_examples'], [3, 3])
    bs2_test_outputs = bs2_metrics['test_outputs']
    # Number of test examples is also 3 for both clients.
    self.assertAllEqual(bs2_test_outputs['num_examples'], [3, 3])
    # Number of test batches is 1 for both clients since test batch size is 3.
    self.assertAllEqual(bs2_test_outputs['num_batches'], [1, 1])
    # Both clients' weights become [0, 0, 1] after training, which gives an
    # average loss 0 for Client 1 and 0.5 for Client 2.
    self.assertAlmostEqual(bs2_test_outputs['loss'][client_1_idx], 0.0)
    self.assertAlmostEqual(bs2_test_outputs['loss'][client_2_idx], 0.5)
  @parameterized.named_parameters(
      ('python_container',
       collections.OrderedDict(
           x=tf.TensorSpec(shape=[None, 2], dtype=tf.float32),
           y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32))),
      ('tff_struct_with_python_type',
       computation_types.StructWithPythonType(
           collections.OrderedDict(
               x=tf.TensorSpec(shape=[None, 2], dtype=tf.float32),
               y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32)),
           container_type=collections.OrderedDict)),
      ('tff_struct_type',
       computation_types.StructType(
           collections.OrderedDict(
               x=tf.TensorSpec(shape=[None, 2], dtype=tf.float32),
               y=tf.TensorSpec(shape=[None, 1], dtype=tf.float32)))),
  )
  def test_success_with_model_constructed_from_keras(self, input_spec):
    """Same end-to-end flow with a Keras model and equivalent input specs."""

    def model_fn():
      inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
      outputs = tf.keras.layers.Dense(1)(inputs)
      keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
      return keras_utils.from_keras_model(
          keras_model,
          input_spec=input_spec,
          loss=tf.keras.losses.MeanSquaredError())

    zero_model_weights = _create_zero_model_weights(model_fn)
    p13n_fn_dict = _create_p13n_fn_dict(learning_rate=0.5)
    federated_p13n_eval = p13n_eval.build_personalization_eval(
        model_fn, p13n_fn_dict, _evaluate_fn)

    # Perform p13n eval on two clients: their train data are equivalent, but the
    # test data have different scales.
    results = federated_p13n_eval(zero_model_weights, [
        _create_client_input(train_scale=1.0, test_scale=1.0),
        _create_client_input(train_scale=1.0, test_scale=2.0)
    ])

    # Check if the baseline metrics are correct.
    baseline_metrics = results['baseline_metrics']
    # The initial weights are all zeros. The MeanSquredError(MSE) is:
    # Client 1, (1 + 1 + 1)/3 = 1.0; Client 2, (4 + 4 + 4)/3 = 4.0.
    # Note: the order is not preserved due to `federated_sample`.
    self.assertAllEqual(sorted(baseline_metrics['loss']), [1.0, 4.0])

    # Check if the metrics of `batch_size_1` are correct.
    bs1_metrics = results['batch_size_1']
    # Number of training examples is 3 for both clients.
    self.assertAllEqual(bs1_metrics['num_examples'], [3, 3])
    bs1_test_outputs = bs1_metrics['test_outputs']
    # Both clients' weights become [-3, -3, -1] after training, which gives MSE
    # 48 for Client 1 and 177 for Client 2.
    self.assertAlmostEqual(sorted(bs1_test_outputs['loss']), [48.0, 177.0])

    # Check if the metrics of `batch_size_2` are correct.
    bs2_metrics = results['batch_size_2']
    # Number of training examples is 3 for both clients.
    self.assertAllEqual(bs2_metrics['num_examples'], [3, 3])
    bs2_test_outputs = bs2_metrics['test_outputs']
    # Both clients' weights become [0, 0, 1] after training, which gives MSE 0
    # for Client 1 and 1.0 for Client 2.
    self.assertAlmostEqual(sorted(bs2_test_outputs['loss']), [0.0, 1.0])
def test_failure_with_batched_datasets(self):
def model_fn():
return model_examples.LinearRegression(feature_dim=2)
zero_model_weights = _create_zero_model_weights(model_fn)
p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)
federated_p13n_eval = p13n_eval.build_personalization_eval(
model_fn, p13n_fn_dict, _evaluate_fn)
with self.assertRaises(TypeError):
# client_input should not have batched datasets.
bad_client_input = collections.OrderedDict(
train_data=_create_dataset(scale=1.0).batch(1),
test_data=_create_dataset(scale=1.0).batch(1))
federated_p13n_eval(zero_model_weights, [bad_client_input])
def test_failure_with_invalid_context_type(self):
def model_fn():
return model_examples.LinearRegression(feature_dim=2)
zero_model_weights = _create_zero_model_weights(model_fn)
p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)
with self.assertRaises(TypeError):
# `tf.int32` is not a `tff.Type`.
bad_context_tff_type = tf.int32
federated_p13n_eval = p13n_eval.build_personalization_eval(
model_fn,
p13n_fn_dict,
_evaluate_fn,
context_tff_type=bad_context_tff_type)
with self.assertRaises(TypeError):
# `context_tff_type` is provided but `context` is not provided.
context_tff_type = computation_types.to_type(tf.int32)
federated_p13n_eval = p13n_eval.build_personalization_eval(
model_fn,
p13n_fn_dict,
_evaluate_fn,
context_tff_type=context_tff_type)
federated_p13n_eval(zero_model_weights, [
_create_client_input(train_scale=1.0, test_scale=1.0, context=None),
_create_client_input(train_scale=1.0, test_scale=2.0, context=None)
])
def test_success_with_valid_context(self):
def model_fn():
return model_examples.LinearRegression(feature_dim=2)
zero_model_weights = _create_zero_model_weights(model_fn)
p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)
# Build the p13n eval with an extra `context` argument.
context_tff_type = computation_types.to_type(tf.int32)
federated_p13n_eval = p13n_eval.build_personalization_eval(
model_fn, p13n_fn_dict, _evaluate_fn, context_tff_type=context_tff_type)
# Perform p13n eval on two clients with different `context` values.
results = federated_p13n_eval(zero_model_weights, [
_create_client_input(train_scale=1.0, test_scale=1.0, context=2),
_create_client_input(train_scale=1.0, test_scale=2.0, context=5)
])
bs1_metrics = results['batch_size_1']
bs2_metrics = results['batch_size_2']
# Number of training examples is `3 + context` for both clients.
# Note: the order is not preserved due to `federated_sample`, but the order
# should be consistent across different personalization strategies.
self.assertAllEqual(sorted(bs1_metrics['num_examples']), [5, 8])
self.assertAllEqual(bs1_metrics['num_examples'],
bs2_metrics['num_examples'])
def test_failure_with_invalid_sample_size(self):
def model_fn():
return model_examples.LinearRegression(feature_dim=2)
p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)
with self.assertRaises(TypeError):
# `max_num_clients` should be an `int`.
bad_num_clients = 1.0
p13n_eval.build_personalization_eval(
model_fn, p13n_fn_dict, _evaluate_fn, max_num_clients=bad_num_clients)
with self.assertRaises(ValueError):
# `max_num_clients` should be a positive `int`.
bad_num_clients = 0
p13n_eval.build_personalization_eval(
model_fn, p13n_fn_dict, _evaluate_fn, max_num_clients=bad_num_clients)
def test_success_with_small_sample_size(self):
def model_fn():
return model_examples.LinearRegression(feature_dim=2)
zero_model_weights = _create_zero_model_weights(model_fn)
p13n_fn_dict = _create_p13n_fn_dict(learning_rate=1.0)
federated_p13n_eval = p13n_eval.build_personalization_eval(
model_fn, p13n_fn_dict, _evaluate_fn, max_num_clients=1)
# Perform p13n eval on two clients.
results = federated_p13n_eval(zero_model_weights, [
_create_client_input(train_scale=1.0, test_scale=1.0),
_create_client_input(train_scale=1.0, test_scale=2.0)
])
# The results should only contain metrics from one client.
self.assertAllEqual(len(results['baseline_metrics']['loss']), 1)
self.assertAllEqual(len(results['batch_size_1']['test_outputs']['loss']), 1)
self.assertAllEqual(len(results['batch_size_2']['test_outputs']['loss']), 1)
if __name__ == '__main__':
  # Run TFF computations in this test with the local (single-machine)
  # execution context before handing control to the test runner.
  execution_contexts.set_local_execution_context()
  test_case.main()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.