hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21a8ff372a94dc4b17ebbcb16fdf9c8a605c4286 | 1,141 | py | Python | archive/gdax-bitcoin.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | [
"MIT"
] | 1 | 2021-08-09T07:22:25.000Z | 2021-08-09T07:22:25.000Z | archive/gdax-bitcoin.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | [
"MIT"
] | null | null | null | archive/gdax-bitcoin.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file = Path(__file__).parents[0] / 'data' / 'gdax-bitcoin.csv'
if __name__ == '__main__':
    # Busy-wait scheduler: poll the wall clock twice per second and fall
    # through (to the main() call below) whenever the current second is a
    # multiple of 5.
    # NOTE(review): in this filtered copy main() is undefined — the dataset's
    # remove_function_no_docstring filter stripped it; see the original copy.
    while True:
        now = datetime.datetime.now()
        while (now.second % 5):
            now = datetime.datetime.now()
            print(now.second)
            time.sleep(0.5)
main() | 28.525 | 87 | 0.617003 | #!/usr/bin/env python3
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file = Path(__file__).parents[0] / 'data' / 'gdax-bitcoin.csv'
def request(url):
    """Fetch a JSON ticker document from *url* and return (price, volume)."""
    with urllib.request.urlopen(url) as response:
        payload = json.loads(response.read().decode())
    print(payload)
    return payload['price'], payload['volume']
def main():
    """Sample the GDAX BTC-USD ticker once and append it to the CSV file.

    Writes one line: `<unix_timestamp>,<price>,<volume>`. On a failed
    request the price/volume fields are written as `None`.
    """
    current_time = datetime.datetime.now(datetime.timezone.utc)
    unix_timestamp = current_time.timestamp()
    print(int(unix_timestamp))
    url = 'https://api.gdax.com/products/BTC-USD/ticker'
    try:
        price, volume = request(url)
    except Exception:
        # Network / JSON failure: record the sample as missing data.
        price, volume = None, None
    # Bug fix: only round when a price was actually fetched — the original
    # called round(float(price), 2) unconditionally, raising TypeError on None.
    if price is not None:
        price = round(float(price), 2)
    print('price: ' + str(price))
    print('volume: ' + str(volume))
    with open(csv_file, 'a') as f:
        f.write(str(int(unix_timestamp)) + ',' + str(price) + ',' + str(volume) + '\n')
if __name__ == '__main__':
    # Busy-wait scheduler: poll the wall clock twice per second and fall
    # through (to the main() call below) whenever the current second is a
    # multiple of 5, so samples land on 5-second boundaries.
    while True:
        now = datetime.datetime.now()
        while (now.second % 5):
            now = datetime.datetime.now()
            print(now.second)
            time.sleep(0.5)
main() | 672 | 0 | 46 |
36b92bb1526f3a9da7363ca52f98832014813eaf | 93 | py | Python | codes_auto/251.flatten-2d-vector.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/251.flatten-2d-vector.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/251.flatten-2d-vector.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=251 lang=python3
#
# [251] flatten-2d-vector
#
None
# @lc code=end | 13.285714 | 41 | 0.666667 | #
# @lc app=leetcode.cn id=251 lang=python3
#
# [251] flatten-2d-vector
#
None
# @lc code=end | 0 | 0 | 0 |
58ca5ff75d44f47faac66456474cd35e0f1c0fdb | 1,796 | py | Python | addons/calendar/models/mail_activity.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/calendar/models/mail_activity.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/calendar/models/mail_activity.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields, tools, _
| 39.043478 | 119 | 0.654788 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields, tools, _
class MailActivityType(models.Model):
    """Extend mail.activity.type with a dedicated 'Meeting' category."""
    _inherit = "mail.activity.type"

    # New selection value; activities of this category are meant to be
    # backed by a calendar event (see MailActivity below).
    category = fields.Selection(selection_add=[('meeting', 'Meeting')])
class MailActivity(models.Model):
    """Extend mail.activity with a link to a calendar event."""
    _inherit = "mail.activity"

    # Meeting backing this activity; cascade so deleting the event
    # removes the link.
    calendar_event_id = fields.Many2one('calendar.event', string="Calendar Meeting", ondelete='cascade')

    def action_create_calendar_event(self):
        """Open the calendar-event form view pre-filled from this activity.

        Returns the ir.actions action dict with defaults (name, description,
        linked activity ids, ...) injected through the context.
        """
        self.ensure_one()
        action = self.env["ir.actions.actions"]._for_xml_id("calendar.action_calendar_event")
        action['context'] = {
            'default_activity_type_id': self.activity_type_id.id,
            'default_res_id': self.env.context.get('default_res_id'),
            'default_res_model': self.env.context.get('default_res_model'),
            'default_name': self.summary or self.res_name,
            # Activity notes are HTML; strip markup for the event description.
            'default_description': self.note and tools.html2plaintext(self.note).strip() or '',
            'default_activity_ids': [(6, 0, self.ids)],
        }
        return action

    def _action_done(self, feedback=False, attachment_ids=False):
        """Mark activities done and append the feedback to each linked event.

        Returns the (messages, activities) pair produced by super().
        """
        # Capture the linked events before super() (which may unlink the
        # activities and thereby lose the relation).
        events = self.mapped('calendar_event_id')
        messages, activities = super(MailActivity, self)._action_done(feedback=feedback, attachment_ids=attachment_ids)
        if feedback:
            for event in events:
                description = event.description
                description = '%s\n%s%s' % (description or '', _("Feedback: "), feedback)
                event.write({'description': description})
        return messages, activities

    def unlink_w_meeting(self):
        """Delete the activities together with their linked calendar events."""
        events = self.mapped('calendar_event_id')
        res = self.unlink()
        events.unlink()
        return res
| 1,246 | 355 | 46 |
4216908a3e0c9e6669d47c63d15493c14f639eda | 3,861 | py | Python | tavolo/embeddings.py | Steap/tavolo | 4af5f645b5ef4399c3746a3d930c5c8fc0892fb1 | [
"MIT"
] | null | null | null | tavolo/embeddings.py | Steap/tavolo | 4af5f645b5ef4399c3746a3d930c5c8fc0892fb1 | [
"MIT"
] | null | null | null | tavolo/embeddings.py | Steap/tavolo | 4af5f645b5ef4399c3746a3d930c5c8fc0892fb1 | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np
import tensorflow as tf
class PositionalEncoding(tf.keras.layers.Layer):
    """
    Create a positional encoding layer, usually added on top of an embedding layer.
    Embeds information about the position of the elements using the formula

    .. math::
        PE[pos,2i]=sin\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)

        PE[pos,2i+1]=cos\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)

    The resulting embedding gets added (point-wise) to the input.

    Arguments
    ---------

    - `max_sequence_length` (``int``): Maximum sequence length of input
    - `embedding_dim` (``int``): Dimensionality of the of the input's last dimension
    - `normalize_factor` (``float``): Normalize factor
    - `name` (``str``): Layer name

    Input shape
    -----------

    (batch_size, time_steps, channels) where time_steps equals to the ``max_sequence_length`` and channels to ``embedding_dim``

    Output shape
    ------------

    Same shape as input.

    Examples
    --------

    .. code-block:: python3

        import tensorflow as tf
        import tavolo as tvl

        model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, 8, input_length=max_sequence_length),
                                     tvl.embeddings.PositionalEncoding(max_sequence_length=max_sequence_length,
                                                                       embedding_dim=8)])  # Add positional encoding

    References
    ----------
    `Attention Is All You Need`_

    .. _Attention Is All You Need:
        https://arxiv.org/abs/1706.03762
    """

    def __init__(self,
                 max_sequence_length: int,
                 embedding_dim: int,
                 normalize_factor: Optional[float] = 10000,
                 name: Optional[str] = 'positional_encoding',
                 **kwargs):
        """
        :param max_sequence_length: Maximum sequence length of input
        :param embedding_dim: Dimensionality of the of the input's last dimension
        :param normalize_factor: Normalize factor
        :param name: Layer name
        """
        super().__init__(name=name, **kwargs)

        # Error checking
        if max_sequence_length < 1:
            raise ValueError(
                'max_sequence_length must be greater than zero. (value provided {})'.format(max_sequence_length))
        if embedding_dim < 1:
            # Bug fix: report the offending embedding_dim value — the original
            # message interpolated max_sequence_length here.
            raise ValueError(
                'embedding_dim must be greater than zero. (value provided {})'.format(embedding_dim))

        # First part of the PE function: sin and cos argument
        self.positional_encoding = np.array([
            [pos / np.power(normalize_factor, 2. * i / embedding_dim) for i in range(embedding_dim)]
            for pos in range(max_sequence_length)])

        # Second part, apply the cosine to even columns and sin to odds.
        self.positional_encoding[:, 0::2] = np.sin(self.positional_encoding[:, 0::2])
        self.positional_encoding[:, 1::2] = np.cos(self.positional_encoding[:, 1::2])

        # Stored as a non-trainable variable so it serializes with the layer.
        self.positional_encoding = self.add_variable(
            'embedding_matrix',
            shape=self.positional_encoding.shape,
            initializer=tf.keras.initializers.Constant(self.positional_encoding),
            trainable=False,
            dtype=self.dtype)
| 33 | 127 | 0.612018 | from typing import Optional
import numpy as np
import tensorflow as tf
class PositionalEncoding(tf.keras.layers.Layer):
    """
    Create a positional encoding layer, usually added on top of an embedding layer.
    Embeds information about the position of the elements using the formula

    .. math::
        PE[pos,2i]=sin\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)

        PE[pos,2i+1]=cos\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)

    The resulting embedding gets added (point-wise) to the input.

    Arguments
    ---------

    - `max_sequence_length` (``int``): Maximum sequence length of input
    - `embedding_dim` (``int``): Dimensionality of the of the input's last dimension
    - `normalize_factor` (``float``): Normalize factor
    - `name` (``str``): Layer name

    Input shape
    -----------

    (batch_size, time_steps, channels) where time_steps equals to the ``max_sequence_length`` and channels to ``embedding_dim``

    Output shape
    ------------

    Same shape as input.

    Examples
    --------

    .. code-block:: python3

        import tensorflow as tf
        import tavolo as tvl

        model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, 8, input_length=max_sequence_length),
                                     tvl.embeddings.PositionalEncoding(max_sequence_length=max_sequence_length,
                                                                       embedding_dim=8)])  # Add positional encoding

    References
    ----------
    `Attention Is All You Need`_

    .. _Attention Is All You Need:
        https://arxiv.org/abs/1706.03762
    """

    def __init__(self,
                 max_sequence_length: int,
                 embedding_dim: int,
                 normalize_factor: Optional[float] = 10000,
                 name: Optional[str] = 'positional_encoding',
                 **kwargs):
        """
        :param max_sequence_length: Maximum sequence length of input
        :param embedding_dim: Dimensionality of the of the input's last dimension
        :param normalize_factor: Normalize factor
        :param name: Layer name
        """
        super().__init__(name=name, **kwargs)

        # Error checking
        if max_sequence_length < 1:
            raise ValueError(
                'max_sequence_length must be greater than zero. (value provided {})'.format(max_sequence_length))
        if embedding_dim < 1:
            # Bug fix: report the offending embedding_dim value — the original
            # message interpolated max_sequence_length here.
            raise ValueError(
                'embedding_dim must be greater than zero. (value provided {})'.format(embedding_dim))

        # First part of the PE function: sin and cos argument
        self.positional_encoding = np.array([
            [pos / np.power(normalize_factor, 2. * i / embedding_dim) for i in range(embedding_dim)]
            for pos in range(max_sequence_length)])

        # Second part, apply the cosine to even columns and sin to odds.
        self.positional_encoding[:, 0::2] = np.sin(self.positional_encoding[:, 0::2])
        self.positional_encoding[:, 1::2] = np.cos(self.positional_encoding[:, 1::2])

        # Stored as a non-trainable variable so it serializes with the layer.
        self.positional_encoding = self.add_variable(
            'embedding_matrix',
            shape=self.positional_encoding.shape,
            initializer=tf.keras.initializers.Constant(self.positional_encoding),
            trainable=False,
            dtype=self.dtype)

    def compute_mask(self, inputs, mask=None):
        """Propagate the incoming mask unchanged."""
        return mask

    def call(self, inputs,
             mask: Optional[tf.Tensor] = None,
             **kwargs) -> tf.Tensor:
        """Add the positional encoding; masked positions keep their input value."""
        output = inputs + self.positional_encoding
        if mask is not None:
            output = tf.where(tf.tile(tf.expand_dims(mask, axis=-1), multiples=[1, 1, inputs.shape[-1]]), output, inputs)
        return output # shape=(batch_size, time_steps, channels)
| 397 | 0 | 54 |
75bc9f1808d3923afaf6c3b89c04c2116d84a380 | 767 | py | Python | AoC2021/6/main.py | SCBbestof/AoC | 861f0b43e2c8d1b8580dbf53aa7bd5556c7c75ad | [
"MIT"
] | null | null | null | AoC2021/6/main.py | SCBbestof/AoC | 861f0b43e2c8d1b8580dbf53aa7bd5556c7c75ad | [
"MIT"
] | null | null | null | AoC2021/6/main.py | SCBbestof/AoC | 861f0b43e2c8d1b8580dbf53aa7bd5556c7c75ad | [
"MIT"
] | null | null | null | with open("data.in") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
lines = lines[0].split(",")
arr = []
for ln in lines:
arr.append(int(ln))
for i in range(80):
for j in range(0, len(arr)):
if arr[j] <= 0:
arr[j] = 6
arr.append(8)
else:
arr[j] -= 1
print("1: " + str(len(arr)))
file.seek(0)
lines = file.readlines()
lines = [line.rstrip() for line in lines]
lines = lines[0].split(",")
lines = list(map(int, lines))
fish = [lines.count(i) for i in range(9)]
for i in range(256):
num = fish.pop(0)
fish[6] += num
fish.append(num)
print("2: " + str(sum(fish))) | 23.242424 | 45 | 0.482399 | with open("data.in") as file:
    # (continuation of the `with open("data.in")` block opened above)
    lines = file.readlines()
    lines = [line.rstrip() for line in lines]
    lines = lines[0].split(",")
    # Part 1: one list entry per fish, simulated day by day for 80 days.
    arr = []
    for ln in lines:
        arr.append(int(ln))
    for i in range(80):
        # range(len(arr)) is evaluated once per day, so the 8s appended
        # below (newborn fish) are not ticked until the following day.
        for j in range(0, len(arr)):
            if arr[j] <= 0:
                arr[j] = 6
                arr.append(8)
            else:
                arr[j] -= 1
    print("1: " + str(len(arr)))
    # Part 2: re-read the input and switch to counting fish per timer
    # value (0..8); 256 days of exponential growth won't fit in a list.
    file.seek(0)
    lines = file.readlines()
    lines = [line.rstrip() for line in lines]
    lines = lines[0].split(",")
    lines = list(map(int, lines))
    fish = [lines.count(i) for i in range(9)]
    for i in range(256):
        # Daily tick: bucket 0 spawns; parents reset to 6, newborns start at 8.
        num = fish.pop(0)
        fish[6] += num
        fish.append(num)
print("2: " + str(sum(fish))) | 0 | 0 | 0 |
a747fcf6a611afaccd0b841157ac9e2dbef3c8e2 | 1,403 | py | Python | chrome/test/enterprise/e2e/policy/password_manager_enabled/password_manager_enabled.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/test/enterprise/e2e/policy/password_manager_enabled/password_manager_enabled.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/test/enterprise/e2e/policy/password_manager_enabled/password_manager_enabled.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class PasswordManagerEnabledTest(ChromeEnterpriseTestCase):
"""Test the PasswordManagerEnabled policy.
See https://cloud.google.com/docs/chrome-enterprise/policies/?policy=PasswordManagerEnabled"""
@before_all
@test
@test
| 32.627907 | 96 | 0.75196 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class PasswordManagerEnabledTest(ChromeEnterpriseTestCase):
"""Test the PasswordManagerEnabled policy.
See https://cloud.google.com/docs/chrome-enterprise/policies/?policy=PasswordManagerEnabled"""
@before_all
def setup(self):
self.InstallChrome('client2012')
self.InstallWebDriver('client2012')
def isPasswordManagerEnabled(self):
dir = os.path.dirname(os.path.abspath(__file__))
output = self.RunWebDriverTest(
'client2012',
os.path.join(dir, 'password_manager_enabled_webdriver_test.py'))
return "TRUE" in output
@test
def test_PasswordManagerDisabled(self):
self.SetPolicy('win2012-dc', 'PasswordManagerEnabled', 0, 'DWORD')
self.RunCommand('client2012', 'gpupdate /force')
enabled = self.isPasswordManagerEnabled()
self.assertFalse(enabled)
@test
def test_PasswordManagerEnabled(self):
self.SetPolicy('win2012-dc', 'PasswordManagerEnabled', 1, 'DWORD')
self.RunCommand('client2012', 'gpupdate /force')
enabled = self.isPasswordManagerEnabled()
self.assertTrue(enabled)
| 734 | 0 | 97 |
b91533dbd161e7d291e2f8112ae3e97941cdb235 | 299 | py | Python | reverse_client/connection_controller/connection_controller.py | tchar/webshell-client | 29612488cf7ef59fa67db732a10c3396407ff154 | [
"MIT"
] | 1 | 2021-12-07T22:17:18.000Z | 2021-12-07T22:17:18.000Z | reverse_client/connection_controller/connection_controller.py | tchar/webshell-client | 29612488cf7ef59fa67db732a10c3396407ff154 | [
"MIT"
] | null | null | null | reverse_client/connection_controller/connection_controller.py | tchar/webshell-client | 29612488cf7ef59fa67db732a10c3396407ff154 | [
"MIT"
] | null | null | null | from abc import abstractmethod
| 15.736842 | 36 | 0.752508 | from abc import abstractmethod
class ConnectionController():
    """Abstract base class describing a connection's request/close contract."""
    # NOTE(review): without metaclass=ABCMeta (or inheriting abc.ABC) the
    # @abstractmethod markers below are not enforced at instantiation time.
    def __init__(self, interactive):
        # Stored privately; exposed read-only through the `interactive` property.
        self._interactive = interactive
    @property
    def interactive(self):
        """Whether this controller runs in interactive mode (read-only)."""
        return self._interactive
    @abstractmethod
    def request(self, *args, **kwargs):
        """Issue a request over this connection; subclasses must implement."""
        pass
@abstractmethod
def close(self):
pass | 96 | 149 | 23 |
9a71ea4cabd3ecdbaad8ef400436428f59cd7dd3 | 10,921 | py | Python | source/learning/model.py | OmarSayedMostafa/pytorch-upper-Maxilla-and-lower-Mandible-jaws-segmentation | 0ed1da5d364f0dbac72d1b3636d0da0abac19e4d | [
"RSA-MD"
] | 1 | 2022-03-19T11:02:28.000Z | 2022-03-19T11:02:28.000Z | source/learning/model.py | OmarSayedMostafa/pytorch-upper-Maxilla-and-lower-Mandible-jaws-segmentation | 0ed1da5d364f0dbac72d1b3636d0da0abac19e4d | [
"RSA-MD"
] | null | null | null | source/learning/model.py | OmarSayedMostafa/pytorch-upper-Maxilla-and-lower-Mandible-jaws-segmentation | 0ed1da5d364f0dbac72d1b3636d0da0abac19e4d | [
"RSA-MD"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.nn as nn
from collections import OrderedDict
class double_conv(nn.Module):
    # NOTE(review): filtered stub — the body was stripped by the dataset's
    # remove_function_no_docstring filter; the full implementation appears
    # in the original_content copy of this file further down.
    '''(conv => BN => ReLU) * 2'''
__all__ = ["DeepLabV3"]


class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Arguments:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
    # All behaviour is inherited from _SimpleSegmentationModel; this subclass
    # exists only to give the composition a public name.
    pass
| 34.560127 | 122 | 0.589232 | import torch
import torch.nn.functional as F
import torch.nn as nn
from collections import OrderedDict
class UNet(nn.Module):
    """Standard U-Net: four down-sampling stages, four up-sampling stages
    with skip connections, and a 1x1 output convolution."""

    def __init__(self, n_classes_input, n_classes_output, batchnorm = False, dropout_p=0.0):
        super(UNet, self).__init__()
        # Encoder (contracting path). Attribute names are kept as-is so
        # existing state_dict checkpoints keep loading.
        self.inc = inconv(n_classes_input, 64, batchnorm)
        self.down1 = down(64, 128, batchnorm)
        self.down2 = down(128, 256, batchnorm)
        self.down3 = down(256, 512, batchnorm)
        self.down4 = down(512, 512, batchnorm)
        # Registered but currently unused in forward() (application was
        # commented out in the original).
        self.dropout = nn.Dropout2d(p=dropout_p)
        # Decoder (expansive path); in-channels include the skip concat.
        self.up1 = up(1024, 256, batchnorm)
        self.up2 = up(512, 128, batchnorm)
        self.up3 = up(256, 64, batchnorm)
        self.up4 = up(128, 64, batchnorm)
        self.outc = outconv(64, n_classes_output)

    def forward(self, x):
        """Run the encoder, then the decoder with skip connections."""
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottom = self.down4(skip4)
        out = self.up1(bottom, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)
class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2'''

    def __init__(self, in_ch, out_ch, bn):
        super(double_conv, self).__init__()
        # Two identical conv stages; only the first conv changes channel
        # count. Layer order (and thus state_dict indices) matches the
        # original hand-written Sequential exactly.
        stages = []
        for conv_in in (in_ch, out_ch):
            stages.append(nn.Conv2d(conv_in, out_ch, 3, padding=1))
            if bn:
                stages.append(nn.BatchNorm2d(out_ch))
            stages.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class inconv(nn.Module):
    """Input stage of the U-Net: one double_conv block, no down-sampling."""
    def __init__(self, in_ch, out_ch, bn):
        super(inconv, self).__init__()
        # bn toggles batch normalisation inside the double_conv block.
        self.conv = double_conv(in_ch, out_ch, bn)
    def forward(self, x):
        x = self.conv(x)
        return x
class down(nn.Module):
    """Down-sampling stage: 2x2 max-pool followed by a double_conv block."""
    def __init__(self, in_ch, out_ch, bn):
        super(down, self).__init__()
        # Sequential fixes the state_dict layout: mpconv.0 = pool, mpconv.1 = convs.
        self.mpconv = nn.Sequential(
            nn.MaxPool2d(2),
            double_conv(in_ch, out_ch, bn)
        )
    def forward(self, x):
        x = self.mpconv(x)
        return x
class up(nn.Module):
    """Up-sampling stage: upscale, pad to the skip tensor's size, concat,
    then run a double_conv block."""

    def __init__(self, in_ch, out_ch, bn, bilinear=True):
        super(up, self).__init__()
        # Bilinear upsampling has no weights; the transposed-conv
        # alternative is learnable but costs extra memory.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch, bn)

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Tensors are NCHW; pad the upsampled map so it matches the skip
        # map (their sizes can differ by one after odd-sized pooling).
        height_gap = x2.size()[2] - upsampled.size()[2]
        width_gap = x2.size()[3] - upsampled.size()[3]
        upsampled = F.pad(upsampled, (width_gap // 2, width_gap - width_gap // 2,
                                      height_gap // 2, height_gap - height_gap // 2))
        # Channel-wise concatenation of skip features and upsampled features.
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class _SimpleSegmentationModel(nn.Module):
    """Backbone + dense classifier; logits are upsampled back to input size."""
    __constants__ = ['aux_classifier']

    def __init__(self, backbone, classifier, aux_classifier=None):
        super(_SimpleSegmentationModel, self).__init__()
        self.backbone = backbone
        self.classifier = classifier
        self.aux_classifier = aux_classifier

    def forward(self, x):
        spatial_size = x.shape[-2:]
        # contract: the backbone returns a dict of feature tensors,
        # keyed "out" (and "aux" when an auxiliary classifier is used)
        features = self.backbone(x)
        result = OrderedDict()
        logits = self.classifier(features["out"])
        result["out"] = F.interpolate(logits, size=spatial_size, mode='bilinear', align_corners=False)
        if self.aux_classifier is not None:
            aux_logits = self.aux_classifier(features["aux"])
            result["aux"] = F.interpolate(aux_logits, size=spatial_size, mode='bilinear', align_corners=False)
        return result
__all__ = ["DeepLabV3"]


class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Arguments:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
    # All behaviour is inherited from _SimpleSegmentationModel; this subclass
    # exists only to give the composition a public name.
    pass
class DeepLabHead(nn.Sequential):
    """DeepLabV3 classification head: ASPP, a 3x3 conv block, then a 1x1
    projection to per-class logits."""

    def __init__(self, in_channels, num_classes):
        stages = [
            ASPP(in_channels, [12, 24, 36]),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1),
        ]
        super(DeepLabHead, self).__init__(*stages)
class ASPPConv(nn.Sequential):
    """ASPP branch: 3x3 atrous convolution -> BatchNorm -> ReLU.

    padding == dilation keeps the spatial size unchanged for a 3x3 kernel.
    """

    def __init__(self, in_channels, out_channels, dilation):
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
class ASPPPooling(nn.Sequential):
    """ASPP image-level branch: global average pool, 1x1 conv, BN, ReLU,
    then upsample back to the input resolution."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    def forward(self, x):
        original_size = x.shape[-2:]
        pooled = x
        for stage in self:
            pooled = stage(pooled)
        # Restore the spatial resolution so this branch can be concatenated
        # with the convolutional ASPP branches.
        return F.interpolate(pooled, size=original_size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: five parallel branches (1x1 conv,
    three atrous convs, image pooling) concatenated and projected back down."""

    def __init__(self, in_channels, atrous_rates):
        super(ASPP, self).__init__()
        out_channels = 256
        rate1, rate2, rate3 = tuple(atrous_rates)
        # Branch order matters for state_dict keys: 1x1 conv first, then the
        # three atrous branches, then the pooling branch.
        branches = [
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU()),
            ASPPConv(in_channels, out_channels, rate1),
            ASPPConv(in_channels, out_channels, rate2),
            ASPPConv(in_channels, out_channels, rate3),
            ASPPPooling(in_channels, out_channels),
        ]
        self.convs = nn.ModuleList(branches)

        # Fuse the five concatenated branches back down to out_channels.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5))

    def forward(self, x):
        branch_outputs = [branch(x) for branch in self.convs]
        return self.project(torch.cat(branch_outputs, dim=1))
class outconv(nn.Module):
    """Final 1x1 convolution mapping feature channels to per-class logits."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.conv(x)
def group_std(x, groups = 32, eps = 1e-5):
    """Group standard deviation of an NCHW tensor, broadcast to x's shape.

    Note: the reshape requires the channel count to be divisible by `groups`.
    """
    N, C, H, W = x.size()
    x = torch.reshape(x, (N, groups, C // groups, H, W))
    var = torch.var(x, dim = (2, 3, 4), keepdim = True).expand_as(x)
    return torch.reshape(torch.sqrt(var + eps), (N, C, H, W))


def instance_std(x, eps = 1e-5):
    """Per-sample, per-channel standard deviation over the spatial dims.

    Bug fix: EvoNorm's B0 branch called instance_std(), which was never
    defined anywhere, so that code path raised NameError at runtime.
    """
    var = torch.var(x, dim = (2, 3), keepdim = True).expand_as(x)
    return torch.sqrt(var + eps)


class EvoNorm(nn.Module):
    """EvoNorm S0/B0 fused normalization-activation layer (arXiv:2004.02967)."""

    def __init__(self, input, non_linear = True, version = 'S0', momentum = 0.9, eps = 1e-5, training = True):
        """
        :param input: channel count of the expected NCHW input
        :param non_linear: apply the fused non-linearity (else affine only)
        :param version: 'S0' (group-std based) or 'B0' (batch/instance-std based)
        :param momentum: running-variance momentum for the B0 variant
        :param eps: numerical stabilizer added to every variance
        :param training: initial training-mode flag (overrides the nn.Module default)
        """
        super(EvoNorm, self).__init__()
        self.non_linear = non_linear
        self.version = version
        self.training = training
        self.momentum = momentum
        self.eps = eps
        if self.version not in ['B0', 'S0']:
            raise ValueError("Invalid EvoNorm version")
        self.insize = input
        # Learnable per-channel affine parameters.
        self.gamma = nn.Parameter(torch.ones(1, self.insize, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, self.insize, 1, 1))
        if self.non_linear:
            self.v = nn.Parameter(torch.ones(1,self.insize,1,1))
        self.register_buffer('running_var', torch.ones(1, self.insize, 1, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Reset the running variance buffer to its initial value of one."""
        self.running_var.fill_(1)

    def forward(self, x):
        if x.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(x.dim()))
        if self.version == 'S0':
            if self.non_linear:
                # x * sigmoid(v*x), normalized by the group std.
                num = x * torch.sigmoid(self.v * x)
                return num / group_std(x, eps = self.eps) * self.gamma + self.beta
            else:
                return x * self.gamma + self.beta
        if self.version == 'B0':
            if self.training:
                var = torch.var(x, dim = (0, 2, 3), unbiased = False, keepdim = True).reshape(1, x.size(1), 1, 1)
                with torch.no_grad():
                    self.running_var.copy_(self.momentum * self.running_var + (1 - self.momentum) * var)
            else:
                var = self.running_var
            if self.non_linear:
                # Previously raised NameError: instance_std was undefined.
                den = torch.max((var+self.eps).sqrt(), self.v * x + instance_std(x, eps = self.eps))
                return x / den * self.gamma + self.beta
            else:
                return x * self.gamma + self.beta
def convert_bn_to_instancenorm(model):
    """Recursively replace every nn.BatchNorm2d in *model* with nn.InstanceNorm2d."""
    for name, module in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            setattr(model, name, nn.InstanceNorm2d(module.num_features))
        else:
            # Not a BatchNorm leaf: descend into the submodule.
            convert_bn_to_instancenorm(module)
def convert_bn_to_evonorm(model):
    """Recursively swap nn.BatchNorm2d for EvoNorm and drop nn.ReLU layers.

    ReLU modules are replaced by nn.Identity because EvoNorm carries its own
    non-linearity.
    """
    for name, module in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            setattr(model, name, EvoNorm(module.num_features))
        elif isinstance(module, nn.ReLU):
            setattr(model, name, nn.Identity())
        else:
            convert_bn_to_evonorm(module)
def convert_bn_to_groupnorm(model, num_groups=32):
    """Recursively replace every nn.BatchNorm2d in *model* with nn.GroupNorm."""
    for name, module in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            setattr(model, name, nn.GroupNorm(num_groups=num_groups, num_channels=module.num_features))
        else:
            convert_bn_to_groupnorm(module, num_groups=num_groups)
| 8,914 | 160 | 905 |
3db3d36bceb49d0c9b8c6d1f75a9539136a2a20f | 7,260 | py | Python | app/recipe/views.py | igndukwe/recipe-app-api | 8b0554e7d15fdab8c0aa99e7b3dcb56187998ceb | [
"MIT"
] | null | null | null | app/recipe/views.py | igndukwe/recipe-app-api | 8b0554e7d15fdab8c0aa99e7b3dcb56187998ceb | [
"MIT"
] | null | null | null | app/recipe/views.py | igndukwe/recipe-app-api | 8b0554e7d15fdab8c0aa99e7b3dcb56187998ceb | [
"MIT"
] | null | null | null |
# We are goint to be creating a viewset
# and basing it of the combination of generic viewset
# and we are specifically going to use the list model mixins
# > A django rest frameworke feature
# where you can pull in different parts of a viewset
# that we want to use for our application
# > so we only want to take the list model function
# we do not want to the create, update, delete functions
# > we can achive this be a combination of the
# generic viewset and the list model mixins
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# import the Tag model class
from core.models import Tag, Ingredient, Recipe
# import the serializer
from recipe import serializers
# Create your views here.
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
                            mixins.ListModelMixin,
                            mixins.CreateModelMixin,):
    """Base viewset for user owned recipe attributes (tags, ingredients).

    Only the list and create actions are exposed — update/delete mixins are
    deliberately not included.
    """
    # Every endpoint requires a valid token-authenticated user.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Return objects for the current authenticated user only.

        Overrides ListModelMixin's queryset access: `self.queryset` is the
        subclass's full table queryset, narrowed here to the requesting
        user and ordered by name descending.
        """
        return self.queryset.filter(user=self.request.user).order_by('-name')

    def perform_create(self, serializer):
        """Hook into CreateModelMixin: save the validated serializer with
        the requesting user set as the owner."""
        serializer.save(user=self.request.user)
# Create your views here.
class TagViewSet(BaseRecipeAttrViewSet):
    """Manage tags in the database"""
    # Full table queryset; BaseRecipeAttrViewSet narrows it per user.
    queryset = Tag.objects.all()
    # Serializer used for both listing and creating tags.
    serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
    """Manage ingredients in the database"""
    # Full table queryset; BaseRecipeAttrViewSet narrows it per user.
    queryset = Ingredient.objects.all()
    # Serializer used for both listing and creating ingredients.
    serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for recipes, restricted to the requesting user.

    Supports optional filtering by comma-separated tag / ingredient id
    lists via query parameters, a dedicated detail serializer for
    retrieval, and an image-upload action on the detail route.
    """
    serializer_class = serializers.RecipeSerializer
    queryset = Recipe.objects.all()
    # Every request must carry a valid auth token.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def _params_to_ints(self, qs):
        """Turn a comma-separated id string (e.g. '1,2,3') into [1, 2, 3]."""
        return [int(part) for part in qs.split(',')]

    def get_queryset(self):
        """Return this user's recipes, optionally filtered.

        ``?tags=1,2`` keeps recipes tagged with any of the given tag
        ids (``tags__id__in``); ``?ingredients=...`` works the same way
        for ingredients. Both filters are combined with the mandatory
        owner filter applied last.
        """
        filtered = self.queryset
        tag_param = self.request.query_params.get('tags')
        ingredient_param = self.request.query_params.get('ingredients')
        if tag_param:
            tag_id_list = self._params_to_ints(tag_param)
            filtered = filtered.filter(tags__id__in=tag_id_list)
        if ingredient_param:
            ingredient_id_list = self._params_to_ints(ingredient_param)
            filtered = filtered.filter(ingredients__id__in=ingredient_id_list)
        return filtered.filter(user=self.request.user)

    def get_serializer_class(self):
        """Pick the serializer matching the current ViewSet action."""
        if self.action == 'retrieve':
            # Detail view uses the richer recipe representation.
            return serializers.RecipeDetailSerializer
        if self.action == 'upload_image':
            return serializers.RecipeImageSerializer
        return self.serializer_class

    def perform_create(self, serializer):
        """Save a new recipe owned by the authenticated user."""
        serializer.save(user=self.request.user)

    @action(methods=['POST'], detail=True, url_path='upload-image')
    def upload_image(self, request, pk=None):
        """Attach an uploaded image to an existing recipe.

        Detail-only action (requires a recipe pk in the URL); returns
        400 with serializer errors when the payload is invalid.
        """
        recipe = self.get_object()
        serializer = self.get_serializer(recipe, data=request.data)
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_200_OK
        )
| 38.210526 | 77 | 0.674656 |
# We are goint to be creating a viewset
# and basing it of the combination of generic viewset
# and we are specifically going to use the list model mixins
# > A django rest frameworke feature
# where you can pull in different parts of a viewset
# that we want to use for our application
# > so we only want to take the list model function
# we do not want to the create, update, delete functions
# > we can achive this be a combination of the
# generic viewset and the list model mixins
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# import the Tag model class
from core.models import Tag, Ingredient, Recipe
# import the serializer
from recipe import serializers
# Create your views here.
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
                            mixins.ListModelMixin,
                            mixins.CreateModelMixin,):
    """Shared list/create behaviour for user-owned recipe attributes.

    Subclasses (tags, ingredients) only supply ``queryset`` and
    ``serializer_class``; ownership filtering and owner stamping on
    creation live here. Only the list and create mixins are pulled in,
    so update/delete are deliberately unavailable.
    """
    # Token auth is mandatory on every endpoint built from this base.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Return only the requesting user's objects, in reverse name order."""
        owned = self.queryset.filter(user=self.request.user)
        return owned.order_by('-name')

    def perform_create(self, serializer):
        """Attach the authenticated user to the freshly created object.

        Called by CreateModelMixin with the validated serializer.
        """
        owner = self.request.user
        serializer.save(user=owner)
class TagViewSet(BaseRecipeAttrViewSet):
    """Expose list/create endpoints for recipe tags."""
    serializer_class = serializers.TagSerializer
    # Base queryset; the base class narrows it to the requesting user.
    queryset = Tag.objects.all()
class IngredientViewSet(BaseRecipeAttrViewSet):
    """Expose list/create endpoints for recipe ingredients."""
    serializer_class = serializers.IngredientSerializer
    # Base queryset; the base class narrows it to the requesting user.
    queryset = Ingredient.objects.all()
class RecipeViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for recipes, restricted to the requesting user.

    Supports optional filtering by comma-separated tag / ingredient id
    lists via query parameters, a dedicated detail serializer for
    retrieval, and an image-upload action on the detail route.
    """
    serializer_class = serializers.RecipeSerializer
    queryset = Recipe.objects.all()
    # Every request must carry a valid auth token.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def _params_to_ints(self, qs):
        """Turn a comma-separated id string (e.g. '1,2,3') into [1, 2, 3]."""
        return [int(part) for part in qs.split(',')]

    def get_queryset(self):
        """Return this user's recipes, optionally filtered.

        ``?tags=1,2`` keeps recipes tagged with any of the given tag
        ids (``tags__id__in``); ``?ingredients=...`` works the same way
        for ingredients. Both filters are combined with the mandatory
        owner filter applied last.
        """
        filtered = self.queryset
        tag_param = self.request.query_params.get('tags')
        ingredient_param = self.request.query_params.get('ingredients')
        if tag_param:
            tag_id_list = self._params_to_ints(tag_param)
            filtered = filtered.filter(tags__id__in=tag_id_list)
        if ingredient_param:
            ingredient_id_list = self._params_to_ints(ingredient_param)
            filtered = filtered.filter(ingredients__id__in=ingredient_id_list)
        return filtered.filter(user=self.request.user)

    def get_serializer_class(self):
        """Pick the serializer matching the current ViewSet action."""
        if self.action == 'retrieve':
            # Detail view uses the richer recipe representation.
            return serializers.RecipeDetailSerializer
        if self.action == 'upload_image':
            return serializers.RecipeImageSerializer
        return self.serializer_class

    def perform_create(self, serializer):
        """Save a new recipe owned by the authenticated user."""
        serializer.save(user=self.request.user)

    @action(methods=['POST'], detail=True, url_path='upload-image')
    def upload_image(self, request, pk=None):
        """Attach an uploaded image to an existing recipe.

        Detail-only action (requires a recipe pk in the URL); returns
        400 with serializer errors when the payload is invalid.
        """
        recipe = self.get_object()
        serializer = self.get_serializer(recipe, data=request.data)
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_200_OK
        )
| 0 | 0 | 0 |
e81b68ba6d2033055b7933b11730c205a1112d28 | 4,024 | py | Python | custom_components/xiaomi_miot_raw/deps/const.py | Daneaz/xiaomi_miot_raw | 987a5eac8741b6bd2c82b846978ba0a12a667886 | [
"Apache-2.0"
] | null | null | null | custom_components/xiaomi_miot_raw/deps/const.py | Daneaz/xiaomi_miot_raw | 987a5eac8741b6bd2c82b846978ba0a12a667886 | [
"Apache-2.0"
] | null | null | null | custom_components/xiaomi_miot_raw/deps/const.py | Daneaz/xiaomi_miot_raw | 987a5eac8741b6bd2c82b846978ba0a12a667886 | [
"Apache-2.0"
] | null | null | null | import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import *
CONF_UPDATE_INSTANT = "update_instant"
CONF_MAPPING = 'mapping'
CONF_CONTROL_PARAMS = 'params'
CONF_CLOUD = 'update_from_cloud'
CONF_MODEL = 'model'
CONF_SENSOR_PROPERTY = "sensor_property"
CONF_SENSOR_UNIT = "sensor_unit"
CONF_DEFAULT_PROPERTIES = "default_properties"
ATTR_STATE_VALUE = "state_value"
ATTR_MODEL = "model"
ATTR_FIRMWARE_VERSION = "firmware_version"
ATTR_HARDWARE_VERSION = "hardware_version"
DOMAIN = 'xiaomi_miot_raw'
SUPPORTED_DOMAINS = [
"sensor",
"switch",
"light",
"fan",
"cover",
"humidifier",
"media_player",
"climate",
"lock",
"water_heater",
]
DEFAULT_NAME = "Xiaomi MIoT Device"
DUMMY_IP = "255.255.255.255"
DUMMY_TOKEN = "00000000000000000000000000000000"
SCHEMA = {
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,
vol.Optional(CONF_CLOUD): vol.All(),
vol.Optional('cloud_write'):vol.All(),
vol.Required(CONF_MAPPING):vol.All(),
vol.Required(CONF_CONTROL_PARAMS):vol.All(),
vol.Optional(CONF_SENSOR_PROPERTY): cv.string,
vol.Optional(CONF_SENSOR_UNIT): cv.string,
}
MAP = {
"sensor": {
"air_monitor",
"water_purifier",
"cooker",
"pressure_cooker",
"induction_cooker",
"power_consumption",
"electricity",
"environment",
"filter",
"filter_2",
"filter_3",
"filter_4",
"temperature_humidity_sensor",
"magnet_sensor",
"motion_sensor",
"submersion_sensor",
"tds_sensor",
"air_fryer",
"remain_clean_time",
},
"switch": {
"switch",
"outlet",
"switch_2",
"switch_3",
"switch_4",
"coffee_machine",
},
"light": {
"light",
"light_2",
"light_3",
"light_4",
"indicator_light",
},
"fan": {
"a_l",
"fan",
"ceiling_fan",
"air_fresh",
"air_purifier",
"washer",
"hood",
"fan_control",
"dryer",
"toilet",
"settings",
"settings_2",
"air_fresh_heater",
"bed",
"pet_drinking_fountain",
},
"cover": {
"curtain",
"airer",
},
"humidifier": {
"humidifier",
"dehumidifier",
},
"media_player": {
"media_player",
"speaker",
"play_control",
},
"climate": {
"air_conditioner",
"heater",
},
"lock": {
"physical_controls_locked",
},
"water_heater": {
"water_heater",
"kettle",
"dishwasher",
},
}
UNIT_MAPPING = {
"percentage" : PERCENTAGE , # 百分比
"celsius" : TEMP_CELSIUS , # 摄氏度
"seconds" : "秒" , # 秒
"minutes" : "分钟" , # 分
"hours" : "小时" , # 小时
"days" : "天" , # 天
"kelvin" : TEMP_KELVIN , # 开氏温标
"pascal" : "Pa" , # 帕斯卡(大气压强单位)
"arcdegrees" : "rad" , # 弧度(角度单位)
"rgb" : "RGB" , # RGB(颜色)
"watt" : POWER_WATT , # 瓦特(功率)
"litre" : VOLUME_LITERS , # 升
"ppm" : CONCENTRATION_PARTS_PER_MILLION , # ppm浓度
"lux" : LIGHT_LUX , # 勒克斯(照度)
"mg/m3" : CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER , # 毫克每立方米
} | 27.189189 | 81 | 0.492296 | import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import *
CONF_UPDATE_INSTANT = "update_instant"
CONF_MAPPING = 'mapping'
CONF_CONTROL_PARAMS = 'params'
CONF_CLOUD = 'update_from_cloud'
CONF_MODEL = 'model'
CONF_SENSOR_PROPERTY = "sensor_property"
CONF_SENSOR_UNIT = "sensor_unit"
CONF_DEFAULT_PROPERTIES = "default_properties"
ATTR_STATE_VALUE = "state_value"
ATTR_MODEL = "model"
ATTR_FIRMWARE_VERSION = "firmware_version"
ATTR_HARDWARE_VERSION = "hardware_version"
DOMAIN = 'xiaomi_miot_raw'
SUPPORTED_DOMAINS = [
"sensor",
"switch",
"light",
"fan",
"cover",
"humidifier",
"media_player",
"climate",
"lock",
"water_heater",
]
DEFAULT_NAME = "Xiaomi MIoT Device"
DUMMY_IP = "255.255.255.255"
DUMMY_TOKEN = "00000000000000000000000000000000"
SCHEMA = {
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,
vol.Optional(CONF_CLOUD): vol.All(),
vol.Optional('cloud_write'):vol.All(),
vol.Required(CONF_MAPPING):vol.All(),
vol.Required(CONF_CONTROL_PARAMS):vol.All(),
vol.Optional(CONF_SENSOR_PROPERTY): cv.string,
vol.Optional(CONF_SENSOR_UNIT): cv.string,
}
MAP = {
"sensor": {
"air_monitor",
"water_purifier",
"cooker",
"pressure_cooker",
"induction_cooker",
"power_consumption",
"electricity",
"environment",
"filter",
"filter_2",
"filter_3",
"filter_4",
"temperature_humidity_sensor",
"magnet_sensor",
"motion_sensor",
"submersion_sensor",
"tds_sensor",
"air_fryer",
"remain_clean_time",
},
"switch": {
"switch",
"outlet",
"switch_2",
"switch_3",
"switch_4",
"coffee_machine",
},
"light": {
"light",
"light_2",
"light_3",
"light_4",
"indicator_light",
},
"fan": {
"a_l",
"fan",
"ceiling_fan",
"air_fresh",
"air_purifier",
"washer",
"hood",
"fan_control",
"dryer",
"toilet",
"settings",
"settings_2",
"air_fresh_heater",
"bed",
"pet_drinking_fountain",
},
"cover": {
"curtain",
"airer",
},
"humidifier": {
"humidifier",
"dehumidifier",
},
"media_player": {
"media_player",
"speaker",
"play_control",
},
"climate": {
"air_conditioner",
"heater",
},
"lock": {
"physical_controls_locked",
},
"water_heater": {
"water_heater",
"kettle",
"dishwasher",
},
}
UNIT_MAPPING = {
"percentage" : PERCENTAGE , # 百分比
"celsius" : TEMP_CELSIUS , # 摄氏度
"seconds" : "秒" , # 秒
"minutes" : "分钟" , # 分
"hours" : "小时" , # 小时
"days" : "天" , # 天
"kelvin" : TEMP_KELVIN , # 开氏温标
"pascal" : "Pa" , # 帕斯卡(大气压强单位)
"arcdegrees" : "rad" , # 弧度(角度单位)
"rgb" : "RGB" , # RGB(颜色)
"watt" : POWER_WATT , # 瓦特(功率)
"litre" : VOLUME_LITERS , # 升
"ppm" : CONCENTRATION_PARTS_PER_MILLION , # ppm浓度
"lux" : LIGHT_LUX , # 勒克斯(照度)
"mg/m3" : CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER , # 毫克每立方米
} | 0 | 0 | 0 |
bec1a1e105bd7e36d63830d345623c40085e6acf | 25,240 | py | Python | PythonNetwork/venv/Lib/site-packages/google/protobuf/type_pb2.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | PythonNetwork/venv/Lib/site-packages/google/protobuf/type_pb2.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | PythonNetwork/venv/Lib/site-packages/google/protobuf/type_pb2.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/type.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
# NOTE(review): everything below is machine-generated by protoc from
# google/protobuf/type.proto ("DO NOT EDIT"); regenerate with protoc
# instead of hand-editing. Comments added for orientation only.
# File-level descriptor: embeds the whole serialized type.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/protobuf/type.proto',
  package='google.protobuf',
  syntax='proto3',
  serialized_options=b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b \x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 
 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypes'
  ,
  dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_source__context__pb2.DESCRIPTOR,])
# Enum descriptor for the top-level google.protobuf.Syntax enum.
_SYNTAX = _descriptor.EnumDescriptor(
  name='Syntax',
  full_name='google.protobuf.Syntax',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SYNTAX_PROTO2', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SYNTAX_PROTO3', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1413,
  serialized_end=1459,
)
_sym_db.RegisterEnumDescriptor(_SYNTAX)

# Module-level enum wrapper plus its value constants.
Syntax = enum_type_wrapper.EnumTypeWrapper(_SYNTAX)
SYNTAX_PROTO2 = 0
SYNTAX_PROTO3 = 1

# Enum descriptor for Field.Kind (wire/scalar type of a field).
_FIELD_KIND = _descriptor.EnumDescriptor(
  name='Kind',
  full_name='google.protobuf.Field.Kind',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TYPE_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_DOUBLE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FLOAT', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_INT64', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_UINT64', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_INT32', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FIXED64', index=6, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FIXED32', index=7, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_BOOL', index=8, number=8,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_STRING', index=9, number=9,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_GROUP', index=10, number=10,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_MESSAGE', index=11, number=11,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_BYTES', index=12, number=12,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_UINT32', index=13, number=13,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_ENUM', index=14, number=14,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SFIXED32', index=15, number=15,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SFIXED64', index=16, number=16,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SINT32', index=17, number=17,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SINT64', index=18, number=18,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=610,
  serialized_end=938,
)
_sym_db.RegisterEnumDescriptor(_FIELD_KIND)

# Enum descriptor for Field.Cardinality (optional/required/repeated).
_FIELD_CARDINALITY = _descriptor.EnumDescriptor(
  name='Cardinality',
  full_name='google.protobuf.Field.Cardinality',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='CARDINALITY_UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CARDINALITY_OPTIONAL', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CARDINALITY_REQUIRED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CARDINALITY_REPEATED', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=940,
  serialized_end=1056,
)
_sym_db.RegisterEnumDescriptor(_FIELD_CARDINALITY)

# Message descriptor for google.protobuf.Type.
_TYPE = _descriptor.Descriptor(
  name='Type',
  full_name='google.protobuf.Type',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.Type.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='fields', full_name='google.protobuf.Type.fields', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='oneofs', full_name='google.protobuf.Type.oneofs', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.Type.options', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='source_context', full_name='google.protobuf.Type.source_context', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='syntax', full_name='google.protobuf.Type.syntax', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=113,
  serialized_end=328,
)

# Message descriptor for google.protobuf.Field (owns the Kind and
# Cardinality enum descriptors above).
_FIELD = _descriptor.Descriptor(
  name='Field',
  full_name='google.protobuf.Field',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='kind', full_name='google.protobuf.Field.kind', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='cardinality', full_name='google.protobuf.Field.cardinality', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='number', full_name='google.protobuf.Field.number', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.Field.name', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='type_url', full_name='google.protobuf.Field.type_url', index=4,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='oneof_index', full_name='google.protobuf.Field.oneof_index', index=5,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='packed', full_name='google.protobuf.Field.packed', index=6,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.Field.options', index=7,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='json_name', full_name='google.protobuf.Field.json_name', index=8,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='default_value', full_name='google.protobuf.Field.default_value', index=9,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FIELD_KIND,
    _FIELD_CARDINALITY,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=331,
  serialized_end=1056,
)
_ENUM = _descriptor.Descriptor(
name='Enum',
full_name='google.protobuf.Enum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Enum.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumvalue', full_name='google.protobuf.Enum.enumvalue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Enum.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Enum.source_context', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Enum.syntax', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1059,
serialized_end=1265,
)
_ENUMVALUE = _descriptor.Descriptor(
name='EnumValue',
full_name='google.protobuf.EnumValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValue.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValue.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1267,
serialized_end=1350,
)
_OPTION = _descriptor.Descriptor(
name='Option',
full_name='google.protobuf.Option',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Option.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Option.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1352,
serialized_end=1411,
)
_TYPE.fields_by_name['fields'].message_type = _FIELD
_TYPE.fields_by_name['options'].message_type = _OPTION
_TYPE.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_TYPE.fields_by_name['syntax'].enum_type = _SYNTAX
_FIELD.fields_by_name['kind'].enum_type = _FIELD_KIND
_FIELD.fields_by_name['cardinality'].enum_type = _FIELD_CARDINALITY
_FIELD.fields_by_name['options'].message_type = _OPTION
_FIELD_KIND.containing_type = _FIELD
_FIELD_CARDINALITY.containing_type = _FIELD
_ENUM.fields_by_name['enumvalue'].message_type = _ENUMVALUE
_ENUM.fields_by_name['options'].message_type = _OPTION
_ENUM.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_ENUM.fields_by_name['syntax'].enum_type = _SYNTAX
_ENUMVALUE.fields_by_name['options'].message_type = _OPTION
_OPTION.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Type'] = _TYPE
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
DESCRIPTOR.message_types_by_name['Enum'] = _ENUM
DESCRIPTOR.message_types_by_name['EnumValue'] = _ENUMVALUE
DESCRIPTOR.message_types_by_name['Option'] = _OPTION
DESCRIPTOR.enum_types_by_name['Syntax'] = _SYNTAX
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), {
'DESCRIPTOR' : _TYPE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Type)
})
_sym_db.RegisterMessage(Type)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _FIELD,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Field)
})
_sym_db.RegisterMessage(Field)
Enum = _reflection.GeneratedProtocolMessageType('Enum', (_message.Message,), {
'DESCRIPTOR' : _ENUM,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Enum)
})
_sym_db.RegisterMessage(Enum)
EnumValue = _reflection.GeneratedProtocolMessageType('EnumValue', (_message.Message,), {
'DESCRIPTOR' : _ENUMVALUE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValue)
})
_sym_db.RegisterMessage(EnumValue)
Option = _reflection.GeneratedProtocolMessageType('Option', (_message.Message,), {
'DESCRIPTOR' : _OPTION,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Option)
})
_sym_db.RegisterMessage(Option)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 43.972125 | 2,740 | 0.750238 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/type.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/type.proto',
package='google.protobuf',
syntax='proto3',
serialized_options=b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b \x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 
\x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_source__context__pb2.DESCRIPTOR,])
_SYNTAX = _descriptor.EnumDescriptor(
name='Syntax',
full_name='google.protobuf.Syntax',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO2', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO3', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1413,
serialized_end=1459,
)
_sym_db.RegisterEnumDescriptor(_SYNTAX)
Syntax = enum_type_wrapper.EnumTypeWrapper(_SYNTAX)
SYNTAX_PROTO2 = 0
SYNTAX_PROTO3 = 1
_FIELD_KIND = _descriptor.EnumDescriptor(
name='Kind',
full_name='google.protobuf.Field.Kind',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=14, number=14,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=15, number=15,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=16, number=16,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=17, number=17,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=18, number=18,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=610,
serialized_end=938,
)
_sym_db.RegisterEnumDescriptor(_FIELD_KIND)
_FIELD_CARDINALITY = _descriptor.EnumDescriptor(
name='Cardinality',
full_name='google.protobuf.Field.Cardinality',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='CARDINALITY_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_OPTIONAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REQUIRED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REPEATED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=940,
serialized_end=1056,
)
_sym_db.RegisterEnumDescriptor(_FIELD_CARDINALITY)
_TYPE = _descriptor.Descriptor(
name='Type',
full_name='google.protobuf.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Type.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fields', full_name='google.protobuf.Type.fields', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='oneofs', full_name='google.protobuf.Type.oneofs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Type.options', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Type.source_context', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Type.syntax', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=328,
)
_FIELD = _descriptor.Descriptor(
name='Field',
full_name='google.protobuf.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='google.protobuf.Field.kind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cardinality', full_name='google.protobuf.Field.cardinality', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.Field.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Field.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Field.type_url', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='oneof_index', full_name='google.protobuf.Field.oneof_index', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.Field.packed', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Field.options', index=7,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='json_name', full_name='google.protobuf.Field.json_name', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.Field.default_value', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELD_KIND,
_FIELD_CARDINALITY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=1056,
)
_ENUM = _descriptor.Descriptor(
name='Enum',
full_name='google.protobuf.Enum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Enum.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumvalue', full_name='google.protobuf.Enum.enumvalue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Enum.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Enum.source_context', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Enum.syntax', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1059,
serialized_end=1265,
)
_ENUMVALUE = _descriptor.Descriptor(
name='EnumValue',
full_name='google.protobuf.EnumValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValue.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValue.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1267,
serialized_end=1350,
)
_OPTION = _descriptor.Descriptor(
name='Option',
full_name='google.protobuf.Option',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Option.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Option.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1352,
serialized_end=1411,
)
_TYPE.fields_by_name['fields'].message_type = _FIELD
_TYPE.fields_by_name['options'].message_type = _OPTION
_TYPE.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_TYPE.fields_by_name['syntax'].enum_type = _SYNTAX
_FIELD.fields_by_name['kind'].enum_type = _FIELD_KIND
_FIELD.fields_by_name['cardinality'].enum_type = _FIELD_CARDINALITY
_FIELD.fields_by_name['options'].message_type = _OPTION
_FIELD_KIND.containing_type = _FIELD
_FIELD_CARDINALITY.containing_type = _FIELD
_ENUM.fields_by_name['enumvalue'].message_type = _ENUMVALUE
_ENUM.fields_by_name['options'].message_type = _OPTION
_ENUM.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_ENUM.fields_by_name['syntax'].enum_type = _SYNTAX
_ENUMVALUE.fields_by_name['options'].message_type = _OPTION
_OPTION.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Type'] = _TYPE
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
DESCRIPTOR.message_types_by_name['Enum'] = _ENUM
DESCRIPTOR.message_types_by_name['EnumValue'] = _ENUMVALUE
DESCRIPTOR.message_types_by_name['Option'] = _OPTION
DESCRIPTOR.enum_types_by_name['Syntax'] = _SYNTAX
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), {
'DESCRIPTOR' : _TYPE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Type)
})
_sym_db.RegisterMessage(Type)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _FIELD,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Field)
})
_sym_db.RegisterMessage(Field)
Enum = _reflection.GeneratedProtocolMessageType('Enum', (_message.Message,), {
'DESCRIPTOR' : _ENUM,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Enum)
})
_sym_db.RegisterMessage(Enum)
EnumValue = _reflection.GeneratedProtocolMessageType('EnumValue', (_message.Message,), {
'DESCRIPTOR' : _ENUMVALUE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValue)
})
_sym_db.RegisterMessage(EnumValue)
Option = _reflection.GeneratedProtocolMessageType('Option', (_message.Message,), {
'DESCRIPTOR' : _OPTION,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Option)
})
_sym_db.RegisterMessage(Option)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
45f66ef47864261732e9f9b09202279ebd769349 | 492 | py | Python | modules/resources_manager/resource_manager.py | signeus/WebSockets | a3013ed684404a95c3608e6016b28ce08b16eeb8 | [
"MIT"
] | null | null | null | modules/resources_manager/resource_manager.py | signeus/WebSockets | a3013ed684404a95c3608e6016b28ce08b16eeb8 | [
"MIT"
] | null | null | null | modules/resources_manager/resource_manager.py | signeus/WebSockets | a3013ed684404a95c3608e6016b28ce08b16eeb8 | [
"MIT"
] | null | null | null | from resource_factory import ResourceFactory
from modules.configs.environment import env | 30.75 | 58 | 0.745935 | from resource_factory import ResourceFactory
from modules.configs.environment import env
class ResourceManager:
def __init__(self, core):
self.environmentName = env
self.core = core
def DatabaseManager(self):
return self.ManagerOperation(self.environmentName)
def ResourcesManager(self):
return self.ManagerOperation(self.environmentName)
def ManagerOperation(self, environmentName):
return ResourceFactory(environmentName).config() | 273 | 1 | 130 |
78fccb3e303a782c196b3f5c038480d45fc8348a | 7,243 | py | Python | mymoney/core/management/commands/demo.py | ychab/mymoney | 9ee665d40648fd0b95f6e90d82ccf2bfc791e8af | [
"BSD-3-Clause"
] | 67 | 2015-12-22T10:27:34.000Z | 2022-03-10T21:33:18.000Z | mymoney/core/management/commands/demo.py | clebercarmo/mymoney | 9ee665d40648fd0b95f6e90d82ccf2bfc791e8af | [
"BSD-3-Clause"
] | null | null | null | mymoney/core/management/commands/demo.py | clebercarmo/mymoney | 9ee665d40648fd0b95f6e90d82ccf2bfc791e8af | [
"BSD-3-Clause"
] | 35 | 2016-08-07T11:43:51.000Z | 2022-02-21T21:20:06.000Z | import datetime
from decimal import Decimal
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from dateutil.relativedelta import relativedelta
from factory import fuzzy
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.bankaccounts.models import BankAccount
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactions.models import BankTransaction
from mymoney.apps.banktransactionschedulers.factories import (
BankTransactionSchedulerFactory,
)
from mymoney.apps.banktransactionschedulers.models import (
BankTransactionScheduler,
)
from mymoney.apps.banktransactiontags.factories import (
BankTransactionTagFactory,
)
from mymoney.apps.banktransactiontags.models import BankTransactionTag
from ...factories import UserFactory
class Command(BaseCommand):
"""
Data generator for purpose only.
"""
help = 'Generate data for purpose.'
leave_locale_alone = True
| 35.331707 | 84 | 0.60486 | import datetime
from decimal import Decimal
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from dateutil.relativedelta import relativedelta
from factory import fuzzy
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.bankaccounts.models import BankAccount
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactions.models import BankTransaction
from mymoney.apps.banktransactionschedulers.factories import (
BankTransactionSchedulerFactory,
)
from mymoney.apps.banktransactionschedulers.models import (
BankTransactionScheduler,
)
from mymoney.apps.banktransactiontags.factories import (
BankTransactionTagFactory,
)
from mymoney.apps.banktransactiontags.models import BankTransactionTag
from ...factories import UserFactory
class Command(BaseCommand):
    """
    Data generator for purpose only.

    Populates the demo database with a user, a bank account, tags,
    monthly schedulers and randomized daily bank transactions.
    """
    help = 'Generate data for purpose.'
    # Keep the active locale so translated labels (ugettext) come out localized.
    leave_locale_alone = True

    def add_arguments(self, parser):
        # Demo credentials and currency are overridable from the command line.
        parser.add_argument('--username', default='demo', help='Default: demo')
        parser.add_argument('--password', default='demo', help='Default: demo')
        parser.add_argument('--email', default='demo@example.com')
        parser.add_argument('--currency', default='EUR', help='Default : EUR')
        parser.add_argument('--purge', action='store_true', default=False,
                            help="Delete all data relatives to the project's "
                                 "models then exit.")
        parser.add_argument('--noinput', action='store_false',
                            dest='interactive', default=True)

    def handle(self, *args, **options):
        # --purge: wipe everything (after confirmation unless --noinput) and exit.
        if options.get('purge'):
            if options.get('interactive'):  # pragma: no cover
                msg = "Are you sure to delete all data?"
                choice = input("%s (y/N): " % msg).strip().lower()
                if choice != 'y':
                    return
            # Deleting users only should be enough to delete all instances.
            get_user_model().objects.all().delete()
            BankAccount.objects.all().delete()
            BankTransactionTag.objects.all().delete()
            BankTransaction.objects.all().delete()
            BankTransactionScheduler.objects.all().delete()
            self.stdout.write('All data have been deleted.')
            return
        # Demo owner with admin permissions.
        user = UserFactory(
            username=options.get('username'),
            password=options.get('password'),
            email=options.get('email'),
            user_permissions='admin',
        )
        bankaccount = BankAccountFactory(
            label=_('Current account'),
            balance=2000,
            balance_initial=150,
            currency=options.get('currency'),
            owners=[user],
        )
        # Tags reused by the generated transactions below.
        tag_rent = BankTransactionTagFactory(name=_('Rent'), owner=user)
        tag_shopping = BankTransactionTagFactory(name=_('Shopping'), owner=user)
        tag_car = BankTransactionTagFactory(name=_('Car'), owner=user)
        tag_tax = BankTransactionTagFactory(name=_('Tax'), owner=user)
        tag_restaurant = BankTransactionTagFactory(name=_('Restaurant'), owner=user)
        today = datetime.date.today()
        # Monthly schedulers: rent, council tax (10 occurrences) and wages.
        # .clone() materializes the first scheduled bank transaction.
        BankTransactionSchedulerFactory(
            bankaccount=bankaccount,
            label=_("Rent"),
            amount=Decimal("-910"),
            date=datetime.date(today.year, today.month, 10),
            payment_method=BankTransaction.PAYMENT_METHOD_TRANSFER,
            tag=tag_rent,
            type=BankTransactionScheduler.TYPE_MONTHLY,
            recurrence=None,
        ).clone()
        BankTransactionSchedulerFactory(
            bankaccount=bankaccount,
            label=_("Council tax"),
            amount=Decimal("-99.89"),
            date=datetime.date(today.year, today.month, 15),
            payment_method=BankTransaction.PAYMENT_METHOD_TRANSFER,
            tag=tag_tax,
            type=BankTransactionScheduler.TYPE_MONTHLY,
            recurrence=10,
        ).clone()
        BankTransactionSchedulerFactory(
            bankaccount=bankaccount,
            label=_("Wages"),
            amount=Decimal("2615.78"),
            date=datetime.date(today.year, today.month, 5),
            payment_method=BankTransaction.PAYMENT_METHOD_TRANSFER,
            tag=None,
            type=BankTransactionScheduler.TYPE_MONTHLY,
            recurrence=None,
        ).clone()
        # A few hand-picked one-off transactions.
        BankTransactionFactory(
            bankaccount=bankaccount,
            label=_("Internal transfer"),
            amount=Decimal("500"),
            date=today - relativedelta(months=1, day=28),
            reconciled=True,
            status=BankTransaction.STATUS_IGNORED,
            payment_method=BankTransaction.PAYMENT_METHOD_TRANSFER_INTERNAL,
            tag=None,
            memo="Ineed$",
        )
        BankTransactionFactory(
            bankaccount=bankaccount,
            label=_("Scratch ticket"),
            amount=Decimal("150"),
            date=today,
            reconciled=False,
            payment_method=BankTransaction.PAYMENT_METHOD_CASH,
            tag=None,
            memo="Hooray!",
        )
        BankTransactionFactory(
            bankaccount=bankaccount,
            label=_("New tires"),
            amount=Decimal("-189.59"),
            date=today - relativedelta(days=5),
            reconciled=True,
            payment_method=BankTransaction.PAYMENT_METHOD_CHECK,
            tag=tag_car,
            memo="Love my bike!",
        )
        BankTransactionFactory(
            bankaccount=bankaccount,
            label=_("Bad stuff"),
            amount=Decimal("-79.90"),
            date=datetime.date(today.year, today.month, 9),
            reconciled=True,
            payment_method=BankTransaction.PAYMENT_METHOD_CREDIT_CARD,
            tag=tag_shopping,
        )
        BankTransactionFactory(
            bankaccount=bankaccount,
            label=_("Refund"),
            amount=Decimal("49.59"),
            date=datetime.date(today.year, today.month, 15),
            reconciled=True,
            payment_method=BankTransaction.PAYMENT_METHOD_TRANSFER,
            tag=tag_shopping,
        )
        # Randomized daily expenses over roughly two months centred on today.
        date_start = today + relativedelta(months=-1, day=15)
        date_end = today + relativedelta(months=1, day=15)
        date = date_start
        while date < date_end:
            # Past days always get one entry; future days only every third day.
            if date <= today or date.day % 3 == 0:
                choice = [tag_shopping, tag_restaurant, None, None]
                tag = fuzzy.FuzzyChoice(choice).fuzz()
                BankTransactionFactory(
                    bankaccount=bankaccount,
                    label=tag.name if tag is not None else _('Something'),
                    amount=fuzzy.FuzzyDecimal(-100, -10),
                    date=date,
                    # days=-3: reconciled for anything earlier than 3 days from now.
                    reconciled=date < today - relativedelta(days=-3),
                    status=BankTransaction.STATUS_ACTIVE,
                    tag=tag,
                )
            date += relativedelta(days=1)
        self.stdout.write("Data have been generated successfully.")
| 6,106 | 0 | 54 |
2f420971607e641370ba1d5ce2d4ac918e1560c6 | 251 | py | Python | tests/common/constants.py | infoshareacademy/jpydzr1-dkmap-django | 4fc34b2d721507b69a7fe945fd4ea3e5fd863828 | [
"MIT"
] | null | null | null | tests/common/constants.py | infoshareacademy/jpydzr1-dkmap-django | 4fc34b2d721507b69a7fe945fd4ea3e5fd863828 | [
"MIT"
] | 1 | 2021-03-20T18:56:37.000Z | 2021-03-20T18:56:37.000Z | tests/common/constants.py | infoshareacademy/jpydzr1-dkmap-django | 4fc34b2d721507b69a7fe945fd4ea3e5fd863828 | [
"MIT"
# Expected board layout: field name -> zero-based board index, in reading order.
BOARD_FIELDS_EXPECTED = {
    name: index
    for index, name in enumerate((
        'first_field', 'second_field', 'third_field',
        'fourth_field', 'fifth_field', 'sixth_field',
        'seventh_field', 'eighth_field', 'ninth_field',
    ))
}
# Value used to render an empty board field.
FIELD_EMPTY_VAL = " "
# Duplicate record of the constants module: field name -> zero-based index.
BOARD_FIELDS_EXPECTED = {
    'first_field': 0,
    'second_field': 1,
    'third_field': 2,
    'fourth_field': 3,
    'fifth_field': 4,
    'sixth_field': 5,
    'seventh_field': 6,
    'eighth_field': 7,
    'ninth_field': 8
}
FIELD_EMPTY_VAL = " "
7db3c1dd01c161f35a0206bbf5386c1a5f7c8e4d | 846 | py | Python | hata/ext/rpc/exceptions.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 173 | 2019-06-14T20:25:00.000Z | 2022-03-21T19:36:10.000Z | hata/ext/rpc/exceptions.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 52 | 2020-01-03T17:05:14.000Z | 2022-03-31T11:39:50.000Z | hata/ext/rpc/exceptions.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 47 | 2019-11-09T08:46:45.000Z | 2022-03-31T14:33:34.000Z | __all__ = ('DiscordRPCError', )
class DiscordRPCError(BaseException):
    """
    Discord RPC error code.

    Attributes
    ----------
    code : `int`
        Discord RPC error code.
    message : `str`
        Discord RPC error message.
    """
    def __init__(self, code, message):
        """
        Creates a new Discord RPC error instance with the given parameters.

        Parameters
        ----------
        code : `int`
            Discord RPC error code.
        message : `str`
            Discord RPC error message.
        """
        # Populate BaseException.args first, then mirror the values as
        # named attributes for convenient access.
        BaseException.__init__(self, code, message)
        self.code = code
        self.message = message

    def __repr__(self):
        """Returns the representation of the error code."""
        return '{}: [{}] {!r}'.format(type(self).__name__, self.code, self.message)
| 25.636364 | 75 | 0.542553 | __all__ = ('DiscordRPCError', )
class DiscordRPCError(BaseException):
    """
    Discord RPC error code.

    Attributes
    ----------
    code : `int`
        Discord RPC error code.
    message : `str`
        Discord RPC error message.
    """
    def __init__(self, code, message):
        """
        Creates a new Discord RPC error instance with the given parameters.

        Parameters
        ----------
        code : `int`
            Discord RPC error code.
        message : `str`
            Discord RPC error message.
        """
        self.code = code
        self.message = message
        # Keep BaseException.args in sync with the named attributes.
        BaseException.__init__(self, code, message)

    def __repr__(self):
        """Returns the representation of the error code."""
        return f'{self.__class__.__name__}: [{self.code}] {self.message!r}'
| 0 | 0 | 0 |
d0ad443fa2d28e25f90e7079ee64526fe2255f0b | 1,166 | py | Python | Basic_IO/count_numbers.py | shubee17/HackerEarth | 73cc17a64490969cc6b9648601e7a9d817e7dfd1 | [
"MIT"
] | null | null | null | Basic_IO/count_numbers.py | shubee17/HackerEarth | 73cc17a64490969cc6b9648601e7a9d817e7dfd1 | [
"MIT"
] | null | null | null | Basic_IO/count_numbers.py | shubee17/HackerEarth | 73cc17a64490969cc6b9648601e7a9d817e7dfd1 | [
"MIT"
] | null | null | null | """
Your task is pretty simple , given a string S , find the total count of numbers present in the digit.
Input
The first line contains T , the number of test cases. The first line of each and every testc ase will contain a integer N , the length of the string . The second line of each and every test case will contain a string S of length N.
Output
For each and every testcase , output the total count of numbers present in the string.
Constraints
0<T<200
0<N<10000
SAMPLE INPUT
1
26
sadw96aeafae4awdw2wd100awd
SAMPLE OUTPUT
4
Explanation
For the first test case , the string given is "sadw96aeafae4awdw2wd100awd". There are total of 4 numbers in the string - [96,4,2,100]. So , we output 4.
"""
# NOTE: Python 2 script (raw_input / print statement).
T = raw_input()  # number of test cases
Digits = ['0','1','2','3','4','5','6','7','8','9']
for i in range(int(T)):
    N = raw_input()  # declared string length; not needed for the counting itself
    Array = map(str, raw_input())
    arr = []
    cnt = 0
    Array = list(Array)
    # Collect the indices of every digit character in the string.
    for j in range(len(Array)):
        if Array[j] in Digits:
            arr.append(j)
        else:
            pass
    # Each gap between consecutive digit indices starts a new number.
    for k in range(len(arr)-1):
        if arr[k] == arr[k+1]-1:
            pass
        else:
            cnt += 1
    # NOTE(review): prints 1 even when the string contains no digits at all
    # (cnt stays 0, so cnt+1 == 1) — likely a bug for digit-free inputs.
    print cnt+1
| 22.862745 | 231 | 0.627787 | """
Your task is pretty simple , given a string S , find the total count of numbers present in the digit.
Input
The first line contains T , the number of test cases. The first line of each and every testc ase will contain a integer N , the length of the string . The second line of each and every test case will contain a string S of length N.
Output
For each and every testcase , output the total count of numbers present in the string.
Constraints
0<T<200
0<N<10000
SAMPLE INPUT
1
26
sadw96aeafae4awdw2wd100awd
SAMPLE OUTPUT
4
Explanation
For the first test case , the string given is "sadw96aeafae4awdw2wd100awd". There are total of 4 numbers in the string - [96,4,2,100]. So , we output 4.
"""
T = raw_input()
Digits = ['0','1','2','3','4','5','6','7','8','9']
for i in range(int(T)):
N = raw_input()
Array = map(str, raw_input())
arr = []
cnt = 0
Array = list(Array)
for j in range(len(Array)):
if Array[j] in Digits:
arr.append(j)
else:
pass
for k in range(len(arr)-1):
if arr[k] == arr[k+1]-1:
pass
else:
cnt += 1
print cnt+1
| 0 | 0 | 0 |
d0ea79d7ce1f551c9b67e6805b22a9073c3ae0c7 | 7,869 | py | Python | dcc_venv/venvs_handler.py | rBrenick/dcc-venv | c199fadd3c1b8d58b818becf45f4638c2bdc9a7f | [
"MIT"
] | 4 | 2019-06-06T21:57:42.000Z | 2021-09-13T14:11:35.000Z | dcc_venv/venvs_handler.py | rBrenick/dcc-venv | c199fadd3c1b8d58b818becf45f4638c2bdc9a7f | [
"MIT"
] | null | null | null | dcc_venv/venvs_handler.py | rBrenick/dcc-venv | c199fadd3c1b8d58b818becf45f4638c2bdc9a7f | [
"MIT"
] | 1 | 2020-12-06T18:14:05.000Z | 2020-12-06T18:14:05.000Z | import os
import sys
import imp
import venv
import shutil
import stat
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(" dcc-venv ")
import requirements_handler as reqhand
# VENV_ROOT_FOLDER = os.path.join(os.path.expanduser('~'), ".dcc-venvs")
VENV_ROOT_FOLDER = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), ".venvs")
CONFIGS_FOLDER = os.path.abspath(os.path.dirname(__file__))
CONFIG_PREFIX = "config_"
DCC_STARTUP_SCRIPT = os.path.join(CONFIGS_FOLDER, "common", "dcc_venv_startup.py")
def onremoveerror(func, path, exc_info):
    """
    Error handler for ``shutil.rmtree``.

    If the failure was caused by a read-only file, grant the user write
    permission and retry the failed operation; any other failure is
    re-raised unchanged.

    Usage : ``shutil.rmtree(path, onerror=onremoveerror)``
    """
    import stat

    if os.access(path, os.W_OK):
        # Already writable: the failure was not a permission problem.
        raise
    os.chmod(path, stat.S_IWUSR)
    func(path)
def str2bool(v):
    """
    Parse a human-friendly boolean string into a real bool.

    I can't believe this isn't built in
    https://stackoverflow.com/a/43357954

    Accepts bools unchanged; 'yes/true/t/y/1' -> True and
    'no/false/f/n/0' -> False (case-insensitive).

    Raises argparse.ArgumentTypeError for anything else.
    """
    # Local import: module-level argparse is only imported under the
    # __main__ guard below, so library callers would otherwise hit a
    # NameError on the error path.
    import argparse

    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    os.system("cls")  # clear the (Windows) console
    log.info("#"*85)
    log.info("dcc_venvs setup")
    log.info("#"*85)
    sys.stdout.write("\n{} \n\n".format("-"*100))
    # Map the CLI action name onto its handler function.
    func_map = {"install": install_venv,
                "update": update_venv,
                "uninstall": uninstall_venv}
    import argparse
    parser = argparse.ArgumentParser("venv handler")
    parser.add_argument("type", type=str, help="install or uninstall")
    parser.add_argument("-dev", type=str2bool, help="use edit install for git packages under '# DEV' tag")
    parser.add_argument("-dccs", default=(), nargs="+", help="specific dccs")
    args = parser.parse_args()
    # NOTE(review): an unknown action makes func None and crashes below.
    func = func_map.get(args.type)
    func(args.dccs, args.dev)
    os.system("pause")
| 29.806818 | 112 | 0.607955 | import os
import sys
import imp
import venv
import shutil
import stat
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(" dcc-venv ")
import requirements_handler as reqhand
# VENV_ROOT_FOLDER = os.path.join(os.path.expanduser('~'), ".dcc-venvs")
VENV_ROOT_FOLDER = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), ".venvs")
CONFIGS_FOLDER = os.path.abspath(os.path.dirname(__file__))
CONFIG_PREFIX = "config_"
DCC_STARTUP_SCRIPT = os.path.join(CONFIGS_FOLDER, "common", "dcc_venv_startup.py")
def get_dcc_configs(target_configs=()):
    """Map each dcc name to its absolute ``config_<dcc>`` folder under CONFIGS_FOLDER."""
    configs = {}
    for folder, subfolder, files in os.walk(CONFIGS_FOLDER):
        if CONFIG_PREFIX in folder:
            # Folder name is 'config_<dcc>'; strip the prefix to get the dcc name.
            dcc_name = os.path.basename(folder).replace(CONFIG_PREFIX, "")
            configs[dcc_name] = os.path.abspath(folder)
    # Warn about any requested dcc that has no config folder on disk.
    for target_config in target_configs:
        if target_config not in configs.keys():
            log.warning("{}{} not found in: {}".format(CONFIG_PREFIX, target_config, CONFIGS_FOLDER))
    return configs
def get_dcc_name_from_venv(venv_path):
    """Extract the dcc name from a ``.<dcc>_venv`` path (this is kinda dumb)."""
    normalized = venv_path.replace("\\", "/")
    leaf = normalized.split("/.")[-1]
    return leaf.split("_venv")[0]
def get_activate_bat_path(venv_path):
    """Return the path of the activation .bat that matches *venv_path*."""
    dcc_name = get_dcc_name_from_venv(venv_path)
    bat_path = os.path.join(VENV_ROOT_FOLDER, f"venv_activate__{dcc_name}.bat")
    return bat_path
def create_activate_bat(venv_path):
    """Write a .bat that opens a new console with *venv_path* activated."""
    bat_path = get_activate_bat_path(venv_path)
    # 'start cmd /k' keeps the spawned console open after activation (Windows).
    bat_content = r"start cmd /k call {}\Scripts\activate.bat".format(venv_path)
    with open(bat_path, "w") as fp:
        fp.write(bat_content)
def install_venv(target_configs=(), developer=False):
    """Create a venv per dcc config, install its requirements and run its installer."""
    dcc_configs = get_dcc_configs(target_configs)
    for dcc, config_folder in dcc_configs.items():
        if target_configs and dcc not in target_configs:
            continue
        requirements_path = os.path.join(config_folder, "requirements.txt")
        venv_name = ".{}_venv".format(dcc)
        venv_path = os.path.join(VENV_ROOT_FOLDER, venv_name)
        # Windows venv layout is assumed throughout this module.
        site_packages_path = os.path.join(venv_path, "Lib", "site-packages")
        log.info("#"*50)
        log.info("Creating {} ...".format(venv_path))
        venv.create(venv_path, with_pip=True)
        log.info("Installing Requirements for {} ...".format(dcc))
        if developer:
            # Swap in a generated requirements file using editable installs
            # for the packages flagged under the '# DEV' tag.
            req_dev_path = reqhand.create_requirements_dev(requirements_path)
            requirements_path = req_dev_path
        activate_cmd = "call {}/scripts/activate".format(venv_path)
        os.system('{} && pip install -r {}'.format(activate_cmd, requirements_path))
        log.info("Creating venv_activate__{}.bat".format(dcc))
        create_activate_bat(venv_path)
        log.info("Installing config {}".format(dcc))
        # Each config folder ships a venv_handler.py exposing install()/uninstall().
        mod = imp.load_source("__dcc__", os.path.join(config_folder, "venv_handler.py"))
        mod.install(venv_path)
        log.info("Installing startup egg link script - {}".format(dcc))
        startup_path = os.path.join(site_packages_path, os.path.basename(DCC_STARTUP_SCRIPT))
        shutil.copy(DCC_STARTUP_SCRIPT, startup_path)
        log.info("#"*50)
        sys.stdout.write("\n{} \n\n".format("-"*100))
        if developer and os.path.exists(req_dev_path):
            # Remove the temporary dev requirements file.
            os.remove(req_dev_path)
    # NOTE(review): the second format argument ("-"*100) is unused here.
    sys.stdout.write("\ndcc-venv install complete for: {}\n\n".format(", ".join(target_configs), "-"*100))
def update_venv(target_configs=(), developer=False):
    """Force-reinstall the requirements of each existing dcc venv."""
    dcc_configs = get_dcc_configs(target_configs)
    for dcc, config_folder in dcc_configs.items():
        if target_configs and dcc not in target_configs:
            continue
        requirements_path = os.path.join(config_folder, "requirements.txt")
        venv_name = ".{}_venv".format(dcc)
        venv_path = os.path.join(VENV_ROOT_FOLDER, venv_name)
        # Updating only makes sense for venvs that were installed before.
        if not os.path.exists(venv_path):
            log.warning("venv does not exist for updating: {}".format(venv_name))
            continue
        log.info("Reinstalling Requirements for {} ...".format(dcc))
        if developer:
            # Use the generated dev requirements (editable '# DEV' packages).
            req_dev_path = reqhand.create_requirements_dev(requirements_path)
            requirements_path = req_dev_path
        activate_cmd = "call {}/scripts/activate".format(venv_path)
        os.system('{} && pip install --upgrade --force-reinstall -r {}'.format(activate_cmd, requirements_path))
        if developer and os.path.exists(req_dev_path):
            os.remove(req_dev_path)
def uninstall_venv(target_configs=(), developer=False):
    """Delete each dcc venv, its activate .bat, and run the config's uninstall hook."""
    dcc_configs = get_dcc_configs(target_configs)
    for dcc, config_folder in dcc_configs.items():
        if target_configs and dcc not in target_configs:
            continue
        venv_name = ".{}_venv".format(dcc)
        venv_path = os.path.join(VENV_ROOT_FOLDER, venv_name)
        log.info("#"*50)
        log.info("Uninstalling {} ...".format(venv_name))
        if os.path.exists(venv_path):
            # onremoveerror clears read-only flags so rmtree can finish.
            shutil.rmtree(venv_path, onerror=onremoveerror)
        activate_bat = get_activate_bat_path(venv_path)
        if os.path.exists(activate_bat):
            log.info("Removing venv_activate__{}.bat".format(dcc))
            os.remove(activate_bat)
        log.info("running config_{} uninstall".format(dcc))
        mod = imp.load_source("__dcc__", os.path.join(config_folder, "venv_handler.py"))
        mod.uninstall(venv_path)
        log.info("#"*50)
        sys.stdout.write("\n{} \n\n".format("-"*100))
    # NOTE(review): the second format argument ("-"*100) is unused here.
    sys.stdout.write("\ndcc-venv uninstall complete for: {}\n\n".format(", ".join(target_configs), "-"*100))
    if len(os.listdir(VENV_ROOT_FOLDER)) == 0:  # remove folder if empty
        os.rmdir(VENV_ROOT_FOLDER)
def onremoveerror(func, path, exc_info):
    """
    Error handler for ``shutil.rmtree``.
    If the error is due to an access error (read only file)
    it attempts to add write permission and then retries.
    If the error is for another reason it re-raises the error.
    Usage : ``shutil.rmtree(path, onerror=onerror)``
    """
    import stat  # local import shadows the module-level one; kept as-is
    if not os.access(path, os.W_OK):
        # Is the error an access error ?
        os.chmod(path, stat.S_IWUSR)
        func(path)
    else:
        # Re-raise the exception rmtree is currently handling.
        raise
def str2bool(v):
    """
    I can't believe this isn't built in
    https://stackoverflow.com/a/43357954
    """
    # NOTE(review): argparse is only imported under the __main__ guard below;
    # calling this from another module would raise NameError on the error
    # path — confirm before reusing as a library helper.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    os.system("cls")  # clear the (Windows) console
    log.info("#"*85)
    log.info("dcc_venvs setup")
    log.info("#"*85)
    sys.stdout.write("\n{} \n\n".format("-"*100))
    # Map the CLI action name onto its handler function.
    func_map = {"install": install_venv,
                "update": update_venv,
                "uninstall": uninstall_venv}
    import argparse
    parser = argparse.ArgumentParser("venv handler")
    parser.add_argument("type", type=str, help="install or uninstall")
    parser.add_argument("-dev", type=str2bool, help="use edit install for git packages under '# DEV' tag")
    parser.add_argument("-dccs", default=(), nargs="+", help="specific dccs")
    args = parser.parse_args()
    # NOTE(review): an unknown action makes func None and crashes below.
    func = func_map.get(args.type)
    func(args.dccs, args.dev)
    os.system("pause")
359220406527a1281d48494104b818ce6350a538 | 589 | py | Python | basic_lambda_function.py | radii/lambda-apigateway-twilio-tutorial | d2c990dbe5ca870e50243ae50e208dfd1889ddef | [
"Apache-2.0"
] | 104 | 2015-12-14T21:46:40.000Z | 2018-01-31T19:52:11.000Z | basic_lambda_function.py | radii/lambda-apigateway-twilio-tutorial | d2c990dbe5ca870e50243ae50e208dfd1889ddef | [
"Apache-2.0"
] | 4 | 2016-01-11T15:07:03.000Z | 2017-05-07T08:38:50.000Z | basic_lambda_function.py | radii/lambda-apigateway-twilio-tutorial | d2c990dbe5ca870e50243ae50e208dfd1889ddef | [
"Apache-2.0"
] | 37 | 2015-12-19T21:56:03.000Z | 2017-12-19T15:42:46.000Z | '''
Basic Twilio handler function
'''
import boto3
import random
import StringIO
import urllib2
from boto3.dynamodb.conditions import Key
from boto3.session import Session
# create an S3 & Dynamo session
s3 = boto3.resource('s3')
session = Session()
| 19.633333 | 65 | 0.680815 | '''
Basic Twilio handler function
'''
import boto3
import random
import StringIO
import urllib2
from boto3.dynamodb.conditions import Key
from boto3.session import Session
# create an S3 & Dynamo session
s3 = boto3.resource('s3')
session = Session()
def lambda_handler(event, context):
    """Basic Twilio webhook handler: report the image URL if media was attached."""
    # These lookups deliberately mirror the expected event schema;
    # a missing key raises KeyError, exactly as before.
    message = event['body']
    from_number = event['fromNumber']
    pic_url = event['image']
    num_media = event['numMedia']
    if num_media == '0':
        return 'No image found'
    return "Hi I got an image @ location %s" % pic_url
| 313 | 0 | 23 |
c27b28f0950517aebb8aa8a605d825cf8e6e5a75 | 1,326 | py | Python | setup.py | aaronlelevier/mlearning | 92e60d89f18151e07bcd1931fbd956019a233ef5 | [
"MIT"
] | 3 | 2020-01-16T10:11:09.000Z | 2021-01-20T12:59:21.000Z | setup.py | aaronlelevier/mlearning | 92e60d89f18151e07bcd1931fbd956019a233ef5 | [
"MIT"
] | 1 | 2020-01-12T11:16:48.000Z | 2020-01-12T11:16:48.000Z | setup.py | aaronlelevier/mlearning | 92e60d89f18151e07bcd1931fbd956019a233ef5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    # Use a context manager so the file handle is closed even if the
    # read or regex search fails (the original leaked the open handle).
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('mlearning')

# Standard setuptools metadata for the 'mlearning' package.
setup(
    name='mlearning',
    version=version,
    url='https://github.com/aaronlelevier/mlearning',
    license='MIT',
    description="Code repo for general machine learning code that doesn't belong to any one repo or model in particular",
    author='Aaron Lelevier',
    author_email='aaron.lelevier@gmail.com',
    # Auto-discover packages, excluding the test tree.
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    install_requires=[
        'numpy',
        'matplotlib',
        'opencv-python',
    ],
    python_requires=">=3.6",
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| 27.625 | 121 | 0.621418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    # NOTE(review): the file handle opened here is never closed; a 'with'
    # block would be safer.
    init_py = open(os.path.join(package, '__init__.py')).read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('mlearning')

# Standard setuptools metadata for the 'mlearning' package.
setup(
    name='mlearning',
    version=version,
    url='https://github.com/aaronlelevier/mlearning',
    license='MIT',
    description="Code repo for general machine learning code that doesn't belong to any one repo or model in particular",
    author='Aaron Lelevier',
    author_email='aaron.lelevier@gmail.com',
    # Auto-discover packages, excluding the test tree.
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    install_requires=[
        'numpy',
        'matplotlib',
        'opencv-python',
    ],
    python_requires=">=3.6",
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| 0 | 0 | 0 |
10199d5a643f3bff67cbb00da10669245dae054e | 735 | py | Python | test/test_ber.py | graille/ITR315-deep-telecom | 13d67e5bfc3659f69e72bde6761cca8b0420b44a | [
"MIT"
] | null | null | null | test/test_ber.py | graille/ITR315-deep-telecom | 13d67e5bfc3659f69e72bde6761cca8b0420b44a | [
"MIT"
] | null | null | null | test/test_ber.py | graille/ITR315-deep-telecom | 13d67e5bfc3659f69e72bde6761cca8b0420b44a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from src.communications import Transmitter, AWGNChannel, Receiver
from src.utils import *
# Configuration
MODULATION = 'BPSK'
EbN0dBs = np.linspace(-20, 8, 20)  # Eb/N0 sweep in dB

# Initialization
transmitter = Transmitter(MODULATION)
receiver = Receiver(MODULATION)
channel = AWGNChannel(get_bps(MODULATION), transmitter.block_length, transmitter.block_coded_length)

if __name__ == '__main__':
    # Monte-Carlo bit-error-rate estimation over the Eb/N0 sweep.
    # NOTE(review): the meaning of the 1000/500 arguments (block size /
    # stopping criterion?) should be confirmed against ber_performance.
    BER = ber_performance(
        EbN0dBs,
        get_basic_channel_fct(transmitter, channel, receiver),
        1000,
        500
    )
    # Plot results
    plt.figure()
    show_ber(MODULATION, EbN0dBs, BER)
    plt.legend(['BPSK Theory', 'BPSK simulation'])
    plt.show()
import matplotlib.pyplot as plt
import numpy as np
from src.communications import Transmitter, AWGNChannel, Receiver
from src.utils import *
# Configuration
MODULATION = 'BPSK'
EbN0dBs = np.linspace(-20, 8, 20)  # Eb/N0 sweep in dB

# Initialization
transmitter = Transmitter(MODULATION)
receiver = Receiver(MODULATION)
channel = AWGNChannel(get_bps(MODULATION), transmitter.block_length, transmitter.block_coded_length)

if __name__ == '__main__':
    # Monte-Carlo bit-error-rate estimation over the Eb/N0 sweep.
    BER = ber_performance(
        EbN0dBs,
        get_basic_channel_fct(transmitter, channel, receiver),
        1000,
        500
    )
    # Plot results
    plt.figure()
    show_ber(MODULATION, EbN0dBs, BER)
    plt.legend(['BPSK Theory', 'BPSK simulation'])
    plt.show()
97af0d4270e7125ce486b5ebce2f379147e6d92d | 1,177 | py | Python | SoundEffect.py | dan-kuroto/HarukaPet | 09409ec254530b53624c431cd9b4add612bfa2fc | [
"MIT"
] | 4 | 2022-01-27T15:51:33.000Z | 2022-03-28T07:20:05.000Z | SoundEffect.py | dan-kuroto/HarukaPet | 09409ec254530b53624c431cd9b4add612bfa2fc | [
"MIT"
] | null | null | null | SoundEffect.py | dan-kuroto/HarukaPet | 09409ec254530b53624c431cd9b4add612bfa2fc | [
"MIT"
] | null | null | null | import json
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
voicePlayer = Sound('voice')  # voice track
soundPlayer = Sound('sound')  # sound-effect track
# bgmPlayer = Sound('bgm')  # BGM track; not needed in this project

if __name__ == '__main__':  # standard usage example
    # Other signal/slot mechanisms are unreliable here — they only fire on
    # explicit user actions; the only trustworthy probe is actively querying
    # .position() (.duration() is not dependable either).
    from time import sleep
    app = QApplication(sys.argv)
    sound = Sound('test')
    sound.play_music(r'source\Move.wav')
    sleep(3)
    sound.play_music(r'source\Note.wav')
    sleep(3)
    # sys.exit(app.exec_())  # must not block, otherwise the app cannot exit
| 28.707317 | 64 | 0.64486 | import json
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
class Sound(QMediaPlayer):
    """A single named audio track backed by QMediaPlayer.

    The object name identifies the track ('voice', 'sound', ...) and is
    used as the enable/disable key in source\\UserData.json.
    """

    def __init__(self, name: str):
        super(Sound, self).__init__()
        self.setObjectName(name)

    def play_music(self, path: str):
        """Play the file at *path* if this track is enabled in UserData.json."""
        # Context manager guarantees the handle is closed even when
        # json.load raises (the original leaked it in that case).
        with open(r'source\UserData.json', encoding='utf-8') as file:
            data = json.load(file)
        if data[self.objectName()]:
            # self.pause() is unnecessary: setMedia() below pauses implicitly.
            # Manually pausing an already-finished player misbehaves
            # (and the failure cannot be caught with try).
            url = QUrl.fromLocalFile(path)
            content = QMediaContent(url)
            self.setMedia(content)
            self.play()
voicePlayer = Sound('voice')  # voice track
soundPlayer = Sound('sound')  # sound-effect track
# bgmPlayer = Sound('bgm')  # BGM track; not needed in this project
if __name__ == '__main__':  # standard usage example
    # Other signal/slot mechanisms are unreliable here — they only fire on
    # explicit user actions; the only trustworthy probe is actively querying
    # .position() (.duration() is not dependable either).
    from time import sleep
    app = QApplication(sys.argv)
    sound = Sound('test')
    sound.play_music(r'source\Move.wav')
    sleep(3)
    sound.play_music(r'source\Note.wav')
    sleep(3)
    # sys.exit(app.exec_())  # must not block, otherwise the app cannot exit
| 528 | 5 | 76 |
605f35d964149765b20bfddb755148266f5ebefa | 4,059 | py | Python | gen/gen.py | JBorrow/GoGoGadget | 9eb0384aba585d181a43a474c951ca9071864d14 | [
"MIT"
] | null | null | null | gen/gen.py | JBorrow/GoGoGadget | 9eb0384aba585d181a43a474c951ca9071864d14 | [
"MIT"
] | null | null | null | gen/gen.py | JBorrow/GoGoGadget | 9eb0384aba585d181a43a474c951ca9071864d14 | [
"MIT"
] | null | null | null | import numpy as np
import gen.dists as dists
# This really should be a pass-through function for gen gas, etc. but oh well.
if __name__ == "__main__":
    # Generate some test distribution and display in 3d with mpl
    gen = Generator(int(100), int(100), int(100), 1e5, 1e4, 10, 40, 100, 10, 40, 100, 10, 2)
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Velocity quivers for the gas and stellar discs.
    ax.quiver(gen.gas_x, gen.gas_y, gen.gas_z, gen.gas_v_x, gen.gas_v_y, gen.gas_v_z, length=10)
    ax.quiver(gen.star_x, gen.star_y, gen.star_z, gen.star_v_x, gen.star_v_y, gen.star_v_z, length=10)
    ax.set_zlim(-400, 400)
    plt.show()
| 34.991379 | 141 | 0.635378 | import numpy as np
import gen.dists as dists
class Generator:
    """
    Build initial conditions for a galaxy model: an NFW dark-matter halo
    plus optional exponential gas and stellar discs.

    Positions and velocities are generated on construction; cartesian
    attributes (dm_x ... star_v_z) are available after _convert_coords().
    """

    def __init__(self, n_DM, n_gas, n_star, M_halo, M_gas, M_star, R_NFW, c_NFW, R_gas, max_gas, Z_gas, R_star, max_star, Z_star, G=6.67e-8):
        # Particle counts per component.
        self.n_DM = n_DM
        self.n_gas = n_gas
        self.n_star = n_star
        # Component masses.
        self.M_halo = M_halo
        self.M_gas = M_gas
        self.M_star = M_star
        # NFW halo scale radius and concentration.
        self.R_NFW = R_NFW
        self.c_NFW = c_NFW
        # Gas disc scale radius, radial cut-off and scale height.
        self.R_gas = R_gas
        self.max_gas = max_gas
        self.Z_gas = Z_gas
        # Stellar disc scale radius, radial cut-off and scale height.
        self.R_star = R_star
        self.max_star = max_star
        self.Z_star = Z_star
        # Gravitational constant; the default looks like CGS — confirm units.
        self.G = G
        self._gen_dm()
        if n_gas:
            self._gen_gas()
        if n_star:
            self._gen_star()
        self._convert_coords()
        return

    def _gen_dm(self):
        # Isotropic angles; radii drawn from the NFW profile sampler.
        self.nfw_gen = dists.NFW(self.R_NFW, self.c_NFW)
        self.dm_theta = 2*np.pi*np.random.rand(self.n_DM)
        self.dm_phi = np.arccos(2*np.random.rand(self.n_DM) - 1)
        self.dm_r = self.nfw_gen.gen(self.n_DM)
        # Speed magnitude from the enclosed mass; direction from dists.
        self.dm_v_x, self.dm_v_y, self.dm_v_z = self._mod_v(self.dm_r)*self.nfw_gen.vel(self.dm_r)
        return self.dm_theta, self.dm_phi, self.dm_r, self.dm_v_x, self.dm_v_y, self.dm_v_z

    def _gen_gas(self):
        gen_gas_r = dists.GasR(self.R_gas, self.max_gas)
        gen_gas_z = dists.GasZ(self.Z_gas)
        self.gas_theta = 2*np.pi*np.random.rand(self.n_gas)
        self.gas_z = gen_gas_z.gen(self.n_gas)
        self.gas_r = gen_gas_r.gen(self.n_gas)
        # NOTE(review): vel() is fed the azimuthal angle here (but the radius
        # in _gen_dm) — confirm the expected argument against gen.dists.
        self.gas_v_x, self.gas_v_y, self.gas_v_z = self._mod_v(self.gas_r)*gen_gas_r.vel(self.gas_theta)
        return self.gas_theta, self.gas_z, self.gas_r, self.gas_v_x, self.gas_v_y, self.gas_v_z

    # This really should be a pass-through function for gen gas, etc. but oh well.
    def _gen_star(self):
        gen_star_r = dists.GasR(self.R_star, self.max_star)
        gen_star_z = dists.GasZ(self.Z_star)
        self.star_theta = 2*np.pi*np.random.rand(self.n_star)
        self.star_z = gen_star_z.gen(self.n_star)
        self.star_r = gen_star_r.gen(self.n_star)
        self.star_v_x, self.star_v_y, self.star_v_z = self._mod_v(self.star_r)*gen_star_r.vel(self.star_theta)
        return self.star_theta, self.star_z, self.star_r, self.star_v_x, self.star_v_y, self.star_v_z

    def _m_in_r_dm(self, r):
        # Enclosed NFW halo mass within radius r.
        prefactor = self.M_halo/(np.log(self.c_NFW + 1) - self.c_NFW/(self.c_NFW + 1))
        return prefactor*(np.log((self.R_NFW + r)/self.R_NFW) - r/(self.R_NFW + r))

    def _m_in_r_gas(self, r):
        # Enclosed mass of an exponential gas disc within radius r.
        div = r/self.R_gas
        return self.M_gas*(1 - (1 + div)*np.exp(-div))

    def _m_in_r_star(self, r):
        # Enclosed mass of an exponential stellar disc within radius r.
        div = r/self.R_star
        return self.M_star*(1 - (1 + div)*np.exp(-div))

    def _m_in_r(self, r):
        # We need to consider all components separately
        return self._m_in_r_dm(r) + self._m_in_r_gas(r) + self._m_in_r_star(r)

    def _mod_v(self, r):
        # Circular-velocity magnitude from the total enclosed mass.
        return np.sqrt(self.G * self._m_in_r(r)/r)

    def _convert_coords(self):
        # Halo uses spherical coordinates; the discs use cylindrical ones.
        self.dm_x, self.dm_y, self.dm_z = dists.spherical_to_cartesian(self.dm_r, self.dm_theta, self.dm_phi)
        if self.n_gas:
            self.gas_x, self.gas_y, self.gas_z = dists.cylindrical_to_cartesian(self.gas_r, self.gas_theta, self.gas_z)
        if self.n_star:
            self.star_x, self.star_y, self.star_z = dists.cylindrical_to_cartesian(self.star_r, self.star_theta, self.star_z)
        return
if __name__ == "__main__":
    # Demo: generate a small test distribution and display the gas and
    # star velocity fields in 3D with matplotlib.
    #
    # BUG FIX: the original call passed only 13 positional arguments while
    # Generator.__init__ requires 14 (Z_star was missing), raising a
    # TypeError before anything was drawn.  Arguments are now passed by
    # keyword so each value is unambiguous; Z_star=2 mirrors max_star and
    # is a demo-only choice -- adjust as needed.
    gen = Generator(n_DM=100, n_gas=100, n_star=100,
                    M_halo=1e5, M_gas=1e4, M_star=10,
                    R_NFW=40, c_NFW=100,
                    R_gas=10, max_gas=40, Z_gas=100,
                    R_star=10, max_star=2, Z_star=2)
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.quiver(gen.gas_x, gen.gas_y, gen.gas_z, gen.gas_v_x, gen.gas_v_y, gen.gas_v_z, length=10)
    ax.quiver(gen.star_x, gen.star_y, gen.star_z, gen.star_v_x, gen.star_v_y, gen.star_v_z, length=10)
    ax.set_zlim(-400, 400)
    plt.show()
| 3,040 | -5 | 296 |
e5cf921f51dc8aeb84813b308b2172c05c4d3efd | 2,599 | py | Python | SRC/engine/IO/meshinfo.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 31 | 2015-04-01T15:59:36.000Z | 2022-03-18T20:21:47.000Z | SRC/engine/IO/meshinfo.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 3 | 2015-02-06T19:30:24.000Z | 2017-05-25T14:14:31.000Z | SRC/engine/IO/meshinfo.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 7 | 2015-01-23T15:19:22.000Z | 2021-06-09T09:03:59.000Z | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.common import debug
from ooflib.common import toolbox
from ooflib.engine.IO import genericinfotoolbox
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
toolbox.registerToolboxClass(MeshInfoToolbox, ordering=3.0)
| 39.378788 | 72 | 0.698346 | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.common import debug
from ooflib.common import toolbox
from ooflib.engine.IO import genericinfotoolbox
class MeshInfoMode(genericinfotoolbox.GenericInfoMode):
    """Base class for Mesh query modes.  Concrete subclasses choose the
    target type (Element or Node) and resolve clicked indices."""
    pass
class MeshElementInfoMode(MeshInfoMode):
    """Query mode that reports on a single Mesh element."""
    # targetName is presumably used by the generic toolbox machinery to
    # label/dispatch this mode -- confirm in genericinfotoolbox.
    targetName = "Element"
    def resolveQuery(self, meshctxt, indx):
        """Return the element with index *indx* from the context's mesh."""
        return meshctxt.getObject().getElement(indx)
class MeshNodeInfoMode(MeshInfoMode):
    """Query mode that reports on a single Mesh node."""
    targetName = "Node"
    def resolveQuery(self, meshctxt, indx):
        """Return the node with index *indx* from the context's mesh."""
        return meshctxt.getObject().getNode(indx)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class MeshInfoToolbox(genericinfotoolbox.GenericInfoToolbox):
    """Graphics-window toolbox for querying Meshes.

    Offers Element and Node query modes (makeInfoModes) and caches the
    display layer showing the Mesh in ``self.meshlayer`` so results can
    be drawn at displaced positions.
    """
    whoClassName = 'Mesh'
    def __init__(self, gfxwindow):
        genericinfotoolbox.GenericInfoToolbox.__init__(
            self, gfxwindow, 'Mesh Info', self.makeInfoModes())
        # The mesh info toolbox has a special attribute, "meshlayer",
        # which refers to the display layer in which the referred-to
        # mesh is displayed.  The reason for needing the actual layer
        # is that the toolbox *display* needs to be able to draw the
        # selected objects (elements, nodes) at their displaced
        # position, possibly including any enhancements, and the mesh
        # display layer's source object has all of that data.  Mesh
        # display layers provide coordinate transformation routines
        # that convert undisplaced to displaced points, and vice
        # versa.
        self.meshlayer = None
    def makeInfoModes(self):
        """Return the query modes shown by this toolbox."""
        return [MeshElementInfoMode(self), MeshNodeInfoMode(self)]
    def activate(self):
        """Cache the topmost Mesh display layer when the toolbox activates."""
        genericinfotoolbox.GenericInfoToolbox.activate(self)
        self.meshlayer = self.gfxwindow().topwholayer("Mesh")
    def newLayers(self): # sb "layers changed" callback
        # Re-fetch the mesh layer whenever the window's layer stack changes.
        genericinfotoolbox.GenericInfoToolbox.newLayers(self)
        self.meshlayer = self.gfxwindow().topwholayer("Mesh")
    tip = "Get information about a Mesh."
    discussion="""<para>
    Get information about a &mesh;, including &field; values, based on
    mouse input.
    </para>"""
toolbox.registerToolboxClass(MeshInfoToolbox, ordering=3.0)
| 1,281 | 533 | 92 |
966b50a9e7312243d16b4738bb05893c48473875 | 211 | py | Python | Chapter 4/pair_client.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 22 | 2016-06-07T07:52:35.000Z | 2021-11-08T13:12:21.000Z | Chapter 4/pair_client.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 2 | 2016-05-23T08:20:54.000Z | 2018-07-02T08:21:32.000Z | Chapter 4/pair_client.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 27 | 2016-05-23T08:19:51.000Z | 2021-08-31T02:46:00.000Z | import zmq
# ZeroMQ PAIR-socket client: binds a PAIR endpoint and exchanges one
# message per second with its peer.
#
# Fixes to the original listing:
#   * `"tcp://*:5555" % port` raised TypeError (no format placeholder)
#     and `port` was never defined -- the port is now a real variable.
#   * `time` was used without being imported.
#   * Python 3: print() call and a bytes payload for socket.send().
import time

import zmq

port = 5555
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:%s" % port)
while True:
    socket.send(b"What time is it?")
    msg = socket.recv()
    print(msg)
    time.sleep(1)
| 16.230769 | 35 | 0.63981 | import zmq
# ZeroMQ PAIR-socket client: binds a PAIR endpoint and exchanges one
# message per second with its peer.
#
# Fixes to the original listing:
#   * `"tcp://*:5555" % port` raised TypeError (no format placeholder)
#     and `port` was never defined -- the port is now a real variable.
#   * `time` was used without being imported.
#   * Python 3: print() call and a bytes payload for socket.send().
import time

import zmq

port = 5555
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:%s" % port)
while True:
    socket.send(b"What time is it?")
    msg = socket.recv()
    print(msg)
    time.sleep(1)
| 0 | 0 | 0 |
edbd3df8bb3402a06e9354c51cce1d5a2e698ae0 | 396 | py | Python | djangoapp/website/admin_urls.py | jaysridhar/fuzzy-spoon | 43ff6c424c6fe8e0d46cbb1555ada57957bf0cb4 | [
"MIT"
] | null | null | null | djangoapp/website/admin_urls.py | jaysridhar/fuzzy-spoon | 43ff6c424c6fe8e0d46cbb1555ada57957bf0cb4 | [
"MIT"
] | null | null | null | djangoapp/website/admin_urls.py | jaysridhar/fuzzy-spoon | 43ff6c424c6fe8e0d46cbb1555ada57957bf0cb4 | [
"MIT"
] | null | null | null | from django.urls import path
from website import api
urlpatterns = [
path('', api.load_user_location),
path('status/<status>', api.load_user_location),
path('approve/<locid>', api.approve_user_location),
path('approve/', api.approve_user_location),
path('disapprove/<locid>', api.disapprove_user_location),
path('disapprove/', api.disapprove_user_location),
]
| 33 | 62 | 0.704545 | from django.urls import path
from website import api
# URL routes for the user-location admin API; all views live in website.api.
# Approve/disapprove come in pairs: one capturing a location id, one without.
urlpatterns = [
    path('', api.load_user_location),
    path('status/<status>', api.load_user_location),
    path('approve/<locid>', api.approve_user_location),
    path('approve/', api.approve_user_location),
    path('disapprove/<locid>', api.disapprove_user_location),
    path('disapprove/', api.disapprove_user_location),
]
| 0 | 0 | 0 |
e290af2c95b86f602dda1aefef693139350755aa | 2,928 | py | Python | tests.py | ardinor/mojibake | b080656866603ebf4020ea2b49618cc29533a77b | [
"MIT"
] | 1 | 2015-11-05T16:14:13.000Z | 2015-11-05T16:14:13.000Z | tests.py | ardinor/mojibake | b080656866603ebf4020ea2b49618cc29533a77b | [
"MIT"
] | null | null | null | tests.py | ardinor/mojibake | b080656866603ebf4020ea2b49618cc29533a77b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import datetime
from sqlalchemy.exc import IntegrityError
from mojibake.app import app, db
from mojibake.models import Post, Category, Tag, User
from mojibake.settings import TEST_DATABASE_URI
#TO DO: More work on this on testing!
#http://flask.pocoo.org/docs/testing/
if __name__ == '__main__':
unittest.main()
| 32.533333 | 96 | 0.644467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import datetime
from sqlalchemy.exc import IntegrityError
from mojibake.app import app, db
from mojibake.models import Post, Category, Tag, User
from mojibake.settings import TEST_DATABASE_URI
#TO DO: More work on this on testing!
#http://flask.pocoo.org/docs/testing/
class Tests(unittest.TestCase):
    """Model-level tests for Category, Tag and Post.

    Each test runs against a fresh database created from
    TEST_DATABASE_URI (see setUp/tearDown).
    """
    def setUp(self):
        # Fresh app config, test client and schema for every test.
        app.config['TESTING'] = True
        app.config['CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = TEST_DATABASE_URI
        self.app = app.test_client()
        db.create_all()
    def tearDown(self):
        db.session.remove()
        db.drop_all()
    def test_categories(self):
        """Categories can be created with and without a Japanese name."""
        test_cat = Category(name='Test')
        db.session.add(test_cat)
        db.session.commit()
        db_test_cat = Category.query.filter(Category.name == 'Test')
        self.assertEqual(db_test_cat.count(), 1)
        db_test_cat = db_test_cat.first()
        self.assertIsNone(test_cat.name_ja)
        test_cat = Category(name='Test2', name_ja='テスト')
        db.session.add(test_cat)
        db.session.commit()
        db_test_cat = Category.query.filter(Category.name == 'Test2')
        self.assertEqual(db_test_cat.count(), 1)
        db_test_cat = db_test_cat.first()
        self.assertIsNotNone(db_test_cat.name_ja)
        self.assertEqual(test_cat.name_ja, 'テスト')
    def test_category_integrity(self):
        """Duplicate category names violate the unique constraint."""
        test_cat = Category(name='Test')
        db.session.add(test_cat)
        db.session.commit()
        test_cat2 = Category(name='Test')
        db.session.add(test_cat2)
        self.assertRaises(IntegrityError, db.session.commit)
        db.session.rollback()
    def test_posts(self):
        """Posts render markdown bodies and, on delete, orphaned
        categories and tags are removed."""
        test_post = Post('Test Post', 'test_post')
        test_post.add_category('Test')
        test_post.add_tags('tag1;tag2')
        test_post.add_body('This is a **test** post.', 'これはテスト書き込みですね。')
        test_post.title_ja = 'テスト書き込み'
        db.session.add(test_post)
        db.session.commit()
        db_test_post = Post.query.filter(Post.slug == 'test_post').first()
        self.assertIsNotNone(db_test_post)
        self.assertEqual(db_test_post.body_html, '<p>This is a <strong>test</strong> post.</p>')
        self.assertEqual(db_test_post.body_ja_html, '<p>これはテスト書き込みですね。</p>')
        db_cat = db_test_post.category_id
        self.assertIsNotNone(db_cat)
        db_tags = [tag.id for tag in db_test_post.tags]
        # BUG FIX: was assertIsNot(len(db_tags), 0) -- an identity check on
        # ints; assertNotEqual states the real intent (tags were stored).
        self.assertNotEqual(len(db_tags), 0)
        db.session.delete(db_test_post)
        db.session.commit()
        # Ensure orphaned categories and tags are deleted.
        # BUG FIX: the original filtered on the *builtin* ``id`` function
        # (``filter(id == db_cat)``), a constant-False expression that made
        # these assertions pass vacuously.  Filter on the model columns.
        cat = Category.query.filter(Category.id == db_cat).first()
        self.assertIsNone(cat)
        for tag_id in db_tags:
            tag = Tag.query.filter(Tag.id == tag_id).first()
            self.assertIsNone(tag)
# Run the suite directly: ``python tests.py``.
if __name__ == '__main__':
    unittest.main()
| 2,458 | 10 | 158 |
5c811082f08281075392593b809ed5d72f62a9a7 | 262 | py | Python | engine/sound_loop.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | 1 | 2020-09-25T02:46:00.000Z | 2020-09-25T02:46:00.000Z | engine/sound_loop.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | null | null | null | engine/sound_loop.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | null | null | null | import pyglet
| 23.818182 | 67 | 0.648855 | import pyglet
class SoundLoop(pyglet.media.Player):
    """A pyglet Player that plays a given sound in an endless loop."""
    def __init__(self, sound):
        """Wrap *sound* in a looping SourceGroup and queue it for playback.

        NOTE(review): ``SourceGroup(audio_format, None)`` is the old
        (pre-1.4) pyglet API -- confirm against the pyglet version in use.
        """
        super().__init__()
        looper = pyglet.media.SourceGroup(sound.audio_format, None)
        looper.loop = True
        looper.queue(sound)
        self.queue(looper)
b52bff6ffbcd793210d4dcce63d859de14a6987e | 12,658 | py | Python | functions/ScheduleMaintenanceWindow/ScheduleMaintenanceWindow.py | densify-quick-start/ITSM-Controlled-Continuous-Optimization | bd05c4bb705c6412cc62a0cc597f39dbd96c7e60 | [
"Apache-2.0"
] | 1 | 2021-05-20T22:50:52.000Z | 2021-05-20T22:50:52.000Z | functions/ScheduleMaintenanceWindow/ScheduleMaintenanceWindow.py | densify-quick-start/ITSM-Controlled-Continuous-Optimization | bd05c4bb705c6412cc62a0cc597f39dbd96c7e60 | [
"Apache-2.0"
] | null | null | null | functions/ScheduleMaintenanceWindow/ScheduleMaintenanceWindow.py | densify-quick-start/ITSM-Controlled-Continuous-Optimization | bd05c4bb705c6412cc62a0cc597f39dbd96c7e60 | [
"Apache-2.0"
] | 1 | 2021-05-26T11:56:06.000Z | 2021-05-26T11:56:06.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 10:42:50 2020
@author: draveendran
"""
import json
import boto3
import datetime
import os
#SSM Functions | 28.638009 | 196 | 0.539027 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 10:42:50 2020
@author: draveendran
"""
import json
import boto3
import datetime
import os
def lambda_handler(event, context):
    """Lambda entry point: schedule an SSM maintenance window for the
    CloudFormation stack referenced by an OpsItem, then wire up the
    update task, tags, related OpsItems and parameters.

    ``event`` must contain: 'opsItemId', 'duration' (hours, 1-24),
    'cronExpression' (used as a one-time ``at(...)`` schedule) and
    'timezone'.
    Returns {'statusCode': 200, ...} on success, or
    {'statusCode': -1, 'body': {'message': ...}} on any failure.
    """
    try:
        print("Incoming event: " + str(event))
        # Validate the requested window length up front.
        if int(event['duration']) < 1 or int(event['duration']) > 24:
            raise Exception ("Duration can only be between 1-24 hours. Specify as an integer.")
        opsItemId = event['opsItemId']
        opsItem = getOpsItem(opsItemId)
        # Window names are derived from the stack name: "mw-<stack-name>".
        maintenanceWindow = findActiveMaintenanceWindow("mw-" + opsItem['OpsItem']['OperationalData']['cloudformation:stack-name']['Value'])
        windowId = None
        # The helper functions in this module return False (not raise) on failure.
        if maintenanceWindow != False:
            print("Updating existing maintenance window.")
            windowId = maintenanceWindow['WindowId']
            updateMaintenanceWindow(windowId, event)
        else:
            print("Creating a new maintenance window.")
            windowId = createMaintenanceWindow(event)
            if windowId == False:
                raise Exception ("Window creation failed.")
        print("Registering tasks.")
        register_task(windowId, opsItemId)
        print("Updating tags on maintenance window.")
        updateMaintenanceWindowTags(windowId, opsItemId)
        print("Updating all associated opsItems.")
        updateOpsItems(windowId, opsItemId, event)
        print("Update assoicated parameters")
        updateParameters(opsItemId)
        return {
            'statusCode': 200,
            'body': json.dumps('Maintenance window scheduled.')
        }
    except Exception as error:
        print("Exception encountered: " + str(error))
        return {'statusCode': -1, 'body': json.dumps({'message': str(error)})}
def updateParameters(opsItemId):
    """Re-save and label ('Scheduled') the SSM parameter behind every
    OpsItem related to *opsItemId*.

    Re-putting the current value bumps the parameter version so the new
    version can carry the 'Scheduled' label.
    FIX: the success path previously fell through returning None while
    the failure path returned False; now returns True on success for a
    consistent truthy/False contract (matching the other helpers here).
    """
    try:
        relatedOpsItems = getRelatedOpsItems(opsItemId)
        for opsItem in relatedOpsItems:
            parameterKey = getOpsItem(opsItem['OpsItemId'])['OpsItem']['OperationalData']['parameterKey']['Value']
            version = putParameter(parameterKey, getParameter(parameterKey)['Parameter']['Value'])
            labelParameterVersion(parameterKey, version, ['Scheduled'])
        return True
    except Exception as error:
        print(error)
        return False
def register_task(windowId, opsItemId):
    """Register the ManageUpdateProcess Lambda as a task of the window.

    The task payload carries the stack ARN, the window id and stage "1";
    account id and region are parsed out of the stack ARN.  The service
    role ARN is read from the 'assumeRole' field embedded in the
    'ScheduleMaintenanceWindow' SSM document's content.
    NOTE(review): the target 'i-00000000000000000' looks like a dummy
    instance id used only to satisfy the Targets requirement -- confirm.
    Errors are printed and swallowed.
    """
    try:
        client = boto3.client('ssm')
        opsItem = client.get_ops_item(
            OpsItemId=opsItemId
        )
        payload = {}
        payload['stackId'] = opsItem['OpsItem']['OperationalData']['cloudformation:stack-id']['Value']
        payload['windowId'] = windowId
        payload['stage'] = "1"
        jsonStr = json.dumps(payload)
        # Stack ARN format: arn:aws:cloudformation:<region>:<account>:...
        accountId = payload['stackId'].split(":")[4]
        region = payload['stackId'].split(":")[3]
        ssm_doc = client.get_document(
            Name='ScheduleMaintenanceWindow'
        )
        response = client.register_task_with_maintenance_window(
            WindowId=windowId,
            Targets=[
                {
                    'Key': 'InstanceIds',
                    'Values': [
                        'i-00000000000000000',
                    ]
                },
            ],
            TaskArn='arn:aws:lambda:' + region + ':' + accountId + ':function:ManageUpdateProcess',
            ServiceRoleArn=json.loads(ssm_doc['Content'])['assumeRole'],
            TaskType='LAMBDA',
            TaskInvocationParameters={
                'Lambda': {
                    'Payload': jsonStr.encode('utf-8')
                }
            },
            MaxConcurrency='1',
            MaxErrors='1',
            Name='Update-Stack'
        )
        print(response)
    except Exception as error:
        print(error)
def updateOpsItems(windowId, opsItemId, event):
    """Mark every OpsItem related to *opsItemId* as InProgress and attach
    the scheduled-window details (window id, cron expression, duration)
    to its operational data.  Errors are printed and swallowed.
    """
    try:
        client = boto3.client('ssm')
        relatedOpsItems = getRelatedOpsItems(opsItemId)
        opsData = {}
        opsData['scheduledMaintenanceWindowDetails'] = {}
        opsData['scheduledMaintenanceWindowDetails']['Value'] = 'WindowId=' + windowId
        opsData['scheduledMaintenanceWindowDetails']['Value'] += '\nCronExpression=' + event['cronExpression']
        opsData['scheduledMaintenanceWindowDetails']['Value'] += '\nDuration=' + event['duration']
        opsData['scheduledMaintenanceWindowDetails']['Type'] = 'String'
        response = {}
        for opsItem in relatedOpsItems:
            response = client.update_ops_item(
                Status='InProgress',
                OperationalData=opsData,
                OpsItemId=opsItem['OpsItemId']
            )
        # Note: only the last update's response is printed.
        print(response)
    except Exception as error:
        print(error)
def getRelatedOpsItems(opsItemId):
    """Return Open/InProgress OpsItems belonging to the same
    CloudFormation stack as *opsItemId*, as [{'OpsItemId': ...}, ...].

    FIX: removed the unused ``stackId`` lookup from the original.
    On error the exception is printed and None is returned implicitly
    (callers run inside their own try blocks and treat that as failure).
    """
    try:
        client = boto3.client('ssm')
        opsItem = client.get_ops_item(
            OpsItemId=opsItemId
        )
        stackName = opsItem['OpsItem']['OperationalData']['cloudformation:stack-name']['Value']
        # Match on the stack-name key in each OpsItem's operational data,
        # restricted to items that are still actionable.
        OpsItemFilters=[
            {
                'Key': 'OperationalData',
                'Values': [
                    "{\"key\":\"cloudformation:stack-name\",\"value\":\"" + stackName + "\"}",
                ],
                'Operator': 'Equal'
            },
            {
                'Key': 'Status',
                'Values': [
                    "Open",
                    "InProgress"
                ],
                'Operator': 'Equal'
            }
        ]
        relatedOpsItems = getOpsItems(OpsItemFilters)
        return relatedOpsItems
    except Exception as error:
        print(error)
def getOpsItems(filter):
    """Run an SSM describe_ops_items query with the given filters and
    return the matching ids as a list of {'OpsItemId': ...} dicts.

    On error the exception is printed and None is returned implicitly.
    """
    try:
        ssm = boto3.client('ssm')
        summaries = ssm.describe_ops_items(OpsItemFilters=filter)['OpsItemSummaries']
        return [{'OpsItemId': item['OpsItemId']} for item in summaries]
    except Exception as error:
        print(error)
def updateMaintenanceWindowTags(windowId, opsItemId):
    """Tag the maintenance window with its CloudFormation stack name and
    a LastModifiedDate timestamp (UTC, ISO-8601 without offset).

    FIX: removed the unused ``stackId`` lookup and the redundant
    ``str()`` around ``strftime`` (strftime already returns str).
    Errors are printed and swallowed.
    """
    try:
        client = boto3.client('ssm')
        opsItem = client.get_ops_item(
            OpsItemId=opsItemId
        )
        stackName = opsItem['OpsItem']['OperationalData']['cloudformation:stack-name']['Value']
        response = client.add_tags_to_resource(
            ResourceType='MaintenanceWindow',
            ResourceId=windowId,
            Tags=[
                {
                    'Key': 'cloudformation:stack-name',
                    'Value': stackName
                },
                {
                    'Key': 'LastModifiedDate',
                    'Value': datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
                }
            ]
        )
        print(response)
    except Exception as error:
        print(error)
def updateMaintenanceWindow(windowId, event):
    """Replace an existing maintenance window's definition using the
    event's cron expression, timezone and duration.

    The schedule is a one-time ``at(<cronExpression>)`` occurrence and the
    window name is re-derived from the stack name of event['opsItemId'].
    Errors are printed and swallowed.
    """
    try:
        client = boto3.client("ssm")
        opsItem = client.get_ops_item(
            OpsItemId=event['opsItemId']
        )
        windowName = "mw-" + opsItem['OpsItem']['OperationalData']['cloudformation:stack-name']['Value']
        response = client.update_maintenance_window(
            WindowId=windowId,
            Name=windowName,
            Schedule="at(" + event['cronExpression'] + ")",
            ScheduleTimezone=event['timezone'],
            Duration=int(event['duration']),
            Cutoff=1,
            AllowUnassociatedTargets=True,
            Enabled=True,
            # Replace=True overwrites all unspecified fields with defaults.
            Replace=True
        )
        print(response)
    except Exception as error:
        print(error)
def createMaintenanceWindow(event):
    """Create a one-time maintenance window named after the stack of
    event['opsItemId'] ("mw-<stack-name>").

    Uses ``at(<cronExpression>)`` scheduling with the event's timezone
    and duration.  Returns the new WindowId, or False after printing the
    error on failure.
    """
    try:
        client = boto3.client("ssm")
        opsItem = client.get_ops_item(
            OpsItemId=event['opsItemId']
        )
        windowName = "mw-" + opsItem['OpsItem']['OperationalData']['cloudformation:stack-name']['Value']
        response = client.create_maintenance_window(
            Name=windowName,
            Schedule="at(" + event['cronExpression'] + ")",
            ScheduleTimezone=event['timezone'],
            Duration=int(event['duration']),
            Cutoff=1,
            AllowUnassociatedTargets=True
        )
        print(response)
        return response['WindowId']
    except Exception as error:
        print("Exception encountered while trying to create a maintenance window.")
        print(error)
        return False
#SSM Functions
def getOpsItem(OpsItemId, **kwargs):
    """Fetch one SSM OpsItem by id.

    Keyword 'region' overrides the AWS_REGION environment variable.
    Returns the raw get_ops_item response, or False after printing the
    error on failure.
    """
    region = os.environ['AWS_REGION']
    if 'region' in kwargs:
        region = kwargs['region']
    ssm_client = boto3.session.Session().client('ssm', region)
    try:
        return ssm_client.get_ops_item(OpsItemId=OpsItemId)
    except Exception as error:
        print("Exception encountered while getting opsItem [" + OpsItemId + "]: " + str(error))
        return False
def findActiveMaintenanceWindow(name, **kwargs):
    """Look up the single *enabled* maintenance window with this name.

    Keyword 'region' overrides the AWS_REGION environment variable.
    Returns the window-identity dict if exactly one match exists;
    otherwise (zero, many, or any API error) prints a message and
    returns False.
    """
    region = os.environ['AWS_REGION']
    if 'region' in kwargs:
        region = kwargs['region']
    session = boto3.session.Session()
    ssm_client = session.client('ssm', region)
    try:
        response = ssm_client.describe_maintenance_windows(
            Filters=[
                {
                    'Key': 'Name',
                    'Values': [
                        name
                    ]
                },
                {
                    'Key': 'Enabled',
                    'Values': [
                        'True'
                    ]
                }
            ]
        )
        print(response)
        # Anything other than exactly one match is treated as a failure;
        # the raise below is caught by this function's own except clause.
        if len(response['WindowIdentities']) != 1:
            raise Exception ("Total number of maintenance window(s) " + str(response['WindowIdentities']) + " found is " + str(len(response['WindowIdentities'])) + ". There should only exist 1.")
        return response['WindowIdentities'][0]
    except Exception as error:
        print("Exception caught while locating maintenance window [" + name + "]: " + str(error))
        return False
def putParameter(parameterKey, value, **kwargs):
    """Create or overwrite a String SSM parameter (Standard tier).

    Keywords: 'region' overrides AWS_REGION; 'description' sets the
    parameter description (defaults to empty).
    Returns the new parameter version number, or False after printing
    the error on failure.
    """
    region = os.environ['AWS_REGION']
    desc = ""
    if 'region' in kwargs:
        region = kwargs['region']
    if 'description' in kwargs:
        desc = kwargs['description']
    session = boto3.session.Session()
    ssm_client = session.client('ssm', region)
    try:
        response = ssm_client.put_parameter(
            Name=parameterKey,
            Description=desc,
            Value=value,
            Type='String',
            Overwrite=True,
            Tier='Standard'
        )
        return response['Version']
    except Exception as error:
        print("Exception caught while creating/updating parameter[" + parameterKey + "] in region[" + region + "]: " + str(error))
        return False
def getParameter(key, **kwargs):
    """Fetch (and decrypt) one SSM parameter by name.

    Keyword 'region' overrides the AWS_REGION environment variable.
    Returns the raw get_parameter response, or False after printing the
    error on failure.
    """
    region = os.environ['AWS_REGION']
    if 'region' in kwargs:
        region = kwargs['region']
    ssm_client = boto3.session.Session().client('ssm', region)
    try:
        return ssm_client.get_parameter(Name=key, WithDecryption=True)
    except Exception as error:
        print("Exception caught during parameter retreival for parameter[" + key + "] in region[" + region + "]: " + str(error))
        return False
def labelParameterVersion(parameterKey, version, labels, **kwargs):
    """Attach the given labels to a specific SSM parameter version.

    Keyword 'region' overrides the AWS_REGION environment variable.
    Returns True on success, False after printing the error on failure.
    """
    region = os.environ['AWS_REGION']
    if 'region' in kwargs:
        region = kwargs['region']
    session = boto3.session.Session()
    ssm_client = session.client('ssm', region)
    try:
        ssm_client.label_parameter_version(
            Name=parameterKey,
            ParameterVersion=version,
            Labels=labels
        )
        return True
    except Exception as error:
        print("Exception caught while labeling parameter[" + parameterKey + "] in region[" + region + "]: " + str(error))
        return False
1f26b6eb6d6dcadfa381edb1a417fab9d0a51f97 | 5,305 | py | Python | python_graphs/instruction_test.py | reshinthadithyan/python-graphs | 1234c448cb38af44c963d5ef7f8d99f678028104 | [
"Apache-2.0"
] | 53 | 2021-04-12T14:20:16.000Z | 2022-03-29T02:47:30.000Z | python_graphs/instruction_test.py | reshinthadithyan/python-graphs | 1234c448cb38af44c963d5ef7f8d99f678028104 | [
"Apache-2.0"
] | 2 | 2021-09-08T16:37:34.000Z | 2022-03-15T17:32:36.000Z | python_graphs/instruction_test.py | reshinthadithyan/python-graphs | 1234c448cb38af44c963d5ef7f8d99f678028104 | [
"Apache-2.0"
] | 14 | 2021-05-08T04:34:46.000Z | 2022-01-16T12:58:16.000Z | # Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instruction module."""
from absl.testing import absltest
import gast as ast
from python_graphs import instruction as instruction_module
if __name__ == '__main__':
absltest.main()
| 42.782258 | 77 | 0.6541 | # Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instruction module."""
from absl.testing import absltest
import gast as ast
from python_graphs import instruction as instruction_module
def create_instruction(source):
  """Parse *source*, canonicalize the AST, and wrap it in an Instruction."""
  node = ast.parse(source)
  node = instruction_module._canonicalize(node)
  return instruction_module.Instruction(node)
class InstructionTest(absltest.TestCase):
  """Tests for Instruction: program equality and read/write accesses."""
  def test_instruction(self):
    """The Instruction class is exported by the instruction module."""
    self.assertIsNotNone(instruction_module.Instruction)
  def test_represent_same_program_basic_positive_case(self):
    """Two identical parses represent the same program."""
    program1 = ast.parse('x + 1')
    program2 = ast.parse('x + 1')
    self.assertTrue(
        instruction_module.represent_same_program(program1, program2))
  def test_represent_same_program_basic_negative_case(self):
    """Differing literals make the programs distinct."""
    program1 = ast.parse('x + 1')
    program2 = ast.parse('x + 2')
    self.assertFalse(
        instruction_module.represent_same_program(program1, program2))
  def test_represent_same_program_different_contexts(self):
    """Name ctx (Load vs Store) is ignored when comparing programs."""
    full_program1 = ast.parse('y = x + 1')  # y is a write
    program1 = full_program1.body[0].targets[0]  # 'y'
    program2 = ast.parse('y')  # y is a read
    self.assertTrue(
        instruction_module.represent_same_program(program1, program2))
  def test_get_accesses(self):
    """Read/write name sets for expressions, returns, calls, assignments."""
    instruction = create_instruction('x + 1')
    self.assertEqual(instruction.get_read_names(), {'x'})
    self.assertEqual(instruction.get_write_names(), set())
    instruction = create_instruction('return x + y + z')
    self.assertEqual(instruction.get_read_names(), {'x', 'y', 'z'})
    self.assertEqual(instruction.get_write_names(), set())
    instruction = create_instruction('fn(a, b, c)')
    self.assertEqual(instruction.get_read_names(), {'a', 'b', 'c', 'fn'})
    self.assertEqual(instruction.get_write_names(), set())
    instruction = create_instruction('c = fn(a, b, c)')
    self.assertEqual(instruction.get_read_names(), {'a', 'b', 'c', 'fn'})
    self.assertEqual(instruction.get_write_names(), {'c'})
  def test_get_accesses_augassign(self):
    """Augmented-assignment targets are both read and written."""
    instruction = create_instruction('x += 1')
    self.assertEqual(instruction.get_read_names(), {'x'})
    self.assertEqual(instruction.get_write_names(), {'x'})
    instruction = create_instruction('x *= y')
    self.assertEqual(instruction.get_read_names(), {'x', 'y'})
    self.assertEqual(instruction.get_write_names(), {'x'})
  def test_get_accesses_augassign_subscript(self):
    """Subscript aug-assign counts as a read of the base, not a write."""
    instruction = create_instruction('x[0] *= y')
    # This is not currently considered a write of x. It is a read of x.
    self.assertEqual(instruction.get_read_names(), {'x', 'y'})
    self.assertEqual(instruction.get_write_names(), set())
  def test_get_accesses_augassign_attribute(self):
    """Attribute aug-assign counts as a read of the base, not a write."""
    instruction = create_instruction('x.attribute *= y')
    # This is not currently considered a write of x. It is a read of x.
    self.assertEqual(instruction.get_read_names(), {'x', 'y'})
    self.assertEqual(instruction.get_write_names(), set())
  def test_get_accesses_subscript(self):
    """Subscript assignment counts as a read of the base, not a write."""
    instruction = create_instruction('x[0] = y')
    # This is not currently considered a write of x. It is a read of x.
    self.assertEqual(instruction.get_read_names(), {'x', 'y'})
    self.assertEqual(instruction.get_write_names(), set())
  def test_get_accesses_attribute(self):
    """Attribute assignment counts as a read of the base, not a write."""
    instruction = create_instruction('x.attribute = y')
    # This is not currently considered a write of x. It is a read of x.
    self.assertEqual(instruction.get_read_names(), {'x', 'y'})
    self.assertEqual(instruction.get_write_names(), set())
  def test_access_ordering(self):
    """Accesses are reported in evaluation order, with writes last."""
    instruction = create_instruction('c = fn(a, b + c, d / a)')
    access_names_and_kinds = [(instruction_module.access_name(access),
                               instruction_module.access_kind(access))
                              for access in instruction.accesses]
    self.assertEqual(access_names_and_kinds, [('fn', 'read'), ('a', 'read'),
                                              ('b', 'read'), ('c', 'read'),
                                              ('d', 'read'), ('a', 'read'),
                                              ('c', 'write')])
    instruction = create_instruction('c += fn(a, b + c, d / a)')
    access_names_and_kinds = [(instruction_module.access_name(access),
                               instruction_module.access_kind(access))
                              for access in instruction.accesses]
    self.assertEqual(access_names_and_kinds, [('fn', 'read'), ('a', 'read'),
                                              ('b', 'read'), ('c', 'read'),
                                              ('d', 'read'), ('a', 'read'),
                                              ('c', 'read'), ('c', 'write')])
# Run the suite directly: ``python instruction_test.py``.
if __name__ == '__main__':
  absltest.main()
| 4,189 | 20 | 321 |
903e8dd24495854c909f69f64e7a7530efaf9db4 | 1,431 | py | Python | autosk_dev_test/component/test/test_LogReg.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 2 | 2018-01-18T06:25:21.000Z | 2018-12-11T07:43:09.000Z | autosk_dev_test/component/test/test_LogReg.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 1 | 2016-03-29T07:55:18.000Z | 2016-03-29T07:55:18.000Z | autosk_dev_test/component/test/test_LogReg.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | null | null | null | import unittest
from component.LogReg import LogReg
from autosklearn.pipeline.util import _test_classifier
import sklearn.metrics
| 40.885714 | 75 | 0.570929 | import unittest
from component.LogReg import LogReg
from autosklearn.pipeline.util import _test_classifier
import sklearn.metrics
class LogRegComponentTest(unittest.TestCase):
    """Checks the LogReg auto-sklearn component's default configuration.

    Each test fits the default-configured classifier ten times and
    expects the accuracy to land (almost exactly) on the 0.28 baseline.
    NOTE(review): 0.28 looks like an empirically recorded baseline for
    this component -- confirm before changing.
    """
    def test_default_configuration(self):
        """Default config on the iris dataset."""
        for i in range(10):
            predictions, targets = _test_classifier(LogReg, dataset='iris')
            acc_score = sklearn.metrics.accuracy_score(y_true=targets,
                                                       y_pred=predictions)
            print(acc_score)
            self.assertAlmostEqual(0.28, acc_score)
    def test_default_configuration_binary(self):
        """Default config on a binarized dataset."""
        for i in range(10):
            predictions, targets = _test_classifier(LogReg,
                                                    make_binary=True)
            acc_score = sklearn.metrics.accuracy_score(y_true=targets,
                                                       y_pred=predictions)
            print(acc_score)
            self.assertAlmostEqual(0.28, acc_score)
    def test_default_configuration_multilabel(self):
        """Default config on a multilabel dataset."""
        for i in range(10):
            predictions, targets = _test_classifier(LogReg,
                                                    make_multilabel=True)
            acc_score = sklearn.metrics.accuracy_score(y_true=targets,
                                                       y_pred=predictions)
            print(acc_score)
            self.assertAlmostEqual(0.28, acc_score)
| 1,171 | 24 | 104 |
5033bba781caf73f73ae20dbf086ab6ddfb77ca5 | 38 | py | Python | Local/hello.py | JeffreyILipton/ME480 | 7271418cf396fd537e4393daa3106b21a1d55806 | [
"MIT"
] | 15 | 2019-10-07T21:09:25.000Z | 2021-11-01T21:07:37.000Z | Local/helloX.py | JeffreyILipton/ME480 | 7271418cf396fd537e4393daa3106b21a1d55806 | [
"MIT"
] | null | null | null | Local/helloX.py | JeffreyILipton/ME480 | 7271418cf396fd537e4393daa3106b21a1d55806 | [
"MIT"
] | 23 | 2019-10-07T21:09:32.000Z | 2021-12-10T09:30:33.000Z | #!/usr/bin/env python3
print('hello')
| 12.666667 | 22 | 0.684211 | #!/usr/bin/env python3
print('hello')
| 0 | 0 | 0 |
92901e6c444bc5f4f8ae8d14f89d8eb5493b3596 | 2,614 | py | Python | preprocess.py | koshian2/ImageNet | ed11a31637db6f7c2df12232e3cb1cabad8397e9 | [
"MIT"
] | null | null | null | preprocess.py | koshian2/ImageNet | ed11a31637db6f7c2df12232e3cb1cabad8397e9 | [
"MIT"
] | null | null | null | preprocess.py | koshian2/ImageNet | ed11a31637db6f7c2df12232e3cb1cabad8397e9 | [
"MIT"
] | null | null | null | from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
def imagenet_data_augmentation(pillow_img, target_size,
                               area_min=0.08, area_max=1.0,
                               aspect_min=0.75, aspect_max=4.0/3.0):
    """GoogLeNet-style random-resized-crop augmentation for one image.

    Samples a crop whose area is U([area_min, area_max]) of the original
    and whose aspect ratio is U([aspect_min, aspect_max]); when the crop
    exceeds the image in a dimension, negative crop coordinates are used,
    which PIL fills with black padding.  A random horizontal flip and a
    random autocontrast are then applied.

    pillow_img  : PIL.Image instance
    target_size : output width == height in pixels
    returns     : uint8 numpy array of shape (target_size, target_size, C)
    NOTE(review): Image.LINEAR is the old Pillow alias for BILINEAR and
    was removed in Pillow 10 -- confirm the pinned Pillow version.
    """
    # aspect_ratio = width / height
    # cropped_width = sqrt(S*a)
    # cropped_height = sqrt(S/a)
    original_width, original_height = pillow_img.size
    cropped_area = np.random.uniform(area_min, area_max) * original_width * original_height
    cropped_aspect_ratio = np.random.uniform(aspect_min, aspect_max)
    cropped_width = int(np.sqrt(cropped_area * cropped_aspect_ratio))
    cropped_height = int(np.sqrt(cropped_area / cropped_aspect_ratio))
    # crop left / right point (random slide when the crop fits; centred
    # negative coordinates -> black padding when it does not)
    if original_width > cropped_width:
        horizontal_slide = int(np.random.uniform(0, original_width-cropped_width))
        left, right = horizontal_slide, horizontal_slide+cropped_width
    else:
        horizontal_slide = (cropped_width - original_width) // 2
        left, right = -horizontal_slide, horizontal_slide+original_width
    # crop top / bottom point
    if original_height > cropped_height:
        vertical_slide = int(np.random.uniform(0, original_height-cropped_height))
        top, bottom = vertical_slide, vertical_slide+cropped_height
    else:
        vertical_slide = (cropped_height - original_height) // 2
        top, bottom = -vertical_slide, vertical_slide+original_height
    cropped = pillow_img.crop((left, top, right, bottom))
    resized = cropped.resize((target_size, target_size), Image.LINEAR)
    # horizontal flip (p = 0.5)
    if np.random.random() >= 0.5:
        resized = ImageOps.mirror(resized)
    # auto contrast (a bit slow); random cutoff in [0, 1) percent
    if np.random.random() >= 0.5:
        resized = ImageOps.autocontrast(resized, 
            np.random.uniform(0, 1.0), ignore=0) # ignore black background
    return np.asarray(resized, np.uint8)
def validation_image_load(pillow_img, target_size):
    """
    Deterministic (no augmentation) load: square-resize to a uint8 array.

    # input : pillow_img = PIL instance
    # : target_size = resized width / height
    # output : uint8 numpy array
    """
    square = (target_size, target_size)
    return np.asarray(pillow_img.resize(square, Image.LINEAR), np.uint8)
| 43.566667 | 91 | 0.677123 | from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
def imagenet_data_augmentation(pillow_img, target_size,
                               area_min=0.08, area_max=1.0,
                               aspect_min=0.75, aspect_max=4.0/3.0):
    """
    Random-resized-crop augmentation (based on GoogLeNet) for one image.

    # input : pillow_img = PIL instance
    # : target_size = resized width / height
    # output : uint8 numpy array
    # optional : cropped area = U([area_min, area_max])
    cropped aspect ratio = U([aspect_min, aspect_max])
    """
    w0, h0 = pillow_img.size
    # crop_w = sqrt(S*a), crop_h = sqrt(S/a); S = area, a = width/height.
    area = np.random.uniform(area_min, area_max) * w0 * h0
    ratio = np.random.uniform(aspect_min, aspect_max)
    cw, ch = int(np.sqrt(area * ratio)), int(np.sqrt(area / ratio))
    if w0 > cw:
        dx = int(np.random.uniform(0, w0 - cw))
        box_left, box_right = dx, dx + cw
    else:
        # Crop wider than the source: center it (PIL pads the overhang).
        dx = (cw - w0) // 2
        box_left, box_right = -dx, dx + w0
    if h0 > ch:
        dy = int(np.random.uniform(0, h0 - ch))
        box_top, box_bottom = dy, dy + ch
    else:
        dy = (ch - h0) // 2
        box_top, box_bottom = -dy, dy + h0
    patch = pillow_img.crop((box_left, box_top, box_right, box_bottom))
    patch = patch.resize((target_size, target_size), Image.LINEAR)
    if np.random.random() >= 0.5:  # random horizontal flip
        patch = ImageOps.mirror(patch)
    if np.random.random() >= 0.5:  # random auto contrast (a bit slow)
        patch = ImageOps.autocontrast(patch,
                np.random.uniform(0, 1.0), ignore=0)  # ignore black background
    return np.asarray(patch, np.uint8)
def validation_image_load(pillow_img, target_size):
    """
    Convert a Pillow image straight to a square uint8 array (no randomness).

    # input : pillow_img = PIL instance
    # : target_size = resized width / height
    # output : uint8 numpy array
    """
    return np.asarray(
        pillow_img.resize((target_size, target_size), Image.LINEAR), np.uint8)
| 0 | 0 | 0 |
40007e4e513dfb447e6e296344187853d86b55e6 | 292 | py | Python | test/core/scripts/basicpass.py | jcollins1983/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | [
"MIT"
] | 15 | 2018-10-05T04:55:18.000Z | 2022-03-10T08:08:20.000Z | test/core/scripts/basicpass.py | jcollins1983/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | [
"MIT"
] | 86 | 2018-09-26T02:33:11.000Z | 2022-01-10T06:12:17.000Z | test/core/scripts/basicpass.py | jcollins1983/Fixate | 132f7cda69f3d53ff1bd660518d11b45dc6182fd | [
"MIT"
] | 12 | 2018-10-09T01:32:11.000Z | 2022-03-22T01:19:09.000Z | from fixate.core.common import TestClass
from fixate.core.ui import user_ok, user_info
from fixate.core.checks import *
__version__ = "1"
class SimpleTest(TestClass):
    """Simple passing test"""
    # NOTE(review): no test() override is visible in this copy, so the
    # behaviour comes entirely from the fixate TestClass base -- confirm
    # this is intended and not an accidentally stripped method.
TEST_SEQUENCE = [SimpleTest()]
| 18.25 | 45 | 0.705479 | from fixate.core.common import TestClass
from fixate.core.ui import user_ok, user_info
from fixate.core.checks import *
__version__ = "1"
class SimpleTest(TestClass):
    """Simple passing test"""
    def test(self):
        """Check a constant True so this sequence always passes."""
        chk_true(True, "It is True!")
TEST_SEQUENCE = [SimpleTest()]
| 32 | 0 | 27 |
d919e4aad227c8d885586066bf1525c3fd9f0dba | 120 | py | Python | src/exactcover/__init__.py | cliffordw20/exactcover | 0dc85d7a6e7da229ce0a0bbab086f2e54591dc5e | [
"MIT"
] | null | null | null | src/exactcover/__init__.py | cliffordw20/exactcover | 0dc85d7a6e7da229ce0a0bbab086f2e54591dc5e | [
"MIT"
] | null | null | null | src/exactcover/__init__.py | cliffordw20/exactcover | 0dc85d7a6e7da229ce0a0bbab086f2e54591dc5e | [
"MIT"
] | null | null | null | """Exactcover __init__."""
from .exactcover import solve, ExactCoverKeyError
__all__ = ['solve', 'ExactCoverKeyError']
| 24 | 49 | 0.758333 | """Exactcover __init__."""
from .exactcover import solve, ExactCoverKeyError
__all__ = ['solve', 'ExactCoverKeyError']
| 0 | 0 | 0 |
6bf5522636bdaf3a60c73c55d8011380e018549f | 8,761 | py | Python | global_var.py | NightFurySL2001/CJK-character-count | cee3fffb898b2080682218aad802080856b89a07 | [
"MIT"
] | 125 | 2020-07-14T06:30:01.000Z | 2022-03-27T06:48:34.000Z | global_var.py | NightFurySL2001/CJK-character-count | cee3fffb898b2080682218aad802080856b89a07 | [
"MIT"
] | 3 | 2021-06-25T01:42:27.000Z | 2022-02-06T14:34:55.000Z | global_var.py | NightFurySL2001/CJK-character-count | cee3fffb898b2080682218aad802080856b89a07 | [
"MIT"
] | 3 | 2021-01-17T03:08:03.000Z | 2022-02-17T08:42:22.000Z | global cjk_list
# NOTE(review): `global` statements at module level are no-ops in Python;
# these lines document the module's public names but can likely be removed.
global unicode_list
global cjk_jian_list
global cjk_jian_fan_list
global cjk_fan_list
global cjk_count
global unicode_count
import os, sys
global main_directory
# main_directory = directory holding this app, resolved differently for a
# frozen (PyInstaller) build versus running from source.
#if packaged by pyinstaller
#ref: https://stackoverflow.com/questions/404744/determining-application-path-in-a-python-exe-generated-by-pyinstaller
if getattr(sys, 'frozen', False):
    #change from loading same folder to full folder, --onedir
    main_directory = os.path.dirname(sys.executable)
    #`pyinstaller --onefile` change to use the following code
    #if '_MEIPASS2' in os.environ:
    #    main_directory = os.environ['_MEIPASS2']
    #ref: https://stackoverflow.com/questions/9553262/pyinstaller-ioerror-errno-2-no-such-file-or-directory
else:
    #dev mode
    try: #py xx.py
        app_full_path = os.path.realpath(__file__)
        main_directory = os.path.dirname(app_full_path)
    except NameError: #py then run code
        main_directory = os.getcwd()
#english name
#old list for compatibility
# Each dict below maps an internal character-set key to its display name.
cjk_list = {"gb2312":"GB/T 2312",
            "gb12345":"GB/T 12345",
            "gbk":"GBK",
            "gb18030":"GB 18030",
            "hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
            "fangzheng-jianfan":"FounderType Simp./Trad. List",
            "tongyong-guifan":"Table of General Standard Chinese Characters", #通用规范汉字表
            "3500changyong":"List of Frequently Used Characters in Modern Chinese", #现代汉语常用字表
            "7000tongyong":"List of Commonly Used Characters in Modern Chinese", #现代汉语通用字表
            "yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education", #义务教育语文课程常用字表
            "4808changyong":"Chart of Standard Forms of Common National Characters", #常用国字标准字体表
            "6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters", #次常用国字标准字体表
            "big5changyong":"BIG5 Common Character Set",
            "big5":"BIG5",
            "hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters", #常用字字形表
            "hkscs":"Hong Kong Supplementary Character Set",
            "suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)"
            }
# Simplified-Chinese character sets (English labels).
cjk_jian_list_en = {"gb2312":"GB/T 2312",
                    "3500changyong":"List of Frequently Used Characters in Modern Chinese",
                    "7000tongyong":"List of Commonly Used Characters in Modern Chinese",
                    "yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education",
                    "tongyong-guifan":"Table of General Standard Chinese Characters"
                    }
# Mixed simplified/traditional character sets (English labels).
cjk_jian_fan_list_en = {"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
                        "fangzheng-jianfan":"FounderType Simp./Trad. List",
                        "gbk":"GBK",
                        "gb18030":"GB 18030"
                        }
# Traditional-Chinese character sets (English labels).
cjk_fan_list_en = {"4808changyong":"Chart of Standard Forms of Common National Characters",
                   "6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters",
                   "big5changyong":"BIG5 Common Character Set",
                   "big5":"BIG5",
                   "hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters",
                   "hkscs":"Hong Kong Supplementary Character Set",
                   "suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)",
                   "gb12345":"GB/T 12345"
                   }
# Unicode ideograph block names (English labels).
unicode_list = {"kangxi":"Kangxi Radicals",
                "kangxi-sup":"CJK Radical Supplements",
                "zero":"〇",
                "basic":"CJK Unified Ideographs",
                "ext-a":"CJK Unified Ideographs Extension A",
                "compat":"CJK Compatibility Ideographs",
                "compat-ideo":" Non-Compatibility (Unified) Ideographs",
                "ext-b":"CJK Unified Ideographs Extension B",
                "ext-c":"CJK Unified Ideographs Extension C",
                "ext-d":"CJK Unified Ideographs Extension D",
                "ext-e":"CJK Unified Ideographs Extension E",
                "ext-f":"CJK Unified Ideographs Extension F",
                "compat-sup":"CJK Compatibility Ideographs Supplement",
                "ext-g":"CJK Unified Ideographs Extension G",
                "total":"Total Ideographs"
                }
#chinese name (simp)
cjk_jian_list_zhs = {"gb2312":"GB/T 2312",
                     "3500changyong":"现代汉语常用字表*",
                     "7000tongyong":"现代汉语通用字表",
                     "yiwu-jiaoyu":"义务教育语文课程常用字表",
                     "tongyong-guifan":"通用规范汉字表"
                     }
cjk_jian_fan_list_zhs = {"hanyi-jianfan":"汉仪简繁字表",
                         "fangzheng-jianfan":"方正简繁字表",
                         "gbk":"GBK",
                         "gb18030":"GB 18030"
                         }
cjk_fan_list_zhs = {"4808changyong":"常用国字标准字体表",
                    "6343cichangyong":"次常用国字标准字体表",
                    "big5changyong":"五大码 (Big5) 常用汉字表",
                    "big5":"五大码 (Big5)",
                    "hkchangyong":"常用字字形表",
                    "hkscs":"香港增补字符集 (HKSCS)",
                    "suppchara":"常用香港外字表 (1-6级)",
                    "gb12345":"GB/T 12345"
                    }
unicode_list_zhs = {"kangxi":"康熙部首",
                    "kangxi-sup":"汉字部首补充",
                    "zero":"〇",
                    "basic":"中日韩统一表意文字",
                    "ext-a":"中日韩统一表意文字—扩展A区",
                    "compat":"中日韩兼容表意文字",
                    "compat-ideo":" 非兼容(统一)表意文字",
                    "ext-b":"中日韩统一表意文字—扩展B区",
                    "ext-c":"中日韩统一表意文字—扩展C区",
                    "ext-d":"中日韩统一表意文字—扩展D区",
                    "ext-e":"中日韩统一表意文字—扩展E区",
                    "ext-f":"中日韩统一表意文字—扩展F区",
                    "compat-sup":"中日韩兼容表意文字(补充区)",
                    "ext-g":"中日韩统一表意文字—扩展G区",
                    "total":"总汉字数"
                    }
#chinese name (trad)
cjk_fan_list_zht = {"4808changyong":"常用國字標準字體表",
                    "6343cichangyong":"次常用國字標準字體表",
                    "big5changyong":"五大碼 (Big5) 常用漢字表",
                    "big5":"五大碼 (Big5)",
                    "hkchangyong":"常用字字形表",
                    "hkscs":"香港增補字符集 (HKSCS)",
                    "suppchara":"常用香港外字表 (1-6級)",
                    "gb12345":"GB/T 12345"
                    }
cjk_jian_fan_list_zht = {"hanyi-jianfan":"漢儀簡繁字表",
                         "fangzheng-jianfan":"方正簡繁字表",
                         "gbk":"GBK",
                         "gb18030":"GB 18030"
                         }
cjk_jian_list_zht = {"gb2312":"GB/T 2312",
                     "3500changyong":"現代漢語常用字表",
                     "7000tongyong":"現代漢語通用字表",
                     "yiwu-jiaoyu":"義務教育語文課程常用字表",
                     "tongyong-guifan":"通用規範漢字表"
                     }
unicode_list_zht = {"kangxi":"康熙部首",
                    "kangxi-sup":"漢字部首補充",
                    "zero":"〇",
                    "basic":"中日韓統一表意文字",
                    "ext-a":"中日韓統一表意文字—擴展A區",
                    "compat":"中日韓兼容表意文字",
                    "compat-ideo":" 非兼容(統一)表意文字",
                    "ext-b":"中日韓統一表意文字—擴展B區",
                    "ext-c":"中日韓統一表意文字—擴展C區",
                    "ext-d":"中日韓統一表意文字—擴展D區",
                    "ext-e":"中日韓統一表意文字—擴展E區",
                    "ext-f":"中日韓統一表意文字—擴展F區",
                    "compat-sup":"中日韓兼容表意文字(補充區)",
                    "ext-g":"中日韓統一表意文字—擴展G區",
                    "total":"總漢字數"
                    }
#character count
# Expected character counts per set; the 0 placeholders ("gb18030",
# "total") are computed from other entries below.
cjk_count = {"gb2312":6763,
             "gb12345":6866,
             "gbk":20923,
             "gb18030":0,
             "hanyi-jianfan":9169,
             "fangzheng-jianfan":9664,
             "tongyong-guifan":8105,
             "3500changyong":3500,
             "7000tongyong":7000,
             "yiwu-jiaoyu":3500,
             "4808changyong":4808,
             "6343cichangyong":6343,
             "big5changyong":5401,
             "big5":13060,
             "hkchangyong":4825,
             "hkscs":4603,
             "suppchara":1097
             }
unicode_count = {"kangxi":214,
                 "kangxi-sup":115,
                 "zero":1,
                 "basic":20992,
                 "ext-a":6592,
                 "compat":472,
                 "compat-ideo":12,
                 "ext-b":42720,
                 "ext-c":4153,
                 "ext-d":222,
                 "ext-e":5762,
                 "ext-f":7473,
                 "compat-sup":542,
                 "ext-g":4939,
                 "total":0
                 }
# GB 18030 ideograph coverage = 〇 + URO ("basic") + Extension A.
cjk_count["gb18030"] = unicode_count["zero"]+unicode_count["basic"]+unicode_count["ext-a"]
unicode_count["total"] = unicode_count["zero"]+unicode_count["compat-ideo"]+unicode_count["basic"]+unicode_count["ext-a"]+unicode_count["ext-b"]+unicode_count["ext-c"]+unicode_count["ext-d"]+unicode_count["ext-e"]+unicode_count["ext-f"]+unicode_count["ext-g"] | 43.371287 | 259 | 0.523228 | global cjk_list
# NOTE(review): `global` statements at module level are no-ops in Python;
# these lines document the module's public names but can likely be removed.
global unicode_list
global cjk_jian_list
global cjk_jian_fan_list
global cjk_fan_list
global cjk_count
global unicode_count
import os, sys
global main_directory
# main_directory = directory holding this app, resolved differently for a
# frozen (PyInstaller) build versus running from source.
#if packaged by pyinstaller
#ref: https://stackoverflow.com/questions/404744/determining-application-path-in-a-python-exe-generated-by-pyinstaller
if getattr(sys, 'frozen', False):
    #change from loading same folder to full folder, --onedir
    main_directory = os.path.dirname(sys.executable)
    #`pyinstaller --onefile` change to use the following code
    #if '_MEIPASS2' in os.environ:
    #    main_directory = os.environ['_MEIPASS2']
    #ref: https://stackoverflow.com/questions/9553262/pyinstaller-ioerror-errno-2-no-such-file-or-directory
else:
    #dev mode
    try: #py xx.py
        app_full_path = os.path.realpath(__file__)
        main_directory = os.path.dirname(app_full_path)
    except NameError: #py then run code
        main_directory = os.getcwd()
#english name
#old list for compatibility
# Each dict below maps an internal character-set key to its display name.
cjk_list = {"gb2312":"GB/T 2312",
            "gb12345":"GB/T 12345",
            "gbk":"GBK",
            "gb18030":"GB 18030",
            "hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
            "fangzheng-jianfan":"FounderType Simp./Trad. List",
            "tongyong-guifan":"Table of General Standard Chinese Characters", #通用规范汉字表
            "3500changyong":"List of Frequently Used Characters in Modern Chinese", #现代汉语常用字表
            "7000tongyong":"List of Commonly Used Characters in Modern Chinese", #现代汉语通用字表
            "yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education", #义务教育语文课程常用字表
            "4808changyong":"Chart of Standard Forms of Common National Characters", #常用国字标准字体表
            "6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters", #次常用国字标准字体表
            "big5changyong":"BIG5 Common Character Set",
            "big5":"BIG5",
            "hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters", #常用字字形表
            "hkscs":"Hong Kong Supplementary Character Set",
            "suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)"
            }
# Simplified-Chinese character sets (English labels).
cjk_jian_list_en = {"gb2312":"GB/T 2312",
                    "3500changyong":"List of Frequently Used Characters in Modern Chinese",
                    "7000tongyong":"List of Commonly Used Characters in Modern Chinese",
                    "yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education",
                    "tongyong-guifan":"Table of General Standard Chinese Characters"
                    }
# Mixed simplified/traditional character sets (English labels).
cjk_jian_fan_list_en = {"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
                        "fangzheng-jianfan":"FounderType Simp./Trad. List",
                        "gbk":"GBK",
                        "gb18030":"GB 18030"
                        }
# Traditional-Chinese character sets (English labels).
cjk_fan_list_en = {"4808changyong":"Chart of Standard Forms of Common National Characters",
                   "6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters",
                   "big5changyong":"BIG5 Common Character Set",
                   "big5":"BIG5",
                   "hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters",
                   "hkscs":"Hong Kong Supplementary Character Set",
                   "suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)",
                   "gb12345":"GB/T 12345"
                   }
# Unicode ideograph block names (English labels).
unicode_list = {"kangxi":"Kangxi Radicals",
                "kangxi-sup":"CJK Radical Supplements",
                "zero":"〇",
                "basic":"CJK Unified Ideographs",
                "ext-a":"CJK Unified Ideographs Extension A",
                "compat":"CJK Compatibility Ideographs",
                "compat-ideo":" Non-Compatibility (Unified) Ideographs",
                "ext-b":"CJK Unified Ideographs Extension B",
                "ext-c":"CJK Unified Ideographs Extension C",
                "ext-d":"CJK Unified Ideographs Extension D",
                "ext-e":"CJK Unified Ideographs Extension E",
                "ext-f":"CJK Unified Ideographs Extension F",
                "compat-sup":"CJK Compatibility Ideographs Supplement",
                "ext-g":"CJK Unified Ideographs Extension G",
                "total":"Total Ideographs"
                }
#chinese name (simp)
cjk_jian_list_zhs = {"gb2312":"GB/T 2312",
                     "3500changyong":"现代汉语常用字表*",
                     "7000tongyong":"现代汉语通用字表",
                     "yiwu-jiaoyu":"义务教育语文课程常用字表",
                     "tongyong-guifan":"通用规范汉字表"
                     }
cjk_jian_fan_list_zhs = {"hanyi-jianfan":"汉仪简繁字表",
                         "fangzheng-jianfan":"方正简繁字表",
                         "gbk":"GBK",
                         "gb18030":"GB 18030"
                         }
cjk_fan_list_zhs = {"4808changyong":"常用国字标准字体表",
                    "6343cichangyong":"次常用国字标准字体表",
                    "big5changyong":"五大码 (Big5) 常用汉字表",
                    "big5":"五大码 (Big5)",
                    "hkchangyong":"常用字字形表",
                    "hkscs":"香港增补字符集 (HKSCS)",
                    "suppchara":"常用香港外字表 (1-6级)",
                    "gb12345":"GB/T 12345"
                    }
unicode_list_zhs = {"kangxi":"康熙部首",
                    "kangxi-sup":"汉字部首补充",
                    "zero":"〇",
                    "basic":"中日韩统一表意文字",
                    "ext-a":"中日韩统一表意文字—扩展A区",
                    "compat":"中日韩兼容表意文字",
                    "compat-ideo":" 非兼容(统一)表意文字",
                    "ext-b":"中日韩统一表意文字—扩展B区",
                    "ext-c":"中日韩统一表意文字—扩展C区",
                    "ext-d":"中日韩统一表意文字—扩展D区",
                    "ext-e":"中日韩统一表意文字—扩展E区",
                    "ext-f":"中日韩统一表意文字—扩展F区",
                    "compat-sup":"中日韩兼容表意文字(补充区)",
                    "ext-g":"中日韩统一表意文字—扩展G区",
                    "total":"总汉字数"
                    }
#chinese name (trad)
cjk_fan_list_zht = {"4808changyong":"常用國字標準字體表",
                    "6343cichangyong":"次常用國字標準字體表",
                    "big5changyong":"五大碼 (Big5) 常用漢字表",
                    "big5":"五大碼 (Big5)",
                    "hkchangyong":"常用字字形表",
                    "hkscs":"香港增補字符集 (HKSCS)",
                    "suppchara":"常用香港外字表 (1-6級)",
                    "gb12345":"GB/T 12345"
                    }
cjk_jian_fan_list_zht = {"hanyi-jianfan":"漢儀簡繁字表",
                         "fangzheng-jianfan":"方正簡繁字表",
                         "gbk":"GBK",
                         "gb18030":"GB 18030"
                         }
cjk_jian_list_zht = {"gb2312":"GB/T 2312",
                     "3500changyong":"現代漢語常用字表",
                     "7000tongyong":"現代漢語通用字表",
                     "yiwu-jiaoyu":"義務教育語文課程常用字表",
                     "tongyong-guifan":"通用規範漢字表"
                     }
unicode_list_zht = {"kangxi":"康熙部首",
                    "kangxi-sup":"漢字部首補充",
                    "zero":"〇",
                    "basic":"中日韓統一表意文字",
                    "ext-a":"中日韓統一表意文字—擴展A區",
                    "compat":"中日韓兼容表意文字",
                    "compat-ideo":" 非兼容(統一)表意文字",
                    "ext-b":"中日韓統一表意文字—擴展B區",
                    "ext-c":"中日韓統一表意文字—擴展C區",
                    "ext-d":"中日韓統一表意文字—擴展D區",
                    "ext-e":"中日韓統一表意文字—擴展E區",
                    "ext-f":"中日韓統一表意文字—擴展F區",
                    "compat-sup":"中日韓兼容表意文字(補充區)",
                    "ext-g":"中日韓統一表意文字—擴展G區",
                    "total":"總漢字數"
                    }
#character count
# Expected character counts per set; the 0 placeholders ("gb18030",
# "total") are computed from other entries below.
cjk_count = {"gb2312":6763,
             "gb12345":6866,
             "gbk":20923,
             "gb18030":0,
             "hanyi-jianfan":9169,
             "fangzheng-jianfan":9664,
             "tongyong-guifan":8105,
             "3500changyong":3500,
             "7000tongyong":7000,
             "yiwu-jiaoyu":3500,
             "4808changyong":4808,
             "6343cichangyong":6343,
             "big5changyong":5401,
             "big5":13060,
             "hkchangyong":4825,
             "hkscs":4603,
             "suppchara":1097
             }
unicode_count = {"kangxi":214,
                 "kangxi-sup":115,
                 "zero":1,
                 "basic":20992,
                 "ext-a":6592,
                 "compat":472,
                 "compat-ideo":12,
                 "ext-b":42720,
                 "ext-c":4153,
                 "ext-d":222,
                 "ext-e":5762,
                 "ext-f":7473,
                 "compat-sup":542,
                 "ext-g":4939,
                 "total":0
                 }
# GB 18030 ideograph coverage = 〇 + URO ("basic") + Extension A.
cjk_count["gb18030"] = unicode_count["zero"]+unicode_count["basic"]+unicode_count["ext-a"]
unicode_count["total"] = unicode_count["zero"]+unicode_count["compat-ideo"]+unicode_count["basic"]+unicode_count["ext-a"]+unicode_count["ext-b"]+unicode_count["ext-c"]+unicode_count["ext-d"]+unicode_count["ext-e"]+unicode_count["ext-f"]+unicode_count["ext-g"] | 0 | 0 | 0 |
0a5395d0a2140ef5a5cc84548608073aba17fcf4 | 64,562 | py | Python | lldb_commands/dclass.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | lldb_commands/dclass.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | lldb_commands/dclass.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2017 Derek Selander
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import ds
import os
import shlex
import optparse
import datetime
import lldb.utils.symbolication
def dclass(debugger, command, exe_ctx, result, internal_dict):
    '''
    Dumps all the NSObject inherited classes in the process. If you give it a module,
    it will dump only the classes within that module. You can also filter out classes
    to only a certain type and can also generate a header file for a specific class.
    Example:
    # Dump ALL the NSObject classes within the process
    (lldb) dclass
    # Dump all the classes that are a UIViewController within the process
    (lldb) dclass -f UIViewController
    # Dump all the classes with the regex case insensitive search "viewcontroller" in the class name
    (lldb) dclass -r (?i)viewCoNtrolLer
    # Dump all the classes within the UIKit module
    (lldb) dclass -m UIKit
    # Dump all classes in CKConfettiEffect NSBundle that are UIView subclasses
    (lldb) dclass /System/Library/Messages/iMessageEffects/CKConfettiEffect.bundle/CKConfettiEffect -f UIView
    # Generate a header file for the class specified:
    (lldb) dclass -g UIView
    # Generate a protocol that you can cast an object to. Ideal when working with private classes at dev time
    (lldb) dclass -P UIView
    # Dump all classes and methods for a particular module, ideal for viewing changes in frameworks over time
    (lldb) dclass -o UIKit
    # Only dump classes whose superclass is of type class and in UIKit module. Ideal for going after specific classes
    (lldb) dclass -s NSObject -m UIKit
    '''
    # posix=False keeps backslashes/quotes intact for regex-style arguments.
    command_args = shlex.split(command, posix=False)
    parser = generate_option_parser()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError(parser.usage)
        return
    if not args:
        # result.SetError('Usage: find NSObjectSubclass\n\nUse \'help find\' for more details')
        clean_command = None
        # return
    if not args and options.generate_header:
        result.SetError('Need to supply class for option')
        return
    else:
        # NOTE(review): this else pairs with the generate_header check, so
        # with no args and no -g flag clean_command becomes '' (not None) --
        # confirm downstream scripts treat '' and None the same.
        clean_command = ('').join(args)
    res = lldb.SBCommandReturnObject()
    interpreter = debugger.GetCommandInterpreter()
    target = exe_ctx.target
    # Convenience shortcut: a lone class-name argument with no other flags
    # is treated as `dclass <name>` info mode.
    if not options.info and not options.class_type and not options.verbose and not options.regular_expression and not options.module and not options.filter and not options.search_protocols and not options.dump_code_output and not options.generate_header and not options.verbose_info and not options.generate_protocol and not options.conforms_to_protocol and not options.superclass and len(args) == 1:
        options.info = args[0]
    if options.info or options.verbose_info:
        # Info mode: evaluate the generated script in the process and print it.
        script = generate_class_info(options)
        # print(script)
        # return
        interpreter.HandleCommand('expression -lobjc -O -- ' + script, res)
        if res.GetError():
            result.SetError(res.GetError())
            return
        contents = res.GetOutput()
        result.AppendMessage(contents)
        return
    elif options.dump_code_output:
        # -o mode: dump per-module class listings into a timestamped /tmp dir.
        directory = '/tmp/{}_{}/'.format(target.executable.basename, datetime.datetime.now().time())
        os.makedirs(directory)
        modules = target.modules
        if len(args) > 0 and args[0] == '__all':
            # '__all' = every loaded image except /usr/lib and lldb helpers.
            os.makedirs(directory + 'PrivateFrameworks')
            os.makedirs(directory + 'Frameworks')
            modules = [i for i in target.modules if '/usr/lib/' not in i.file.fullpath and '__lldb_' not in i.file.fullpath]
            outputMsg = "Dumping all private Objective-C frameworks"
        elif len(args) > 0 and args[0]:
            module = target.module[args[0]]
            if module is None:
                result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(args[0]))
                return
            modules = [module]
            # NOTE(review): outputMsg is assigned here and above but never
            # read anywhere in this function -- dead variable.
            outputMsg = "Dumping all private Objective-C frameworks"
        else:
            # Default: only the main executable.
            modules = [target.module[target.executable.fullpath]]
        for module in modules:
            command_script = generate_module_header_script(options, module.file.fullpath.replace('//', '/'))
            interpreter.HandleCommand('expression -lobjc -O -u0 -- ' + command_script, res)
            # debugger.HandleCommand('expression -lobjc -O -- ' + command_script)
            # Mirror the framework layout under the output directory.
            if '/System/Library/PrivateFrameworks/' in module.file.fullpath:
                subdir = 'PrivateFrameworks/'
            elif '/System/Library/Frameworks/' in module.file.fullpath:
                subdir = 'Frameworks/'
            else:
                subdir = ''
            ds.create_or_touch_filepath(directory + subdir + module.file.basename + '.txt', res.GetOutput())
        print('Written output to: ' + directory + '... opening file')
        os.system('open -R ' + directory)
        return
    if options.module is not None:
        # Validate the -m module argument before generating the dump script.
        options.module = options.module.strip("\"\'")
        module = target.FindModule(lldb.SBFileSpec(options.module))
        if not module.IsValid():
            # NOTE(review): this inner check repeats the outer IsValid()
            # condition and so is redundant.
            if not module or not module.IsValid():
                result.SetError(
                    "Unable to open module name '{}', to see list of images use 'image list -b'".format(str(options.module)))
                return
    if options.conforms_to_protocol is not None:
        # Validate the -p protocol argument in the target process.
        interpreter.HandleCommand('expression -lobjc -O -- (id)NSProtocolFromString(@\"{}\")'.format(options.conforms_to_protocol), res)
        if 'nil' in res.GetOutput() or not res.GetOutput():
            result.SetError("No such Protocol name '{}'".format(options.conforms_to_protocol))
            return
        res.Clear()
    if options.generate_header or options.generate_protocol:
        command_script = generate_header_script(options, clean_command)
    else:
        command_script = generate_class_dump(target, options, clean_command)
    if options.generate_header or options.generate_protocol:
        # Header/protocol generation: verify the class exists, then write
        # the evaluated script output to a file under /tmp and reveal it.
        interpreter.HandleCommand('expression -lobjc -O -- (Class)NSClassFromString(@\"{}\")'.format(clean_command), res)
        if 'nil' in res.GetOutput():
            result.SetError('Can\'t find class named "{}". Womp womp...'.format(clean_command))
            return
        res.Clear()
        if options.generate_protocol:
            filepath = "/tmp/DS_" + clean_command + "Protocol.h"
        else:
            filepath = "/tmp/" + clean_command + ".h"
        interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
        # debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
        if res.GetError():
            result.SetError(res.GetError())
            return
        contents = res.GetOutput()
        ds.create_or_touch_filepath(filepath, contents)
        print('Written output to: ' + filepath + '... opening file')
        os.system('open -R ' + filepath)
    else:
        # Plain dump mode: print the evaluated class/protocol listing.
        msg = "Dumping protocols" if options.search_protocols else "Dumping classes"
        result.AppendMessage(ds.attrStr(msg, 'cyan'))
        interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
        # debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
        if res.GetError():
            result.SetError(ds.attrStr(res.GetError(), 'red'))
            return
        result.AppendMessage(ds.attrStr('************************************************************', 'cyan'))
        if res.Succeeded():
            result.AppendMessage(res.GetOutput())
| 41.652903 | 400 | 0.645922 | # MIT License
# Copyright (c) 2017 Derek Selander
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import ds
import os
import shlex
import optparse
import datetime
import lldb.utils.symbolication
def __lldb_init_module(debugger, internal_dict):
    """Entry point LLDB calls on script import; registers the `dclass` command."""
    registration = 'command script add -f dclass.dclass dclass -h "Dumps info about objc/swift classes"'
    debugger.HandleCommand(registration)
def dclass(debugger, command, exe_ctx, result, internal_dict):
'''
Dumps all the NSObject inherited classes in the process. If you give it a module,
it will dump only the classes within that module. You can also filter out classes
to only a certain type and can also generate a header file for a specific class.
Example:
# Dump ALL the NSObject classes within the process
(lldb) dclass
# Dump all the classes that are a UIViewController within the process
(lldb) dclass -f UIViewController
# Dump all the classes with the regex case insensitive search "viewcontroller" in the class name
(lldb) dclass -r (?i)viewCoNtrolLer
# Dump all the classes within the UIKit module
(lldb) dclass -m UIKit
# Dump all classes in CKConfettiEffect NSBundle that are UIView subclasses
(lldb) dclass /System/Library/Messages/iMessageEffects/CKConfettiEffect.bundle/CKConfettiEffect -f UIView
# Generate a header file for the class specified:
(lldb) dclass -g UIView
# Generate a protocol that you can cast an object to. Ideal when working with private classes at dev time
(lldb) dclass -P UIView
# Dump all classes and methods for a particular module, ideal for viewing changes in frameworks over time
(lldb) dclass -o UIKit
# Only dump classes whose superclass is of type class and in UIKit module. Ideal for going after specific classes
(lldb) dclass -s NSObject -m UIKit
'''
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
if not args:
# result.SetError('Usage: find NSObjectSubclass\n\nUse \'help find\' for more details')
clean_command = None
# return
if not args and options.generate_header:
result.SetError('Need to supply class for option')
return
else:
clean_command = ('').join(args)
res = lldb.SBCommandReturnObject()
interpreter = debugger.GetCommandInterpreter()
target = exe_ctx.target
if not options.info and not options.class_type and not options.verbose and not options.regular_expression and not options.module and not options.filter and not options.search_protocols and not options.dump_code_output and not options.generate_header and not options.verbose_info and not options.generate_protocol and not options.conforms_to_protocol and not options.superclass and len(args) == 1:
options.info = args[0]
if options.info or options.verbose_info:
script = generate_class_info(options)
# print(script)
# return
interpreter.HandleCommand('expression -lobjc -O -- ' + script, res)
if res.GetError():
result.SetError(res.GetError())
return
contents = res.GetOutput()
result.AppendMessage(contents)
return
elif options.dump_code_output:
directory = '/tmp/{}_{}/'.format(target.executable.basename, datetime.datetime.now().time())
os.makedirs(directory)
modules = target.modules
if len(args) > 0 and args[0] == '__all':
os.makedirs(directory + 'PrivateFrameworks')
os.makedirs(directory + 'Frameworks')
modules = [i for i in target.modules if '/usr/lib/' not in i.file.fullpath and '__lldb_' not in i.file.fullpath]
outputMsg = "Dumping all private Objective-C frameworks"
elif len(args) > 0 and args[0]:
module = target.module[args[0]]
if module is None:
result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(args[0]))
return
modules = [module]
outputMsg = "Dumping all private Objective-C frameworks"
else:
modules = [target.module[target.executable.fullpath]]
for module in modules:
command_script = generate_module_header_script(options, module.file.fullpath.replace('//', '/'))
interpreter.HandleCommand('expression -lobjc -O -u0 -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -- ' + command_script)
if '/System/Library/PrivateFrameworks/' in module.file.fullpath:
subdir = 'PrivateFrameworks/'
elif '/System/Library/Frameworks/' in module.file.fullpath:
subdir = 'Frameworks/'
else:
subdir = ''
ds.create_or_touch_filepath(directory + subdir + module.file.basename + '.txt', res.GetOutput())
print('Written output to: ' + directory + '... opening file')
os.system('open -R ' + directory)
return
if options.module is not None:
options.module = options.module.strip("\"\'")
module = target.FindModule(lldb.SBFileSpec(options.module))
if not module.IsValid():
if not module or not module.IsValid():
result.SetError(
"Unable to open module name '{}', to see list of images use 'image list -b'".format(str(options.module)))
return
if options.conforms_to_protocol is not None:
interpreter.HandleCommand('expression -lobjc -O -- (id)NSProtocolFromString(@\"{}\")'.format(options.conforms_to_protocol), res)
if 'nil' in res.GetOutput() or not res.GetOutput():
result.SetError("No such Protocol name '{}'".format(options.conforms_to_protocol))
return
res.Clear()
if options.generate_header or options.generate_protocol:
command_script = generate_header_script(options, clean_command)
else:
command_script = generate_class_dump(target, options, clean_command)
if options.generate_header or options.generate_protocol:
interpreter.HandleCommand('expression -lobjc -O -- (Class)NSClassFromString(@\"{}\")'.format(clean_command), res)
if 'nil' in res.GetOutput():
result.SetError('Can\'t find class named "{}". Womp womp...'.format(clean_command))
return
res.Clear()
if options.generate_protocol:
filepath = "/tmp/DS_" + clean_command + "Protocol.h"
else:
filepath = "/tmp/" + clean_command + ".h"
interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
if res.GetError():
result.SetError(res.GetError())
return
contents = res.GetOutput()
ds.create_or_touch_filepath(filepath, contents)
print('Written output to: ' + filepath + '... opening file')
os.system('open -R ' + filepath)
else:
msg = "Dumping protocols" if options.search_protocols else "Dumping classes"
result.AppendMessage(ds.attrStr(msg, 'cyan'))
interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
if res.GetError():
result.SetError(ds.attrStr(res.GetError(), 'red'))
return
result.AppendMessage(ds.attrStr('************************************************************', 'cyan'))
if res.Succeeded():
result.AppendMessage(res.GetOutput())
def generate_class_dump(target, options, clean_command=None):
    """Assemble the Objective-C expression source that walks the runtime and
    lists classes (or protocols), applying the filter switches in `options`.

    target:        lldb.SBTarget; consulted only when a module filter is set.
    options:       parsed command options (search_protocols, module, filter,
                   regular_expression, class_type, superclass, verbose, ...).
    clean_command: optional image name restricting the dump to that image.
    Returns the generated Objective-C source as a single string.
    """
    # Fragments are collected here and glued together verbatim at the end, so
    # every append must preserve the exact spacing of the emitted code.
    fragments = [r'''
  @import ObjectiveC;
  @import Foundation;
  unsigned int count = 0;
  typedef struct ds_cls_struct {
    void *isa;
    void *supercls;
    void *buckets;
    uint32_t _mask;
    uint32_t _occupied;
    uintptr_t bits;
  } ds_cls_struct;
  ''']
    # Pick the runtime enumeration API: protocols, one image, or every class.
    if options.search_protocols:
        fragments.append('Protocol **allProtocols = objc_copyProtocolList(&count);\n')
    elif clean_command:
        fragments.append('  const char **allClasses = objc_copyClassNamesForImage("' + clean_command + '", &count);')
    else:
        fragments.append('Class *allClasses = objc_copyClassList(&count);\n')
    if options.regular_expression is not None:
        fragments.append('  NSRegularExpression *regex = [NSRegularExpression regularExpressionWithPattern:@"' + options.regular_expression + '" options:0 error:nil];\n')
    # Open the enumeration loop over either protocols or classes.
    if options.search_protocols:
        fragments.append('''  NSMutableString *classesString = [NSMutableString string];
  for (int i = 0; i < count; i++) {
    Protocol *ptl = allProtocols[i];
    ''')
    else:
        fragments.append('''  NSMutableString *classesString = [NSMutableString string];
  for (int i = 0; i < count; i++) {
    Class cls = ''')
        # Image-scoped dumps hold C strings, not Class pointers, so resolve.
        if clean_command:
            fragments.append('objc_getClass(allClasses[i]);')
        else:
            fragments.append('allClasses[i];')
        fragments.append('''
    NSString *dsclsName = (NSString*)NSStringFromClass(cls);
    if ((BOOL)[dsclsName isEqualToString:@"_CNZombie_"] || (BOOL)[dsclsName isEqualToString:@"JSExport"] || (BOOL)[dsclsName isEqualToString:@"__NSGenericDeallocHandler"] || (BOOL)[dsclsName isEqualToString:@"_NSZombie_"] || (BOOL)[dsclsName isEqualToString:@"__NSMessageBuilder"] || (BOOL)[dsclsName isEqualToString:@"Object"] ) { continue; }
    ''')
    # Restrict the walk to addresses inside the requested module, if any.
    if options.module is not None:
        fragments.append(generate_module_search_sections_string(options.module, target, options.search_protocols))
    if not options.search_protocols and options.conforms_to_protocol is not None:
        fragments.append('if (!class_conformsToProtocol(cls, NSProtocolFromString(@"'+ options.conforms_to_protocol + '"))) { continue; }')
    if options.search_protocols:
        fragments.append('  NSString *clsString = (NSString *)NSStringFromProtocol(ptl);\n')
    else:
        fragments.append('  NSString *clsString = (NSString *)NSStringFromClass(cls);\n')
    if options.regular_expression is not None:
        fragments.append(r'''
    NSUInteger matches = (NSUInteger)[regex numberOfMatchesInString:clsString options:0 range:NSMakeRange(0, [clsString length])];
    if (matches == 0) {
      continue;
    }
    ''')
    # The low bit of the class data pointer distinguishes Swift from ObjC
    # classes (see the ds_cls_struct.bits test below).
    if options.class_type == 'objc':
        fragments.append('  if ((((ds_cls_struct *)cls)->bits & 1UL) == 1) { continue; }\n')
    if options.class_type == 'swift':
        fragments.append('if ((((ds_cls_struct *)cls)->bits & 1UL) == 0) { continue; }\n')
    if not options.search_protocols and options.superclass is not None:
        fragments.append('NSString *parentClassName = @"' + options.superclass + '";')
        fragments.append(r'''
    if (!(BOOL)[NSStringFromClass((Class)[cls superclass]) isEqualToString:parentClassName]) {
      continue;
    }
    ''')
    # Close the loop; three shapes: plain class dump, subclass-filtered dump,
    # and protocol dump.
    if not options.search_protocols and options.filter is None:
        if options.verbose:
            fragments.append(r'''
    NSString *imageString = [[[[NSString alloc] initWithUTF8String:class_getImageName(cls)] lastPathComponent] stringByDeletingPathExtension];
    [classesString appendString:imageString];
    [classesString appendString:@": "];
    ''')
        fragments.append(r'''
    [classesString appendString:(NSString *)clsString];
    [classesString appendString:@"\n"];
  }
  ''')
        fragments.append('\n  free(allClasses);\n  [classesString description];')
    elif not options.search_protocols:
        fragments.append('\n    if ((BOOL)[cls respondsToSelector:@selector(isSubclassOfClass:)] && (BOOL)[cls isSubclassOfClass:(Class)NSClassFromString(@"' + str(options.filter) + '")]) {\n')
        if options.verbose:
            fragments.append(r'''
    NSString *imageString = [[[[NSString alloc] initWithUTF8String:class_getImageName(cls)] lastPathComponent] stringByDeletingPathExtension];
    [classesString appendString:imageString];
    [classesString appendString:@": "];
    ''')
        fragments.append(r'''
    [classesString appendString:(NSString *)clsString];
    [classesString appendString:@"\n"];
    }
  }''')
        fragments.append('\n  free(allClasses);\n  [classesString description];')
    else:
        fragments.append(r'''
    [classesString appendString:(NSString *)clsString];
    [classesString appendString:@"\n"];
  }''')
        fragments.append('\n  free(allProtocols);\n  [classesString description];')
    return ''.join(fragments)
def generate_module_search_sections_string(module_name, target, useProtocol=False):
    """Emit an Objective-C guard snippet that `continue`s past any class or
    protocol whose address lies outside `module_name`'s __DATA /
    __DATA_DIRTY sections.

    module_name: image name/path whose sections bound the search.
    target:      lldb.SBTarget used to resolve the module and load addresses.
    useProtocol: when True the guard tests the `ptl` variable, else `cls`.
    Returns the snippet as a string.
    Raises RuntimeError when the module cannot be found.
    """
    module = target.FindModule(lldb.SBFileSpec(module_name))
    if not module.IsValid():
        # FIX: this scope has no `result` object (that belongs to the lldb
        # command entry point), so the old `result.SetError(...)` raised a
        # NameError and then returned None into a string concatenation.
        # Raise a descriptive error instead.
        raise RuntimeError(
            "Unable to open module name '{}', to see list of images use 'image list -b'".format(module_name))
    if useProtocol:
        returnString = r'''
    uintptr_t addr = (uintptr_t)ptl;
    if (!('''
    else:
        returnString = r'''
    uintptr_t addr = (uintptr_t)cls;
    if (!('''
    # OR together a range test for every subsection of __DATA.
    section = module.FindSection("__DATA")
    for idx, subsec in enumerate(section):
        lower_bounds = subsec.GetLoadAddress(target)
        upper_bounds = lower_bounds + subsec.file_size
        if idx != 0:
            returnString += ' || '
        returnString += '({} <= addr && addr <= {})'.format(lower_bounds, upper_bounds)
    # Also cover the __DATA_DIRTY segment's subsections, when present.
    dirtysection = module.FindSection("__DATA_DIRTY")
    for subsec in dirtysection:
        lower_bounds = subsec.GetLoadAddress(target)
        upper_bounds = lower_bounds + subsec.file_size
        returnString += ' || ({} <= addr && addr <= {})'.format(lower_bounds, upper_bounds)
    returnString += ')) { continue; }\n'
    return returnString
def generate_header_script(options, class_to_generate_header):
    """Build the Objective-C expression source that reconstructs a header
    (or, with options.generate_protocol, a protocol declaration) for the
    class named `class_to_generate_header` using runtime introspection.

    options: parsed command options; only `generate_protocol` is read here.
    Returns the generated source; evaluating it in the target yields the
    header text via the trailing `returnString;` expression.
    """
    # FIX: was '@import @ObjectiveC;' — not valid Clang module-import syntax
    # (and inconsistent with generate_class_dump's '@import ObjectiveC;'),
    # which made the whole evaluated expression fail to parse.
    script = '@import ObjectiveC;\n'
    script += 'NSString *className = @"' + str(class_to_generate_header) + '";\n'
    script += r'''
  //Dang it. LLDB JIT Doesn't like NSString stringWithFormat on device. Need to use stringByAppendingString instead
  // Runtime declarations in case we're running on a stripped executable
  typedef struct objc_method *Method;
  typedef struct objc_ivar *Ivar;
  // typedef struct objc_category *Category;
  typedef struct objc_property *objc_property_t;
  NSMutableString *returnString = [NSMutableString string];
  // Properties
  NSMutableString *generatedProperties = [NSMutableString string];
  NSMutableSet *blackListMethodNames = [NSMutableSet set];
  NSMutableSet *exportedClassesSet = [NSMutableSet set];
  NSMutableSet *exportedProtocolsSet = [NSMutableSet set];
  [blackListMethodNames addObjectsFromArray:@[@".cxx_destruct", @"dealloc"]];
  unsigned int propertyCount = 0;
  Class cls = NSClassFromString(className);
  objc_property_t *properties = (objc_property_t *)class_copyPropertyList(cls, &propertyCount);
  NSCharacterSet *charSet = [NSCharacterSet characterSetWithCharactersInString:@","];
  NSString *(^argumentBlock)(NSString *) = ^(NSString *arg) {
    if ([arg isEqualToString:@"@"]) {
      return @"id";
    } else if ([arg isEqualToString:@"v"]) {
      return @"void";
    } else if ([arg hasPrefix:@"{CGRect"]) {
      return @"CGRect";
    } else if ([arg hasPrefix:@"{CGPoint"]) {
      return @"CGPoint";
    } else if ([arg hasPrefix:@"{CGSize"]) {
      return @"CGSize";
    } else if ([arg isEqualToString:@"q"]) {
      return @"NSInteger";
    } else if ([arg isEqualToString:@"B"]) {
      return @"BOOL";
    } else if ([arg isEqualToString:@":"]) {
      return @"SEL";
    } else if ([arg isEqualToString:@"d"]) {
      return @"CGFloat";
    } else if ([arg isEqualToString:@"@?"]) { // A block?
      return @"id";
    }
    return @"void *";
  };
  NSMutableSet *blackListPropertyNames = [NSMutableSet setWithArray:@[@"hash", @"superclass", @"class", @"description", @"debugDescription"]];
  for (int i = 0; i < propertyCount; i++) {
    objc_property_t property = properties[i];
    NSString *attributes = [NSString stringWithUTF8String:(char *)property_getAttributes(property)];
    NSString *name = [NSString stringWithUTF8String:(char *)property_getName(property)];
    if ([blackListPropertyNames containsObject:name]) {
      continue;
    }
    NSMutableString *generatedPropertyString = [NSMutableString stringWithString:@"@property ("];
    NSScanner *scanner = [[NSScanner alloc] initWithString:attributes];
    [scanner setCharactersToBeSkipped:charSet];
    BOOL multipleOptions = 0;
    NSString *propertyType;
    NSString *parsedInput;
    while ([scanner scanUpToCharactersFromSet:charSet intoString:&parsedInput]) {
      if ([parsedInput isEqualToString:@"N"]) {
        if (multipleOptions) {
          [generatedPropertyString appendString:@", "];
        }
        [generatedPropertyString appendString:@"nonatomic"];
        multipleOptions = 1;
      } else if ([parsedInput isEqualToString:@"W"]) {
        if (multipleOptions) {
          [generatedPropertyString appendString:@", "];
        }
        [generatedPropertyString appendString:@"weak"];
        multipleOptions = 1;
      } else if ([parsedInput hasPrefix:@"G"]) {
        if (multipleOptions) {
          [generatedPropertyString appendString:@", "];
        }
        [generatedPropertyString appendString:(NSString *)@"getter="];
        [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]];
        [blackListMethodNames addObject:[parsedInput substringFromIndex:1]];
        multipleOptions = 1;
      } else if ([parsedInput hasPrefix:@"S"]) {
        if (multipleOptions) {
          [generatedPropertyString appendString:@", "];
        }
        [generatedPropertyString appendString:(NSString *)@"setter="];
        [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]];
        [blackListMethodNames addObject:[parsedInput substringFromIndex:1]];
        multipleOptions = 1;
      } else if ([parsedInput isEqualToString:@"&"]) {
        if (multipleOptions) {
          [generatedPropertyString appendString:@", "];
        }
        [generatedPropertyString appendString:@"strong"];
        multipleOptions = 1;
      } else if ([parsedInput hasPrefix:@"V"]) { // ivar name here, V_name
      } else if ([parsedInput hasPrefix:@"T"]) { // Type here, T@"NSString"
        if ( (BOOL)[[[parsedInput substringToIndex:2] substringFromIndex:1] isEqualToString: @"@"]) { // It's a NSObject
          NSString *tmpPropertyType = [parsedInput substringFromIndex:1];
          NSArray *propertyComponents = [tmpPropertyType componentsSeparatedByString:@"\""];
          if ([propertyComponents count] > 1) {
            NSString *component = (NSString *)[propertyComponents objectAtIndex:1];
            component = [component stringByReplacingOccurrencesOfString:@"><" withString:@", "];
            if ([component hasPrefix:@"<"]) {
              propertyType = (NSString *)[@"id" stringByAppendingString:component];
              NSString *formatted = [[component stringByReplacingOccurrencesOfString:@"<" withString:@""] stringByReplacingOccurrencesOfString:@">" withString:@""];
              for (NSString *f in [formatted componentsSeparatedByString:@", "]) {
                [exportedProtocolsSet addObject:f];
              }
            } else {
              [exportedClassesSet addObject:component];
              propertyType = (NSString *)[component stringByAppendingString:@"*"];
            }
          } else {
            propertyType = @"id";
          }
        } else {
          propertyType = argumentBlock([parsedInput substringFromIndex:1]);
        }
      }
    }
    [generatedPropertyString appendString:(NSString *)[(NSString *)[(NSString *)[(NSString *)[@") " stringByAppendingString:propertyType] stringByAppendingString:@" "] stringByAppendingString:name] stringByAppendingString:@";\n"]];
    [generatedProperties appendString:generatedPropertyString];
    [blackListMethodNames addObject:name];
  }
  NSMutableArray *tmpSetArray = [NSMutableArray array];
  for (NSString *propertyName in [blackListMethodNames allObjects]) {
    NSString *setter = (NSString *)[@"set" stringByAppendingString:(NSString *)[(NSString *)[(NSString *)[[propertyName substringToIndex:1] uppercaseString] stringByAppendingString:[propertyName substringFromIndex:1]] stringByAppendingString:@":"]];
    [tmpSetArray addObject:setter];
  }
  [blackListMethodNames addObjectsFromArray:tmpSetArray];
  NSString *(^generateMethodsForClass)(Class) = ^(Class cls) {
    NSMutableString* generatedMethods = [NSMutableString stringWithString:@""];
    unsigned int classCount = 0;
    Method *methods = (Method *)class_copyMethodList(cls, &classCount);
    NSString *classOrInstanceStart = (BOOL)class_isMetaClass(cls) ? @"+" : @"-";
    for (int i = 0; i < classCount; i++) {
      Method m = methods[i];
      NSString *methodName = NSStringFromSelector((char *)method_getName(m));
      if ([blackListMethodNames containsObject:methodName]) {
        continue;
      }
      NSMutableString *generatedMethodString = [NSMutableString stringWithString:classOrInstanceStart];
      char *retType = (char *)method_copyReturnType(m);
      NSString *retTypeString = [NSString stringWithUTF8String:retType];
      free(retType);
      unsigned int arguments = (unsigned int)method_getNumberOfArguments(m);
      [generatedMethodString appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(retTypeString)] stringByAppendingString:@")"]];
      NSArray *methodComponents = [methodName componentsSeparatedByString:@":"];
      NSMutableString *realizedMethod = [NSMutableString stringWithString:@""];
      for (int j = 2; j < arguments; j++) { // id, sel, always
        int index = j - 2;
        [realizedMethod appendString:(NSString *)[methodComponents[index] stringByAppendingString:@":"]];
        char *argumentType = (char *)method_copyArgumentType(m, j);
        NSString *argumentTypeString = [NSString stringWithUTF8String:argumentType];
        free(argumentType);
        [realizedMethod appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(argumentTypeString)] stringByAppendingString:@")"]];
        [realizedMethod appendString:@"arg"];
        [realizedMethod appendString:[@(index) stringValue]];
        [realizedMethod appendString:@" "];
      }
      [generatedMethodString appendString:realizedMethod];
      if (arguments == 2) {
        [generatedMethodString appendString:methodName];
      }
      [generatedMethods appendString:(NSString *)[generatedMethodString stringByAppendingString:@";\n"]];
    }
    free(methods);
    return generatedMethods;
  };
  // Instance Methods
  NSString *generatedInstanceMethods = generateMethodsForClass((Class)cls);
  // Class Methods
  Class metaClass = (Class)objc_getMetaClass((char *)class_getName(cls));
  NSString *generatedClassMethods = generateMethodsForClass(metaClass);
  NSMutableString *finalString = [NSMutableString string];
  [finalString appendString:@"#import <Foundation/Foundation.h>\n\n"];
  if ([exportedClassesSet count] > 0) {
    NSMutableString *importString = [NSMutableString string];
    [importString appendString:@"@class "];
    for (NSString *str in [exportedClassesSet allObjects]) {
      [importString appendString:str];
      [importString appendString:@", "];
    }
    [importString appendString:@";"];
    NSString *finalImport = [importString stringByReplacingOccurrencesOfString:@", ;" withString:@";\n\n"];
    [finalString appendString:finalImport];
  }
  if ([exportedProtocolsSet count] > 0) {
    NSMutableString *importString = [NSMutableString string];
    [importString appendString:@"@protocol "];
    for (NSString *str in [exportedProtocolsSet allObjects]) {
      [importString appendString:str];
      [importString appendString:@", "];
    }
    [importString appendString:@";"];
    NSString *finalImport = [importString stringByReplacingOccurrencesOfString:@", ;" withString:@";\n\n"];
    [finalString appendString:finalImport];
  }'''
    # Emit either a protocol declaration or an @interface header.
    if options.generate_protocol:
        script += r'''
  [finalString appendString:@"\n@protocol DS_"];
  [finalString appendString:(NSString *)[cls description]];
  [finalString appendString:@"Protocol <NSObject>"];'''
    else:
        script += r'''
  [finalString appendString:@"\n@interface "];
  [finalString appendString:(NSString *)[cls description]];
  [finalString appendString:@" : "];
  [finalString appendString:(NSString *)[[cls superclass] description]];'''
    script += r'''
  [finalString appendString:@"\n\n"];
  [finalString appendString:generatedProperties];
  [finalString appendString:@"\n"];
  [finalString appendString:generatedClassMethods];
  [finalString appendString:generatedInstanceMethods];
  [finalString appendString:@"\n@end"];
  [returnString appendString:finalString];
  // Free stuff
  free(properties);
  returnString;
  '''
    return script
def generate_module_header_script(options, modulePath):
    """Build the Objective-C expression source that synthesizes a pseudo-header
    for every Objective-C class found in the image at `modulePath`.

    options:    accepted for signature parity with generate_header_script;
                not consulted by this function.
    modulePath: full path of the image whose classes are dumped (spliced into
                the generated source, so it must be a plain path string).
    Returns the generated source; evaluating it yields one banner-separated
    declaration block per class, sorted by class name.
    """
    # FIX: was '@import @ObjectiveC;' — not valid Clang module-import syntax
    # (and inconsistent with generate_class_dump's '@import ObjectiveC;'),
    # which made the whole evaluated expression fail to parse.
    script = r'''@import ObjectiveC;
  //Dang it. LLDB JIT Doesn't like NSString stringWithFormat on device. Need to use stringByAppendingString instead
  // Runtime declarations in case we're running on a stripped executable
  typedef struct objc_method *Method;
  typedef struct objc_ivar *Ivar;
  // typedef struct objc_category *Category;
  typedef struct objc_property *objc_property_t;
  NSMutableString *returnString = [NSMutableString string];
  [returnString appendString:@"''' + modulePath + r'''\n************************************************************\n"];
  // Properties
  NSMutableSet *exportedClassesSet = [NSMutableSet set];
  NSMutableSet *exportedProtocolsSet = [NSMutableSet set];
  unsigned int count = 0;
  const char **allClasses = (const char **)objc_copyClassNamesForImage("''' + modulePath + r'''", &count);
  NSMutableDictionary *returnDict = [NSMutableDictionary dictionaryWithCapacity:count];
  for (int i = 0; i < count; i++) {
    Class cls = objc_getClass(allClasses[i]);
    NSMutableString *generatedProperties = [NSMutableString string];
    NSMutableSet *blackListMethodNames = [NSMutableSet set];
    [blackListMethodNames addObjectsFromArray:@[@".cxx_destruct", @"dealloc"]];
    unsigned int propertyCount = 0;
    objc_property_t *properties = (objc_property_t *)class_copyPropertyList(cls, &propertyCount);
    NSCharacterSet *charSet = [NSCharacterSet characterSetWithCharactersInString:@","];
    NSString *(^argumentBlock)(NSString *) = ^(NSString *arg) {
      if ([arg isEqualToString:@"@"]) {
        return @"id";
      } else if ([arg isEqualToString:@"v"]) {
        return @"void";
      } else if ([arg hasPrefix:@"{CGRect"]) {
        return @"CGRect";
      } else if ([arg hasPrefix:@"{CGPoint"]) {
        return @"CGPoint";
      } else if ([arg hasPrefix:@"{CGSize"]) {
        return @"CGSize";
      } else if ([arg isEqualToString:@"q"]) {
        return @"NSInteger";
      } else if ([arg isEqualToString:@"B"]) {
        return @"BOOL";
      } else if ([arg isEqualToString:@":"]) {
        return @"SEL";
      } else if ([arg isEqualToString:@"d"]) {
        return @"CGFloat";
      } else if ([arg isEqualToString:@"@?"]) { // A block?
        return @"id";
      }
      return @"void *";
    };
    NSMutableSet *blackListPropertyNames = [NSMutableSet setWithArray:@[@"hash", @"superclass", @"class", @"description", @"debugDescription"]];
    for (int i = 0; i < propertyCount; i++) {
      objc_property_t property = properties[i];
      NSString *attributes = [NSString stringWithUTF8String:(char *)property_getAttributes(property)];
      NSString *name = [NSString stringWithUTF8String:(char *)property_getName(property)];
      if ([blackListPropertyNames containsObject:name]) {
        continue;
      }
      NSMutableString *generatedPropertyString = [NSMutableString stringWithString:@"@property ("];
      NSScanner *scanner = [[NSScanner alloc] initWithString:attributes];
      [scanner setCharactersToBeSkipped:charSet];
      BOOL multipleOptions = 0;
      NSString *propertyType;
      NSString *parsedInput;
      while ([scanner scanUpToCharactersFromSet:charSet intoString:&parsedInput]) {
        if ([parsedInput isEqualToString:@"N"]) {
          if (multipleOptions) {
            [generatedPropertyString appendString:@", "];
          }
          [generatedPropertyString appendString:@"nonatomic"];
          multipleOptions = 1;
        } else if ([parsedInput isEqualToString:@"W"]) {
          if (multipleOptions) {
            [generatedPropertyString appendString:@", "];
          }
          [generatedPropertyString appendString:@"weak"];
          multipleOptions = 1;
        } else if ([parsedInput hasPrefix:@"G"]) {
          if (multipleOptions) {
            [generatedPropertyString appendString:@", "];
          }
          [generatedPropertyString appendString:(NSString *)@"getter="];
          [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]];
          [blackListMethodNames addObject:[parsedInput substringFromIndex:1]];
          multipleOptions = 1;
        } else if ([parsedInput hasPrefix:@"S"]) {
          if (multipleOptions) {
            [generatedPropertyString appendString:@", "];
          }
          [generatedPropertyString appendString:(NSString *)@"setter="];
          [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]];
          [blackListMethodNames addObject:[parsedInput substringFromIndex:1]];
          multipleOptions = 1;
        } else if ([parsedInput isEqualToString:@"&"]) {
          if (multipleOptions) {
            [generatedPropertyString appendString:@", "];
          }
          [generatedPropertyString appendString:@"strong"];
          multipleOptions = 1;
        } else if ([parsedInput hasPrefix:@"V"]) { // ivar name here, V_name
        } else if ([parsedInput hasPrefix:@"T"]) { // Type here, T@"NSString"
          if ( (BOOL)[[[parsedInput substringToIndex:2] substringFromIndex:1] isEqualToString: @"@"]) { // It's a NSObject
            NSString *tmpPropertyType = [parsedInput substringFromIndex:1];
            NSArray *propertyComponents = [tmpPropertyType componentsSeparatedByString:@"\""];
            if ([propertyComponents count] > 1) {
              NSString *component = (NSString *)[propertyComponents objectAtIndex:1];
              component = [component stringByReplacingOccurrencesOfString:@"><" withString:@", "];
              if ([component hasPrefix:@"<"]) {
                propertyType = (NSString *)[@"id" stringByAppendingString:component];
                NSString *formatted = [[component stringByReplacingOccurrencesOfString:@"<" withString:@""] stringByReplacingOccurrencesOfString:@">" withString:@""];
                for (NSString *f in [formatted componentsSeparatedByString:@", "]) {
                  [exportedProtocolsSet addObject:f];
                }
              } else {
                [exportedClassesSet addObject:component];
                propertyType = (NSString *)[component stringByAppendingString:@"*"];
              }
            } else {
              propertyType = @"id";
            }
          } else {
            propertyType = argumentBlock([parsedInput substringFromIndex:1]);
          }
        }
      }
      [generatedPropertyString appendString:(NSString *)[(NSString *)[(NSString *)[(NSString *)[@") " stringByAppendingString:propertyType] stringByAppendingString:@" "] stringByAppendingString:name] stringByAppendingString:@";\n"]];
      [generatedProperties appendString:generatedPropertyString];
      [blackListMethodNames addObject:name];
    }
    NSMutableArray *tmpSetArray = [NSMutableArray array];
    for (NSString *propertyName in [blackListMethodNames allObjects]) {
      NSString *setter = (NSString *)[@"set" stringByAppendingString:(NSString *)[(NSString *)[(NSString *)[[propertyName substringToIndex:1] uppercaseString] stringByAppendingString:[propertyName substringFromIndex:1]] stringByAppendingString:@":"]];
      [tmpSetArray addObject:setter];
    }
    [blackListMethodNames addObjectsFromArray:tmpSetArray];
    NSString *(^generateMethodsForClass)(Class) = ^(Class cls) {
      NSMutableString* generatedMethods = [NSMutableString stringWithString:@""];
      unsigned int classCount = 0;
      Method *methods = (Method *)class_copyMethodList(cls, &classCount);
      NSString *classOrInstanceStart = (BOOL)class_isMetaClass(cls) ? @"+" : @"-";
      for (int i = 0; i < classCount; i++) {
        Method m = methods[i];
        NSString *methodName = NSStringFromSelector((char *)method_getName(m));
        if ([blackListMethodNames containsObject:methodName]) {
          continue;
        }
        NSMutableString *generatedMethodString = [NSMutableString stringWithString:classOrInstanceStart];
        char *retType = (char *)method_copyReturnType(m);
        NSString *retTypeString = [NSString stringWithUTF8String:retType];
        free(retType);
        unsigned int arguments = (unsigned int)method_getNumberOfArguments(m);
        [generatedMethodString appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(retTypeString)] stringByAppendingString:@")"]];
        NSArray *methodComponents = [methodName componentsSeparatedByString:@":"];
        NSMutableString *realizedMethod = [NSMutableString stringWithString:@""];
        for (int j = 2; j < arguments; j++) { // id, sel, always
          int index = j - 2;
          [realizedMethod appendString:(NSString *)[methodComponents[index] stringByAppendingString:@":"]];
          char *argumentType = (char *)method_copyArgumentType(m, j);
          NSString *argumentTypeString = [NSString stringWithUTF8String:argumentType];
          free(argumentType);
          [realizedMethod appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(argumentTypeString)] stringByAppendingString:@")"]];
          [realizedMethod appendString:@"arg"];
          [realizedMethod appendString:[@(index) stringValue]];
          [realizedMethod appendString:@" "];
        }
        [generatedMethodString appendString:realizedMethod];
        if (arguments == 2) {
          [generatedMethodString appendString:methodName];
        }
        [generatedMethods appendString:(NSString *)[generatedMethodString stringByAppendingString:@";\n"]];
      }
      free(methods);
      return generatedMethods;
    };
    // Instance Methods
    NSString *generatedInstanceMethods = generateMethodsForClass((Class)cls);
    // Class Methods
    Class metaClass = (Class)objc_getMetaClass((char *)class_getName(cls));
    NSString *generatedClassMethods = generateMethodsForClass(metaClass);
    NSMutableString *finalString = [NSMutableString string];
    [finalString appendString:(NSString *)[cls description]];
    [finalString appendString:@" : "];
    [finalString appendString:(NSString *)[[cls superclass] description]];
    [finalString appendString:(NSString *)[[[generatedProperties componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]];
    [finalString appendString:(NSString *)[[[[generatedClassMethods stringByReplacingOccurrencesOfString:@" ;" withString:@";"] componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]];
    [finalString appendString:(NSString *)[[[[generatedInstanceMethods stringByReplacingOccurrencesOfString:@" ;" withString:@";"] componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]];
    [finalString appendString:@"\n************************************************************\n"];
    [returnDict setObject:(id _Nonnull)finalString forKey:(id _Nonnull)[cls description]];
    // Free stuff
    free(properties);
  }
  NSArray *sortedKeys = [[returnDict allKeys] sortedArrayUsingSelector: @selector(compare:)];
  NSMutableArray *sortedValues = [NSMutableArray array];
  for (NSString *key in sortedKeys) {
    [returnString appendString:(NSString *)[returnDict objectForKey:key]];
  }
  returnString;
  '''
    return script
def generate_class_info(options):
if options.verbose_info and not options.info:
options.info = options.verbose_info
verboseOutput = True
else:
verboseOutput = False
if '.' in options.info:
classInfo = "(Class)NSClassFromString(@\"" + options.info + "\")"
else:
classInfo = "[" + options.info + " class]"
script = "BOOL verboseOutput = {};\n".format("YES" if verboseOutput else "NO")
script += r'''
@import Foundation;
@import ObjectiveC;
#define RO_META (1<<0)
// class is a root class
#define RO_ROOT (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION (1<<5)
// this bit is available for reassignment
// #define RO_REUSE_ME (1<<6)
// class compiled with ARC
#define RO_IS_ARC (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED (1<<31)
// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.
// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED (1<<31)
// class is unresolved future class
#define RW_FUTURE (1<<30)
// class is initialized
#define RW_INITIALIZED (1<<29)
// class is initializing
#define RW_INITIALIZING (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24 (1<<24)
// class +load has been called
#define RW_LOADED (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// available for use
// #define RW_20 (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING (1<<19)
// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags or class_t->bits
// These flags are optimized for retain/release and alloc/dealloc
// 64-bit stores more of them in class_t->bits to reduce pointer indirection.
#if !__LP64__
// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ (1<<16)
// class's instances requires raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA (1<<15)
#endif
// class is a Swift class
#define FAST_IS_SWIFT (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR (1UL<<1)
// data pointer
#define FAST_DATA_MASK 0xfffffffcUL
#elif 1
// Leaks-compatible version that steals low bits only.
// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ (1<<16)
// class is a Swift class
#define FAST_IS_SWIFT (1UL<<0)
// class or superclass has default retain/release/autorelease/retainCount/
// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR (1UL<<1)
// class's instances requires raw isa
#define FAST_REQUIRES_RAW_ISA (1UL<<2)
// data pointer
#define FAST_DATA_MASK 0x00007ffffffffff8UL
#else
// Leaks-incompatible version that steals lots of bits.
// class is a Swift class
#define FAST_IS_SWIFT (1UL<<0)
// class's instances requires raw isa
#define FAST_REQUIRES_RAW_ISA (1UL<<1)
// class or superclass has .cxx_destruct implementation
// This bit is aligned with isa_t->hasCxxDtor to save an instruction.
#define FAST_HAS_CXX_DTOR (1UL<<2)
// data pointer
#define FAST_DATA_MASK 0x00007ffffffffff8UL
// class or superclass has .cxx_construct implementation
#define FAST_HAS_CXX_CTOR (1UL<<47)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is is stored in the metaclass.
#define FAST_HAS_DEFAULT_AWZ (1UL<<48)
// class or superclass has default retain/release/autorelease/retainCount/
// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR (1UL<<49)
// summary bit for fast alloc path: !hasCxxCtor and
// !instancesRequireRawIsa and instanceSize fits into shiftedSize
#define FAST_ALLOC (1UL<<50)
// instance size in units of 16 bytes
// or 0 if the instance size is too big in this field
// This field must be LAST
#define FAST_SHIFTED_SIZE_SHIFT 51
// FAST_ALLOC means
// FAST_HAS_CXX_CTOR is set
// FAST_REQUIRES_RAW_ISA is not set
// FAST_SHIFTED_SIZE is not zero
// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that
// bit is stored on the metaclass.
#define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA)
#define FAST_ALLOC_VALUE (0)
#endif
#ifndef _DLFCN_H_
typedef struct dl_info {
const char *dli_fname; /* Pathname of shared object */
void *dli_fbase; /* Base address of shared object */
const char *dli_sname; /* Name of nearest symbol */
void *dli_saddr; /* Address of nearest symbol */
} Dl_info;
#endif // _DLFCN_H_
//*****************************************************************************/
#pragma mark - Methods
//*****************************************************************************/
typedef struct method_t {
char * name;
const char *types;
IMP imp;
} method_t;
typedef struct method_list_t {
uint32_t entsizeAndFlags;
uint32_t count;
method_t *first;
} method_list_t;
typedef struct method_array_t {
uint32_t count;
method_list_t *methods;
} method_array_t;
//*****************************************************************************/
#pragma mark - Ivars
//*****************************************************************************/
typedef struct ivar_t {
#if __x86_64__
// *offset was originally 64-bit on some x86_64 platforms.
// We read and write only 32 bits of it.
// Some metadata provides all 64 bits. This is harmless for unsigned
// little-endian values.
// Some code uses all 64 bits. class_addIvar() over-allocates the
// offset for their benefit.
#endif
int32_t *offset;
const char *name;
const char *type;
// alignment is sometimes -1; use alignment() instead
uint32_t alignment_raw;
uint32_t size;
} ivar_t;
typedef struct ivar_list_t {
uint32_t entsizeAndFlags;
uint32_t count;
ivar_t *first;
} ivar_list_t;
//*****************************************************************************/
#pragma mark - Properties
//*****************************************************************************/
typedef struct property_t {
const char *name;
const char *attributes;
} property_t;
typedef struct property_list_t {
uint32_t entsizeAndFlags;
uint32_t count;
property_t *first;
} property_list_t;
typedef struct property_array_t {
uint32_t count;
property_list_t *properties;
} property_array_t;
//*****************************************************************************/
#pragma mark - Protocols
//*****************************************************************************/
typedef struct dsprotocol_t {
uint32_t flags;
uint32_t version;
const char *name;
// struct protocol_list_t *protocols;
// method_list_t *instanceMethods;
// method_list_t *classMethods;
// method_list_t *optionalInstanceMethods;
// method_list_t *optionalClassMethods;
// property_list_t *instanceProperties;
// uint32_t size; // sizeof(protocol_t)
// uint32_t flags;
// // Fields below this point are not always present on disk.
// const char **_extendedMethodTypes;
// const char *_demangledName;
// property_list_t *_classProperties;
} dsprotocol_t;
typedef struct protocol_list_t {
uintptr_t count;
dsprotocol_t *first;
} protocol_list_t;
typedef struct protocol_array_t {
uint32_t count;
protocol_list_t *protocols;
} protocol_array_t;
//*****************************************************************************/
#pragma mark - Categories
//*****************************************************************************/
typedef struct class_ro_t {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
#ifdef __LP64__
uint32_t reserved;
#endif
const uint8_t * ivarLayout;
const char * name;
method_list_t * baseMethodList;
protocol_list_t * baseProtocols;
ivar_list_t * ivars;
uint8_t * weakIvarLayout;
property_list_t *baseProperties;
} class_ro_t;
typedef struct class_rw_t {
uint32_t flags;
uint32_t version;
const class_ro_t *ro;
method_array_t methods; // redefined from method_array_t
property_array_t properties; // redefined from property_array_t
protocol_list_t protocols; // redefined from protocol_array_t
struct dsobjc_class* firstSubclass;
struct dsobjc_class* nextSiblingClass;
char *demangledName;
} class_rw_t;
typedef struct dsobjc_class {
struct dsobjc_class* isa;
struct dsobjc_class* superclass;
void *_buckets; // formerly cache pointer and vtable
uint32_t _mask;
uint32_t _occupied;
uintptr_t bits;
class_rw_t *ds_data() {
return (class_rw_t *)(bits & FAST_DATA_MASK);
}
} dsobjc_class;
typedef struct dsswift_class {
struct dsobjc_class *isa;
struct dsobjc_class *superclass;
void *_buckets;
void *maskAndOccupied;
uintptr_t bits;
uint32_t flags;
uint32_t instanceAddressPoint;
uint32_t instanceSize;
uint16_t instanceAlignMask;
uint16_t runtimeReservedBits;
uint32_t classSize;
uint32_t classAddressPoint;
uintptr_t typeDescriptor;
uintptr_t ivarDestroyer;
uintptr_t *methods;
class_rw_t *ds_data() {
return (class_rw_t *)(bits & FAST_DATA_MASK);
}
} dsswift_class;
dsobjc_class *dsclass = (dsobjc_class*)''' + classInfo + r''';
dsobjc_class *dsclass_meta = (dsobjc_class*)object_getClass((Class)dsclass);
uint32_t roflags = dsclass->ds_data()->ro->flags;
uint32_t rwflags = dsclass->ds_data()->flags;
const char* name = dsclass->ds_data()->ro->name;
const char* superclassName = dsclass->superclass ? dsclass->superclass->ds_data()->ro->name : nil;
property_list_t *bprops = dsclass->ds_data()->ro->baseProperties;
protocol_list_t *bprot = dsclass->ds_data()->ro->baseProtocols;
method_list_t *bmeth = dsclass->ds_data()->ro->baseMethodList;
ivar_list_t *bivar = dsclass->ds_data()->ro->ivars;
NSMutableString *returnString = [NSMutableString new];
if (verboseOutput) {
[returnString appendString:@"\n******************************************\n"];
[returnString appendString:@" "];
[returnString appendString:[NSString stringWithUTF8String:(char *)name]];
if (superclassName && (roflags & RO_META)) {
[returnString appendString:@" : (META)"];
} else if (superclassName) {
[returnString appendString:@" : "];
[returnString appendString:[NSString stringWithUTF8String:(char *)superclassName]];
}
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" (%p)", dsclass]];
[returnString appendString:@"\n******************************************\n\n"];
[returnString appendString:@"Found in: "];
[returnString appendString:[NSString stringWithUTF8String:(char *)class_getImageName((Class)dsclass)]];
[returnString appendString:@"\n\n"];
[returnString appendString:@"Swift:\t\t\t"];
[returnString appendString:dsclass->bits & FAST_IS_SWIFT ? @"YES\n" : @"NO\n" ];
[returnString appendString:@"Size:\t\t\t"];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"0x%x bytes", dsclass->ds_data()->ro->instanceSize]];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\nInstance Start:\t0x%x", dsclass->ds_data()->ro->instanceStart]];
[returnString appendString:@"\nMeta:\t\t\t"];
[returnString appendString:(BOOL)class_isMetaClass((Class)dsclass) ? @"YES" : @"NO"];;
[returnString appendString:@"\n\n"];
///////////////////////////////////////////////////////////////////
[returnString appendString:@"Protocols: "];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t\t%d\t%p\n", bprot ? bprot->count : 0, bprot ? &bprot->first : 0]];
[returnString appendString:@"Ivars: "];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t\t\t%d\t%p\n", bivar ? bivar->count : 0, bivar ? &bivar->first : 0]];
[returnString appendString:@"Properties: "];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", bprops ? bprops->count : 0, bprops ? &bprops->first : 0]];
if (!(roflags & RO_META)) {
[returnString appendString:@"I ObjC Meth: "];
} else {
[returnString appendString:@"C ObjC Meth: "];
}
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", bmeth ? bmeth->count : 0, bmeth ? &bmeth->first : 0]];
if (!(roflags & RO_META) && NSClassFromString(@"UIView") && dsclass_meta) { // Cocoa's isa layout is different?
method_list_t *classmeth = dsclass_meta->ds_data()->ro->baseMethodList;
[returnString appendString:@"C ObjC Meth: "];
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", classmeth ? classmeth->count : 0, classmeth ? &classmeth->first : 0]];
}
///////////////////////////////////////////////////////////////////
[returnString appendString:@"\nRW Flags:\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_REALIZED) ? @"1" : @"0"];
[returnString appendString:@"\tRW_REALIZED\t\t\tclass is realized\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_FUTURE) ? @"1" : @"0"];
[returnString appendString:@"\tRW_FUTURE\t\t\tclass is unresolved future class\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_INITIALIZED) ? @"1" : @"0"];
[returnString appendString:@"\tRW_INITIALIZED\t\tclass is initialized\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_INITIALIZING) ? @"1" : @"0"];
[returnString appendString:@"\tRW_INITIALIZING\t\tclass is initializing\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_COPIED_RO) ? @"1" : @"0"];
[returnString appendString:@"\tRW_COPIED_RO\t\tclass_rw_t->ro is heap copy of class_ro_t\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_CONSTRUCTING) ? @"1" : @"0"];
[returnString appendString:@"\tRW_CONSTRUCTING\t\tclass allocated but not yet registered\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_CONSTRUCTED) ? @"1" : @"0"];
[returnString appendString:@"\tRW_CONSTRUCTED\t\tclass allocated and registered\n"];
[returnString appendString:@" "];
[returnString appendString:(rwflags & RW_LOADED) ? @"1" : @"0"];
[returnString appendString:@"\tRW_LOADED\t\t\tclass +load has been called\n"];
/////////////////////////////////////////////////////////////////////
[returnString appendString:@"\nRO Flags:\n"];
[returnString appendString:@" "];
[returnString appendString:(roflags & RO_META) ? @"1" : @"0"];
[returnString appendString:@"\tRO_META\t\t\t\tclass is a metaclass\n"];
[returnString appendString:@" "];
[returnString appendString: roflags & RO_ROOT ? @"1" : @"0"];
[returnString appendString:@"\tRO_ROOT\t\t\t\tclass is a root class\n"];
[returnString appendString:@" "];
[returnString appendString: roflags & RO_HAS_CXX_STRUCTORS ? @"1" : @"0"];
[returnString appendString:@"\tRO_HAS_CXX_STRUCTORS\tclass has .cxx_construct/destruct implementations\n"];
[returnString appendString:@" "];
[returnString appendString: roflags & RO_HIDDEN ? @"1": @"0"];
[returnString appendString:@"\tRO_HIDDEN\t\t\t\tclass has visibility=hidden set\n"];
[returnString appendString:@" "];
[returnString appendString:roflags & RO_EXCEPTION ? @"1" : @"0"];
[returnString appendString:@"\tRO_EXCEPTION\t\t\tclass has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak\n"];
[returnString appendString:@" "];
[returnString appendString:roflags & RO_IS_ARC ? @"1" : @"0"];
[returnString appendString:@"\tRO_IS_ARC\t\t\t\tclass compiled with ARC\n"];
[returnString appendString:@" "];
[returnString appendString:roflags & RO_HAS_CXX_DTOR_ONLY ? @"1" : @"0"];
[returnString appendString:@"\tRO_HAS_CXX_DTOR_ONLY\tclass has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)\n"];
[returnString appendString:@" "];
[returnString appendString:roflags & RO_HAS_WEAK_WITHOUT_ARC ? @"1" : @"0"];
[returnString appendString:@"\tRO_HAS_WEAK_WITHOUT_ARC\tclass is not ARC but has ARC-style weak ivar layout\n"];
[returnString appendString:@" "];
[returnString appendString:roflags & RO_FROM_BUNDLE ? @"1" : @"0"];
[returnString appendString:@"\tRO_FROM_BUNDLE\t\tclass is in an unloadable bundle - must never be set by compiler\n"];
[returnString appendString:@" "];
[returnString appendFormat:roflags & RO_FUTURE ? @"1" : @"0"];
[returnString appendFormat:@"\tRO_FUTURE\t\t\tclass is unrealized future class - must never be set by compiler\n"];
[returnString appendString:@" "];
[returnString appendFormat:roflags & RO_REALIZED ? @"1" : @"0"];
[returnString appendFormat:@"\tRO_REALIZED\t\t\tclass is realized - must never be set by compiler\n"];
}
[returnString appendFormat:@"\n@interface "];
[returnString appendString:[NSString stringWithUTF8String:(char *)name]];
[returnString appendString:@" : "];
if (superclassName) {
[returnString appendString:[NSString stringWithUTF8String:(char *)superclassName]];
}
if (bprot) {
[returnString appendString:@" <"];
for (int i = 0; i < bprot->count; i++) {
dsprotocol_t **pp = (&bprot->first);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"%s", pp[i]->name]];
if (i < (bprot->count - 1)) {
[returnString appendString:@", "];
}
}
[returnString appendString:@">"];
}
[returnString appendString:@"\n{\n"];
if (bivar) {
for (int i = 0; i < bivar->count; i++) {
ivar_t *dsiv = (ivar_t *)(&bivar->first);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %20s %-30s; offset 0x%x, 0x%x\n", (char *)dsiv[i].type, (char *)dsiv[i].name, *(int32_t *)dsiv[i].offset, (int)dsiv[i].size]];
}
}
[returnString appendString:@"}\n\n"];
if (bprops) {
for (int i = 0; i < bprops->count; i++) {
property_t *dsiv = (property_t *)(&bprops->first);
[returnString appendString:@"@property "];
[returnString appendString:[NSString stringWithUTF8String:(char *)dsiv[i].attributes]];
[returnString appendString:@" *"];
[returnString appendString:[NSString stringWithUTF8String:(char *)dsiv[i].name]];
[returnString appendString:@"\n"];
}
}
[returnString appendString:@"\n"];
if (bmeth) {
for (int i = 0; i < bmeth->count; i++) {
NSString *methodType = (BOOL)class_isMetaClass((Class)dsclass) ? @"+" : @"-";
method_t *mt = (method_t*)(&bmeth->first);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %s%40s %p\n", [methodType UTF8String], mt[i].name, mt[i].imp]];
}
if ((BOOL)class_isMetaClass((Class)dsclass) == NO) {
dsobjc_class* dsmetaclass = (dsobjc_class*)objc_getMetaClass(name);
method_list_t *bmetameth = dsmetaclass->ds_data()->ro->baseMethodList;
if (bmetameth) {
for (int i = 0; i < bmetameth->count; i++) {
NSString *methodType = (BOOL)class_isMetaClass((Class)dsmetaclass) ? @"+" : @"-";
method_t *mt = (method_t*)(&bmetameth->first);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %s%40s %p\n", [methodType UTF8String], mt[i].name, mt[i].imp]];
}
}
}
}
if (!(roflags & RO_META) && NSClassFromString(@"UIView") && dsclass_meta) { // Cocoa's isa is different? TODO
method_list_t *classmeth = dsclass_meta->ds_data()->ro->baseMethodList;
if (classmeth) {
for (int i = 0; i < classmeth->count; i++) {
method_t *mt = (method_t*)(&classmeth->first);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" +%40s %p\n", mt[i].name, mt[i].imp]];
}
}
}
if (dsclass->bits & FAST_IS_SWIFT) {
dsswift_class *dsswiftcls = (dsswift_class*)dsclass;
unsigned long methodsAddress = (unsigned long)&dsswiftcls->methods;
unsigned long endAddress = (unsigned long)dsswiftcls + dsswiftcls->classSize - dsswiftcls->classAddressPoint;
int methodCount = ((int)(endAddress - methodsAddress)) / sizeof(uintptr_t*);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"Swift methods: %d\n", methodCount]];
for (int i = 0; i < methodCount; i++) {
uintptr_t * ptr = (uintptr_t*)methodsAddress;
Dl_info dsinfo = {};
dladdr((void*)ptr[i], (Dl_info *)&dsinfo);
[returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"(%p) %s\n", ptr[i], dsinfo.dli_sname]];
}
}
[returnString appendString:@"\n"];
[returnString appendString:@"@end\n"];
returnString;
'''
return script
def generate_option_parser():
    """Build the optparse parser for the ``dump_classes`` LLDB command.

    Returns an ``optparse.OptionParser`` whose options control class
    filtering (-f/-m/-r/-t/-s/-p), output style (-v/-g/-P/-o) and
    class/protocol introspection (-l/-i/-I).

    Fixes applied: "Objectie-C" typos in the -i/-I help text, and the
    help examples for -g and -I referenced the wrong flags (-h and -i).
    """
    usage = "usage: %prog [options] /optional/path/to/executable/or/bundle"
    parser = optparse.OptionParser(usage=usage, prog="dump_classes")
    parser.add_option("-f", "--filter",
                      action="store",
                      default=None,
                      dest="filter",
                      help="List all the classes in the module that are subclasses of class. -f UIView")
    parser.add_option("-m", "--module",
                      action="store",
                      default=None,
                      dest="module",
                      help="Filter class by module. You only need to give the module name and not fullpath")
    parser.add_option("-r", "--regular_expression",
                      action="store",
                      default=None,
                      dest="regular_expression",
                      help="Search the available classes using a regular expression search")
    parser.add_option("-t", "--class_type",
                      action="store",
                      default=None,
                      dest="class_type",
                      help="Specifies the class type, only supports \"objc\" or \"swift\"")
    parser.add_option("-v", "--verbose",
                      action="store_true",
                      default=False,
                      dest="verbose",
                      help="Enables verbose mode for dumping classes. Doesn't work w/ -g or -p")
    parser.add_option("-g", "--generate_header",
                      action="store_true",
                      default=False,
                      dest="generate_header",
                      help="Generate a header for the specified class. -g UIView")
    parser.add_option("-P", "--generate_protocol",
                      action="store_true",
                      default=False,
                      dest="generate_protocol",
                      help="Generate a protocol that you can cast to any object")
    parser.add_option("-o", "--dump_code_output",
                      action="store_true",
                      default=False,
                      dest="dump_code_output",
                      help="Dump all classes and code per module, use \"__all\" to dump all ObjC modules known to proc")
    parser.add_option("-l", "--search_protocols",
                      action="store_true",
                      default=False,
                      dest="search_protocols",
                      help="Search for protocols instead of ObjC classes")
    parser.add_option("-p", "--conforms_to_protocol",
                      action="store",
                      default=None,
                      dest="conforms_to_protocol",
                      help="Only returns the classes that conforms to a particular protocol")
    parser.add_option("-s", "--superclass",
                      action="store",
                      default=None,
                      dest="superclass",
                      help="Returns only if the parent class is of type")
    parser.add_option("-i", "--info",
                      action="store",
                      default=None,
                      dest="info",
                      help="Get the info about an Objective-C class, i.e. dclass -i UIViewController")
    parser.add_option("-I", "--verbose_info",
                      action="store",
                      default=None,
                      dest="verbose_info",
                      help="Get the info about an Objective-C class, i.e. dclass -I UIViewController")
    return parser
| 55,689 | 0 | 161 |
221f73105bd9b503a9d81c9f68e5ebf08e4a3a89 | 2,362 | py | Python | src/genconfig/log.py | klihub/gen-config | 29c1b5114247190ca1101bc22c53ac28997d627d | [
"BSD-3-Clause"
] | null | null | null | src/genconfig/log.py | klihub/gen-config | 29c1b5114247190ca1101bc22c53ac28997d627d | [
"BSD-3-Clause"
] | null | null | null | src/genconfig/log.py | klihub/gen-config | 29c1b5114247190ca1101bc22c53ac28997d627d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import re, inspect
set_mask(Logger.default_levels)
| 22.495238 | 71 | 0.588908 | #!/usr/bin/env python3
import re, inspect
class Logger:
    """Leveled console logger driven entirely by class-level state.

    All configuration (the level mask and the enabled debug contexts) is
    stored on the class itself, so the logger behaves as a process-wide
    singleton.  Most methods are plain functions living in the class
    namespace and are meant to be invoked as ``Logger.<name>(...)``.

    Bug fixes: the original referenced class-body names (``log``,
    ``set_mask``, ``debug_enabled``, ``LOG_FATAL``) unqualified from inside
    method bodies, which raises NameError at call time in Python 3; the
    constructor called the undefined ``log_set_mask``; and ``fatal`` used
    ``sys`` without any import.
    """

    LOG_FATAL = 0
    LOG_ERROR = 1
    LOG_WARNING = 2
    LOG_PROGRESS = 3
    LOG_INFO = 4
    LOG_NOTE = 5
    LOG_DEBUG = 6

    # Levels enabled by default when no explicit mask has been set.
    default_levels = [LOG_WARNING, LOG_PROGRESS]
    # Context names (typically caller function names) whose debug output
    # is shown; '*' or 'all' enables every context.
    debug_contexts = []

    # Per-level prefix prepended to each emitted message.
    log_prefix = {
        LOG_FATAL: 'fatal error: ',
        LOG_ERROR: 'E: ',
        LOG_WARNING: 'W: ',
        LOG_PROGRESS: '',
        LOG_INFO: '',
        LOG_NOTE: '',
        LOG_DEBUG: 'D: '
    }

    # Fatal and error output can never be masked out.
    log_unmaskable = (1 << LOG_FATAL) | (1 << LOG_ERROR)
    log_mask = log_unmaskable | (1 << LOG_WARNING)

    def __init__(self, levels=default_levels):
        # Bug fix: this used to call the undefined name log_set_mask().
        Logger.set_mask(levels)

    def set_mask(levels):
        """Enable the given level, or list of levels, in the log mask."""
        Logger.log_mask |= Logger.log_unmaskable
        if isinstance(levels, list):
            for level in levels:
                Logger.log_mask |= 1 << level
        else:
            Logger.log_mask |= 1 << levels

    def log(level, msg):
        """Print msg with its level prefix if the level is unmasked."""
        if Logger.log_mask & (1 << level):
            prefix = Logger.log_prefix[level]
            print('%s%s' % (prefix, msg))

    def fatal(self, msg):
        """Log a fatal error message and terminate the process."""
        # Bug fix: LOG_FATAL/log() were unqualified and sys was never
        # imported at module level; import locally to keep the fix
        # self-contained.
        import sys
        Logger.log(Logger.LOG_FATAL, msg)
        sys.exit(1)

    def error(msg):
        # Bug fix (here and below): class-body names are not visible inside
        # method bodies, so helpers must be reached through the class.
        Logger.log(Logger.LOG_ERROR, msg)

    def warning(msg):
        Logger.log(Logger.LOG_WARNING, msg)

    def progress(msg):
        Logger.log(Logger.LOG_PROGRESS, msg)

    def info(msg):
        Logger.log(Logger.LOG_INFO, msg)

    def note(msg):
        Logger.log(Logger.LOG_NOTE, msg)

    def debug_enable(contexts):
        """Enable debug output for the given contexts.

        contexts may be a list of names or a comma-separated string
        (whitespace around the commas is ignored).
        """
        if isinstance(contexts, str):
            contexts = re.sub(' *', '', contexts).split(',')
        Logger.debug_contexts = list(set(Logger.debug_contexts + contexts))
        Logger.set_mask(Logger.LOG_DEBUG)

    def debug_enabled(contexts):
        """Return True if debug output is enabled for any of contexts."""
        if not Logger.log_mask & (1 << Logger.LOG_DEBUG):
            return False
        # No filtering configured: everything is enabled.
        if not contexts or not Logger.debug_contexts:
            return True
        if '*' in Logger.debug_contexts or 'all' in Logger.debug_contexts:
            return True
        for context in contexts:
            if context in Logger.debug_contexts:
                return True
        return False

    def debug(*args):
        """Emit a debug message tagged with the calling function's name.

        Accepts either (msg) or (contexts, msg); the caller's name is
        always prepended to the context list.
        """
        caller = inspect.stack()[1][3]
        # Skip over an intermediate debug() wrapper frame if present.
        if caller == 'debug':
            caller = inspect.stack()[2][3]
        if isinstance(args[0], str):
            contexts = [caller]
            msg = args[0]
        else:
            contexts = args[0]
            msg = args[1]
            contexts.insert(0, caller)
        if Logger.debug_enabled(contexts):
            Logger.log(Logger.LOG_DEBUG, '[%s] %s' % (caller, msg))
# Apply the default level mask at import time.  Bug fix: set_mask is a name
# in the Logger class namespace, not a module-level function, so the bare
# set_mask(...) call raised NameError; qualify it through the class.
Logger.set_mask(Logger.default_levels)
| 1,447 | 550 | 284 |
7cac0f1cfe59dc8f4cf2a7aaab20ea3f74774e43 | 7,660 | py | Python | circus/plugins/__init__.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | 1 | 2019-06-13T15:59:34.000Z | 2019-06-13T15:59:34.000Z | circus/plugins/__init__.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | null | null | null | circus/plugins/__init__.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | null | null | null | """ Base class to create Circus subscribers plugins.
"""
import sys
import logging
import errno
import uuid
import argparse
from circus import zmq
from zmq.eventloop import ioloop, zmqstream
from zmq.utils.jsonapi import jsonmod as json
from circus import logger, __version__
from circus.client import make_message, cast_message
from circus.util import (debuglog, to_bool, resolve_name, close_on_exec,
LOG_LEVELS, LOG_FMT, LOG_DATE_FMT,
DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB,
get_connection)
class CircusPlugin(object):
"""Base class to write plugins.
Options:
- **context** -- the ZMQ context to use
- **endpoint** -- the circusd ZMQ endpoint
- **pubsub_endpoint** -- the circusd ZMQ pub/sub endpoint
- **check_delay** -- the configured check delay
- **config** -- free config mapping
"""
name = ''
@debuglog
@debuglog
@debuglog
def call(self, command, **props):
"""Sends to **circusd** the command.
Options:
- **command** -- the command to call
- **props** -- keywords argument to add to the call
Returns the JSON mapping sent back by **circusd**
"""
msg = make_message(command, **props)
self.client.send(json.dumps(msg))
msg = self.client.recv()
return json.loads(msg)
def cast(self, command, **props):
"""Fire-and-forget a command to **circusd**
Options:
- **command** -- the command to call
- **props** -- keywords argument to add to the call
"""
msg = cast_message(command, **props)
self.client.send(json.dumps(msg))
#
# methods to override.
#
def handle_recv(self, data):
"""Receives every event published by **circusd**
Options:
- **data** -- a tuple containing the topic and the message.
"""
raise NotImplementedError()
def handle_stop(self):
"""Called right before the plugin is stopped by Circus.
"""
pass
def handle_init(self):
"""Called right befor a plugin is started - in the thread context.
"""
pass
if __name__ == '__main__':
main()
| 29.236641 | 79 | 0.570496 | """ Base class to create Circus subscribers plugins.
"""
import sys
import logging
import errno
import uuid
import argparse
from circus import zmq
from zmq.eventloop import ioloop, zmqstream
from zmq.utils.jsonapi import jsonmod as json
from circus import logger, __version__
from circus.client import make_message, cast_message
from circus.util import (debuglog, to_bool, resolve_name, close_on_exec,
LOG_LEVELS, LOG_FMT, LOG_DATE_FMT,
DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB,
get_connection)
class CircusPlugin(object):
    """Base class to write plugins.

    Options:

    - **context** -- the ZMQ context to use
    - **endpoint** -- the circusd ZMQ endpoint
    - **pubsub_endpoint** -- the circusd ZMQ pub/sub endpoint
    - **check_delay** -- the configured check delay
    - **config** -- free config mapping
    """
    name = ''

    def __init__(self, endpoint, pubsub_endpoint, check_delay, ssh_server=None,
                 **config):
        self.daemon = True
        self.config = config
        # A plugin whose config marks it inactive will refuse to start().
        self.active = to_bool(config.get('active', True))
        self.context = zmq.Context()
        self.pubsub_endpoint = pubsub_endpoint
        self.endpoint = endpoint
        self.check_delay = check_delay
        self.ssh_server = ssh_server
        self.loop = ioloop.IOLoop()
        # Random identity used for the DEALER socket connected to circusd.
        self._id = uuid.uuid4().hex  # XXX os.getpid()+thread id is enough...
        self.running = False

    @debuglog
    def initialize(self):
        # Command channel: DEALER socket to circusd, identified by self._id.
        self.client = self.context.socket(zmq.DEALER)
        self.client.setsockopt(zmq.IDENTITY, self._id)
        get_connection(self.client, self.endpoint, self.ssh_server)
        # Drop pending messages immediately on close instead of lingering.
        self.client.linger = 0
        # Event channel: SUB socket receiving only 'watcher.' topics.
        self.sub_socket = self.context.socket(zmq.SUB)
        self.sub_socket.setsockopt(zmq.SUBSCRIBE, b'watcher.')
        self.sub_socket.connect(self.pubsub_endpoint)
        # Each published event is dispatched to handle_recv via the loop.
        self.substream = zmqstream.ZMQStream(self.sub_socket, self.loop)
        self.substream.on_recv(self.handle_recv)

    @debuglog
    def start(self):
        """Connect to circusd and block in the event loop until stopped."""
        if not self.active:
            raise ValueError('Will not start an inactive plugin')
        # handle_init runs in the plugin thread, before any socket exists.
        self.handle_init()
        self.initialize()
        self.running = True
        while True:
            try:
                self.loop.start()
            except zmq.ZMQError as e:
                logger.debug(str(e))
                if e.errno == errno.EINTR:
                    # Interrupted system call: resume the loop.
                    continue
                elif e.errno == zmq.ETERM:
                    # ZMQ context terminated: shut down cleanly.
                    break
                else:
                    logger.debug("got an unexpected error %s (%s)", str(e),
                                 e.errno)
                    raise
            else:
                break
        self.client.close()
        self.sub_socket.close()

    @debuglog
    def stop(self):
        if not self.running:
            return
        try:
            self.handle_stop()
        finally:
            # Stop the loop even if the plugin's stop hook raised.
            self.loop.stop()
        self.running = False

    def call(self, command, **props):
        """Sends to **circusd** the command.

        Options:

        - **command** -- the command to call
        - **props** -- keywords argument to add to the call

        Returns the JSON mapping sent back by **circusd**

        Note: this is a blocking request/response round-trip on the
        DEALER socket.
        """
        msg = make_message(command, **props)
        self.client.send(json.dumps(msg))
        msg = self.client.recv()
        return json.loads(msg)

    def cast(self, command, **props):
        """Fire-and-forget a command to **circusd**

        Options:

        - **command** -- the command to call
        - **props** -- keywords argument to add to the call
        """
        msg = cast_message(command, **props)
        self.client.send(json.dumps(msg))

    #
    # methods to override.
    #
    def handle_recv(self, data):
        """Receives every event published by **circusd**

        Options:

        - **data** -- a tuple containing the topic and the message.
        """
        raise NotImplementedError()

    def handle_stop(self):
        """Called right before the plugin is stopped by Circus.
        """
        pass

    def handle_init(self):
        """Called right befor a plugin is started - in the thread context.
        """
        pass
def _cfg2str(cfg):
return ':::'.join(['%s:%s' % (key, val) for key, val in cfg.items()])
def _str2cfg(data):
cfg = {}
if data is None:
return cfg
for item in data.split(':::'):
item = item.split(':', 1)
if len(item) != 2:
continue
key, value = item
cfg[key.strip()] = value.strip()
return cfg
def get_plugin_cmd(config, endpoint, pubsub, check_delay, ssh_server,
                   debug=False):
    """Build the command line used to spawn a plugin subprocess.

    NOTE(review): 'use' is removed from the caller's *config* mapping in
    place (the dict is mutated) -- confirm callers do not reuse it.
    """
    fqn = config['use']
    # Fail early if the plugin class cannot be imported.
    resolve_name(fqn)
    # Serialize the remaining options for the child's --config flag.
    del config['use']
    serialized = _cfg2str(config)
    parts = ["%s -c 'from circus import plugins;plugins.main()'"
             % sys.executable]
    parts.append('--endpoint %s' % endpoint)
    parts.append('--pubsub %s' % pubsub)
    if ssh_server is not None:
        parts.append('--ssh %s' % ssh_server)
    if len(serialized) > 0:
        parts.append('--config %s' % serialized)
    if debug:
        parts.append('--log-level DEBUG')
    parts.append('%s' % fqn)
    return ' '.join(parts)
def main():
    """Command-line entry point: load a plugin class and run it.

    Parses the circusd endpoints and plugin configuration from argv,
    configures logging, instantiates the plugin and blocks in its event
    loop until interrupted.  Always terminates the process via sys.exit.
    """
    parser = argparse.ArgumentParser(description='Runs a plugin.')
    parser.add_argument('--endpoint',
                        help='The circusd ZeroMQ socket to connect to',
                        default=DEFAULT_ENDPOINT_DEALER)
    parser.add_argument('--pubsub',
                        help='The circusd ZeroMQ pub/sub socket to connect to',
                        default=DEFAULT_ENDPOINT_SUB)
    parser.add_argument('--config', help='The plugin configuration',
                        default=None)
    parser.add_argument('--version', action='store_true', default=False,
                        help='Displays Circus version and exits.')
    # typo fix: help text read "Checck delay."
    parser.add_argument('--check-delay', type=float, default=5.,
                        help='Check delay.')
    parser.add_argument('plugin',
                        help='Fully qualified name of the plugin class.',
                        nargs='?')
    parser.add_argument('--log-level', dest='loglevel', default='info',
                        help="log level")
    parser.add_argument('--log-output', dest='logoutput', default='-',
                        help="log output")
    parser.add_argument('--ssh', default=None, help='SSH Server')
    args = parser.parse_args()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.plugin is None:
        parser.print_usage()
        sys.exit(0)

    # configure the logger
    loglevel = LOG_LEVELS.get(args.loglevel.lower(), logging.INFO)
    logger.setLevel(loglevel)
    if args.logoutput == "-":
        h = logging.StreamHandler()
    else:
        h = logging.FileHandler(args.logoutput)
        # Don't leak the log file descriptor into spawned children.
        close_on_exec(h.stream.fileno())
    fmt = logging.Formatter(LOG_FMT, LOG_DATE_FMT)
    h.setFormatter(fmt)
    logger.addHandler(h)

    # load the plugin and run it.
    logger.info('Loading the plugin...')
    logger.info('Endpoint: %r' % args.endpoint)
    logger.info('Pub/sub: %r' % args.pubsub)
    plugin = resolve_name(args.plugin)(args.endpoint, args.pubsub,
                                       args.check_delay, args.ssh,
                                       **_str2cfg(args.config))
    logger.info('Starting')
    try:
        plugin.start()
    except KeyboardInterrupt:
        pass
    finally:
        # Always run the plugin's stop hook, even on an unexpected error.
        logger.info('Stopping')
        plugin.stop()
    sys.exit(0)
if __name__ == '__main__':
main()
| 5,195 | 0 | 197 |
5b5bf3f3597f272e365b329d8ae63834b3793440 | 737 | py | Python | src/globus_sdk/services/search/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/services/search/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/services/search/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import requests
from globus_sdk import exc
class SearchAPIError(exc.GlobusAPIError):
"""
Error class for the Search API client. In addition to the
inherited ``code`` and ``message`` instance variables, provides ``error_data``.
:ivar error_data: Additional object returned in the error response. May be
a dict, list, or None.
"""
# the Search API always and only returns 'message' for string messages
MESSAGE_FIELDS = ["message"]
| 29.48 | 83 | 0.660787 | import requests
from globus_sdk import exc
class SearchAPIError(exc.GlobusAPIError):
    """
    Error class for the Search API client. In addition to the
    inherited ``code`` and ``message`` instance variables, provides ``error_data``.

    :ivar error_data: Additional object returned in the error response. May be
        a dict, list, or None.
    """

    # the Search API always and only returns 'message' for string messages
    MESSAGE_FIELDS = ["message"]

    def __init__(self, r: requests.Response) -> None:
        # Default must be assigned *before* super().__init__, which parses
        # the response body and presumably invokes _load_from_json for JSON
        # errors, replacing this None with the real payload -- TODO confirm
        # call ordering against GlobusAPIError.__init__.
        self.error_data = None
        super().__init__(r)

    def _load_from_json(self, data: dict) -> None:
        # Extend the base parsing with Search's extra "error_data" field;
        # a missing key leaves error_data as None.
        super()._load_from_json(data)
        self.error_data = data.get("error_data")
| 199 | 0 | 54 |
c8eff3d49f91ab01dc95dbb9bbdf8c3c4249b42e | 428 | py | Python | desafios/des109b/teste.py | Ericssm96/python | 764d0d704be685db9e993c4b74d3df78da12cc6f | [
"MIT"
] | null | null | null | desafios/des109b/teste.py | Ericssm96/python | 764d0d704be685db9e993c4b74d3df78da12cc6f | [
"MIT"
] | null | null | null | desafios/des109b/teste.py | Ericssm96/python | 764d0d704be685db9e993c4b74d3df78da12cc6f | [
"MIT"
] | null | null | null | import des109moeda
n1 = float(input('Digite o preço: R$'))
print(f'O valor {des109moeda.moeda(n1)} dobrado é {des109moeda.dobro(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} pela metade é {des109moeda.metade(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} aumentado em 15% é {des109moeda.aumenta(n1, 15, True)}.')
print(f'O valor {des109moeda.moeda(n1)} diminuído em 15% é {des109moeda.diminui(n1, 15, True)}.')
| 47.555556 | 97 | 0.714953 | import des109moeda
n1 = float(input('Digite o preço: R$'))
print(f'O valor {des109moeda.moeda(n1)} dobrado é {des109moeda.dobro(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} pela metade é {des109moeda.metade(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} aumentado em 15% é {des109moeda.aumenta(n1, 15, True)}.')
print(f'O valor {des109moeda.moeda(n1)} diminuído em 15% é {des109moeda.diminui(n1, 15, True)}.')
| 0 | 0 | 0 |
428477cf31aa1fb45eb197b6d8d66e622343cdc7 | 2,972 | py | Python | src/env_alias/EnvAliasGenerator.py | ndejong/env-alias | 0047098ebfc3be08b426b78a5cb9e747c87c28cf | [
"BSD-2-Clause"
] | 2 | 2020-12-11T23:01:53.000Z | 2021-07-02T05:49:25.000Z | src/env_alias/EnvAliasGenerator.py | ndejong/env-alias | 0047098ebfc3be08b426b78a5cb9e747c87c28cf | [
"BSD-2-Clause"
] | null | null | null | src/env_alias/EnvAliasGenerator.py | ndejong/env-alias | 0047098ebfc3be08b426b78a5cb9e747c87c28cf | [
"BSD-2-Clause"
] | null | null | null |
from env_alias import __title__ as NAME
from env_alias import __version__ as VERSION
from env_alias.utils import logger
from env_alias.exceptions.EnvAliasException import EnvAliasException
from env_alias.utils.config import EnvAliasConfig
from env_alias.utils.content import EnvAliasContent
from env_alias.utils.selector import EnvAliasSelector
| 38.102564 | 107 | 0.632571 |
from env_alias import __title__ as NAME
from env_alias import __version__ as VERSION
from env_alias.utils import logger
from env_alias.exceptions.EnvAliasException import EnvAliasException
from env_alias.utils.config import EnvAliasConfig
from env_alias.utils.content import EnvAliasContent
from env_alias.utils.selector import EnvAliasSelector
class EnvAliasGenerator:
def __init__(self, logger_level='warning'):
logger.init(name=NAME, level=logger_level)
logger.info('{} v{}'.format(NAME, VERSION))
def main(self, configuration_file=None, no_space=False):
if type(configuration_file) is list:
configuration_file = configuration_file[0]
configuration = EnvAliasConfig(config_root=NAME).load_config(
configuration_file=configuration_file,
return_config=True
)
if not configuration:
raise EnvAliasException('Empty configuration provided')
for config_k, config_v in configuration.items():
env_name = config_k
if 'name' in config_v.keys():
env_name = config_v['name']
output_prefix = ' ' # prevents shell command history
if no_space is True:
output_prefix = ''
setting_value = self.get_setting(config_k, config_v)
if setting_value is not None:
output = '{}export "{}"="{}"'.format(output_prefix, env_name, setting_value)
logger.debug(output)
print(output)
return
return
def get_setting(self, config_key, config):
if 'value' in config.keys():
return config['value']
elif 'source' in config.keys() and config['source'][0:4] == 'http':
content, content_type = EnvAliasContent.remote(config['source'])
elif 'source' in config.keys():
content, content_type = EnvAliasContent.local(config['source'])
elif 'exec' in config.keys():
content, content_type = EnvAliasContent.execute(config['exec'])
else:
raise EnvAliasException('Configuration of env-alias item "{}" is malformed'.format(config_key))
parser = content_type
if 'parser' in config.keys():
parser = config['parser'].lower()
selector = None
if 'selector' in config.keys():
if config['selector'] is None or config['selector'] == 'null':
selector = 'none' # edge case where "selector" exists and set to none (or null)
else:
selector = config['selector']
if parser == 'ini':
return EnvAliasSelector.ini_content(content, selector)
elif parser == 'json':
return EnvAliasSelector.json_content(content, selector)
elif parser in ['yaml', 'yml']:
return EnvAliasSelector.yaml_content(content, selector)
return EnvAliasSelector.text_content(content, selector)
| 2,517 | 3 | 104 |
2f9304fc5642e22578d6f99047f4468709db07d9 | 39,178 | py | Python | sarkas/utilities/io.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
] | null | null | null | sarkas/utilities/io.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
] | null | null | null | sarkas/utilities/io.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
] | null | null | null | """
Module handling the I/O for an MD run.
"""
import csv
import pickle
import re
import sys
import yaml
from copy import copy, deepcopy
from IPython import get_ipython
from numpy import float64
from numpy import load as np_load
from numpy import savetxt, savez, zeros
from numpy.random import randint
from os import listdir, mkdir
from os.path import basename, exists, join
from pyfiglet import Figlet, print_figlet
from warnings import warn
if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
    # Jupyter Notebook frontend detected: use the notebook-aware progress bar.
    from tqdm import tqdm_notebook as tqdm
else:
    # Plain IPython or Python kernel: use the standard terminal progress bar.
    from tqdm import tqdm
# Figlet fonts from which the start-up banner font is picked at random.
FONTS = ["speed", "starwars", "graffiti", "chunky", "epic", "larry3d", "ogre"]
# Light Colors. "R;G;B" strings readable on dark (terminal) backgrounds.
LIGHT_COLORS = [
    "255;255;255",
    "13;177;75",
    "153;162;162",
    "240;133;33",
    "144;154;183",
    "209;222;63",
    "232;217;181",
    "200;154;88",
    "148;174;74",
    "203;90;40",
]
# Dark Colors. "R;G;B" strings readable on light (Jupyter) backgrounds.
DARK_COLORS = ["24;69;49", "0;129;131", "83;80;84", "110;0;95"]
class InputOutput:
    """
    Class handling the input and output functions of the MD run.

    Parameters
    ----------
    process : str
        Name of the process class containing MD run info.

    """

    electrostatic_equilibration: bool = False  # whether a magnetization phase exists
    eq_dump_dir: str = "dumps"
    equilibration_dir: str = "Equilibration"
    input_file: str = None  # MD run input file.
    job_dir: str = None
    job_id: str = None
    log_file: str = None
    mag_dump_dir: str = "dumps"
    magnetization_dir: str = "Magnetization"
    magnetized: bool = False
    preprocess_file: str = None
    preprocessing: bool = False
    preprocessing_dir: str = "PreProcessing"
    process: str = "preprocessing"
    processes_dir: str = None
    prod_dump_dir: str = "dumps"
    production_dir: str = "Production"
    # Default so file_header() works even when the YAML has no load_method entry.
    restart: bool = False
    postprocessing_dir: str = "PostProcessing"
    simulations_dir: str = "Simulations"
    simulation_dir: str = "Simulation"
    verbose: bool = False
    xyz_dir: str = None
    xyz_filename: str = None

    def __copy__(self):
        """Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__."""
        # Create a new object
        _copy = type(self)()
        # copy the dictionary
        _copy.__dict__.update(self.__dict__)
        return _copy

    def from_dict(self, input_dict: dict):
        """
        Update attributes from input dictionary.

        Parameters
        ----------
        input_dict: dict
            Dictionary to be copied.

        """
        self.__dict__.update(input_dict)

    def setup(self):
        """Create file paths and directories for the simulation."""
        self.create_file_paths()
        self.make_directories()
        self.file_header()

    def from_yaml(self, filename: str):
        """
        Parse inputs from YAML file.

        Parameters
        ----------
        filename: str
            Input YAML file.

        Returns
        -------
        dics : dict
            Content of YAML file parsed in a nested dictionary

        """
        self.input_file = filename
        with open(filename, "r") as stream:
            # FullLoader avoids arbitrary object construction; input files are
            # still assumed to be trusted configuration.
            dics = yaml.load(stream, Loader=yaml.FullLoader)
            self.__dict__.update(dics["IO"])

        if "Parameters" in dics.keys():
            keyed = "Parameters"
            for key, value in dics[keyed].items():
                if key == "verbose":
                    self.verbose = value

                if key == "magnetized":
                    self.magnetized = value

                if key == "load_method":
                    self.load_method = value
                    # Any "*restart" load method flags a restart run.
                    if value[-7:] == "restart":
                        self.restart = True
                    else:
                        self.restart = False

                if key == "preprocessing":
                    self.preprocessing = value

        if "Integrator" in dics.keys():
            keyed = "Integrator"
            for key, value in dics[keyed].items():
                if key == "electrostatic_equilibration":
                    self.electrostatic_equilibration = value

        # rdf_nbins can be defined in either Parameters or Postprocessing. However, Postprocessing will always
        # supersede Parameters choice.
        if "Observables" in dics.keys():
            for i in dics["Observables"]:
                if "RadialDistributionFunction" in i.keys():
                    dics["Parameters"]["rdf_nbins"] = i["RadialDistributionFunction"]["no_bins"]

        return dics

    def create_file_paths(self):
        """Create all directories', subdirectories', and files' paths."""

        if self.job_dir is None:
            # Fall back to the input file's stem as the job directory name.
            self.job_dir = basename(self.input_file).split(".")[0]

        if self.job_id is None:
            self.job_id = self.job_dir

        self.job_dir = join(self.simulations_dir, self.job_dir)

        # Create Processes directories
        self.processes_dir = [
            join(self.job_dir, self.preprocessing_dir),
            join(self.job_dir, self.simulation_dir),
            join(self.job_dir, self.postprocessing_dir),
        ]

        # Redundancy
        self.preprocessing_dir = self.processes_dir[0]
        self.simulation_dir = self.processes_dir[1]
        self.postprocessing_dir = self.processes_dir[2]

        # Redirect to the correct process folder
        if self.process == "preprocessing":
            indx = 0
        else:
            # Note that Postprocessing needs the link to simulation's folder
            # because that is where I look for energy files and pickle files
            indx = 1

        # Equilibration directory and sub_dir
        self.equilibration_dir = join(self.processes_dir[indx], self.equilibration_dir)
        self.eq_dump_dir = join(self.equilibration_dir, "dumps")

        # Production dir and sub_dir
        self.production_dir = join(self.processes_dir[indx], self.production_dir)
        self.prod_dump_dir = join(self.production_dir, "dumps")

        # Production phase filenames
        self.prod_energy_filename = join(self.production_dir, "ProductionEnergy_" + self.job_id + ".csv")
        self.prod_ptcls_filename = join(self.prod_dump_dir, "checkpoint_")

        # Equilibration phase filenames
        self.eq_energy_filename = join(self.equilibration_dir, "EquilibrationEnergy_" + self.job_id + ".csv")
        self.eq_ptcls_filename = join(self.eq_dump_dir, "checkpoint_")

        # Magnetic dir
        if self.electrostatic_equilibration:
            self.magnetization_dir = join(self.processes_dir[indx], self.magnetization_dir)
            self.mag_dump_dir = join(self.magnetization_dir, "dumps")

            # Magnetization phase filenames
            self.mag_energy_filename = join(self.magnetization_dir, "MagnetizationEnergy_" + self.job_id + ".csv")
            self.mag_ptcls_filename = join(self.mag_dump_dir, "checkpoint_")

        if self.process == "postprocessing":
            indx = 2  # Redirect to the correct folder

        # Log File
        if self.log_file is None:
            self.log_file = join(self.processes_dir[indx], "log_" + self.job_id + ".out")
        else:
            self.log_file = join(self.processes_dir[indx], self.log_file)

    def make_directories(self):
        """Create directories where to store MD results."""

        # Check if the directories exist
        if not exists(self.simulations_dir):
            mkdir(self.simulations_dir)

        if not exists(self.job_dir):
            mkdir(self.job_dir)

        # Create Process' directories and their subdir
        for i in self.processes_dir:
            if not exists(i):
                mkdir(i)

        # The following automatically create directories in the correct Process
        if not exists(self.equilibration_dir):
            mkdir(self.equilibration_dir)

        if not exists(self.eq_dump_dir):
            mkdir(self.eq_dump_dir)

        if not exists(self.production_dir):
            mkdir(self.production_dir)

        if not exists(self.prod_dump_dir):
            mkdir(self.prod_dump_dir)

        if self.electrostatic_equilibration:
            if not exists(self.magnetization_dir):
                mkdir(self.magnetization_dir)

            if not exists(self.mag_dump_dir):
                mkdir(self.mag_dump_dir)

        if self.preprocessing:
            if not exists(self.preprocessing_dir):
                mkdir(self.preprocessing_dir)

        if not exists(self.postprocessing_dir):
            mkdir(self.postprocessing_dir)

    def file_header(self):
        """Create the log file and print the figlet if not a restart run."""

        if not self.restart:
            with open(self.log_file, "w+") as f_log:
                figlet_obj = Figlet(font="starwars")
                print(figlet_obj.renderText("Sarkas"), file=f_log)
                print("An open-source pure-Python molecular dynamics suite for non-ideal plasmas.", file=f_log)

        # Print figlet to screen if verbose
        if self.verbose:
            self.screen_figlet()

    def simulation_summary(self, simulation):
        """
        Print out to file a summary of simulation's parameters.
        If verbose output then it will print twice: the first time to file and second time to screen.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Simulation's parameters

        """

        screen = sys.stdout
        f_log = open(self.log_file, "a+")
        repeat = 2 if self.verbose else 1

        # redirect printing to file
        sys.stdout = f_log

        # Print to file first then to screen if repeat == 2
        while repeat > 0:

            if simulation.parameters.load_method in ["production_restart", "prod_restart"]:
                print("\n\n--------------------------- Production Restart -------------------------------------")
                self.time_info(simulation)
            elif simulation.parameters.load_method in ["equilibration_restart", "eq_restart"]:
                print("\n\n------------------------ Equilibration Restart ----------------------------------")
                self.time_info(simulation)
            elif simulation.parameters.load_method in ["magnetization_restart", "mag_restart"]:
                print("\n\n------------------------ Magnetization Restart ----------------------------------")
                self.time_info(simulation)
            elif self.process == "postprocessing":
                # Header of process
                process_title = "{:^80}".format(self.process.capitalize())
                print("\n\n")
                print(*["*" for i in range(50)])
                print(process_title)
                print(*["*" for i in range(50)])

                print(f"\nJob ID: {self.job_id}")
                print(f"Job directory: {self.job_dir}")
                print(f"PostProcessing directory: \n{self.postprocessing_dir}")

                print(f"\nEquilibration dumps directory: {self.eq_dump_dir}")
                print(f"Production dumps directory: \n{self.prod_dump_dir}")

                print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
                print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")

            else:
                # Header of process
                process_title = "{:^80}".format(self.process.capitalize())
                print("\n\n")
                print(*["*" for i in range(50)])
                print(process_title)
                print(*["*" for i in range(50)])

                print(f"\nJob ID: {self.job_id}")
                print(f"Job directory: {self.job_dir}")

                # BUGFIX: these two lines used to pass a *set literal* as a second
                # positional argument to print(), which rendered the path wrapped
                # in braces. Interpolate the path into the f-string instead.
                print(f"\nEquilibration dumps directory: \n{self.eq_dump_dir}")
                print(f"Production dumps directory: \n{self.prod_dump_dir}")

                print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
                print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")

                print("\nPARTICLES:")
                print("Total No. of particles = ", simulation.parameters.total_num_ptcls)
                # The electron background, if present, does not count as a species.
                for isp, sp in enumerate(simulation.species):
                    if sp.name == "electron_background":
                        print("No. of species = ", len(simulation.species[:isp]))
                for isp, sp in enumerate(simulation.species):
                    if sp.name != "electron_background":
                        print("Species ID: {}".format(isp))
                        sp.pretty_print(simulation.potential.type, simulation.parameters.units)

                # Parameters Info
                simulation.parameters.pretty_print()
                # Potential Info
                simulation.potential.pretty_print()
                # Integrator
                simulation.integrator.pretty_print()

            repeat -= 1
            sys.stdout = screen  # Restore the original sys.stdout

        f_log.close()

    def time_stamp(self, time_stamp, t):
        """
        Print out to screen elapsed times. If verbose output, print to file first and then to screen.

        Parameters
        ----------
        time_stamp : str
            Array of time stamps.

        t : float
            Elapsed time.

        """
        screen = sys.stdout
        f_log = open(self.log_file, "a+")
        repeat = 2 if self.verbose else 1
        t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t

        # redirect printing to file
        sys.stdout = f_log

        while repeat > 0:
            if "Potential Initialization" in time_stamp:
                print("\n\n{:-^70} \n".format("Initialization Times"))
            # Sub-second runs get a fine-grained breakdown; longer runs a coarse one.
            if t_hrs == 0 and t_min == 0 and t_sec <= 2:
                print(f"\n{time_stamp} Time: {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec")
            else:
                print(f"\n{time_stamp} Time: {int(t_hrs)} hrs {int(t_min)} min {int(t_sec)} sec")

            repeat -= 1
            sys.stdout = screen

        f_log.close()

    def timing_study(self, simulation):
        """
        Info specific for timing study.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Process class containing the info to print.

        """
        screen = sys.stdout
        f_log = open(self.log_file, "a+")
        repeat = 2 if self.verbose else 1

        # redirect printing to file
        sys.stdout = f_log

        # Print to file first then to screen if repeat == 2
        while repeat > 0:

            print("\n\n------------ Conclusion ------------\n")
            print("Suggested Mesh = [ {} , {} , {} ]".format(*simulation.potential.pppm_mesh))
            print(
                "Suggested Ewald parameter alpha = {:2.4f} / a_ws = {:1.6e} ".format(
                    simulation.potential.pppm_alpha_ewald * simulation.parameters.a_ws,
                    simulation.potential.pppm_alpha_ewald,
                ),
                end="",
            )
            print("[1/cm]" if simulation.parameters.units == "cgs" else "[1/m]")
            print(
                "Suggested rcut = {:2.4f} a_ws = {:.6e} ".format(
                    simulation.potential.rc / simulation.parameters.a_ws, simulation.potential.rc
                ),
                end="",
            )
            print("[cm]" if simulation.parameters.units == "cgs" else "[m]")

            self.algorithm_info(simulation)
            repeat -= 1
            sys.stdout = screen  # Restore the original sys.stdout

        f_log.close()

    def preprocess_sizing(self, sizes):
        """Print the estimated file sizes."""

        screen = sys.stdout
        f_log = open(self.log_file, "a+")
        repeat = 2 if self.verbose else 1

        # redirect printing to file
        sys.stdout = f_log

        while repeat > 0:

            print("\n\n{:=^70} \n".format(" Filesize Estimates "))
            size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 0])
            print("\nEquilibration:\n")
            print(
                "Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
                    int(size_GB), int(size_MB), int(size_KB), int(rem)
                )
            )

            size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 1])
            print(
                "Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
                    int(size_GB), int(size_MB), int(size_KB), int(rem)
                )
            )

            if self.electrostatic_equilibration:
                print("\nMagnetization:\n")
                size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 0])
                print(
                    "Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
                        int(size_GB), int(size_MB), int(size_KB), int(rem)
                    )
                )

                size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 1])
                print(
                    "Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
                        int(size_GB), int(size_MB), int(size_KB), int(rem)
                    )
                )

            size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 0])
            print("\nProduction:\n")
            print(
                "Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
                    int(size_GB), int(size_MB), int(size_KB), int(rem)
                )
            )

            size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 1])
            print(
                "Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
                    int(size_GB), int(size_MB), int(size_KB), int(rem)
                )
            )

            size_GB, size_MB, size_KB, rem = convert_bytes(sizes[:, 1].sum())
            print(
                "\nTotal minimum needed space: {} GB {} MB {} KB {} bytes".format(
                    int(size_GB), int(size_MB), int(size_KB), int(rem)
                )
            )

            repeat -= 1
            sys.stdout = screen

        f_log.close()

    def preprocess_timing(self, str_id, t, loops):
        """Print times estimates of simulation to file first and then to screen if verbose."""
        screen = sys.stdout
        f_log = open(self.log_file, "a+")
        repeat = 2 if self.verbose else 1
        t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t

        # redirect printing to file
        sys.stdout = f_log

        while repeat > 0:
            if str_id == "header":
                print("\n\n{:=^70} \n".format(" Times Estimates "))
            elif str_id == "GF":
                print(
                    "Optimal Green's Function Time: \n"
                    "{} min {} sec {} msec {} usec {} nsec \n".format(
                        int(t_min), int(t_sec), int(t_msec), int(t_usec), int(t_nsec)
                    )
                )
            elif str_id in ["PP", "PM", "FMM"]:
                print(f"Time of {str_id} acceleration calculation averaged over {loops - 1} steps:")
                print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")

            elif str_id in ["Equilibration", "Magnetization", "Production"]:
                print(f"Time of a single {str_id} step averaged over {loops - 1} steps:")
                print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")

            if str_id == "Production":
                print("\n\n{:-^70} \n".format(" Total Estimated Times "))

            repeat -= 1
            sys.stdout = screen

        f_log.close()

    def postprocess_info(self, simulation, write_to_file=False, observable=None):
        """
        Print Post-processing info to file and/or screen in a reader-friendly format.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.PostProcess`
            PostProcess class.

        write_to_file : bool
            Flag for printing info also to file. Default= False.

        observable : str
            Observable whose info to print. Default = None.
            Choices = ['header','rdf', 'ccf', 'dsf', 'ssf', 'vd']

        Raises
        ------
        ValueError
            If ``observable`` is None or not one of the supported choices.

        """
        choices = ["header", "rdf", "ccf", "dsf", "ssf", "vd"]
        msg = (
            "Observable not defined. \n "
            "Please choose an observable from this list \n"
            "'rdf' = Radial Distribution Function, \n"
            "'ccf' = Current Correlation Function, \n"
            "'dsf' = Dynamic Structure Function, \n"
            "'ssf' = Static Structure Factor, \n"
            "'vd' = Velocity Distribution"
        )
        if observable is None:
            raise ValueError(msg)

        if observable not in choices:
            raise ValueError(msg)

        if write_to_file:
            screen = sys.stdout
            f_log = open(self.log_file, "a+")

            repeat = 2 if self.verbose else 1

            # redirect printing to file
            sys.stdout = f_log
        else:
            repeat = 1

        while repeat > 0:
            if observable == "header":
                # Header of process
                process_title = "{:^80}".format(self.process.capitalize())
                print("\n\n")
                print(*["*" for i in range(50)])
                print(process_title)
                print(*["*" for i in range(50)])

            elif observable == "rdf":
                simulation.rdf.pretty_print()
            elif observable == "ssf":
                simulation.ssf.pretty_print()
            elif observable == "dsf":
                simulation.dsf.pretty_print()
            elif observable == "ccf":
                simulation.ccf.pretty_print()
            elif observable == "vd":
                simulation.vm.setup(simulation.parameters)
                print("\nVelocity Moments:")
                print("Maximum no. of moments = {}".format(simulation.vm.max_no_moment))
                print("Maximum velocity moment = {}".format(int(2 * simulation.vm.max_no_moment)))

            repeat -= 1
            if write_to_file:
                sys.stdout = screen

        if write_to_file:
            f_log.close()

    @staticmethod
    def screen_figlet():
        """
        Print a colored figlet of Sarkas to screen.
        """
        if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
            # Assume white background in Jupyter Notebook
            clr = DARK_COLORS[randint(0, len(DARK_COLORS))]
        else:
            # Assume dark background in IPython/Python Kernel
            clr = LIGHT_COLORS[randint(0, len(LIGHT_COLORS))]
        fnt = FONTS[randint(0, len(FONTS))]
        print_figlet("\nSarkas\n", font=fnt, colors=clr)

        print("\nAn open-source pure-python molecular dynamics suite for non-ideal plasmas.\n\n")

    @staticmethod
    def time_info(simulation):
        """
        Print time simulation's parameters.

        .. deprecated::
            Use ``simulation.integrator.pretty_print()`` instead.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Process class containing the timing info and other parameters.

        """
        warn(
            "Deprecated feature. It will be removed in the v2.0.0 release.\n" "Use Integrator.pretty_print()",
            category=DeprecationWarning,
        )
        simulation.integrator.pretty_print()

    @staticmethod
    def algorithm_info(simulation):
        """
        Print algorithm information.

        .. deprecated::
            Use ``simulation.potential.method_pretty_print()`` instead.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Process class containing the algorithm info and other parameters.

        """
        warn(
            "Deprecated feature. It will be removed in the v2.0.0 release. Use potential.method_pretty_print()",
            category=DeprecationWarning,
        )
        simulation.potential.method_pretty_print()

    @staticmethod
    def potential_info(simulation):
        """
        Print potential information.

        .. deprecated::
            Use ``simulation.potential.pot_pretty_print()`` instead.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Process class containing the potential info and other parameters.

        """
        warn(
            "Deprecated feature. It will be removed in the v2.0.0 release. Use potential.pot_pretty_print()",
            category=DeprecationWarning,
        )
        simulation.potential.pot_pretty_print(simulation.potential)

    def copy_params(self, params):
        """
        Copy necessary parameters.

        Parameters
        ----------
        params: :class:`sarkas.core.Parameters`
            Simulation's parameters.

        """
        self.dt = params.dt
        self.a_ws = params.a_ws
        self.total_num_ptcls = params.total_num_ptcls
        self.total_plasma_frequency = params.total_plasma_frequency
        self.species_names = params.species_names.copy()
        self.coupling = params.coupling_constant * params.T_desired

    def setup_checkpoint(self, params):
        """
        Assign attributes needed for saving dumps.

        Parameters
        ----------
        params : :class:`sarkas.core.Parameters`
            General simulation parameters.

        """
        self.copy_params(params)
        # Check whether energy files exist already
        if not exists(self.prod_energy_filename):
            # Create the Energy file
            dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
            if len(self.species_names) > 1:
                for i, sp_name in enumerate(self.species_names):
                    dkeys.append("{} Kinetic Energy".format(sp_name))
                    dkeys.append("{} Potential Energy".format(sp_name))
                    dkeys.append("{} Temperature".format(sp_name))
            data = dict.fromkeys(dkeys)

            with open(self.prod_energy_filename, "w+") as f:
                w = csv.writer(f)
                w.writerow(data.keys())

        if not exists(self.eq_energy_filename) and not params.load_method[-7:] == "restart":
            # Create the Energy file
            dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
            if len(self.species_names) > 1:
                for i, sp_name in enumerate(self.species_names):
                    dkeys.append("{} Kinetic Energy".format(sp_name))
                    dkeys.append("{} Potential Energy".format(sp_name))
                    dkeys.append("{} Temperature".format(sp_name))
            data = dict.fromkeys(dkeys)

            with open(self.eq_energy_filename, "w+") as f:
                w = csv.writer(f)
                w.writerow(data.keys())

        if self.electrostatic_equilibration:
            if not exists(self.mag_energy_filename) and not params.load_method[-7:] == "restart":
                # Create the Energy file
                dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
                if len(self.species_names) > 1:
                    for i, sp_name in enumerate(self.species_names):
                        dkeys.append("{} Kinetic Energy".format(sp_name))
                        dkeys.append("{} Potential Energy".format(sp_name))
                        dkeys.append("{} Temperature".format(sp_name))
                data = dict.fromkeys(dkeys)

                with open(self.mag_energy_filename, "w+") as f:
                    w = csv.writer(f)
                    w.writerow(data.keys())

    def save_pickle(self, simulation):
        """
        Save all simulations parameters in pickle files.

        Parameters
        ----------
        simulation : :class:`sarkas.processes.Process`
            Process class containing MD run info to save.

        """
        file_list = ["parameters", "integrator", "potential", "species"]

        # Redirect to the correct process folder
        if self.process == "preprocessing":
            indx = 0
        else:
            # Note that Postprocessing needs the link to simulation's folder
            # because that is where I look for energy files and pickle files
            indx = 1

        for fl in file_list:
            filename = join(self.processes_dir[indx], fl + ".pickle")
            with open(filename, "wb") as pickle_file:
                pickle.dump(simulation.__dict__[fl], pickle_file)
                pickle_file.close()

    def read_pickle(self, process):
        """
        Read pickle files containing all the simulation information.

        Parameters
        ----------
        process : :class:`sarkas.processes.Process`
            Process class containing MD run info to save.

        """
        file_list = ["parameters", "integrator", "potential"]

        # Redirect to the correct process folder
        if self.process == "preprocessing":
            indx = 0
        else:
            # Note that Postprocessing needs the link to simulation's folder
            # because that is where I look for energy files and pickle files
            indx = 1

        for fl in file_list:
            filename = join(self.processes_dir[indx], fl + ".pickle")
            with open(filename, "rb") as handle:
                data = pickle.load(handle)
            process.__dict__[fl] = copy(data)

        # Read species
        filename = join(self.processes_dir[indx], "species.pickle")
        with open(filename, "rb") as handle:
            data = pickle.load(handle)
        process.species = copy(data)

    def read_pickle_single(self, class_to_read: str):
        """
        Read the desired pickle file.

        Parameters
        ----------
        class_to_read : str
            Name of the class to read.

        Returns
        -------
        _copy : cls
            Copy of desired class.

        """
        # Redirect to the correct process folder
        if self.process == "preprocessing":
            indx = 0
        else:
            # Note that Postprocessing needs the link to simulation's folder
            # because that is where I look for energy files and pickle files
            indx = 1
        filename = join(self.processes_dir[indx], class_to_read + ".pickle")
        with open(filename, "rb") as pickle_file:
            data = pickle.load(pickle_file)
            _copy = deepcopy(data)

        return _copy

    def dump(self, phase, ptcls, it):
        """
        Save particles' data to binary file for future restart.

        Parameters
        ----------
        phase : str
            Simulation phase. One of 'production', 'equilibration', 'magnetization'.

        ptcls : :class:`sarkas.particles.Particles`
            Particles data.

        it : int
            Timestep number.

        Raises
        ------
        ValueError
            If ``phase`` is not a recognized simulation phase.

        """
        if phase == "production":
            ptcls_file = self.prod_ptcls_filename + str(it)
            tme = it * self.dt
            savez(
                ptcls_file,
                id=ptcls.id,
                names=ptcls.names,
                pos=ptcls.pos,
                vel=ptcls.vel,
                acc=ptcls.acc,
                cntr=ptcls.pbc_cntr,
                rdf_hist=ptcls.rdf_hist,
                virial=ptcls.virial,
                time=tme,
            )

            energy_file = self.prod_energy_filename

        elif phase == "equilibration":
            ptcls_file = self.eq_ptcls_filename + str(it)
            tme = it * self.dt
            savez(
                ptcls_file,
                id=ptcls.id,
                names=ptcls.names,
                pos=ptcls.pos,
                vel=ptcls.vel,
                acc=ptcls.acc,
                virial=ptcls.virial,
                time=tme,
            )

            energy_file = self.eq_energy_filename

        elif phase == "magnetization":
            ptcls_file = self.mag_ptcls_filename + str(it)
            tme = it * self.dt
            savez(
                ptcls_file,
                id=ptcls.id,
                names=ptcls.names,
                pos=ptcls.pos,
                vel=ptcls.vel,
                acc=ptcls.acc,
                virial=ptcls.virial,
                time=tme,
            )

            energy_file = self.mag_energy_filename

        else:
            # Guard against an unbound energy_file/ptcls_file (was a NameError).
            raise ValueError(
                "Unknown phase '{}'. Expected 'production', 'equilibration' or 'magnetization'.".format(phase)
            )

        kinetic_energies, temperatures = ptcls.kinetic_temperature()
        potential_energies = ptcls.potential_energies()
        # Save Energy data. NOTE: only the values are written; the column header
        # was created once in setup_checkpoint().
        data = {
            "Time": it * self.dt,
            "Total Energy": kinetic_energies.sum() + ptcls.potential_energy,
            "Total Kinetic Energy": kinetic_energies.sum(),
            "Potential Energy": ptcls.potential_energy,
            "Total Temperature": ptcls.species_num.transpose() @ temperatures / ptcls.total_num_ptcls,
        }
        if len(temperatures) > 1:
            for sp, kin in enumerate(kinetic_energies):
                data[f"{self.species_names[sp]} Kinetic Energy"] = kin
                data[f"{self.species_names[sp]} Potential Energy"] = potential_energies[sp]
                data[f"{self.species_names[sp]} Temperature"] = temperatures[sp]
        with open(energy_file, "a") as f:
            w = csv.writer(f)
            w.writerow(data.values())

    def dump_xyz(self, phase: str = "production"):
        """
        Save the XYZ file by reading Sarkas dumps.

        Parameters
        ----------
        phase : str
            Phase from which to read dumps. 'equilibration' or 'production'.

        """

        if phase == "equilibration":
            self.xyz_filename = join(self.equilibration_dir, "pva_" + self.job_id + ".xyz")
            dump_dir = self.eq_dump_dir

        else:
            self.xyz_filename = join(self.production_dir, "pva_" + self.job_id + ".xyz")
            dump_dir = self.prod_dump_dir

        f_xyz = open(self.xyz_filename, "w+")

        if not hasattr(self, "a_ws"):
            params = self.read_pickle_single("parameters")
            self.a_ws = params.a_ws
            self.total_num_ptcls = params.total_num_ptcls
            self.total_plasma_frequency = params.total_plasma_frequency

        # Rescale constants. This is needed since OVITO has a small number limit.
        pscale = 1.0 / self.a_ws
        vscale = 1.0 / (self.a_ws * self.total_plasma_frequency)
        ascale = 1.0 / (self.a_ws * self.total_plasma_frequency**2)

        # Read the list of dumps and sort them in the correct (natural) order
        dumps = listdir(dump_dir)
        dumps.sort(key=num_sort)
        for dump in tqdm(dumps, disable=not self.verbose):
            data = self.read_npz(dump_dir, dump)
            data["pos_x"] *= pscale
            data["pos_y"] *= pscale
            data["pos_z"] *= pscale

            data["vel_x"] *= vscale
            data["vel_y"] *= vscale
            data["vel_z"] *= vscale

            data["acc_x"] *= ascale
            data["acc_y"] *= ascale
            data["acc_z"] *= ascale

            f_xyz.writelines("{0:d}\n".format(self.total_num_ptcls))
            f_xyz.writelines("name x y z vx vy vz ax ay az\n")
            savetxt(f_xyz, data, fmt="%s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")

        f_xyz.close()

    @staticmethod
    def read_npz(fldr: str, filename: str):
        """
        Load particles' data from dumps.

        Parameters
        ----------
        fldr : str
            Folder containing dumps.

        filename: str
            Name of the dump file to load.

        Returns
        -------
        struct_array : numpy.ndarray
            Structured data array.

        """

        file_name = join(fldr, filename)
        data = np_load(file_name, allow_pickle=True)
        # Dev Notes: the old way of saving the xyz file by
        # savetxt(f_xyz, np.c_[data["names"],data["pos"] ....]
        # , fmt="%10s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")
        # was not working, because the columns of np.c_[] all have the same data type <U32
        # which is in conflict with the desired fmt. i.e. data["names"] was not recognized as a string.
        # So I have to create a new structured array and pass this. I could not think of a more Pythonic way.
        struct_array = zeros(
            data["names"].size,
            dtype=[
                ("names", "U6"),
                ("pos_x", float64),
                ("pos_y", float64),
                ("pos_z", float64),
                ("vel_x", float64),
                ("vel_y", float64),
                ("vel_z", float64),
                ("acc_x", float64),
                ("acc_y", float64),
                ("acc_z", float64),
            ],
        )
        struct_array["names"] = data["names"]
        struct_array["pos_x"] = data["pos"][:, 0]
        struct_array["pos_y"] = data["pos"][:, 1]
        struct_array["pos_z"] = data["pos"][:, 2]

        struct_array["vel_x"] = data["vel"][:, 0]
        struct_array["vel_y"] = data["vel"][:, 1]
        struct_array["vel_z"] = data["vel"][:, 2]

        struct_array["acc_x"] = data["acc"][:, 0]
        struct_array["acc_y"] = data["acc"][:, 1]
        struct_array["acc_z"] = data["acc"][:, 2]

        return struct_array
def alpha_to_int(text):
    """Convert strings of numbers into integers.

    Parameters
    ----------
    text : str
        Text to be converted into an int, if `text` is a number.

    Returns
    -------
    int or str
        The integer value when `text` is all digits, otherwise `text` unchanged.
    """
    if text.isdigit():
        return int(text)
    return text
def num_sort(text):
    """
    Natural-sort key: split a string into alternating text and integer chunks.

    Parameters
    ----------
    text : str
        Text to be split into str and int pieces.

    Returns
    -------
    list
        List containing text chunks and integers, suitable as a `key=` for
        sorting names such as 'checkpoint_2' before 'checkpoint_10'.

    Notes
    -----
    Function adapted from
    https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside.
    Originally from http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    # Capturing group keeps the digit runs in the split result.
    chunks = re.split(r"(\d+)", text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def convert_bytes(tot_bytes):
    """Convert bytes to human-readable GB, MB, KB.

    Parameters
    ----------
    tot_bytes : int
        Total number of bytes.

    Returns
    -------
    list
        ``[GB, MB, KB, rem]`` — the byte count decomposed into binary
        giga/mega/kilobytes plus the leftover bytes.
    """
    parts = []
    remainder = tot_bytes
    # 2**30 = GB, 2**20 = MB, 2**10 = KB.
    for power in (30, 20, 10):
        amount, remainder = divmod(remainder, 1 << power)
        parts.append(amount)
    parts.append(remainder)
    return parts
| 34.793961 | 125 | 0.550105 | """
Module handling the I/O for an MD run.
"""
import csv
import pickle
import re
import sys
import yaml
from copy import copy, deepcopy
from IPython import get_ipython
from numpy import float64
from numpy import load as np_load
from numpy import savetxt, savez, zeros
from numpy.random import randint
from os import listdir, mkdir
from os.path import basename, exists, join
from pyfiglet import Figlet, print_figlet
from warnings import warn
if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
# If you are using Jupyter Notebook
from tqdm import tqdm_notebook as tqdm
else:
# If you are using IPython or Python kernel
from tqdm import tqdm
# Figlet font names from which screen_figlet() picks one at random for the banner.
FONTS = ["speed", "starwars", "graffiti", "chunky", "epic", "larry3d", "ogre"]
# Light Colors. "R;G;B" strings passed to pyfiglet's print_figlet; used on the
# dark background assumed for IPython/Python kernels (see screen_figlet).
LIGHT_COLORS = [
    "255;255;255",
    "13;177;75",
    "153;162;162",
    "240;133;33",
    "144;154;183",
    "209;222;63",
    "232;217;181",
    "200;154;88",
    "148;174;74",
    "203;90;40",
]
# Dark Colors. Used on the white background assumed for Jupyter notebooks.
DARK_COLORS = ["24;69;49", "0;129;131", "83;80;84", "110;0;95"]
class InputOutput:
    """
    Class handling the input and output functions of the MD run.

    Parameters
    ----------
    process : str
        Name of the process class containing MD run info.
    """

    # Class-level defaults. The relative folder names below are replaced with
    # full joined paths by create_file_paths().
    electrostatic_equilibration: bool = False  # True enables the extra magnetization phase
    eq_dump_dir: str = "dumps"  # equilibration checkpoint subfolder
    equilibration_dir: str = "Equilibration"
    input_file: str = None  # MD run input file.
    job_dir: str = None  # defaults to the input file's base name
    job_id: str = None  # defaults to job_dir
    log_file: str = None
    mag_dump_dir: str = "dumps"  # magnetization checkpoint subfolder
    magnetization_dir: str = "Magnetization"
    magnetized: bool = False
    preprocess_file: str = None
    preprocessing: bool = False
    preprocessing_dir: str = "PreProcessing"
    process: str = "preprocessing"  # which process owns this handler
    processes_dir: str = None  # becomes the list [pre, sim, post] of process folders
    prod_dump_dir: str = "dumps"  # production checkpoint subfolder
    production_dir: str = "Production"
    postprocessing_dir: str = "PostProcessing"
    simulations_dir: str = "Simulations"  # root folder holding every job
    simulation_dir: str = "Simulation"
    verbose: bool = False  # also echo log output to screen
    xyz_dir: str = None
    xyz_filename: str = None  # set by dump_xyz()
def __init__(self, process: str = "preprocess"):
    # Which process owns this I/O handler. NOTE(review): the default here is
    # "preprocess" while the class attribute default and the comparisons in
    # create_file_paths()/save_pickle() use "preprocessing" — confirm which
    # spelling callers pass.
    self.process = process
def __repr__(self):
sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
disp = "InputOuput( \n"
for key, value in sortedDict.items():
disp += "\t{} : {}\n".format(key, value)
disp += ")"
return disp
def __copy__(self):
"""Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__."""
# Create a new object
_copy = type(self)()
# copy the dictionary
_copy.__dict__.update(self.__dict__)
return _copy
def from_dict(self, input_dict: dict):
"""
Update attributes from input dictionary.
Parameters
----------
input_dict: dict
Dictionary to be copied.
"""
self.__dict__.update(input_dict)
def setup(self):
    """Create file paths and directories for the simulation.

    Convenience wrapper: builds every path (create_file_paths), creates the
    folder tree (make_directories), then writes the log header (file_header).
    """
    self.create_file_paths()
    self.make_directories()
    self.file_header()
def from_yaml(self, filename: str):
    """
    Parse inputs from YAML file.

    Parameters
    ----------
    filename: str
        Input YAML file.

    Returns
    -------
    dics : dict
        Content of YAML file parsed in a nested dictionary.
    """
    self.input_file = filename
    with open(filename, "r") as stream:
        dics = yaml.load(stream, Loader=yaml.FullLoader)
        self.__dict__.update(dics["IO"])
    # Mirror a few Parameters entries onto this object; independent `if`s
    # (not elif) because several keys may appear in the same section.
    if "Parameters" in dics.keys():
        keyed = "Parameters"
        for key, value in dics[keyed].items():
            if key == "verbose":
                self.verbose = value
            if key == "magnetized":
                self.magnetized = value
            if key == "load_method":
                self.load_method = value
                # A value ending in "restart" (e.g. "prod_restart") marks a restart run.
                if value[-7:] == "restart":
                    self.restart = True
                else:
                    self.restart = False
            if key == "preprocessing":
                self.preprocessing = value
    if "Integrator" in dics.keys():
        keyed = "Integrator"
        for key, value in dics[keyed].items():
            if key == "electrostatic_equilibration":
                self.electrostatic_equilibration = value
    # rdf_nbins can be defined in either Parameters or Postprocessing. However, Postprocessing will always
    # supersede Parameters choice.
    if "Observables" in dics.keys():
        for i in dics["Observables"]:
            if "RadialDistributionFunction" in i.keys():
                dics["Parameters"]["rdf_nbins"] = i["RadialDistributionFunction"]["no_bins"]
    return dics
def create_file_paths(self):
    """Create all directories', subdirectories', and files' paths.

    Mutates the relative folder-name attributes set at class level into full
    joined paths; order matters because later joins reuse earlier results.
    """
    # Default the job folder and ID to the input file's base name.
    if self.job_dir is None:
        self.job_dir = basename(self.input_file).split(".")[0]
    if self.job_id is None:
        self.job_id = self.job_dir
    self.job_dir = join(self.simulations_dir, self.job_dir)
    # Create Processes directories
    self.processes_dir = [
        join(self.job_dir, self.preprocessing_dir),
        join(self.job_dir, self.simulation_dir),
        join(self.job_dir, self.postprocessing_dir),
    ]
    # Redundancy
    self.preprocessing_dir = self.processes_dir[0]
    self.simulation_dir = self.processes_dir[1]
    self.postprocessing_dir = self.processes_dir[2]
    # Redirect to the correct process folder
    if self.process == "preprocessing":
        indx = 0
    else:
        # Note that Postprocessing needs the link to simulation's folder
        # because that is where I look for energy files and pickle files
        indx = 1
    # Equilibration directory and sub_dir
    self.equilibration_dir = join(self.processes_dir[indx], self.equilibration_dir)
    self.eq_dump_dir = join(self.equilibration_dir, "dumps")
    # Production dir and sub_dir
    self.production_dir = join(self.processes_dir[indx], self.production_dir)
    self.prod_dump_dir = join(self.production_dir, "dumps")
    # Production phase filenames
    self.prod_energy_filename = join(self.production_dir, "ProductionEnergy_" + self.job_id + ".csv")
    self.prod_ptcls_filename = join(self.prod_dump_dir, "checkpoint_")
    # Equilibration phase filenames
    self.eq_energy_filename = join(self.equilibration_dir, "EquilibrationEnergy_" + self.job_id + ".csv")
    self.eq_ptcls_filename = join(self.eq_dump_dir, "checkpoint_")
    # Magnetic dir
    if self.electrostatic_equilibration:
        self.magnetization_dir = join(self.processes_dir[indx], self.magnetization_dir)
        self.mag_dump_dir = join(self.magnetization_dir, "dumps")
        # Magnetization phase filenames
        self.mag_energy_filename = join(self.magnetization_dir, "MagnetizationEnergy_" + self.job_id + ".csv")
        self.mag_ptcls_filename = join(self.mag_dump_dir, "checkpoint_")
    # Only the log file lives in the PostProcessing folder for that process.
    if self.process == "postprocessing":
        indx = 2  # Redirect to the correct folder
    # Log File
    if self.log_file is None:
        self.log_file = join(self.processes_dir[indx], "log_" + self.job_id + ".out")
    else:
        self.log_file = join(self.processes_dir[indx], self.log_file)
def make_directories(self):
"""Create directories where to store MD results."""
# Check if the directories exist
if not exists(self.simulations_dir):
mkdir(self.simulations_dir)
if not exists(self.job_dir):
mkdir(self.job_dir)
# Create Process' directories and their subdir
for i in self.processes_dir:
if not exists(i):
mkdir(i)
# The following automatically create directories in the correct Process
if not exists(self.equilibration_dir):
mkdir(self.equilibration_dir)
if not exists(self.eq_dump_dir):
mkdir(self.eq_dump_dir)
if not exists(self.production_dir):
mkdir(self.production_dir)
if not exists(self.prod_dump_dir):
mkdir(self.prod_dump_dir)
if self.electrostatic_equilibration:
if not exists(self.magnetization_dir):
mkdir(self.magnetization_dir)
if not exists(self.mag_dump_dir):
mkdir(self.mag_dump_dir)
if self.preprocessing:
if not exists(self.preprocessing_dir):
mkdir(self.preprocessing_dir)
if not exists(self.postprocessing_dir):
mkdir(self.postprocessing_dir)
def file_header(self):
"""Create the log file and print the figlet if not a restart run."""
if not self.restart:
with open(self.log_file, "w+") as f_log:
figlet_obj = Figlet(font="starwars")
print(figlet_obj.renderText("Sarkas"), file=f_log)
print("An open-source pure-Python molecular dynamics suite for non-ideal plasmas.", file=f_log)
# Print figlet to screen if verbose
if self.verbose:
self.screen_figlet()
def simulation_summary(self, simulation):
"""
Print out to file a summary of simulation's parameters.
If verbose output then it will print twice: the first time to file and second time to screen.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Simulation's parameters
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
# Print to file first then to screen if repeat == 2
while repeat > 0:
if simulation.parameters.load_method in ["production_restart", "prod_restart"]:
print("\n\n--------------------------- Production Restart -------------------------------------")
self.time_info(simulation)
elif simulation.parameters.load_method in ["equilibration_restart", "eq_restart"]:
print("\n\n------------------------ Equilibration Restart ----------------------------------")
self.time_info(simulation)
elif simulation.parameters.load_method in ["magnetization_restart", "mag_restart"]:
print("\n\n------------------------ Magnetization Restart ----------------------------------")
self.time_info(simulation)
elif self.process == "postprocessing":
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
print(f"\nJob ID: {self.job_id}")
print(f"Job directory: {self.job_dir}")
print(f"PostProcessing directory: \n{self.postprocessing_dir}")
print(f"\nEquilibration dumps directory: {self.eq_dump_dir}")
print(f"Production dumps directory: \n{self.prod_dump_dir}")
print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")
else:
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
print(f"\nJob ID: {self.job_id}")
print(f"Job directory: {self.job_dir}")
print(f"\nEquilibration dumps directory: \n", {self.eq_dump_dir})
print(f"Production dumps directory: \n", {self.prod_dump_dir})
print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")
print("\nPARTICLES:")
print("Total No. of particles = ", simulation.parameters.total_num_ptcls)
for isp, sp in enumerate(simulation.species):
if sp.name == "electron_background":
sp_index = isp
print("No. of species = ", len(simulation.species[:isp]))
for isp, sp in enumerate(simulation.species):
if sp.name != "electron_background":
print("Species ID: {}".format(isp))
sp.pretty_print(simulation.potential.type, simulation.parameters.units)
# Parameters Info
simulation.parameters.pretty_print()
# Potential Info
simulation.potential.pretty_print()
# Integrator
simulation.integrator.pretty_print()
repeat -= 1
sys.stdout = screen # Restore the original sys.stdout
f_log.close()
def time_stamp(self, time_stamp, t):
"""
Print out to screen elapsed times. If verbose output, print to file first and then to screen.
Parameters
----------
time_stamp : str
Array of time stamps.
t : float
Elapsed time.
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
if "Potential Initialization" in time_stamp:
print("\n\n{:-^70} \n".format("Initialization Times"))
if t_hrs == 0 and t_min == 0 and t_sec <= 2:
print(f"\n{time_stamp} Time: {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec")
else:
print(f"\n{time_stamp} Time: {int(t_hrs)} hrs {int(t_min)} min {int(t_sec)} sec")
repeat -= 1
sys.stdout = screen
f_log.close()
def timing_study(self, simulation):
"""
Info specific for timing study.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the info to print.
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
# Print to file first then to screen if repeat == 2
while repeat > 0:
print("\n\n------------ Conclusion ------------\n")
print("Suggested Mesh = [ {} , {} , {} ]".format(*simulation.potential.pppm_mesh))
print(
"Suggested Ewald parameter alpha = {:2.4f} / a_ws = {:1.6e} ".format(
simulation.potential.pppm_alpha_ewald * simulation.parameters.a_ws,
simulation.potential.pppm_alpha_ewald,
),
end="",
)
print("[1/cm]" if simulation.parameters.units == "cgs" else "[1/m]")
print(
"Suggested rcut = {:2.4f} a_ws = {:.6e} ".format(
simulation.potential.rc / simulation.parameters.a_ws, simulation.potential.rc
),
end="",
)
print("[cm]" if simulation.parameters.units == "cgs" else "[m]")
self.algorithm_info(simulation)
repeat -= 1
sys.stdout = screen # Restore the original sys.stdout
f_log.close()
def preprocess_sizing(self, sizes):
"""Print the estimated file sizes."""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
print("\n\n{:=^70} \n".format(" Filesize Estimates "))
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 0])
print("\nEquilibration:\n")
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
if self.electrostatic_equilibration:
print("\nMagnetization:\n")
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 0])
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 0])
print("\nProduction:\n")
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[:, 1].sum())
print(
"\nTotal minimum needed space: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
repeat -= 1
sys.stdout = screen
f_log.close()
def preprocess_timing(self, str_id, t, loops):
"""Print times estimates of simulation to file first and then to screen if verbose."""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
if str_id == "header":
print("\n\n{:=^70} \n".format(" Times Estimates "))
elif str_id == "GF":
print(
"Optimal Green's Function Time: \n"
"{} min {} sec {} msec {} usec {} nsec \n".format(
int(t_min), int(t_sec), int(t_msec), int(t_usec), int(t_nsec)
)
)
elif str_id in ["PP", "PM", "FMM"]:
print(f"Time of {str_id} acceleration calculation averaged over {loops - 1} steps:")
print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")
elif str_id in ["Equilibration", "Magnetization", "Production"]:
print(f"Time of a single {str_id} step averaged over {loops - 1} steps:")
print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")
if str_id == "Production":
print("\n\n{:-^70} \n".format(" Total Estimated Times "))
repeat -= 1
sys.stdout = screen
f_log.close()
def postprocess_info(self, simulation, write_to_file=False, observable=None):
"""
Print Post-processing info to file and/or screen in a reader-friendly format.
Parameters
----------
simulation : :class:`sarkas.processes.PostProcess`
PostProcess class.
write_to_file : bool
Flag for printing info also to file. Default= False.
observable : str
Observable whose info to print. Default = None.
Choices = ['header','rdf', 'ccf', 'dsf', 'ssf', 'vm']
"""
choices = ["header", "rdf", "ccf", "dsf", "ssf", "vd"]
msg = (
"Observable not defined. \n "
"Please choose an observable from this list \n"
"'rdf' = Radial Distribution Function, \n"
"'ccf' = Current Correlation Function, \n"
"'dsf' = Dynamic Structure Function, \n"
"'ssf' = Static Structure Factor, \n"
"'vd' = Velocity Distribution"
)
if observable is None:
raise ValueError(msg)
if observable not in choices:
raise ValueError(msg)
if write_to_file:
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
else:
repeat = 1
while repeat > 0:
if observable == "header":
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
elif observable == "rdf":
simulation.rdf.pretty_print()
elif observable == "ssf":
simulation.ssf.pretty_print()
elif observable == "dsf":
simulation.dsf.pretty_print()
elif observable == "ccf":
simulation.ccf.pretty_print()
elif observable == "vd":
simulation.vm.setup(simulation.parameters)
print("\nVelocity Moments:")
print("Maximum no. of moments = {}".format(simulation.vm.max_no_moment))
print("Maximum velocity moment = {}".format(int(2 * simulation.vm.max_no_moment)))
repeat -= 1
if write_to_file:
sys.stdout = screen
if write_to_file:
f_log.close()
@staticmethod
def screen_figlet():
"""
Print a colored figlet of Sarkas to screen.
"""
if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
# Assume white background in Jupyter Notebook
clr = DARK_COLORS[randint(0, len(DARK_COLORS))]
else:
# Assume dark background in IPython/Python Kernel
clr = LIGHT_COLORS[randint(0, len(LIGHT_COLORS))]
fnt = FONTS[randint(0, len(FONTS))]
print_figlet("\nSarkas\n", font=fnt, colors=clr)
print("\nAn open-source pure-python molecular dynamics suite for non-ideal plasmas.\n\n")
@staticmethod
def time_info(simulation):
"""
Print time simulation's parameters.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the timing info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release.\n" "Use Integrator.pretty_print()",
category=DeprecationWarning,
)
simulation.integrator.pretty_print()
@staticmethod
def algorithm_info(simulation):
"""
Print algorithm information.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the algorithm info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. Use potential.method_pretty_print()",
category=DeprecationWarning,
)
simulation.potential.method_pretty_print()
@staticmethod
def potential_info(simulation):
"""
Print potential information.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the potential info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. Use potential.pot_pretty_print()",
category=DeprecationWarning,
)
simulation.potential.pot_pretty_print(simulation.potential)
def copy_params(self, params):
"""
Copy necessary parameters.
Parameters
----------
params: :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
self.dt = params.dt
self.a_ws = params.a_ws
self.total_num_ptcls = params.total_num_ptcls
self.total_plasma_frequency = params.total_plasma_frequency
self.species_names = params.species_names.copy()
self.coupling = params.coupling_constant * params.T_desired
def setup_checkpoint(self, params):
"""
Assign attributes needed for saving dumps.
Parameters
----------
params : :class:`sarkas.core.Parameters`
General simulation parameters.
species : :class:`sarkas.plasma.Species`
List of Species classes.
"""
self.copy_params(params)
# Check whether energy files exist already
if not exists(self.prod_energy_filename):
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.prod_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
if not exists(self.eq_energy_filename) and not params.load_method[-7:] == "restart":
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.eq_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
if self.electrostatic_equilibration:
if not exists(self.mag_energy_filename) and not params.load_method[-7:] == "restart":
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.mag_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
def save_pickle(self, simulation):
"""
Save all simulations parameters in pickle files.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing MD run info to save.
"""
file_list = ["parameters", "integrator", "potential", "species"]
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
for fl in file_list:
filename = join(self.processes_dir[indx], fl + ".pickle")
with open(filename, "wb") as pickle_file:
pickle.dump(simulation.__dict__[fl], pickle_file)
pickle_file.close()
def read_pickle(self, process):
    """
    Read pickle files containing all the simulation information.

    Parameters
    ----------
    process : :class:`sarkas.processes.Process`
        Process class whose attributes are repopulated from the pickles.
    """
    file_list = ["parameters", "integrator", "potential"]
    # Redirect to the correct process folder
    if self.process == "preprocessing":
        indx = 0
    else:
        # Note that Postprocessing needs the link to simulation's folder
        # because that is where I look for energy files and pickle files
        indx = 1
    for fl in file_list:
        filename = join(self.processes_dir[indx], fl + ".pickle")
        with open(filename, "rb") as handle:
            data = pickle.load(handle)
        # Shallow copy so the process owns its own top-level object.
        process.__dict__[fl] = copy(data)
    # Read species
    filename = join(self.processes_dir[indx], "species.pickle")
    process.species = []
    with open(filename, "rb") as handle:
        data = pickle.load(handle)
    process.species = copy(data)
def read_pickle_single(self, class_to_read: str):
"""
Read the desired pickle file.
Parameters
----------
class_to_read : str
Name of the class to read.
Returns
-------
_copy : cls
Copy of desired class.
"""
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
filename = join(self.processes_dir[indx], class_to_read + ".pickle")
with open(filename, "rb") as pickle_file:
data = pickle.load(pickle_file)
_copy = deepcopy(data)
return _copy
def dump(self, phase, ptcls, it):
"""
Save particles' data to binary file for future restart.
Parameters
----------
phase : str
Simulation phase.
ptcls : :class:`sarkas.particles.Particles`
Particles data.
it : int
Timestep number.
"""
if phase == "production":
ptcls_file = self.prod_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
cntr=ptcls.pbc_cntr,
rdf_hist=ptcls.rdf_hist,
virial=ptcls.virial,
time=tme,
)
energy_file = self.prod_energy_filename
elif phase == "equilibration":
ptcls_file = self.eq_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
virial=ptcls.virial,
time=tme,
)
energy_file = self.eq_energy_filename
elif phase == "magnetization":
ptcls_file = self.mag_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
virial=ptcls.virial,
time=tme,
)
energy_file = self.mag_energy_filename
kinetic_energies, temperatures = ptcls.kinetic_temperature()
potential_energies = ptcls.potential_energies()
# Save Energy data
data = {
"Time": it * self.dt,
"Total Energy": kinetic_energies.sum() + ptcls.potential_energy,
"Total Kinetic Energy": kinetic_energies.sum(),
"Potential Energy": ptcls.potential_energy,
"Total Temperature": ptcls.species_num.transpose() @ temperatures / ptcls.total_num_ptcls,
}
if len(temperatures) > 1:
for sp, kin in enumerate(kinetic_energies):
data[f"{self.species_names[sp]} Kinetic Energy"] = kin
data[f"{self.species_names[sp]} Potential Energy"] = potential_energies[sp]
data[f"{self.species_names[sp]} Temperature"] = temperatures[sp]
with open(energy_file, "a") as f:
w = csv.writer(f)
w.writerow(data.values())
def dump_xyz(self, phase: str = "production"):
"""
Save the XYZ file by reading Sarkas dumps.
Parameters
----------
phase : str
Phase from which to read dumps. 'equilibration' or 'production'.
dump_skip : int
Interval of dumps to skip. Default = 1
"""
if phase == "equilibration":
self.xyz_filename = join(self.equilibration_dir, "pva_" + self.job_id + ".xyz")
dump_dir = self.eq_dump_dir
else:
self.xyz_filename = join(self.production_dir, "pva_" + self.job_id + ".xyz")
dump_dir = self.prod_dump_dir
f_xyz = open(self.xyz_filename, "w+")
if not hasattr(self, "a_ws"):
params = self.read_pickle_single("parameters")
self.a_ws = params.a_ws
self.total_num_ptcls = params.total_num_ptcls
self.total_plasma_frequency = params.total_plasma_frequency
# Rescale constants. This is needed since OVITO has a small number limit.
pscale = 1.0 / self.a_ws
vscale = 1.0 / (self.a_ws * self.total_plasma_frequency)
ascale = 1.0 / (self.a_ws * self.total_plasma_frequency**2)
# Read the list of dumps and sort them in the correct (natural) order
dumps = listdir(dump_dir)
dumps.sort(key=num_sort)
for dump in tqdm(dumps, disable=not self.verbose):
data = self.read_npz(dump_dir, dump)
data["pos_x"] *= pscale
data["pos_y"] *= pscale
data["pos_z"] *= pscale
data["vel_x"] *= vscale
data["vel_y"] *= vscale
data["vel_z"] *= vscale
data["acc_x"] *= ascale
data["acc_y"] *= ascale
data["acc_z"] *= ascale
f_xyz.writelines("{0:d}\n".format(self.total_num_ptcls))
f_xyz.writelines("name x y z vx vy vz ax ay az\n")
savetxt(f_xyz, data, fmt="%s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")
f_xyz.close()
@staticmethod
def read_npz(fldr: str, filename: str):
"""
Load particles' data from dumps.
Parameters
----------
fldr : str
Folder containing dumps.
filename: str
Name of the dump file to load.
Returns
-------
struct_array : numpy.ndarray
Structured data array.
"""
file_name = join(fldr, filename)
data = np_load(file_name, allow_pickle=True)
# Dev Notes: the old way of saving the xyz file by
# savetxt(f_xyz, np.c_[data["names"],data["pos"] ....]
# , fmt="%10s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")
# was not working, because the columns of np.c_[] all have the same data type <U32
# which is in conflict with the desired fmt. i.e. data["names"] was not recognized as a string.
# So I have to create a new structured array and pass this. I could not think of a more Pythonic way.
struct_array = zeros(
data["names"].size,
dtype=[
("names", "U6"),
("pos_x", float64),
("pos_y", float64),
("pos_z", float64),
("vel_x", float64),
("vel_y", float64),
("vel_z", float64),
("acc_x", float64),
("acc_y", float64),
("acc_z", float64),
],
)
struct_array["names"] = data["names"]
struct_array["pos_x"] = data["pos"][:, 0]
struct_array["pos_y"] = data["pos"][:, 1]
struct_array["pos_z"] = data["pos"][:, 2]
struct_array["vel_x"] = data["vel"][:, 0]
struct_array["vel_y"] = data["vel"][:, 1]
struct_array["vel_z"] = data["vel"][:, 2]
struct_array["acc_x"] = data["acc"][:, 0]
struct_array["acc_y"] = data["acc"][:, 1]
struct_array["acc_z"] = data["acc"][:, 2]
return struct_array
def alpha_to_int(text):
"""Convert strings of numbers into integers.
Parameters
----------
text : str
Text to be converted into an int, if `text` is a number.
Returns
-------
_ : int, str
Integral number otherwise returns a string.
"""
return int(text) if text.isdigit() else text
def num_sort(text):
"""
Sort strings with numbers inside.
Parameters
----------
text : str
Text to be split into str and int
Returns
-------
: list
List containing text and integers
Notes
-----
Function copied from
https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside.
Originally from http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [alpha_to_int(c) for c in re.split(r"(\d+)", text)]
def convert_bytes(tot_bytes):
    """Split a byte count into whole giga-, mega-, kilo-bytes and a remainder.

    Parameters
    ----------
    tot_bytes : int
        Total number of bytes.

    Returns
    -------
    list of int
        ``[GB, MB, KB, rem]`` — the byte count divided into binary
        (1024-based) units, with ``rem`` holding the leftover bytes.
    """
    parts = []
    remainder = tot_bytes
    # Peel off each binary unit from largest to smallest.
    for unit in (1024 ** 3, 1024 ** 2, 1024):
        whole, remainder = divmod(remainder, unit)
        parts.append(whole)
    parts.append(remainder)
    return parts
| 312 | 0 | 54 |
f21374e8961ec93ce9e7165adeb1fb7f26b4600c | 4,273 | py | Python | scripts/language_model/conversion_utils/compare_xlnet_pytorch_gluon_model.py | xiaotinghe/gluon-nlp | 3ce9995329fb0d18787019df541d4f229d7c9ded | [
"Apache-2.0"
] | 1 | 2021-06-17T12:59:25.000Z | 2021-06-17T12:59:25.000Z | scripts/language_model/conversion_utils/compare_xlnet_pytorch_gluon_model.py | xiaotinghe/gluon-nlp | 3ce9995329fb0d18787019df541d4f229d7c9ded | [
"Apache-2.0"
] | 3 | 2020-09-01T05:45:57.000Z | 2020-10-22T23:14:20.000Z | scripts/language_model/conversion_utils/compare_xlnet_pytorch_gluon_model.py | xiaotinghe/gluon-nlp | 3ce9995329fb0d18787019df541d4f229d7c9ded | [
"Apache-2.0"
] | 3 | 2021-07-20T07:40:15.000Z | 2021-08-03T08:39:17.000Z | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Script for model comparison between TF and Gluon."""
import argparse
import logging
import os
import sys
import mxnet as mx
import numpy as np
import torch
import gluonnlp as nlp
import transformers
if __name__ == '__main__':
    # Command-line entry point: validate a converted Gluon XLNet model
    # against the reference PyTorch implementation.
    parser = argparse.ArgumentParser(
        description='Comparison script for Tensorflow and GLuon XLNet model',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Only the two published XLNet sizes are supported.
    parser.add_argument('--model-name', type=str, required=True,
                        choices=['xlnet_cased_L-12_H-768_A-12',
                                 'xlnet_cased_L-24_H-1024_A-16'], help='Model name')
    parser.add_argument('--gluon-parameter-file', type=str, required=True,
                        help='gluon parameter file name.')
    parser.add_argument('--gluon-vocab-file', type=str, required=True,
                        help='gluon vocab file corresponding to --gluon_parameter_file.')
    parser.add_argument('--debug', action='store_true', help='debugging mode')
    args = parser.parse_args()
    # Root-logger level follows the --debug flag.
    logging.getLogger().setLevel(logging.DEBUG if args.debug else logging.INFO)
    logging.info(args)
    # Make the grandparent directory importable so the local `transformer`
    # module (providing XLNet) can be loaded before running the comparison.
    sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir)))
    from transformer import XLNet
    compare_xlnet(args)
| 39.201835 | 96 | 0.674234 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Script for model comparison between TF and Gluon."""
import argparse
import logging
import os
import sys
import mxnet as mx
import numpy as np
import torch
import gluonnlp as nlp
import transformers
def compare_xlnet(args):
    """Check that a converted Gluon XLNet matches the PyTorch reference model.

    Runs both models on identical dummy inputs (with attached memory states)
    and asserts that every layer's hidden state and the final output agree
    within a small numeric tolerance.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments; uses ``model_name``,
        ``gluon_parameter_file`` and ``gluon_vocab_file``.

    Raises
    ------
    AssertionError
        If the vocabulary size does not match or any output deviates
        beyond the tolerance.
    """
    # Dummy input dimensions: batch size, query length, memory length.
    batch_size, qlen, mlen = 2, 16, 100
    # Reference model: HuggingFace transformers, dropout disabled so the
    # forward pass is deterministic and comparable.
    model_p = transformers.XLNetLMHeadModel.from_pretrained(
        'xlnet-base-cased'
        if args.model_name == 'xlnet_cased_L-12_H-768_A-12' else 'xlnet-large-cased', dropout=0)
    model_p.transformer.attentions = False  # no change of default
    model_p.transformer.output_hidden_states = True
    model_p.transformer.mem_len = mlen
    # Gluon hyper-parameters must mirror the published model sizes.
    if args.model_name == 'xlnet_cased_L-12_H-768_A-12':
        kwargs = {
            'hidden_size': 3072,
            'units': 768,
            'activation': 'approx_gelu',
            'num_heads': 12,
            'num_layers': 12,
            'vocab_size': 32000
        }
    elif args.model_name == 'xlnet_cased_L-24_H-1024_A-16':
        kwargs = {
            'hidden_size': 4096,
            'units': 1024,
            'activation': 'approx_gelu',
            'num_heads': 16,
            'num_layers': 24,
            'vocab_size': 32000
        }
    with open(args.gluon_vocab_file, 'r') as f:
        vocab = nlp.Vocab.from_json(f.read())
    ctx = mx.cpu()
    assert kwargs['vocab_size'] == len(vocab)
    # clamp_len <= 0 means "no clamping" in transformers; Gluon uses None.
    clamp_len = model_p.transformer.clamp_len if model_p.transformer.clamp_len > 0 else None
    model = XLNet(clamp_len=clamp_len, **kwargs)
    model.initialize(ctx=ctx)
    model.load_parameters(args.gluon_parameter_file, ignore_extra=False)
    model.hybridize()
    # Computation
    mems = model.begin_mems(batch_size, mlen, context=mx.cpu())
    x = mx.nd.ones(shape=(batch_size, qlen))
    token_types = mx.nd.ones(shape=(batch_size, qlen))
    output, new_mems = model(x, token_types, mems)
    # Mirror the same inputs for PyTorch; mems are transposed because the
    # two frameworks lay out the memory axes differently.
    x_p = torch.tensor(x.asnumpy(), dtype=torch.long)
    mems_p = [torch.tensor(mems_i.transpose((1, 0, 2)).asnumpy()) for mems_i in mems]
    token_types_p = torch.tensor(token_types.asnumpy(), dtype=torch.long)
    # NOTE(review): new_mems_p is unpacked but never compared below.
    output_p, new_mems_p, hids_p = model_p(x_p, token_type_ids=token_types_p, mems=mems_p)
    # Per-layer check: the last qlen entries of the updated Gluon memory
    # must equal the PyTorch hidden states of that layer.
    for i in range(kwargs['num_layers']):
        a, b = new_mems[i][:, -qlen:].asnumpy(), hids_p[i].detach().numpy()
        assert np.all(np.isclose(a, b, atol=1e-5))
    assert np.all(np.isclose(output.asnumpy(), output_p.detach().numpy(), atol=5e-5))
if __name__ == '__main__':
    # Build the command-line interface for the model comparison script.
    arg_parser = argparse.ArgumentParser(
        description='Comparison script for Tensorflow and GLuon XLNet model',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--model-name', type=str, required=True,
                            choices=['xlnet_cased_L-12_H-768_A-12',
                                     'xlnet_cased_L-24_H-1024_A-16'], help='Model name')
    arg_parser.add_argument('--gluon-parameter-file', type=str, required=True,
                            help='gluon parameter file name.')
    arg_parser.add_argument('--gluon-vocab-file', type=str, required=True,
                            help='gluon vocab file corresponding to --gluon_parameter_file.')
    arg_parser.add_argument('--debug', action='store_true', help='debugging mode')
    args = arg_parser.parse_args()
    # Verbosity of the root logger follows the --debug flag.
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.getLogger().setLevel(log_level)
    logging.info(args)
    # Make the grandparent directory importable, then load the local
    # `transformer` module and run the comparison.
    scripts_dir = os.path.join(__file__, os.pardir, os.pardir)
    sys.path.append(os.path.abspath(scripts_dir))
    from transformer import XLNet
    compare_xlnet(args)
| 2,169 | 0 | 23 |
388e640585fa2c9a76ef6f70de8d309b86953deb | 1,374 | py | Python | dopplerr/tasks/tests/test_download_subtitles.py | Stibbons/sonarr-sub-downloader-docker | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 9 | 2018-04-27T18:49:31.000Z | 2020-01-29T08:23:26.000Z | dopplerr/tasks/tests/test_download_subtitles.py | Stibbons/sonarr-sub-downloader-docker | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 7 | 2017-05-31T16:38:40.000Z | 2017-06-05T12:06:48.000Z | dopplerr/tasks/tests/test_download_subtitles.py | Stibbons/subdlsrv | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 3 | 2018-04-22T08:40:29.000Z | 2018-08-19T00:41:25.000Z | # coding: utf-8
# Standard Libraries
import unittest
from pathlib import Path
# Dopplerr
from dopplerr.tasks.download_subtitles import DownloadSubtitleTask
# Todo:
# glob test of "The.Series.Name.S07E06.720p.BluRay.DD5.1.x264-EbP-Obfuscated"
| 37.135135 | 98 | 0.679039 | # coding: utf-8
# Standard Libraries
import unittest
from pathlib import Path
# Dopplerr
from dopplerr.tasks.download_subtitles import DownloadSubtitleTask
class TestGlob(unittest.TestCase):
    """Glob-matching tests for DownloadSubtitleTask.search_file."""

    def assert_list_size(self, lst, size):
        """Fail with a readable message unless *lst* has exactly *size* items."""
        if len(lst) != size:
            msg = "list size should be {}, is {} : {}".format(size, len(lst), ", ".join(lst))
            self.fail(msg)

    def test_glob_simple_filename(self):
        """A plain name matches itself plus prefixed/suffixed variants anywhere below the root."""
        task = DownloadSubtitleTask()
        vectors_dir = Path(__file__).parent / "vectors"
        matches = sorted(task.search_file(vectors_dir, "videofile.mp4"))
        self.assert_list_size(matches, 4)
        expected_endings = [
            "/a_subfolder/prepended-videofile.mp4",
            "/a_subfolder/videofile-suffixed.mp4",
            "/a_subfolder/videofile.mp4",
            "/videofile.mp4",
        ]
        for ending, match in zip(expected_endings, matches):
            self.assertIn(ending, match)

    def test_glob_filename_with_bracket(self):
        """Bracket characters in the searched name must be treated literally, not as glob sets."""
        task = DownloadSubtitleTask()
        vectors_dir = Path(__file__).parent / "vectors"
        matches = sorted(task.search_file(vectors_dir, "complex[name].mkv"))
        self.assert_list_size(matches, 2)
        self.assertIn("vectors/a_subfolder/complex[name].mkv", matches[0])
        self.assertIn("vectors/complex[name][withanothersuffix].mkv", matches[1])
# Todo:
# glob test of "The.Series.Name.S07E06.720p.BluRay.DD5.1.x264-EbP-Obfuscated"
| 1,003 | 13 | 104 |
cb86658971cb216bdc9560ee683f36441b257034 | 27 | py | Python | tests/syntax/backslash_instead_of_slash.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 287 | 2019-04-08T13:18:29.000Z | 2021-03-14T19:10:21.000Z | tests/syntax/backslash_instead_of_slash.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 191 | 2019-04-08T14:39:18.000Z | 2021-03-14T22:14:56.000Z | tests/syntax/backslash_instead_of_slash.py | matan-h/friendly | 3ab0fc6541c837271e8865e247750007acdd18fb | [
"MIT"
] | 9 | 2019-04-08T12:54:08.000Z | 2020-11-20T02:26:27.000Z | a = 3 \ 4.0
print('hello')
| 9 | 14 | 0.518519 | a = 3 \ 4.0
print('hello')
| 0 | 0 | 0 |
cf4d043a2f6b91fa9adba7629ae93b1a128f31e7 | 806 | py | Python | asyncspotify/object.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 7 | 2020-06-16T21:24:42.000Z | 2022-03-10T20:23:29.000Z | asyncspotify/object.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 13 | 2020-03-22T12:07:04.000Z | 2021-08-15T19:06:57.000Z | asyncspotify/object.py | minibox24/asyncspotify | 3767cf19cf598fb179883cffd878e2440c16a57c | [
"MIT"
] | 5 | 2020-03-22T18:21:55.000Z | 2021-10-03T06:30:30.000Z | class SpotifyObject:
'''
Represents a generic Spotify Object.
Attributes
----------
id: str
Spotify ID of the object.
name: str
Name of the object.
uri: str
Spotify URI of the object.
'''
_type = None
@property
class SpotifyObject:
    '''
    Represents a generic Spotify Object.

    Attributes
    ----------
    id: str
        Spotify ID of the object.
    name: str
        Name of the object.
    uri: str
        Spotify URI of the object.
    '''

    # Subclasses override this to identify their Spotify object type.
    _type = None

    def __init__(self, client, data):
        self._client = client
        # Pop the common fields out of the raw payload; missing keys
        # default to None so partial payloads are tolerated.
        for field in ('id', 'name', 'href', 'uri'):
            setattr(self, field, data.pop(field, None))

    @property
    def type(self):
        return self._type

    def __eq__(self, other):
        # Two objects are the same iff they share a Spotify ID.
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        label = self.__class__.__name__
        if self.id is not None:
            label += ' id=\'{}\''.format(self.id)
        if self.name is not None:
            label += ' name=\'{}\''.format(self.name)
        return '<%s>' % label
| 451 | 0 | 119 |
c3c65355a7596d90458cda447d86bf73b1d7d9b3 | 413 | py | Python | app.py | nick-korres/city_guess_api | ac7cbf8d2f710c6e85d3070e30ce7e259f8a79b0 | [
"MIT"
] | null | null | null | app.py | nick-korres/city_guess_api | ac7cbf8d2f710c6e85d3070e30ce7e259f8a79b0 | [
"MIT"
] | null | null | null | app.py | nick-korres/city_guess_api | ac7cbf8d2f710c6e85d3070e30ce7e259f8a79b0 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
app = Flask(__name__, static_url_path='/static')
app.config.from_object('config.DevConfig')
CORS(app)
ma = Marshmallow(app)
db = SQLAlchemy(app)
from routes import images
app.register_blueprint(images)
from dbSetup import Setup
if __name__ == '__main__':
app.run()
| 17.956522 | 48 | 0.782082 | from flask import Flask
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
# Flask application serving static assets from /static, configured from
# the development config object.
app = Flask(__name__, static_url_path='/static')
app.config.from_object('config.DevConfig')
# Allow cross-origin requests (front-end runs on a different origin).
CORS(app)
# Serialization (Marshmallow) and ORM (SQLAlchemy) layers bound to the app.
ma = Marshmallow(app)
db = SQLAlchemy(app)
# Route registration happens after `app` exists (circular-import pattern).
from routes import images
app.register_blueprint(images)
# NOTE(review): imported for its side effects — presumably database
# initialization; confirm in dbSetup.
from dbSetup import Setup
if __name__ == '__main__':
    app.run()
| 0 | 0 | 0 |
a725160631e3fffc2ebff7fa6c999c1d751a9a8b | 3,192 | py | Python | python3/koans/about_scoring_project.py | JosephKiranBabu/python3-koans | 26e5ac5ab3821d5a8e9d32776ac3d65150938d0f | [
"MIT"
] | 1 | 2020-12-17T19:09:58.000Z | 2020-12-17T19:09:58.000Z | python3/koans/about_scoring_project.py | JosephKiranBabu/python3-koans | 26e5ac5ab3821d5a8e9d32776ac3d65150938d0f | [
"MIT"
] | null | null | null | python3/koans/about_scoring_project.py | JosephKiranBabu/python3-koans | 26e5ac5ab3821d5a8e9d32776ac3d65150938d0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
# Test cases start from here
| 29.284404 | 79 | 0.622807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
    """Score a single Greed roll.

    Scoring rules:
      * a set of three 1s          -> 1000 points
      * a set of three of face n>1 -> n * 100 points
      * each leftover 1            -> 100 points
      * each leftover 5            -> 50 points
      * anything else              -> 0 points

    Parameters
    ----------
    dice : list of int
        Face values (1-6) of the rolled dice (up to five dice).

    Returns
    -------
    int
        Total score of the roll.

    Notes
    -----
    Fixes two defects of the previous implementation: the input list is
    no longer mutated (the old code removed dice from the caller's list
    via ``remove_3``), and rolls such as ``[1, 1, 1, 1, 5]`` are scored
    correctly (the old all-identical check skipped the last die and so
    returned 1200 instead of 1150).
    """
    from collections import Counter  # local import keeps module deps unchanged

    total = 0
    for face, count in Counter(dice).items():
        if count >= 3:
            # A completed set of three: 1000 for ones, face * 100 otherwise.
            total += 1000 if face == 1 else face * 100
            count -= 3
        # Leftover singles only score for ones and fives.
        total += count * {1: 100, 5: 50}.get(face, 0)
    return total
def score_1(i):
    """Score one leftover die: 100 for a 1, 50 for a 5, 0 otherwise."""
    if i == 1:
        return 100
    if i == 5:
        return 50
    return 0
def has_3_identical(seq):
    """Return the lowest face (1-6) occurring at least three times in *seq*.

    Returns 0 when no face appears three or more times.
    """
    for face in range(1, 7):
        if seq.count(face) >= 3:
            return face
    return 0
def remove_3(seq, elem):
    """Delete three occurrences of *elem* from *seq* in place and return *seq*.

    Raises ValueError when *seq* holds fewer than three occurrences.
    """
    removals = 3
    while removals:
        seq.remove(elem)
        removals -= 1
    return seq
def has_all_identical(seq):
    """Return True when every element of *seq* equals the first one.

    Empty and single-element sequences count as all-identical.

    Notes
    -----
    Bug fix: the previous version compared ``seq[i-1]`` against ``seq[1]``
    and its loop never reached the final element, so sequences such as
    ``[1, 1, 5]`` and ``[1, 1, 1, 1, 5]`` were wrongly reported as
    all-identical.
    """
    if not seq:
        return True
    first = seq[0]
    return all(item == first for item in seq)
# Test cases start from here
class AboutScoringProject(Koan):
    """Koan test cases exercising the Greed ``score`` function defined above."""

    def test_score_of_an_empty_list_is_zero(self):
        self.assertEqual(0, score([]))

    def test_score_of_a_single_roll_of_5_is_50(self):
        self.assertEqual(50, score([5]))

    def test_score_of_a_single_roll_of_1_is_100(self):
        self.assertEqual(100, score([1]))

    def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
        self.assertEqual(300, score([1,5,5,1]))

    def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
        self.assertEqual(0, score([2,3,4,6]))

    def test_score_of_a_triple_1_is_1000(self):
        self.assertEqual(1000, score([1,1,1]))

    def test_score_of_other_triples_is_100x(self):
        # A set of three of face n (n > 1) is worth n * 100.
        self.assertEqual(200, score([2,2,2]))
        self.assertEqual(300, score([3,3,3]))
        self.assertEqual(400, score([4,4,4]))
        self.assertEqual(500, score([5,5,5]))
        self.assertEqual(600, score([6,6,6]))

    def test_score_of_mixed_is_sum(self):
        # Triples plus leftover 1s/5s add up.
        self.assertEqual(250, score([2,5,2,2,3]))
        self.assertEqual(550, score([5,5,5,5]))
        self.assertEqual(1150, score([1,1,1,5,1]))

    def test_ones_not_left_out(self):
        # A lone 1 still scores 100 alongside a triple of another face.
        self.assertEqual(300, score([1,2,2,2]))
        self.assertEqual(350, score([1,5,2,2,2]))
| 1,888 | 11 | 379 |
3124a8905edfb540210abb3d2bbcb440586edbeb | 796 | py | Python | Matrices.py | huisaluisqj/Castor | 7a41f2ff8196f90e59d8b1e3da56e7b2a2cdac5d | [
"MIT"
] | null | null | null | Matrices.py | huisaluisqj/Castor | 7a41f2ff8196f90e59d8b1e3da56e7b2a2cdac5d | [
"MIT"
] | null | null | null | Matrices.py | huisaluisqj/Castor | 7a41f2ff8196f90e59d8b1e3da56e7b2a2cdac5d | [
"MIT"
] | 2 | 2020-08-12T00:45:07.000Z | 2020-08-12T02:42:54.000Z | # Función de generación de matrices de clases y características
| 21.513514 | 66 | 0.636935 | # Función de generación de matrices de clases y características
def generateMatrice(data, K_mer, k):
# Variables
X = []
y = []
# Generar diccionario K-mer
X_dict = {}
for i, e in enumerate(K_mer): X_dict[e] = 0;
# Generar X (atributos de matriz)
for d in data:
x = []
x_dict = X_dict.copy()
# Contar ocurrencias de K-mer (con superposición)
for i in range(0, len(d[1]) - k + 1, 1):
try: x_dict[d[1][i:i + k]] = x_dict[d[1][i:i + k]] + 1;
except: pass
# Obtener todas las ocurrencias del diccionario
for value in x_dict:
x.append(x_dict.get(value))
X.append(x)
# Genera y (clase Matrix) si existe un archivo csv
if len(data[0]) == 3:
for i in data: y.append(i[2])
# Retornar matrices X e y (atributos de matriz y clase de matriz)
return X, y
| 705 | 0 | 22 |
9125cd0fc3e838e23d42032ebe3f4ad39140e1fb | 7,245 | py | Python | ProjectFireQuake/assets/dash_plot.py | MariannaMangat/FireQuake | 0a1d2162d9ce60bc6e53f956f0e0d8a39008ac36 | [
"MIT"
] | 1 | 2018-10-03T05:17:43.000Z | 2018-10-03T05:17:43.000Z | ProjectFireQuake/assets/dash_plot.py | MariannaMangat/Firequake | 0a1d2162d9ce60bc6e53f956f0e0d8a39008ac36 | [
"MIT"
] | null | null | null | ProjectFireQuake/assets/dash_plot.py | MariannaMangat/Firequake | 0a1d2162d9ce60bc6e53f956f0e0d8a39008ac36 | [
"MIT"
] | 2 | 2018-12-05T18:58:41.000Z | 2020-06-30T12:12:51.000Z | # import dash
# import dash_core_components as dcc
# import dash_html_components as html
# import pandas as pd
# import plotly.graph_objs as go
# from dash.dependencies import Input, Output
# import requests, json
# # df = pd.read_csv(
# # 'wiki_fires_cleaned_2015-2018.csv')
# year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# # print(type(year_data))
# df = pd.read_json(year_data.content)
# print(year_data)
# app = dash.Dash()
# app.layout = html.Div([
# dcc.Graph(id = 'graph-with-slider'),
# dcc.Slider(
# id = 'year-slider',
# min = df['Fire Year'].min(),
# max = df['Fire Year'].max(),
# value = df['Fire Year'].min(),
# step = None,
# marks = {str(Year): str(Year) for Year in df['Fire Year'].unique()}
# )
# ])
# @app.callback(
# dash.dependencies.Output('graph-with-slider', 'figure'),
# [dash.dependencies.Input('year-slider', 'value')])
# def update_figure(selected_year):
# filtered_df = df[df["Fire Year"] == selected_year]
# traces = []
# for i in filtered_df.County.unique():
# df_by_county = filtered_df[filtered_df['County'] == i]
# traces.append(go.Scatter(
# x = df_by_county['Number of Days'],
# y = df_by_county['Acres Burned'],
# text = f"{i}, {selected_year}",
# mode = 'markers',
# opacity = 0.7,
# marker = {
# 'size': 15,
# 'line': {'width': 0.5, 'color': 'white'}
# },
# name = i
# ))
# return {
# 'data': traces,
# 'layout': go.Layout(
# xaxis = {'type': 'linear', 'title': 'Number of Days'},
# yaxis = {'title': 'Acres Burned', 'range': [0, 30000]},
# margin = {'l': 40, 'b': 40, 't': 10, 'r': 10},
# hovermode = 'closest'
# )
# }
# if __name__ == '__main__':
# app.run_server()
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import requests, json
# df = pd.read_csv(
# 'wiki_fires_cleaned_2015-2018.csv')
# Fetch wildfire records since 2015 from the local API and load them into
# a DataFrame used by the layout and all callbacks below.
year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# print(type(year_data))
df = pd.read_json(year_data.content)
app = dash.Dash()
# Page layout: main scatter plot (left), two linked time-series (right),
# and a year slider underneath.
app.layout = html.Div([
    html.Div([
        # Default hoverData gives the time-series an initial county before
        # the user hovers anything.
        dcc.Graph(id='graph-with-slider',
                  hoverData={'points':[{'customdata':"San Bernardino"}] })
    ], style={'width': '49%', 'height': '550', 'display': 'inline-block', 'padding': '0.20'}),
    html.Div([
        dcc.Graph(id='x-time-series'),
        dcc.Graph(id='y-time-series'),
    ], style={'display': 'inline-block', 'width': '49%', 'height':'550'}),
    html.Div(
        dcc.Slider(
            id='year-slider',
            min=df['Fire Year'].min(),
            max=df['Fire Year'].max(),
            value=df['Fire Year'].min(),
            step=None,
            # One labelled tick per distinct year in the data.
            marks={str(Year): str(Year) for Year in df['Fire Year'].unique()}
        ), style={'width': '49%', 'padding':'0px 20px 20px 20px'})
])
@app.callback(
dash.dependencies.Output('graph-with-slider', 'figure'),
[dash.dependencies.Input('year-slider', 'value')])
@app.callback(
dash.dependencies.Output('x-time-series', 'figure'),
[dash.dependencies.Input('graph-with-slider', 'hoverData'),
dash.dependencies.Input('year-slider', 'value')]
)
@app.callback(
dash.dependencies.Output('y-time-series', 'figure'),
[dash.dependencies.Input('graph-with-slider', 'hoverData'),
dash.dependencies.Input('year-slider', 'value')]
)
if __name__ == '__main__':
app.run_server() | 32.782805 | 103 | 0.529745 | # import dash
# import dash_core_components as dcc
# import dash_html_components as html
# import pandas as pd
# import plotly.graph_objs as go
# from dash.dependencies import Input, Output
# import requests, json
# # df = pd.read_csv(
# # 'wiki_fires_cleaned_2015-2018.csv')
# year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# # print(type(year_data))
# df = pd.read_json(year_data.content)
# print(year_data)
# app = dash.Dash()
# app.layout = html.Div([
# dcc.Graph(id = 'graph-with-slider'),
# dcc.Slider(
# id = 'year-slider',
# min = df['Fire Year'].min(),
# max = df['Fire Year'].max(),
# value = df['Fire Year'].min(),
# step = None,
# marks = {str(Year): str(Year) for Year in df['Fire Year'].unique()}
# )
# ])
# @app.callback(
# dash.dependencies.Output('graph-with-slider', 'figure'),
# [dash.dependencies.Input('year-slider', 'value')])
# def update_figure(selected_year):
# filtered_df = df[df["Fire Year"] == selected_year]
# traces = []
# for i in filtered_df.County.unique():
# df_by_county = filtered_df[filtered_df['County'] == i]
# traces.append(go.Scatter(
# x = df_by_county['Number of Days'],
# y = df_by_county['Acres Burned'],
# text = f"{i}, {selected_year}",
# mode = 'markers',
# opacity = 0.7,
# marker = {
# 'size': 15,
# 'line': {'width': 0.5, 'color': 'white'}
# },
# name = i
# ))
# return {
# 'data': traces,
# 'layout': go.Layout(
# xaxis = {'type': 'linear', 'title': 'Number of Days'},
# yaxis = {'title': 'Acres Burned', 'range': [0, 30000]},
# margin = {'l': 40, 'b': 40, 't': 10, 'r': 10},
# hovermode = 'closest'
# )
# }
# if __name__ == '__main__':
# app.run_server()
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import requests, json
# df = pd.read_csv(
# 'wiki_fires_cleaned_2015-2018.csv')
# Fetch wildfire records since 2015 from the local API and load them into
# a DataFrame used by the layout and all callbacks below.
year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# print(type(year_data))
df = pd.read_json(year_data.content)
app = dash.Dash()
# Page layout: main scatter plot (left), two linked time-series (right),
# and a year slider underneath.
app.layout = html.Div([
    html.Div([
        # Default hoverData gives the time-series an initial county before
        # the user hovers anything.
        dcc.Graph(id='graph-with-slider',
                  hoverData={'points':[{'customdata':"San Bernardino"}] })
    ], style={'width': '49%', 'height': '550', 'display': 'inline-block', 'padding': '0.20'}),
    html.Div([
        dcc.Graph(id='x-time-series'),
        dcc.Graph(id='y-time-series'),
    ], style={'display': 'inline-block', 'width': '49%', 'height':'550'}),
    html.Div(
        dcc.Slider(
            id='year-slider',
            min=df['Fire Year'].min(),
            max=df['Fire Year'].max(),
            value=df['Fire Year'].min(),
            step=None,
            # One labelled tick per distinct year in the data.
            marks={str(Year): str(Year) for Year in df['Fire Year'].unique()}
        ), style={'width': '49%', 'padding':'0px 20px 20px 20px'})
])
# Redraw the main scatter whenever the year slider changes.
@app.callback(
    dash.dependencies.Output('graph-with-slider', 'figure'),
    [dash.dependencies.Input('year-slider', 'value')])
def update_figure(selected_year):
    """Build the days-vs-acres scatter for the selected year, one trace per county."""
    dff = df[df['Fire Year'] == selected_year]
    traces = []
    # create_time_series(filtered_df,title)
    for i in dff.County.unique():
        df_by_county = dff[dff['County'] == i]
        traces.append(go.Scatter(
            x=df_by_county['Number of Days'],
            y= df_by_county['Acres Burned'],
            # customdata carries the county name so the hover callbacks
            # below can identify which county was hovered.
            customdata=df_by_county['County'],
            text= f"{i}, {selected_year}",
            mode='markers',
            opacity=0.7,
            marker={
                'size': 15,
                'line': {'width': 0.5, 'color': 'white'}
            },
            name=i
        ))
    return {
        'data': traces,
        'layout': go.Layout(
            xaxis={'type': 'linear', 'title': 'Number of Days'},
            yaxis={'title': 'Acres Burned', 'range': [0, 30000]},
            margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
            hovermode='closest'
        )
    }
def create_time_series(tracesdf, title):
    """Return a small line+marker figure dict for one county's yearly values.

    *tracesdf* must carry 'traces_year' (x axis) and 'traces1' (y axis)
    columns; *title* is rendered as an annotation inside the plot area.
    """
    trace = go.Scatter(
        x=tracesdf['traces_year'],
        y=tracesdf['traces1'],
        mode='lines+markers'
    )
    # Title is drawn as an in-plot annotation anchored to the top left.
    annotation = {
        'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
        'xref': 'paper', 'yref': 'paper', 'showarrow': False,
        'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
        'text': title
    }
    layout = {
        'height': 225,
        'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
        'annotations': [annotation],
        'yaxis': {'type': 'linear'},
        'xaxis': {'showgrid': False, 'title': 'Fire Year', 'range': [2014, 2019]}
    }
    return {'data': [trace], 'layout': layout}
# Time-series of max acres burned per year for the county under the cursor.
@app.callback(
    dash.dependencies.Output('x-time-series', 'figure'),
    [dash.dependencies.Input('graph-with-slider', 'hoverData'),
     dash.dependencies.Input('year-slider', 'value')]
)
def update_y_timeseries(hoverData, selected_year):
    """Rebuild the acres-burned time-series for the hovered county.

    NOTE(review): ``selected_year`` is declared as an Input but unused in
    this body — confirm whether the year filter was meant to apply.
    """
    traces1 = []
    # print(hoverData)
    # County name travels in customdata set by update_figure.
    county_name = hoverData['points'][0]['customdata']
    # dff = df[df['Year'] == selected_year]
    dff = df[df['County'] == county_name]
    print(dff)  # debug output
    # for i in dff.County.unique():
    for i in (dff["Fire Year"].unique()):
        df_by_year = dff[dff['Fire Year'] == i]
        max_acres = df_by_year['Acres Burned'].max()
        traces1.append(max_acres)
    # print(traces1)
    print(traces1)  # debug output
    title = '<b>{}</b><br>{}'.format(county_name, 'Acres')
    print(title)  # debug output
    tracesyear = dff['Fire Year'].unique()
    d = {'traces_year': tracesyear, 'traces1': traces1}
    tracesdf = pd.DataFrame(d)
    print(tracesdf)  # debug output
    return create_time_series(tracesdf, title)
# Time-series of max fire duration (days) per year for the hovered county.
@app.callback(
    dash.dependencies.Output('y-time-series', 'figure'),
    [dash.dependencies.Input('graph-with-slider', 'hoverData'),
     dash.dependencies.Input('year-slider', 'value')]
)
def update_x_timeseries(hoverData, selected_year):
    """Rebuild the number-of-days time-series for the hovered county.

    NOTE(review): ``selected_year`` is declared as an Input but unused in
    this body — confirm whether the year filter was meant to apply.
    """
    traces1 = []
    # County name travels in customdata set by update_figure.
    county_name = hoverData['points'][0]['customdata']
    # dff = df[df['Year'] == selected_year]
    dff = df[df['County'] == county_name]
    for i in (dff['Fire Year'].unique()):
        df_by_year = dff[dff['Fire Year'] == i]
        number_days = df_by_year['Number of Days'].max()
        traces1.append(number_days)
    # print(traces1)
    print(traces1)  # debug output
    tracesyear = dff['Fire Year'].unique()
    d = {'traces_year': tracesyear, 'traces1': traces1}
    tracesdf = pd.DataFrame(d)
    print(tracesdf)  # debug output
    title = '<b>{}</b><br>{}'.format(county_name, 'Days')
    print(title)  # debug output
    return create_time_series(tracesdf, title)
if __name__ == '__main__':
    # Start the Dash development server.
    app.run_server()
e08dbfa0b08b84419e4f2dcf2847e0cada947e96 | 6,375 | py | Python | experiments/tests/tests.py | Socrats/beelbe | fcd051ea9385660d2cfd40e5d53c5bf6f13636a6 | [
"Apache-2.0"
] | null | null | null | experiments/tests/tests.py | Socrats/beelbe | fcd051ea9385660d2cfd40e5d53c5bf6f13636a6 | [
"Apache-2.0"
] | null | null | null | experiments/tests/tests.py | Socrats/beelbe | fcd051ea9385660d2cfd40e5d53c5bf6f13636a6 | [
"Apache-2.0"
] | null | null | null | from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils import timezone
import experiments.utils as utils
from experiments.models import RequestMonitor, Player, Experiment, Treatment, Session
from experiments.views import login_view, game_view, finish_round_view
# Create your tests here.
def add_session_to_request(request):
"""Annotate a request object with a session"""
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
| 38.173653 | 118 | 0.640471 | from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils import timezone
import experiments.utils as utils
from experiments.models import RequestMonitor, Player, Experiment, Treatment, Session
from experiments.views import login_view, game_view, finish_round_view
# Create your tests here.
class RegisteredParticipantModel(TestCase):
pass
class PlayerModelTests(TestCase):
pass
class RequestMonitorModelTests(TestCase):
def test_var_updates_correctly(self):
pass
def test_check_condition(self):
pass
def test_wait(self):
pass
def test_signal(self):
pass
def add_session_to_request(request):
"""Annotate a request object with a session"""
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
class LogInViewTests(TestCase):
def test_login_participant(self):
pass
def test_not_participant_cant_login(self):
pass
def test_participant_cant_be_loggedin_twice(self):
pass
def test_restrict_participant_login_ip(self):
pass
def test_player_logged_in(self):
pass
def test_player_state(self):
pass
class GameViewTests(TestCase):
def setUp(self):
# set up DB state
game_loader = utils.GamesLoader(
'./experiments/db_data/games.json')
game_loader.load()
experiment_loader = utils.ExperimentsLoader(
'./experiments/db_data/experiments.json')
experiment_loader.load()
participants_loader = utils.ParticipantsLoader(
'./experiments/db_data/participants.json')
participants_loader.load()
run_now_loader = utils.RunLoader(
'./experiments/db_data/runnow.json')
run_now_loader.load()
# print(RegisteredParticipant.objects.all().values_list('password', flat=True))
# Every test needs access to the request factory
self.factory = RequestFactory()
# We set the session id first
self.experiment = Experiment.objects.get(experiment_name="Collective-Risk Experiment")
self.treatment = Treatment.objects.get(experiment=self.experiment,
treatment_name="Treatment 1")
self.session = Session.objects.get(experiment=self.experiment, treatment=self.treatment,
session_number=1)
self.login_reverse = reverse('experiments:login', kwargs={'session_id': self.session.id})
self.requests = {}
def test_participants_sync(self):
"""
Participants at game_view (state S2)
Participants should be redirected to wait after their move, until all members of the group
have finished. Then, they should be redirected to the results_view (S3).
"""
# First we initialize the experiment
utils.init_experiment()
# Then we setup the participants
passwords = ['testa', 'testb', 'testc', 'testd']
for psswd in passwords:
request = self.factory.post(self.login_reverse, {'password': psswd})
add_session_to_request(request)
login_view(request, session_id=self.session.id)
self.requests[request.session['user_id']] = request
# Now we assign groups
num_groups = utils.assign_groups2players(self.experiment.id, self.session.id)
# Let's get a pointer to the players on db
players = Player.objects.all()
# Create monitors for each group of players
for i in range(num_groups):
monitor1 = RequestMonitor(name="monitorS2g{}".format(i + 1), group=(i + 1))
monitor2 = RequestMonitor(name="monitorS3g{}".format(i + 1), group=(i + 1))
monitor1.save()
monitor2.save()
# Call game_view
# # Create the request
for player in players:
request = self.factory.get(reverse('experiments:game', kwargs={'session_id': self.session.id}))
add_session_to_request(request)
request.session = self.requests[player.id].session
# get to the game
game_view(request, session_id=self.session.id)
# make the action to be redirected to wait
request = self.factory.post(reverse('experiments:game', kwargs={'session_id': self.session.id}),
{'action': 2, 'time_round_start': timezone.now().isoformat(),
'time_round_end': timezone.now().isoformat(),
'time_elapsed': timezone.now().time().isoformat()
})
add_session_to_request(request)
request.session = self.requests[player.id].session
response = finish_round_view(request, session_id=self.session.id)
self.assertEqual(response.status_code, 200)
self.assertContains(response, text="<p>Wait until all members of the group have made their choice.</p>",
status_code=200)
class ResultsViewTests(TestCase):
def setUp(self):
# Every test needs access to the request factory
self.factory = RequestFactory()
# We set the session id first
self.experiment_id = 1
self.session_id = 1
self.login_reverse = reverse('experiments:login', kwargs={'session_id': self.session_id})
# generate participants
passwords = ["testa", "testb", "testc"]
for psswd in passwords:
request = self.factory.post(self.login_reverse, {'password': psswd})
add_session_to_request(request)
login_view(request, session_id=self.session_id)
def test_participants_sync(self):
"""
Participants at results_view (state S3). If the game is finished (last round) they should be redirected to the
survey when they press continue. Else, they should be redirected to WAIT until all members of the group
have finished. Then, they should be redirected to the game_view (S2).
"""
# First we initialize the experiment
utils.init_experiment()
# Then we setup the participants
self.setUp()
| 2,145 | 3,226 | 405 |
67382531729b61a696d40d490aadde39f69ffbf0 | 730 | py | Python | xoeuf/cli/server.py | merchise-autrement/xoeuf | 583a0faa345480e73110d467203eefd142b0a710 | [
"BSD-3-Clause"
] | 3 | 2015-05-16T04:40:14.000Z | 2016-01-26T05:36:20.000Z | xoeuf/cli/server.py | merchise-autrement/xoeuf | 583a0faa345480e73110d467203eefd142b0a710 | [
"BSD-3-Clause"
] | null | null | null | xoeuf/cli/server.py | merchise-autrement/xoeuf | 583a0faa345480e73110d467203eefd142b0a710 | [
"BSD-3-Clause"
] | 1 | 2017-03-23T23:08:50.000Z | 2017-03-23T23:08:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
"""An example of an application that use :mod:`xoeuf.cli`.
It behaves similar to "openerp-server" script. This module does not provide any
external facilities, but uses :func:`xotl.tools.cli.app.main` to run the
OpenERP server. Usage::
$ python server.py [options...]
"""
if __name__ == "__main__":
server()
| 24.333333 | 79 | 0.632877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
"""An example of an application that use :mod:`xoeuf.cli`.
It behaves similar to "openerp-server" script. This module does not provide any
external facilities, but uses :func:`xotl.tools.cli.app.main` to run the
OpenERP server. Usage::
$ python server.py [options...]
"""
def server():
    """Run the OpenERP server through xoeuf's default CLI command.

    Delegates to :func:`xotl.tools.cli.app.main` with xoeuf's
    ``DEFAULT_COMMAND``, mirroring the "openerp-server" script (see the
    module docstring).
    """
    from xoeuf.cli import DEFAULT_COMMAND
    from xotl.tools.cli.app import main
    main(default=DEFAULT_COMMAND)
# Script entry point: behave like the "openerp-server" executable.
if __name__ == "__main__":
    server()
| 109 | 0 | 23 |
e4bfeb3e23d955a6c65e9f90b941d39dd92f5f1d | 2,959 | py | Python | backend/pollaris/app/migrations/0007_drop_fks.py | Elizabeth-Warren/pollaris | 153ca4297ceed1ac1685ca80c5890384800cfa8a | [
"MIT"
] | 78 | 2020-03-27T14:49:11.000Z | 2021-11-15T10:24:11.000Z | backend/pollaris/app/migrations/0007_drop_fks.py | Elizabeth-Warren/pollaris | 153ca4297ceed1ac1685ca80c5890384800cfa8a | [
"MIT"
] | 10 | 2020-06-06T01:47:56.000Z | 2022-02-27T23:34:30.000Z | backend/pollaris/app/migrations/0007_drop_fks.py | Elizabeth-Warren/pollaris | 153ca4297ceed1ac1685ca80c5890384800cfa8a | [
"MIT"
] | 14 | 2020-03-27T17:36:30.000Z | 2020-05-21T04:50:07.000Z | # Generated by Django 2.2.6 on 2019-12-12 22:24
import django.db.models.deletion
from django.db import migrations, models
| 32.877778 | 61 | 0.532612 | # Generated by Django 2.2.6 on 2019-12-12 22:24
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop database-level foreign-key constraints (migration 0007).

    Every ForeignKey below is redeclared with ``db_constraint=False`` so
    the database no longer enforces the relation while Django keeps
    treating the column as a foreign key.  Column names (``db_column``)
    and the CASCADE delete behaviour are unchanged.
    """

    dependencies = [("app", "0006_nullable_fields_location")]

    operations = [
        migrations.AlterField(
            model_name="precincttodropboxlocation",
            name="dropbox_location",
            field=models.ForeignKey(
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.DropboxLocation",
            ),
        ),
        migrations.AlterField(
            model_name="precincttodropboxlocation",
            name="precinct",
            field=models.ForeignKey(
                db_column="van_precinct_id",
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.Precinct",
            ),
        ),
        migrations.AlterField(
            model_name="precincttoevlocation",
            name="early_vote_location",
            field=models.ForeignKey(
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.EarlyVoteLocation",
            ),
        ),
        migrations.AlterField(
            model_name="precincttoevlocation",
            name="precinct",
            field=models.ForeignKey(
                db_column="van_precinct_id",
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.Precinct",
            ),
        ),
        migrations.AlterField(
            model_name="precincttopollinglocation",
            name="polling_location",
            field=models.ForeignKey(
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.PollingLocation",
            ),
        ),
        migrations.AlterField(
            model_name="precincttopollinglocation",
            name="precinct",
            field=models.ForeignKey(
                db_column="van_precinct_id",
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.Precinct",
            ),
        ),
        migrations.AlterField(
            model_name="streetsegment",
            name="precinct",
            field=models.ForeignKey(
                db_column="van_precinct_id",
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.Precinct",
            ),
        ),
        migrations.AlterField(
            model_name="zip9toprecinct",
            name="precinct",
            field=models.ForeignKey(
                db_column="van_precinct_id",
                db_constraint=False,
                on_delete=django.db.models.deletion.CASCADE,
                to="app.Precinct",
            ),
        ),
    ]
| 0 | 2,812 | 23 |
918fcefb7037542c10d27b6f662cacfc4d637c78 | 16,227 | py | Python | vo/orb/pattern.py | yangzuyuanhao/vin | 0f485fcb6bf53341898142021b6a1295a0ca87a3 | [
"MIT"
] | 1 | 2022-03-23T01:13:57.000Z | 2022-03-23T01:13:57.000Z | vo/orb/pattern.py | yangzuyuanhao/vin | 0f485fcb6bf53341898142021b6a1295a0ca87a3 | [
"MIT"
] | null | null | null | vo/orb/pattern.py | yangzuyuanhao/vin | 0f485fcb6bf53341898142021b6a1295a0ca87a3 | [
"MIT"
] | null | null | null |
# ORB/BRIEF sampling pattern: rows of two (x, y) pixel offsets inside a
# 31x31 patch.  Per-row comments record the mean/correlation statistics of
# each comparison pair (carried over from the original trained pattern).
bit_pattern_31 = \
[
8,-3, 9,5, #/*mean (0), correlation (0)*/,
4,2, 7,-12, #/*mean (1.12461e-05), correlation (0.0437584)*/,
-11,9, -8,2, #/*mean (3.37382e-05), correlation (0.0617409)*/,
7,-12, 12,-13, #/*mean (5.62303e-05), correlation (0.0636977)*/,
2,-13, 2,12, #/*mean (0.000134953), correlation (0.085099)*/,
1,-7, 1,6, #/*mean (0.000528565), correlation (0.0857175)*/,
-2,-10, -2,-4, #/*mean (0.0188821), correlation (0.0985774)*/,
-13,-13, -11,-8, #/*mean (0.0363135), correlation (0.0899616)*/,
-13,-3, -12,-9, #/*mean (0.121806), correlation (0.099849)*/,
10,4, 11,9, #/*mean (0.122065), correlation (0.093285)*/,
-13,-8, -8,-9, #/*mean (0.162787), correlation (0.0942748)*/,
-11,7, -9,12, #/*mean (0.21561), correlation (0.0974438)*/,
7,7, 12,6, #/*mean (0.160583), correlation (0.130064)*/,
-4,-5, -3,0, #/*mean (0.228171), correlation (0.132998)*/,
-13,2, -12,-3, #/*mean (0.00997526), correlation (0.145926)*/,
-9,0, -7,5, #/*mean (0.198234), correlation (0.143636)*/,
12,-6, 12,-1, #/*mean (0.0676226), correlation (0.16689)*/,
-3,6, -2,12, #/*mean (0.166847), correlation (0.171682)*/,
-6,-13, -4,-8, #/*mean (0.101215), correlation (0.179716)*/,
11,-13, 12,-8, #/*mean (0.200641), correlation (0.192279)*/,
4,7, 5,1, #/*mean (0.205106), correlation (0.186848)*/,
5,-3, 10,-3, #/*mean (0.234908), correlation (0.192319)*/,
3,-7, 6,12, #/*mean (0.0709964), correlation (0.210872)*/,
-8,-7, -6,-2, #/*mean (0.0939834), correlation (0.212589)*/,
-2,11, -1,-10, #/*mean (0.127778), correlation (0.20866)*/,
-13,12, -8,10, #/*mean (0.14783), correlation (0.206356)*/,
-7,3, -5,-3, #/*mean (0.182141), correlation (0.198942)*/,
-4,2, -3,7, #/*mean (0.188237), correlation (0.21384)*/,
-10,-12, -6,11, #/*mean (0.14865), correlation (0.23571)*/,
5,-12, 6,-7, #/*mean (0.222312), correlation (0.23324)*/,
5,-6, 7,-1, #/*mean (0.229082), correlation (0.23389)*/,
1,0, 4,-5, #/*mean (0.241577), correlation (0.215286)*/,
9,11, 11,-13, #/*mean (0.00338507), correlation (0.251373)*/,
4,7, 4,12, #/*mean (0.131005), correlation (0.257622)*/,
2,-1, 4,4, #/*mean (0.152755), correlation (0.255205)*/,
-4,-12, -2,7, #/*mean (0.182771), correlation (0.244867)*/,
-8,-5, -7,-10, #/*mean (0.186898), correlation (0.23901)*/,
4,11, 9,12, #/*mean (0.226226), correlation (0.258255)*/,
0,-8, 1,-13, #/*mean (0.0897886), correlation (0.274827)*/,
-13,-2, -8,2, #/*mean (0.148774), correlation (0.28065)*/,
-3,-2, -2,3, #/*mean (0.153048), correlation (0.283063)*/,
-6,9, -4,-9, #/*mean (0.169523), correlation (0.278248)*/,
8,12, 10,7, #/*mean (0.225337), correlation (0.282851)*/,
0,9, 1,3, #/*mean (0.226687), correlation (0.278734)*/,
7,-5, 11,-10, #/*mean (0.00693882), correlation (0.305161)*/,
-13,-6, -11,0, #/*mean (0.0227283), correlation (0.300181)*/,
10,7, 12,1, #/*mean (0.125517), correlation (0.31089)*/,
-6,-3, -6,12, #/*mean (0.131748), correlation (0.312779)*/,
10,-9, 12,-4, #/*mean (0.144827), correlation (0.292797)*/,
-13,8, -8,-12, #/*mean (0.149202), correlation (0.308918)*/,
-13,0, -8,-4, #/*mean (0.160909), correlation (0.310013)*/,
3,3, 7,8, #/*mean (0.177755), correlation (0.309394)*/,
5,7, 10,-7, #/*mean (0.212337), correlation (0.310315)*/,
-1,7, 1,-12, #/*mean (0.214429), correlation (0.311933)*/,
3,-10, 5,6, #/*mean (0.235807), correlation (0.313104)*/,
2,-4, 3,-10, #/*mean (0.00494827), correlation (0.344948)*/,
-13,0, -13,5, #/*mean (0.0549145), correlation (0.344675)*/,
-13,-7, -12,12, #/*mean (0.103385), correlation (0.342715)*/,
-13,3, -11,8, #/*mean (0.134222), correlation (0.322922)*/,
-7,12, -4,7, #/*mean (0.153284), correlation (0.337061)*/,
6,-10, 12,8, #/*mean (0.154881), correlation (0.329257)*/,
-9,-1, -7,-6, #/*mean (0.200967), correlation (0.33312)*/,
-2,-5, 0,12, #/*mean (0.201518), correlation (0.340635)*/,
-12,5, -7,5, #/*mean (0.207805), correlation (0.335631)*/,
3,-10, 8,-13, #/*mean (0.224438), correlation (0.34504)*/,
-7,-7, -4,5, #/*mean (0.239361), correlation (0.338053)*/,
-3,-2, -1,-7, #/*mean (0.240744), correlation (0.344322)*/,
2,9, 5,-11, #/*mean (0.242949), correlation (0.34145)*/,
-11,-13, -5,-13, #/*mean (0.244028), correlation (0.336861)*/,
-1,6, 0,-1, #/*mean (0.247571), correlation (0.343684)*/,
5,-3, 5,2, #/*mean (0.000697256), correlation (0.357265)*/,
-4,-13, -4,12, #/*mean (0.00213675), correlation (0.373827)*/,
-9,-6, -9,6, #/*mean (0.0126856), correlation (0.373938)*/,
-12,-10, -8,-4, #/*mean (0.0152497), correlation (0.364237)*/,
10,2, 12,-3, #/*mean (0.0299933), correlation (0.345292)*/,
7,12, 12,12, #/*mean (0.0307242), correlation (0.366299)*/,
-7,-13, -6,5, #/*mean (0.0534975), correlation (0.368357)*/,
-4,9, -3,4, #/*mean (0.099865), correlation (0.372276)*/,
7,-1, 12,2, #/*mean (0.117083), correlation (0.364529)*/,
-7,6, -5,1, #/*mean (0.126125), correlation (0.369606)*/,
-13,11, -12,5, #/*mean (0.130364), correlation (0.358502)*/,
-3,7, -2,-6, #/*mean (0.131691), correlation (0.375531)*/,
7,-8, 12,-7, #/*mean (0.160166), correlation (0.379508)*/,
-13,-7, -11,-12, #/*mean (0.167848), correlation (0.353343)*/,
1,-3, 12,12, #/*mean (0.183378), correlation (0.371916)*/,
2,-6, 3,0, #/*mean (0.228711), correlation (0.371761)*/,
-4,3, -2,-13, #/*mean (0.247211), correlation (0.364063)*/,
-1,-13, 1,9, #/*mean (0.249325), correlation (0.378139)*/,
7,1, 8,-6, #/*mean (0.000652272), correlation (0.411682)*/,
1,-1, 3,12, #/*mean (0.00248538), correlation (0.392988)*/,
9,1, 12,6, #/*mean (0.0206815), correlation (0.386106)*/,
-1,-9, -1,3, #/*mean (0.0364485), correlation (0.410752)*/,
-13,-13, -10,5, #/*mean (0.0376068), correlation (0.398374)*/,
7,7, 10,12, #/*mean (0.0424202), correlation (0.405663)*/,
12,-5, 12,9, #/*mean (0.0942645), correlation (0.410422)*/,
6,3, 7,11, #/*mean (0.1074), correlation (0.413224)*/,
5,-13, 6,10, #/*mean (0.109256), correlation (0.408646)*/,
2,-12, 2,3, #/*mean (0.131691), correlation (0.416076)*/,
3,8, 4,-6, #/*mean (0.165081), correlation (0.417569)*/,
2,6, 12,-13, #/*mean (0.171874), correlation (0.408471)*/,
9,-12, 10,3, #/*mean (0.175146), correlation (0.41296)*/,
-8,4, -7,9, #/*mean (0.183682), correlation (0.402956)*/,
-11,12, -4,-6, #/*mean (0.184672), correlation (0.416125)*/,
1,12, 2,-8, #/*mean (0.191487), correlation (0.386696)*/,
6,-9, 7,-4, #/*mean (0.192668), correlation (0.394771)*/,
2,3, 3,-2, #/*mean (0.200157), correlation (0.408303)*/,
6,3, 11,0, #/*mean (0.204588), correlation (0.411762)*/,
3,-3, 8,-8, #/*mean (0.205904), correlation (0.416294)*/,
7,8, 9,3, #/*mean (0.213237), correlation (0.409306)*/,
-11,-5, -6,-4, #/*mean (0.243444), correlation (0.395069)*/,
-10,11, -5,10, #/*mean (0.247672), correlation (0.413392)*/,
-5,-8, -3,12, #/*mean (0.24774), correlation (0.411416)*/,
-10,5, -9,0, #/*mean (0.00213675), correlation (0.454003)*/,
8,-1, 12,-6, #/*mean (0.0293635), correlation (0.455368)*/,
4,-6, 6,-11, #/*mean (0.0404971), correlation (0.457393)*/,
-10,12, -8,7, #/*mean (0.0481107), correlation (0.448364)*/,
4,-2, 6,7, #/*mean (0.050641), correlation (0.455019)*/,
-2,0, -2,12, #/*mean (0.0525978), correlation (0.44338)*/,
-5,-8, -5,2, #/*mean (0.0629667), correlation (0.457096)*/,
7,-6, 10,12, #/*mean (0.0653846), correlation (0.445623)*/,
-9,-13, -8,-8, #/*mean (0.0858749), correlation (0.449789)*/,
-5,-13, -5,-2, #/*mean (0.122402), correlation (0.450201)*/,
8,-8, 9,-13, #/*mean (0.125416), correlation (0.453224)*/,
-9,-11, -9,0, #/*mean (0.130128), correlation (0.458724)*/,
1,-8, 1,-2, #/*mean (0.132467), correlation (0.440133)*/,
7,-4, 9,1, #/*mean (0.132692), correlation (0.454)*/,
-2,1, -1,-4, #/*mean (0.135695), correlation (0.455739)*/,
11,-6, 12,-11, #/*mean (0.142904), correlation (0.446114)*/,
-12,-9, -6,4, #/*mean (0.146165), correlation (0.451473)*/,
3,7, 7,12, #/*mean (0.147627), correlation (0.456643)*/,
5,5, 10,8, #/*mean (0.152901), correlation (0.455036)*/,
0,-4, 2,8, #/*mean (0.167083), correlation (0.459315)*/,
-9,12, -5,-13, #/*mean (0.173234), correlation (0.454706)*/,
0,7, 2,12, #/*mean (0.18312), correlation (0.433855)*/,
-1,2, 1,7, #/*mean (0.185504), correlation (0.443838)*/,
5,11, 7,-9, #/*mean (0.185706), correlation (0.451123)*/,
3,5, 6,-8, #/*mean (0.188968), correlation (0.455808)*/,
-13,-4, -8,9, #/*mean (0.191667), correlation (0.459128)*/,
-5,9, -3,-3, #/*mean (0.193196), correlation (0.458364)*/,
-4,-7, -3,-12, #/*mean (0.196536), correlation (0.455782)*/,
6,5, 8,0, #/*mean (0.1972), correlation (0.450481)*/,
-7,6, -6,12, #/*mean (0.199438), correlation (0.458156)*/,
-13,6, -5,-2, #/*mean (0.211224), correlation (0.449548)*/,
1,-10, 3,10, #/*mean (0.211718), correlation (0.440606)*/,
4,1, 8,-4, #/*mean (0.213034), correlation (0.443177)*/,
-2,-2, 2,-13, #/*mean (0.234334), correlation (0.455304)*/,
2,-12, 12,12, #/*mean (0.235684), correlation (0.443436)*/,
-2,-13, 0,-6, #/*mean (0.237674), correlation (0.452525)*/,
4,1, 9,3, #/*mean (0.23962), correlation (0.444824)*/,
-6,-10, -3,-5, #/*mean (0.248459), correlation (0.439621)*/,
-3,-13, -1,1, #/*mean (0.249505), correlation (0.456666)*/,
7,5, 12,-11, #/*mean (0.00119208), correlation (0.495466)*/,
4,-2, 5,-7, #/*mean (0.00372245), correlation (0.484214)*/,
-13,9, -9,-5, #/*mean (0.00741116), correlation (0.499854)*/,
7,1, 8,6, #/*mean (0.0208952), correlation (0.499773)*/,
7,-8, 7,6, #/*mean (0.0220085), correlation (0.501609)*/,
-7,-4, -7,1, #/*mean (0.0233806), correlation (0.496568)*/,
-8,11, -7,-8, #/*mean (0.0236505), correlation (0.489719)*/,
-13,6, -12,-8, #/*mean (0.0268781), correlation (0.503487)*/,
2,4, 3,9, #/*mean (0.0323324), correlation (0.501938)*/,
10,-5, 12,3, #/*mean (0.0399235), correlation (0.494029)*/,
-6,-5, -6,7, #/*mean (0.0420153), correlation (0.486579)*/,
8,-3, 9,-8, #/*mean (0.0548021), correlation (0.484237)*/,
2,-12, 2,8, #/*mean (0.0616622), correlation (0.496642)*/,
-11,-2, -10,3, #/*mean (0.0627755), correlation (0.498563)*/,
-12,-13, -7,-9, #/*mean (0.0829622), correlation (0.495491)*/,
-11,0, -10,-5, #/*mean (0.0843342), correlation (0.487146)*/,
5,-3, 11,8, #/*mean (0.0929937), correlation (0.502315)*/,
-2,-13, -1,12, #/*mean (0.113327), correlation (0.48941)*/,
-1,-8, 0,9, #/*mean (0.132119), correlation (0.467268)*/,
-13,-11, -12,-5, #/*mean (0.136269), correlation (0.498771)*/,
-10,-2, -10,11, #/*mean (0.142173), correlation (0.498714)*/,
-3,9, -2,-13, #/*mean (0.144141), correlation (0.491973)*/,
2,-3, 3,2, #/*mean (0.14892), correlation (0.500782)*/,
-9,-13, -4,0, #/*mean (0.150371), correlation (0.498211)*/,
-4,6, -3,-10, #/*mean (0.152159), correlation (0.495547)*/,
-4,12, -2,-7, #/*mean (0.156152), correlation (0.496925)*/,
-6,-11, -4,9, #/*mean (0.15749), correlation (0.499222)*/,
6,-3, 6,11, #/*mean (0.159211), correlation (0.503821)*/,
-13,11, -5,5, #/*mean (0.162427), correlation (0.501907)*/,
11,11, 12,6, #/*mean (0.16652), correlation (0.497632)*/,
7,-5, 12,-2, #/*mean (0.169141), correlation (0.484474)*/,
-1,12, 0,7, #/*mean (0.169456), correlation (0.495339)*/,
-4,-8, -3,-2, #/*mean (0.171457), correlation (0.487251)*/,
-7,1, -6,7, #/*mean (0.175), correlation (0.500024)*/,
-13,-12, -8,-13, #/*mean (0.175866), correlation (0.497523)*/,
-7,-2, -6,-8, #/*mean (0.178273), correlation (0.501854)*/,
-8,5, -6,-9, #/*mean (0.181107), correlation (0.494888)*/,
-5,-1, -4,5, #/*mean (0.190227), correlation (0.482557)*/,
-13,7, -8,10, #/*mean (0.196739), correlation (0.496503)*/,
1,5, 5,-13, #/*mean (0.19973), correlation (0.499759)*/,
1,0, 10,-13, #/*mean (0.204465), correlation (0.49873)*/,
9,12, 10,-1, #/*mean (0.209334), correlation (0.49063)*/,
5,-8, 10,-9, #/*mean (0.211134), correlation (0.503011)*/,
-1,11, 1,-13, #/*mean (0.212), correlation (0.499414)*/,
-9,-3, -6,2, #/*mean (0.212168), correlation (0.480739)*/,
-1,-10, 1,12, #/*mean (0.212731), correlation (0.502523)*/,
-13,1, -8,-10, #/*mean (0.21327), correlation (0.489786)*/,
8,-11, 10,-6, #/*mean (0.214159), correlation (0.488246)*/,
2,-13, 3,-6, #/*mean (0.216993), correlation (0.50287)*/,
7,-13, 12,-9, #/*mean (0.223639), correlation (0.470502)*/,
-10,-10, -5,-7, #/*mean (0.224089), correlation (0.500852)*/,
-10,-8, -8,-13, #/*mean (0.228666), correlation (0.502629)*/,
4,-6, 8,5, #/*mean (0.22906), correlation (0.498305)*/,
3,12, 8,-13, #/*mean (0.233378), correlation (0.503825)*/,
-4,2, -3,-3, #/*mean (0.234323), correlation (0.476692)*/,
5,-13, 10,-12, #/*mean (0.236392), correlation (0.475462)*/,
4,-13, 5,-1, #/*mean (0.236842), correlation (0.504132)*/,
-9,9, -4,3, #/*mean (0.236977), correlation (0.497739)*/,
0,3, 3,-9, #/*mean (0.24314), correlation (0.499398)*/,
-12,1, -6,1, #/*mean (0.243297), correlation (0.489447)*/,
3,2, 4,-8, #/*mean (0.00155196), correlation (0.553496)*/,
-10,-10, -10,9, #/*mean (0.00239541), correlation (0.54297)*/,
8,-13, 12,12, #/*mean (0.0034413), correlation (0.544361)*/,
-8,-12, -6,-5, #/*mean (0.003565), correlation (0.551225)*/,
2,2, 3,7, #/*mean (0.00835583), correlation (0.55285)*/,
10,6, 11,-8, #/*mean (0.00885065), correlation (0.540913)*/,
6,8, 8,-12, #/*mean (0.0101552), correlation (0.551085)*/,
-7,10, -6,5, #/*mean (0.0102227), correlation (0.533635)*/,
-3,-9, -3,9, #/*mean (0.0110211), correlation (0.543121)*/,
-1,-13, -1,5, #/*mean (0.0113473), correlation (0.550173)*/,
-3,-7, -3,4, #/*mean (0.0140913), correlation (0.554774)*/,
-8,-2, -8,3, #/*mean (0.017049), correlation (0.55461)*/,
4,2, 12,12, #/*mean (0.01778), correlation (0.546921)*/,
2,-5, 3,11, #/*mean (0.0224022), correlation (0.549667)*/,
6,-9, 11,-13, #/*mean (0.029161), correlation (0.546295)*/,
3,-1, 7,12, #/*mean (0.0303081), correlation (0.548599)*/,
11,-1, 12,4, #/*mean (0.0355151), correlation (0.523943)*/,
-3,0, -3,6, #/*mean (0.0417904), correlation (0.543395)*/,
4,-11, 4,12, #/*mean (0.0487292), correlation (0.542818)*/,
2,-4, 2,1, #/*mean (0.0575124), correlation (0.554888)*/,
-10,-6, -8,1, #/*mean (0.0594242), correlation (0.544026)*/,
-13,7, -11,1, #/*mean (0.0597391), correlation (0.550524)*/,
-13,12, -11,-13, #/*mean (0.0608974), correlation (0.55383)*/,
6,0, 11,-13, #/*mean (0.065126), correlation (0.552006)*/,
0,-1, 1,4, #/*mean (0.074224), correlation (0.546372)*/,
-13,3, -9,-2, #/*mean (0.0808592), correlation (0.554875)*/,
-9,8, -6,-3, #/*mean (0.0883378), correlation (0.551178)*/,
-13,-6, -8,-2, #/*mean (0.0901035), correlation (0.548446)*/,
5,-9, 8,10, #/*mean (0.0949843), correlation (0.554694)*/,
2,7, 3,-9, #/*mean (0.0994152), correlation (0.550979)*/,
-1,-6, -1,-1, #/*mean (0.10045), correlation (0.552714)*/,
9,5, 11,-2, #/*mean (0.100686), correlation (0.552594)*/,
11,-3, 12,-8, #/*mean (0.101091), correlation (0.532394)*/,
3,0, 3,5, #/*mean (0.101147), correlation (0.525576)*/,
-1,4, 0,10, #/*mean (0.105263), correlation (0.531498)*/,
3,-6, 4,5, #/*mean (0.110785), correlation (0.540491)*/,
-13,0, -10,5, #/*mean (0.112798), correlation (0.536582)*/,
5,8, 12,11, #/*mean (0.114181), correlation (0.555793)*/,
8,9, 9,-6, #/*mean (0.117431), correlation (0.553763)*/,
7,-4, 8,-12, #/*mean (0.118522), correlation (0.553452)*/,
-10,4, -10,9, #/*mean (0.12094), correlation (0.554785)*/,
7,3, 12,4, #/*mean (0.122582), correlation (0.555825)*/,
9,-7, 10,-2, #/*mean (0.124978), correlation (0.549846)*/,
7,0, 12,-2, #/*mean (0.127002), correlation (0.537452)*/,
-1,-6, 0,-11, #/*mean (0.127148), correlation (0.547401)*/
]
| 61.935115 | 68 | 0.542244 |
bit_pattern_31 = \
[
8,-3, 9,5, #/*mean (0), correlation (0)*/,
4,2, 7,-12, #/*mean (1.12461e-05), correlation (0.0437584)*/,
-11,9, -8,2, #/*mean (3.37382e-05), correlation (0.0617409)*/,
7,-12, 12,-13, #/*mean (5.62303e-05), correlation (0.0636977)*/,
2,-13, 2,12, #/*mean (0.000134953), correlation (0.085099)*/,
1,-7, 1,6, #/*mean (0.000528565), correlation (0.0857175)*/,
-2,-10, -2,-4, #/*mean (0.0188821), correlation (0.0985774)*/,
-13,-13, -11,-8, #/*mean (0.0363135), correlation (0.0899616)*/,
-13,-3, -12,-9, #/*mean (0.121806), correlation (0.099849)*/,
10,4, 11,9, #/*mean (0.122065), correlation (0.093285)*/,
-13,-8, -8,-9, #/*mean (0.162787), correlation (0.0942748)*/,
-11,7, -9,12, #/*mean (0.21561), correlation (0.0974438)*/,
7,7, 12,6, #/*mean (0.160583), correlation (0.130064)*/,
-4,-5, -3,0, #/*mean (0.228171), correlation (0.132998)*/,
-13,2, -12,-3, #/*mean (0.00997526), correlation (0.145926)*/,
-9,0, -7,5, #/*mean (0.198234), correlation (0.143636)*/,
12,-6, 12,-1, #/*mean (0.0676226), correlation (0.16689)*/,
-3,6, -2,12, #/*mean (0.166847), correlation (0.171682)*/,
-6,-13, -4,-8, #/*mean (0.101215), correlation (0.179716)*/,
11,-13, 12,-8, #/*mean (0.200641), correlation (0.192279)*/,
4,7, 5,1, #/*mean (0.205106), correlation (0.186848)*/,
5,-3, 10,-3, #/*mean (0.234908), correlation (0.192319)*/,
3,-7, 6,12, #/*mean (0.0709964), correlation (0.210872)*/,
-8,-7, -6,-2, #/*mean (0.0939834), correlation (0.212589)*/,
-2,11, -1,-10, #/*mean (0.127778), correlation (0.20866)*/,
-13,12, -8,10, #/*mean (0.14783), correlation (0.206356)*/,
-7,3, -5,-3, #/*mean (0.182141), correlation (0.198942)*/,
-4,2, -3,7, #/*mean (0.188237), correlation (0.21384)*/,
-10,-12, -6,11, #/*mean (0.14865), correlation (0.23571)*/,
5,-12, 6,-7, #/*mean (0.222312), correlation (0.23324)*/,
5,-6, 7,-1, #/*mean (0.229082), correlation (0.23389)*/,
1,0, 4,-5, #/*mean (0.241577), correlation (0.215286)*/,
9,11, 11,-13, #/*mean (0.00338507), correlation (0.251373)*/,
4,7, 4,12, #/*mean (0.131005), correlation (0.257622)*/,
2,-1, 4,4, #/*mean (0.152755), correlation (0.255205)*/,
-4,-12, -2,7, #/*mean (0.182771), correlation (0.244867)*/,
-8,-5, -7,-10, #/*mean (0.186898), correlation (0.23901)*/,
4,11, 9,12, #/*mean (0.226226), correlation (0.258255)*/,
0,-8, 1,-13, #/*mean (0.0897886), correlation (0.274827)*/,
-13,-2, -8,2, #/*mean (0.148774), correlation (0.28065)*/,
-3,-2, -2,3, #/*mean (0.153048), correlation (0.283063)*/,
-6,9, -4,-9, #/*mean (0.169523), correlation (0.278248)*/,
8,12, 10,7, #/*mean (0.225337), correlation (0.282851)*/,
0,9, 1,3, #/*mean (0.226687), correlation (0.278734)*/,
7,-5, 11,-10, #/*mean (0.00693882), correlation (0.305161)*/,
-13,-6, -11,0, #/*mean (0.0227283), correlation (0.300181)*/,
10,7, 12,1, #/*mean (0.125517), correlation (0.31089)*/,
-6,-3, -6,12, #/*mean (0.131748), correlation (0.312779)*/,
10,-9, 12,-4, #/*mean (0.144827), correlation (0.292797)*/,
-13,8, -8,-12, #/*mean (0.149202), correlation (0.308918)*/,
-13,0, -8,-4, #/*mean (0.160909), correlation (0.310013)*/,
3,3, 7,8, #/*mean (0.177755), correlation (0.309394)*/,
5,7, 10,-7, #/*mean (0.212337), correlation (0.310315)*/,
-1,7, 1,-12, #/*mean (0.214429), correlation (0.311933)*/,
3,-10, 5,6, #/*mean (0.235807), correlation (0.313104)*/,
2,-4, 3,-10, #/*mean (0.00494827), correlation (0.344948)*/,
-13,0, -13,5, #/*mean (0.0549145), correlation (0.344675)*/,
-13,-7, -12,12, #/*mean (0.103385), correlation (0.342715)*/,
-13,3, -11,8, #/*mean (0.134222), correlation (0.322922)*/,
-7,12, -4,7, #/*mean (0.153284), correlation (0.337061)*/,
6,-10, 12,8, #/*mean (0.154881), correlation (0.329257)*/,
-9,-1, -7,-6, #/*mean (0.200967), correlation (0.33312)*/,
-2,-5, 0,12, #/*mean (0.201518), correlation (0.340635)*/,
-12,5, -7,5, #/*mean (0.207805), correlation (0.335631)*/,
3,-10, 8,-13, #/*mean (0.224438), correlation (0.34504)*/,
-7,-7, -4,5, #/*mean (0.239361), correlation (0.338053)*/,
-3,-2, -1,-7, #/*mean (0.240744), correlation (0.344322)*/,
2,9, 5,-11, #/*mean (0.242949), correlation (0.34145)*/,
-11,-13, -5,-13, #/*mean (0.244028), correlation (0.336861)*/,
-1,6, 0,-1, #/*mean (0.247571), correlation (0.343684)*/,
5,-3, 5,2, #/*mean (0.000697256), correlation (0.357265)*/,
-4,-13, -4,12, #/*mean (0.00213675), correlation (0.373827)*/,
-9,-6, -9,6, #/*mean (0.0126856), correlation (0.373938)*/,
-12,-10, -8,-4, #/*mean (0.0152497), correlation (0.364237)*/,
10,2, 12,-3, #/*mean (0.0299933), correlation (0.345292)*/,
7,12, 12,12, #/*mean (0.0307242), correlation (0.366299)*/,
-7,-13, -6,5, #/*mean (0.0534975), correlation (0.368357)*/,
-4,9, -3,4, #/*mean (0.099865), correlation (0.372276)*/,
7,-1, 12,2, #/*mean (0.117083), correlation (0.364529)*/,
-7,6, -5,1, #/*mean (0.126125), correlation (0.369606)*/,
-13,11, -12,5, #/*mean (0.130364), correlation (0.358502)*/,
-3,7, -2,-6, #/*mean (0.131691), correlation (0.375531)*/,
7,-8, 12,-7, #/*mean (0.160166), correlation (0.379508)*/,
-13,-7, -11,-12, #/*mean (0.167848), correlation (0.353343)*/,
1,-3, 12,12, #/*mean (0.183378), correlation (0.371916)*/,
2,-6, 3,0, #/*mean (0.228711), correlation (0.371761)*/,
-4,3, -2,-13, #/*mean (0.247211), correlation (0.364063)*/,
-1,-13, 1,9, #/*mean (0.249325), correlation (0.378139)*/,
7,1, 8,-6, #/*mean (0.000652272), correlation (0.411682)*/,
1,-1, 3,12, #/*mean (0.00248538), correlation (0.392988)*/,
9,1, 12,6, #/*mean (0.0206815), correlation (0.386106)*/,
-1,-9, -1,3, #/*mean (0.0364485), correlation (0.410752)*/,
-13,-13, -10,5, #/*mean (0.0376068), correlation (0.398374)*/,
7,7, 10,12, #/*mean (0.0424202), correlation (0.405663)*/,
12,-5, 12,9, #/*mean (0.0942645), correlation (0.410422)*/,
6,3, 7,11, #/*mean (0.1074), correlation (0.413224)*/,
5,-13, 6,10, #/*mean (0.109256), correlation (0.408646)*/,
2,-12, 2,3, #/*mean (0.131691), correlation (0.416076)*/,
3,8, 4,-6, #/*mean (0.165081), correlation (0.417569)*/,
2,6, 12,-13, #/*mean (0.171874), correlation (0.408471)*/,
9,-12, 10,3, #/*mean (0.175146), correlation (0.41296)*/,
-8,4, -7,9, #/*mean (0.183682), correlation (0.402956)*/,
-11,12, -4,-6, #/*mean (0.184672), correlation (0.416125)*/,
1,12, 2,-8, #/*mean (0.191487), correlation (0.386696)*/,
6,-9, 7,-4, #/*mean (0.192668), correlation (0.394771)*/,
2,3, 3,-2, #/*mean (0.200157), correlation (0.408303)*/,
6,3, 11,0, #/*mean (0.204588), correlation (0.411762)*/,
3,-3, 8,-8, #/*mean (0.205904), correlation (0.416294)*/,
7,8, 9,3, #/*mean (0.213237), correlation (0.409306)*/,
-11,-5, -6,-4, #/*mean (0.243444), correlation (0.395069)*/,
-10,11, -5,10, #/*mean (0.247672), correlation (0.413392)*/,
-5,-8, -3,12, #/*mean (0.24774), correlation (0.411416)*/,
-10,5, -9,0, #/*mean (0.00213675), correlation (0.454003)*/,
8,-1, 12,-6, #/*mean (0.0293635), correlation (0.455368)*/,
4,-6, 6,-11, #/*mean (0.0404971), correlation (0.457393)*/,
-10,12, -8,7, #/*mean (0.0481107), correlation (0.448364)*/,
4,-2, 6,7, #/*mean (0.050641), correlation (0.455019)*/,
-2,0, -2,12, #/*mean (0.0525978), correlation (0.44338)*/,
-5,-8, -5,2, #/*mean (0.0629667), correlation (0.457096)*/,
7,-6, 10,12, #/*mean (0.0653846), correlation (0.445623)*/,
-9,-13, -8,-8, #/*mean (0.0858749), correlation (0.449789)*/,
-5,-13, -5,-2, #/*mean (0.122402), correlation (0.450201)*/,
8,-8, 9,-13, #/*mean (0.125416), correlation (0.453224)*/,
-9,-11, -9,0, #/*mean (0.130128), correlation (0.458724)*/,
1,-8, 1,-2, #/*mean (0.132467), correlation (0.440133)*/,
7,-4, 9,1, #/*mean (0.132692), correlation (0.454)*/,
-2,1, -1,-4, #/*mean (0.135695), correlation (0.455739)*/,
11,-6, 12,-11, #/*mean (0.142904), correlation (0.446114)*/,
-12,-9, -6,4, #/*mean (0.146165), correlation (0.451473)*/,
3,7, 7,12, #/*mean (0.147627), correlation (0.456643)*/,
5,5, 10,8, #/*mean (0.152901), correlation (0.455036)*/,
0,-4, 2,8, #/*mean (0.167083), correlation (0.459315)*/,
-9,12, -5,-13, #/*mean (0.173234), correlation (0.454706)*/,
0,7, 2,12, #/*mean (0.18312), correlation (0.433855)*/,
-1,2, 1,7, #/*mean (0.185504), correlation (0.443838)*/,
5,11, 7,-9, #/*mean (0.185706), correlation (0.451123)*/,
3,5, 6,-8, #/*mean (0.188968), correlation (0.455808)*/,
-13,-4, -8,9, #/*mean (0.191667), correlation (0.459128)*/,
-5,9, -3,-3, #/*mean (0.193196), correlation (0.458364)*/,
-4,-7, -3,-12, #/*mean (0.196536), correlation (0.455782)*/,
6,5, 8,0, #/*mean (0.1972), correlation (0.450481)*/,
-7,6, -6,12, #/*mean (0.199438), correlation (0.458156)*/,
-13,6, -5,-2, #/*mean (0.211224), correlation (0.449548)*/,
1,-10, 3,10, #/*mean (0.211718), correlation (0.440606)*/,
4,1, 8,-4, #/*mean (0.213034), correlation (0.443177)*/,
-2,-2, 2,-13, #/*mean (0.234334), correlation (0.455304)*/,
2,-12, 12,12, #/*mean (0.235684), correlation (0.443436)*/,
-2,-13, 0,-6, #/*mean (0.237674), correlation (0.452525)*/,
4,1, 9,3, #/*mean (0.23962), correlation (0.444824)*/,
-6,-10, -3,-5, #/*mean (0.248459), correlation (0.439621)*/,
-3,-13, -1,1, #/*mean (0.249505), correlation (0.456666)*/,
7,5, 12,-11, #/*mean (0.00119208), correlation (0.495466)*/,
4,-2, 5,-7, #/*mean (0.00372245), correlation (0.484214)*/,
-13,9, -9,-5, #/*mean (0.00741116), correlation (0.499854)*/,
7,1, 8,6, #/*mean (0.0208952), correlation (0.499773)*/,
7,-8, 7,6, #/*mean (0.0220085), correlation (0.501609)*/,
-7,-4, -7,1, #/*mean (0.0233806), correlation (0.496568)*/,
-8,11, -7,-8, #/*mean (0.0236505), correlation (0.489719)*/,
-13,6, -12,-8, #/*mean (0.0268781), correlation (0.503487)*/,
2,4, 3,9, #/*mean (0.0323324), correlation (0.501938)*/,
10,-5, 12,3, #/*mean (0.0399235), correlation (0.494029)*/,
-6,-5, -6,7, #/*mean (0.0420153), correlation (0.486579)*/,
8,-3, 9,-8, #/*mean (0.0548021), correlation (0.484237)*/,
2,-12, 2,8, #/*mean (0.0616622), correlation (0.496642)*/,
-11,-2, -10,3, #/*mean (0.0627755), correlation (0.498563)*/,
-12,-13, -7,-9, #/*mean (0.0829622), correlation (0.495491)*/,
-11,0, -10,-5, #/*mean (0.0843342), correlation (0.487146)*/,
5,-3, 11,8, #/*mean (0.0929937), correlation (0.502315)*/,
-2,-13, -1,12, #/*mean (0.113327), correlation (0.48941)*/,
-1,-8, 0,9, #/*mean (0.132119), correlation (0.467268)*/,
-13,-11, -12,-5, #/*mean (0.136269), correlation (0.498771)*/,
-10,-2, -10,11, #/*mean (0.142173), correlation (0.498714)*/,
-3,9, -2,-13, #/*mean (0.144141), correlation (0.491973)*/,
2,-3, 3,2, #/*mean (0.14892), correlation (0.500782)*/,
-9,-13, -4,0, #/*mean (0.150371), correlation (0.498211)*/,
-4,6, -3,-10, #/*mean (0.152159), correlation (0.495547)*/,
-4,12, -2,-7, #/*mean (0.156152), correlation (0.496925)*/,
-6,-11, -4,9, #/*mean (0.15749), correlation (0.499222)*/,
6,-3, 6,11, #/*mean (0.159211), correlation (0.503821)*/,
-13,11, -5,5, #/*mean (0.162427), correlation (0.501907)*/,
11,11, 12,6, #/*mean (0.16652), correlation (0.497632)*/,
7,-5, 12,-2, #/*mean (0.169141), correlation (0.484474)*/,
-1,12, 0,7, #/*mean (0.169456), correlation (0.495339)*/,
-4,-8, -3,-2, #/*mean (0.171457), correlation (0.487251)*/,
-7,1, -6,7, #/*mean (0.175), correlation (0.500024)*/,
-13,-12, -8,-13, #/*mean (0.175866), correlation (0.497523)*/,
-7,-2, -6,-8, #/*mean (0.178273), correlation (0.501854)*/,
-8,5, -6,-9, #/*mean (0.181107), correlation (0.494888)*/,
-5,-1, -4,5, #/*mean (0.190227), correlation (0.482557)*/,
-13,7, -8,10, #/*mean (0.196739), correlation (0.496503)*/,
1,5, 5,-13, #/*mean (0.19973), correlation (0.499759)*/,
1,0, 10,-13, #/*mean (0.204465), correlation (0.49873)*/,
9,12, 10,-1, #/*mean (0.209334), correlation (0.49063)*/,
5,-8, 10,-9, #/*mean (0.211134), correlation (0.503011)*/,
-1,11, 1,-13, #/*mean (0.212), correlation (0.499414)*/,
-9,-3, -6,2, #/*mean (0.212168), correlation (0.480739)*/,
-1,-10, 1,12, #/*mean (0.212731), correlation (0.502523)*/,
-13,1, -8,-10, #/*mean (0.21327), correlation (0.489786)*/,
8,-11, 10,-6, #/*mean (0.214159), correlation (0.488246)*/,
2,-13, 3,-6, #/*mean (0.216993), correlation (0.50287)*/,
7,-13, 12,-9, #/*mean (0.223639), correlation (0.470502)*/,
-10,-10, -5,-7, #/*mean (0.224089), correlation (0.500852)*/,
-10,-8, -8,-13, #/*mean (0.228666), correlation (0.502629)*/,
4,-6, 8,5, #/*mean (0.22906), correlation (0.498305)*/,
3,12, 8,-13, #/*mean (0.233378), correlation (0.503825)*/,
-4,2, -3,-3, #/*mean (0.234323), correlation (0.476692)*/,
5,-13, 10,-12, #/*mean (0.236392), correlation (0.475462)*/,
4,-13, 5,-1, #/*mean (0.236842), correlation (0.504132)*/,
-9,9, -4,3, #/*mean (0.236977), correlation (0.497739)*/,
0,3, 3,-9, #/*mean (0.24314), correlation (0.499398)*/,
-12,1, -6,1, #/*mean (0.243297), correlation (0.489447)*/,
3,2, 4,-8, #/*mean (0.00155196), correlation (0.553496)*/,
-10,-10, -10,9, #/*mean (0.00239541), correlation (0.54297)*/,
8,-13, 12,12, #/*mean (0.0034413), correlation (0.544361)*/,
-8,-12, -6,-5, #/*mean (0.003565), correlation (0.551225)*/,
2,2, 3,7, #/*mean (0.00835583), correlation (0.55285)*/,
10,6, 11,-8, #/*mean (0.00885065), correlation (0.540913)*/,
6,8, 8,-12, #/*mean (0.0101552), correlation (0.551085)*/,
-7,10, -6,5, #/*mean (0.0102227), correlation (0.533635)*/,
-3,-9, -3,9, #/*mean (0.0110211), correlation (0.543121)*/,
-1,-13, -1,5, #/*mean (0.0113473), correlation (0.550173)*/,
-3,-7, -3,4, #/*mean (0.0140913), correlation (0.554774)*/,
-8,-2, -8,3, #/*mean (0.017049), correlation (0.55461)*/,
4,2, 12,12, #/*mean (0.01778), correlation (0.546921)*/,
2,-5, 3,11, #/*mean (0.0224022), correlation (0.549667)*/,
6,-9, 11,-13, #/*mean (0.029161), correlation (0.546295)*/,
3,-1, 7,12, #/*mean (0.0303081), correlation (0.548599)*/,
11,-1, 12,4, #/*mean (0.0355151), correlation (0.523943)*/,
-3,0, -3,6, #/*mean (0.0417904), correlation (0.543395)*/,
4,-11, 4,12, #/*mean (0.0487292), correlation (0.542818)*/,
2,-4, 2,1, #/*mean (0.0575124), correlation (0.554888)*/,
-10,-6, -8,1, #/*mean (0.0594242), correlation (0.544026)*/,
-13,7, -11,1, #/*mean (0.0597391), correlation (0.550524)*/,
-13,12, -11,-13, #/*mean (0.0608974), correlation (0.55383)*/,
6,0, 11,-13, #/*mean (0.065126), correlation (0.552006)*/,
0,-1, 1,4, #/*mean (0.074224), correlation (0.546372)*/,
-13,3, -9,-2, #/*mean (0.0808592), correlation (0.554875)*/,
-9,8, -6,-3, #/*mean (0.0883378), correlation (0.551178)*/,
-13,-6, -8,-2, #/*mean (0.0901035), correlation (0.548446)*/,
5,-9, 8,10, #/*mean (0.0949843), correlation (0.554694)*/,
2,7, 3,-9, #/*mean (0.0994152), correlation (0.550979)*/,
-1,-6, -1,-1, #/*mean (0.10045), correlation (0.552714)*/,
9,5, 11,-2, #/*mean (0.100686), correlation (0.552594)*/,
11,-3, 12,-8, #/*mean (0.101091), correlation (0.532394)*/,
3,0, 3,5, #/*mean (0.101147), correlation (0.525576)*/,
-1,4, 0,10, #/*mean (0.105263), correlation (0.531498)*/,
3,-6, 4,5, #/*mean (0.110785), correlation (0.540491)*/,
-13,0, -10,5, #/*mean (0.112798), correlation (0.536582)*/,
5,8, 12,11, #/*mean (0.114181), correlation (0.555793)*/,
8,9, 9,-6, #/*mean (0.117431), correlation (0.553763)*/,
7,-4, 8,-12, #/*mean (0.118522), correlation (0.553452)*/,
-10,4, -10,9, #/*mean (0.12094), correlation (0.554785)*/,
7,3, 12,4, #/*mean (0.122582), correlation (0.555825)*/,
9,-7, 10,-2, #/*mean (0.124978), correlation (0.549846)*/,
7,0, 12,-2, #/*mean (0.127002), correlation (0.537452)*/,
-1,-6, 0,-11, #/*mean (0.127148), correlation (0.547401)*/
]
| 0 | 0 | 0 |
caf7c306aef08c0285ab718e60cefb0c9fe1db35 | 12,028 | py | Python | scripts/make_dist.py | JamesHutchison/brython | 3beb92bb6125a3e2e96b3e25e8fdac5f73a58871 | [
"BSD-3-Clause"
] | null | null | null | scripts/make_dist.py | JamesHutchison/brython | 3beb92bb6125a3e2e96b3e25e8fdac5f73a58871 | [
"BSD-3-Clause"
] | null | null | null | scripts/make_dist.py | JamesHutchison/brython | 3beb92bb6125a3e2e96b3e25e8fdac5f73a58871 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Script to compact all Brython scripts in a single one."""
import datetime
import os
import re
import sys
import tarfile
import zipfile
import make_static_doc # lint:ok
try:
import slimit
minify = slimit.minify
except ImportError:
minify = slimit = None
# path of parent directory
pdir = os.path.dirname(os.getcwd())
# version info
version = [3, 3, 0, "alpha", 0]
implementation = [3, 0, 3, 'alpha', 0]
abs_path = lambda _pth: os.path.join(os.path.dirname(os.getcwd()), 'src', _pth)
now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# update version number
with open(abs_path('version_info.js'), 'wb') as vinfo_file_out:
# implementation[2] = now
vinfo_file_out.write('__BRYTHON__.implementation = %s\n' % implementation)
vinfo_file_out.write('__BRYTHON__.__MAGIC__ = "%s"\n' %
'.'.join(['%s' % _i for _i in implementation[:3]]))
vinfo_file_out.write('__BRYTHON__.version_info = %s\n' % str(version))
vinfo_file_out.write('__BRYTHON__.compiled_date = "%s"\n' % str(datetime.datetime.now()))
# builtin module names = list of scripts in src/libs
vinfo_file_out.write('__BRYTHON__.builtin_module_names = ["posix",')
_modules=['"%s"' % fname.split('.')[0]
for fname in os.listdir(abs_path('libs')) if fname.endswith('.js')]
_modules.sort() #sort modules so that git diff's don't change between runs
vinfo_file_out.write(',\n '.join(_modules))
# add Python scripts in Lib that start with _ and arent found in CPython Lib
# using sys.executable to find stdlib dir doesn't work under linux.
stdlib_path = os.path.dirname(os.__file__)
# stdlib_path = os.path.join(os.path.dirname(sys.executable),'Lib')
stdlib_mods = [f for f in os.listdir(stdlib_path) if f.startswith('_')]
stdlib_mods.sort()
brython_mods = [f for f in os.listdir(abs_path('Lib'))
if f.startswith('_') and f != '__pycache__']
brython_py_builtins = [os.path.splitext(x)[0]
for x in brython_mods if x not in stdlib_mods]
brython_py_builtins.sort()
vinfo_file_out.write(',\n ' + ',\n '.join(
['"%s"' % f for f in brython_py_builtins]))
vinfo_file_out.write(']\n')
#log.info("Finished Writing file: " + abs_path('version_info.js'))
# Create file stdlib_paths.js : static mapping between module names and paths
# in the standard library
libfolder = os.path.join(os.path.dirname(os.getcwd()), 'src')
simple_javascript_template_string = """;(function($B){\n
$B.stdlib = {}
"""
with open(os.path.join(libfolder, 'stdlib_paths.js'), 'wb') as out:
out.write(simple_javascript_template_string)
jspath = os.path.join(libfolder, 'libs')
jslist = []
for dirpath, dirnames, filenames in os.walk(jspath):
for filename in filenames:
if not filename.endswith('.js'):
continue
mod_name = os.path.splitext(filename)[0]
jslist.append(mod_name)
jslist.sort()
out.write("var js=['%s']\n" % "','".join(jslist))
out.write("""for(var i=0;i<js.length;i++) $B.stdlib[js[i]]=['js']\n\n""")
pylist = []
pkglist = []
pypath = os.path.join(libfolder, 'Lib')
for dirpath, dirnames, filenames in os.walk(pypath):
for filename in filenames:
mod_name, ext = os.path.splitext(filename)
if ext != '.py':
continue
path = dirpath[len(pypath)+len(os.sep):].split(os.sep)+[mod_name]
if not path[0]:
path = path[1:]
mod_name = '.'.join(path).lstrip('.')
if filename == '__init__.py':
mod_name = '.'.join(path[:-1]).lstrip('.')
mod_path = 'Lib/'+'/'.join(path)
if filename == '__init__.py':
pkglist.append(mod_name)
else:
pylist.append(mod_name)
pylist.sort()
out.write("var pylist=['%s']\n" % "','".join(pylist))
pkglist.sort()
out.write(
"for(var i=0;i<pylist.length;i++) $B.stdlib[pylist[i]]=['py']\n\n")
out.write("var pkglist=['%s']\n" % "','".join(pkglist))
out.write(
"for(var i=0;i<pkglist.length;i++) $B.stdlib[pkglist[i]]=['py',true]\n")
out.write('})(__BRYTHON__)')
print('static stdlib mapping ok')
# build brython.js from base Javascript files
sources = [
'brython_builtins', 'version_info', 'identifiers_re', 'py2js', 'py_object',
'py_type', 'py_utils', 'py_generator', 'py_builtin_functions', 'py_bytes',
'js_objects', 'stdlib_paths', 'py_import', 'py_float', 'py_int',
'py_complex', 'py_dict', 'py_list', 'py_string', 'py_set', 'py_dom',
'py_import_hooks'
]
res = '// brython.js brython.info\n'
res += '// version %s\n' % version
res += '// implementation %s\n' % implementation
res += '// version compiled from commented, indented source files '
res += 'at github.com/brython-dev/brython\n'
src_size = 0
for fname in sources:
src = open(abs_path(fname)+'.js').read() + '\n'
src_size += len(src)
if minify is not None:
try:
res += minify(src)
except Exception as error:
print(error)
else:
res += custom_minify(src)
res = res.replace('context', 'C')
with open(abs_path('brython.js'), 'wb') as the_brythonjs_file_output:
the_brythonjs_file_output.write(res)
print(('size : originals %s compact %s gain %.2f' %
(src_size, len(res), 100 * (src_size - len(res)) / src_size)))
# version name
vname = '.'.join(str(x) for x in implementation[:3])
if implementation[3] == 'rc':
vname += 'rc%s' % implementation[4]
sys.path.append("scripts")
try:
import make_VFS # isort:skip
except ImportError:
print("Cannot find make_VFS, so we won't make py_VFS.js")
make_VFS = None
sys.exit()
make_VFS.process(os.path.join(pdir, 'src', 'py_VFS.js'))
make_VFS.process_unittest(os.path.join(pdir, 'src', 'py_unittest.js'))
# make distribution with core + libraries
with open(os.path.join(pdir, 'src', 'brython_dist.js'), 'wb') as distrib_file:
distrib_file.write(open(os.path.join(pdir, 'src', 'brython.js')).read())
distrib_file.write(open(os.path.join(pdir, 'src', 'py_VFS.js')).read())
# zip files
dest_dir = os.path.join(pdir, 'dist')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
name = 'Brython%s_site_mirror-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
dist_gz = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
for path in os.listdir(pdir):
if not is_valid(path):
continue
abs_path = os.path.join(pdir, path)
if os.path.isdir(abs_path) and path == "dist":
continue
print(('add', path))
dist_gz.add(os.path.join(pdir, path), arcname=os.path.join(name, path))
dist_gz.close()
dist_zip = zipfile.ZipFile(dest_path + '.zip', mode='w',
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(pdir):
print(dirpath)
for path in filenames:
if not is_valid(path):
continue
abs_path = os.path.join(pdir, dirpath, path)
dist_zip.write(
os.path.join(dirpath, path),
arcname=os.path.join(name, dirpath[len(pdir) + 1:], path))
if 'dist' in dirnames:
dirnames.remove('dist')
if '.hg' in dirnames:
dirnames.remove('.hg')
if '.git' in dirnames:
dirnames.remove('.git')
for dirname in dirnames:
if dirname == 'dist':
continue
dist_zip.close()
print('end of mirror')
# minimum package
name = 'Brython%s-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
dist1 = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
dist2 = tarfile.open(dest_path+'.tar.bz2', mode='w:bz2')
dist3 = zipfile.ZipFile(dest_path + '.zip', mode='w',
compression=zipfile.ZIP_DEFLATED)
for arc, wfunc in (dist1, dist1.add), (dist2, dist2.add), (dist3, dist3.write):
for path in 'README.md', 'LICENCE.txt':
wfunc(os.path.join(pdir, path), arcname=os.path.join(name, path))
wfunc(os.path.join(pdir, 'src', 'brython.js'),
arcname=os.path.join(name, 'brython.js'))
base = os.path.join(pdir, 'src')
folders = ('libs', 'Lib')
for folder in folders:
for dirpath, dirnames, filenames in os.walk(os.path.join(base, folder)):
for path in filenames:
if os.path.splitext(path)[1] not in ('.js', '.py'):
continue
print(('add', path, dirpath[len(base):]))
wfunc(os.path.join(dirpath, path),
arcname=os.path.join(name, dirpath[len(base) + 1:], path))
arc.close()
# changelog file
try:
first = 'Changes in Brython version %s.%s.%s' % (
implementation[0], implementation[1], implementation[2])
with open(os.path.join(pdir, 'dist', 'changelog.txt')) as file_to_read:
input_changelog_data_string = file_to_read.read()
with open(os.path.join(pdir, 'dist', 'changelog_%s.txt' % now), 'wb') as ou:
ou.write('%s\n' % first)
ou.write('%s\n\n' % ('=' * len(first)))
ou.write(input_changelog_data_string)
except Exception as error:
print(error)
print("Warning - no changelog file")
| 34.267806 | 93 | 0.565264 | # -*- coding: utf-8 -*-
"""Script to compact all Brython scripts in a single one."""
import datetime
import os
import re
import sys
import tarfile
import zipfile
import make_static_doc # lint:ok
try:
import slimit
minify = slimit.minify
except ImportError:
minify = slimit = None
# path of parent directory
pdir = os.path.dirname(os.getcwd())
# version info
version = [3, 3, 0, "alpha", 0]
implementation = [3, 0, 3, 'alpha', 0]
def custom_minify(src):
    """Fallback JavaScript minifier used when ``slimit`` is unavailable.

    Walks *src* one character at a time, copying string literals verbatim,
    dropping ``//`` and ``/* */`` comments, collapsing runs of spaces, and
    removing whitespace/newlines around structural punctuation.  Returns the
    compacted source as a new string.
    """
    _res, pos = '', 0
    while pos < len(src):
        if src[pos] in ('"', "'"):
            # the end of the string is the next quote if it is not
            # after an odd number of backslashes
            start = pos
            while True:
                end = src.find(src[pos], start + 1)
                if end == -1:
                    line = src[:pos].count('\n')
                    # NOTE(review): 'fname' is a free variable taken from the
                    # enclosing script's source loop — TODO confirm it is
                    # always bound when this error path fires.
                    raise SyntaxError('string not closed in %s line %s : %s' %
                                      (fname, line, src[pos:pos + 20]))
                else:
                    # count number of backslashes before the quote
                    nb = 0
                    while src[end-nb-1] == '\\':
                        nb += 1
                    # even number of backslashes -> the quote is unescaped
                    # and really terminates the literal
                    if not nb % 2:
                        break
                    else:
                        start = end+1
            # copy the whole literal untouched, including both quotes
            _res += src[pos:end+1]
            pos = end+1
        elif src[pos] == '\r':
            # strip carriage returns entirely
            pos += 1
        elif src[pos] == ' ':
            # a space after structural punctuation is redundant; otherwise
            # collapse a run of spaces into a single one
            if _res and _res[-1] in '({=[)}];|\n':
                pos += 1
                continue
            _res += ' '
            while pos < len(src) and src[pos] == ' ':
                pos += 1
        elif src[pos:pos + 2] == '//':
            # line comment: skip to end of line (keep the newline itself)
            end = src.find('\n', pos)
            if end == -1:
                break
            pos = end
        elif src[pos:pos + 2] == '/*':
            # block comment: skip past the closing '*/'
            end = src.find('*/', pos)
            if end == -1:
                break
            pos = end+2
        elif src[pos] in '={[(' and _res and _res[-1] == ' ':
            # drop the space that precedes an opening token / assignment
            _res = _res[:-1]+src[pos]
            pos += 1
        elif src[pos] == ';' and pos < len(src) - 1 and src[pos + 1] in '\r\n':
            # a semicolon right before a newline is redundant in the output
            pos += 1
        elif src[pos] in '{[,':
            # keep the token but swallow any whitespace that follows it
            _res += src[pos]
            while pos < len(src) - 1 and src[pos + 1] in ' \r\n':
                pos += 1
            pos += 1
        elif src[pos] == '}':
            # if the next non-blank char is another '}', jump straight to it
            # so consecutive closing braces end up adjacent
            _res += src[pos]
            nxt = pos + 1
            while nxt < len(src) and src[nxt] in ' \r\n':
                nxt += 1
            if nxt < len(src) and src[nxt] == '}':
                pos = nxt - 1
            pos += 1
        else:
            _res += src[pos]
            pos += 1
    # finally squeeze repeated blank lines down to single newlines
    while '\n\n' in _res:
        _res = _res.replace('\n\n', '\n')
    return _res
abs_path = lambda _pth: os.path.join(os.path.dirname(os.getcwd()), 'src', _pth)
now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# update version number
with open(abs_path('version_info.js'), 'wb') as vinfo_file_out:
# implementation[2] = now
vinfo_file_out.write('__BRYTHON__.implementation = %s\n' % implementation)
vinfo_file_out.write('__BRYTHON__.__MAGIC__ = "%s"\n' %
'.'.join(['%s' % _i for _i in implementation[:3]]))
vinfo_file_out.write('__BRYTHON__.version_info = %s\n' % str(version))
vinfo_file_out.write('__BRYTHON__.compiled_date = "%s"\n' % str(datetime.datetime.now()))
# builtin module names = list of scripts in src/libs
vinfo_file_out.write('__BRYTHON__.builtin_module_names = ["posix",')
_modules=['"%s"' % fname.split('.')[0]
for fname in os.listdir(abs_path('libs')) if fname.endswith('.js')]
_modules.sort() #sort modules so that git diff's don't change between runs
vinfo_file_out.write(',\n '.join(_modules))
# add Python scripts in Lib that start with _ and arent found in CPython Lib
# using sys.executable to find stdlib dir doesn't work under linux.
stdlib_path = os.path.dirname(os.__file__)
# stdlib_path = os.path.join(os.path.dirname(sys.executable),'Lib')
stdlib_mods = [f for f in os.listdir(stdlib_path) if f.startswith('_')]
stdlib_mods.sort()
brython_mods = [f for f in os.listdir(abs_path('Lib'))
if f.startswith('_') and f != '__pycache__']
brython_py_builtins = [os.path.splitext(x)[0]
for x in brython_mods if x not in stdlib_mods]
brython_py_builtins.sort()
vinfo_file_out.write(',\n ' + ',\n '.join(
['"%s"' % f for f in brython_py_builtins]))
vinfo_file_out.write(']\n')
#log.info("Finished Writing file: " + abs_path('version_info.js'))
# Create file stdlib_paths.js : static mapping between module names and paths
# in the standard library
libfolder = os.path.join(os.path.dirname(os.getcwd()), 'src')
simple_javascript_template_string = """;(function($B){\n
$B.stdlib = {}
"""
with open(os.path.join(libfolder, 'stdlib_paths.js'), 'wb') as out:
out.write(simple_javascript_template_string)
jspath = os.path.join(libfolder, 'libs')
jslist = []
for dirpath, dirnames, filenames in os.walk(jspath):
for filename in filenames:
if not filename.endswith('.js'):
continue
mod_name = os.path.splitext(filename)[0]
jslist.append(mod_name)
jslist.sort()
out.write("var js=['%s']\n" % "','".join(jslist))
out.write("""for(var i=0;i<js.length;i++) $B.stdlib[js[i]]=['js']\n\n""")
pylist = []
pkglist = []
pypath = os.path.join(libfolder, 'Lib')
for dirpath, dirnames, filenames in os.walk(pypath):
for filename in filenames:
mod_name, ext = os.path.splitext(filename)
if ext != '.py':
continue
path = dirpath[len(pypath)+len(os.sep):].split(os.sep)+[mod_name]
if not path[0]:
path = path[1:]
mod_name = '.'.join(path).lstrip('.')
if filename == '__init__.py':
mod_name = '.'.join(path[:-1]).lstrip('.')
mod_path = 'Lib/'+'/'.join(path)
if filename == '__init__.py':
pkglist.append(mod_name)
else:
pylist.append(mod_name)
pylist.sort()
out.write("var pylist=['%s']\n" % "','".join(pylist))
pkglist.sort()
out.write(
"for(var i=0;i<pylist.length;i++) $B.stdlib[pylist[i]]=['py']\n\n")
out.write("var pkglist=['%s']\n" % "','".join(pkglist))
out.write(
"for(var i=0;i<pkglist.length;i++) $B.stdlib[pkglist[i]]=['py',true]\n")
out.write('})(__BRYTHON__)')
print('static stdlib mapping ok')
# build brython.js from base Javascript files
sources = [
'brython_builtins', 'version_info', 'identifiers_re', 'py2js', 'py_object',
'py_type', 'py_utils', 'py_generator', 'py_builtin_functions', 'py_bytes',
'js_objects', 'stdlib_paths', 'py_import', 'py_float', 'py_int',
'py_complex', 'py_dict', 'py_list', 'py_string', 'py_set', 'py_dom',
'py_import_hooks'
]
res = '// brython.js brython.info\n'
res += '// version %s\n' % version
res += '// implementation %s\n' % implementation
res += '// version compiled from commented, indented source files '
res += 'at github.com/brython-dev/brython\n'
src_size = 0
for fname in sources:
src = open(abs_path(fname)+'.js').read() + '\n'
src_size += len(src)
if minify is not None:
try:
res += minify(src)
except Exception as error:
print(error)
else:
res += custom_minify(src)
res = res.replace('context', 'C')
with open(abs_path('brython.js'), 'wb') as the_brythonjs_file_output:
the_brythonjs_file_output.write(res)
print(('size : originals %s compact %s gain %.2f' %
(src_size, len(res), 100 * (src_size - len(res)) / src_size)))
# version name
vname = '.'.join(str(x) for x in implementation[:3])
if implementation[3] == 'rc':
vname += 'rc%s' % implementation[4]
sys.path.append("scripts")
try:
import make_VFS # isort:skip
except ImportError:
print("Cannot find make_VFS, so we won't make py_VFS.js")
make_VFS = None
sys.exit()
make_VFS.process(os.path.join(pdir, 'src', 'py_VFS.js'))
make_VFS.process_unittest(os.path.join(pdir, 'src', 'py_unittest.js'))
# make distribution with core + libraries
with open(os.path.join(pdir, 'src', 'brython_dist.js'), 'wb') as distrib_file:
distrib_file.write(open(os.path.join(pdir, 'src', 'brython.js')).read())
distrib_file.write(open(os.path.join(pdir, 'src', 'py_VFS.js')).read())
# zip files
dest_dir = os.path.join(pdir, 'dist')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
name = 'Brython%s_site_mirror-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
def is_valid(filename_path):
    """Return True if *filename_path* belongs in the site-mirror archive.

    Hidden files (leading dot) and build/packaging artifacts with the
    extensions .bat, .log, .gz or .pyc are excluded; the extension check
    is case-insensitive.
    """
    if filename_path.startswith('.'):
        return False
    # str.endswith accepts a tuple of suffixes: one C-level call replaces
    # the hand-written loop over extensions.
    return not filename_path.lower().endswith(('.bat', '.log', '.gz', '.pyc'))
dist_gz = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
for path in os.listdir(pdir):
if not is_valid(path):
continue
abs_path = os.path.join(pdir, path)
if os.path.isdir(abs_path) and path == "dist":
continue
print(('add', path))
dist_gz.add(os.path.join(pdir, path), arcname=os.path.join(name, path))
dist_gz.close()
dist_zip = zipfile.ZipFile(dest_path + '.zip', mode='w',
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(pdir):
print(dirpath)
for path in filenames:
if not is_valid(path):
continue
abs_path = os.path.join(pdir, dirpath, path)
dist_zip.write(
os.path.join(dirpath, path),
arcname=os.path.join(name, dirpath[len(pdir) + 1:], path))
if 'dist' in dirnames:
dirnames.remove('dist')
if '.hg' in dirnames:
dirnames.remove('.hg')
if '.git' in dirnames:
dirnames.remove('.git')
for dirname in dirnames:
if dirname == 'dist':
continue
dist_zip.close()
print('end of mirror')
# minimum package
name = 'Brython%s-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
dist1 = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
dist2 = tarfile.open(dest_path+'.tar.bz2', mode='w:bz2')
dist3 = zipfile.ZipFile(dest_path + '.zip', mode='w',
compression=zipfile.ZIP_DEFLATED)
def is_valid(filename_path):
    """Keep only non-hidden JavaScript files for the minimum package."""
    hidden = filename_path.startswith('.')
    is_javascript = filename_path.lower().endswith('.js')
    return (not hidden) and is_javascript
for arc, wfunc in (dist1, dist1.add), (dist2, dist2.add), (dist3, dist3.write):
for path in 'README.md', 'LICENCE.txt':
wfunc(os.path.join(pdir, path), arcname=os.path.join(name, path))
wfunc(os.path.join(pdir, 'src', 'brython.js'),
arcname=os.path.join(name, 'brython.js'))
base = os.path.join(pdir, 'src')
folders = ('libs', 'Lib')
for folder in folders:
for dirpath, dirnames, filenames in os.walk(os.path.join(base, folder)):
for path in filenames:
if os.path.splitext(path)[1] not in ('.js', '.py'):
continue
print(('add', path, dirpath[len(base):]))
wfunc(os.path.join(dirpath, path),
arcname=os.path.join(name, dirpath[len(base) + 1:], path))
arc.close()
# changelog file
try:
first = 'Changes in Brython version %s.%s.%s' % (
implementation[0], implementation[1], implementation[2])
with open(os.path.join(pdir, 'dist', 'changelog.txt')) as file_to_read:
input_changelog_data_string = file_to_read.read()
with open(os.path.join(pdir, 'dist', 'changelog_%s.txt' % now), 'wb') as ou:
ou.write('%s\n' % first)
ou.write('%s\n\n' % ('=' * len(first)))
ou.write(input_changelog_data_string)
except Exception as error:
print(error)
print("Warning - no changelog file")
| 2,683 | 0 | 69 |
dcef7a43e88bcdbb0838537455cc888ca94069c0 | 1,865 | py | Python | Day14/higher_lower.py | Abubutt/My100DaysOfCode | d049185547f0101f5b97517399efdbbb3a5c6496 | [
"MIT"
] | null | null | null | Day14/higher_lower.py | Abubutt/My100DaysOfCode | d049185547f0101f5b97517399efdbbb3a5c6496 | [
"MIT"
] | null | null | null | Day14/higher_lower.py | Abubutt/My100DaysOfCode | d049185547f0101f5b97517399efdbbb3a5c6496 | [
"MIT"
] | null | null | null | from game_data import data
from art import logo, vs
import random
import os
def compare(choice, curr, compare_against, points, game_is_over):
    """Check the player's guess and advance the round.

    A guess is right when the picked account has at least as many followers
    as its rival; the rival then becomes the next round's "Compare A" and
    one point is awarded.  A wrong guess clears the screen and ends the
    game.  Returns the updated ``(points, game_is_over, curr)`` triple.
    """
    picked_a = choice == curr
    picked_b = choice == compare_against
    a_followers = curr["follower_count"]
    b_followers = compare_against["follower_count"]
    if (picked_a and a_followers >= b_followers) or \
            (picked_b and b_followers >= a_followers):
        points += 1
        curr = compare_against
    else:
        os.system("clear")
        game_is_over = True
    return points, game_is_over, curr
def play_game():
    """Run the higher/lower loop until a wrong guess; return the final score.

    Each round draws a fresh celebrity B (guaranteed distinct from A),
    prints both cards, reads the player's 'A'/'B' answer from stdin, and
    delegates scoring to ``compare``.  Relies on the module-level ``data``,
    ``logo`` and ``vs`` assets.
    """
    points, game_is_over = 0, False
    curr = random.choice(data)
    while not game_is_over:
        os.system("clear")
        print(logo)
        # Redraw B until it differs from A so a round never compares an
        # account against itself.
        compare_against = curr
        while compare_against == curr:
            compare_against = random.choice(data)
        if points > 0:
            print(f"You're right! Current score: {points}.")
        print(f"Compare A: {curr['name']}, a "
              f"{curr['description']}, from {curr['country']}.")
        print("\n" + vs + "\n")
        print(f"Against B: {compare_against['name']}, a "
              f"{compare_against['description']}, from {compare_against['country']}.")
        answer = input("Who has more followers? Type 'A' or 'B': ")
        choice = curr if answer == "A" else compare_against
        points, game_is_over, curr = compare(
            choice, curr, compare_against, points, game_is_over)
    return points
# Run one game session and report the final score once the player loses.
points = play_game()
print(f"{logo} \n Sorry that's wrong, Final Score: {points} \n")
| 36.568627 | 142 | 0.618767 | from game_data import data
from art import logo, vs
import random
import os
def compare(choice, curr, compare_against, points, game_is_over):
    """Evaluate one guess in the higher/lower game.

    Awards a point and promotes ``compare_against`` to the new comparison
    base when the chosen account's follower count is at least the other
    account's; otherwise clears the terminal and marks the game over.
    Returns the updated ``(points, game_is_over, curr)``.
    """
    correct = False
    if choice == curr:
        correct = choice["follower_count"] >= compare_against["follower_count"]
    elif choice == compare_against:
        correct = choice["follower_count"] >= curr["follower_count"]
    if correct:
        points, curr = points + 1, compare_against
    else:
        os.system("clear")
        game_is_over = True
    return points, game_is_over, curr
def play_game():
    """Play rounds of higher/lower until the player guesses wrong.

    Returns the score accumulated over the session.  Uses the module-level
    ``data``, ``logo`` and ``vs`` assets and the ``compare`` helper for the
    scoring rules.
    """
    def card(label, account):
        # One line describing an account, matching the original wording.
        return (label + account["name"] + ", a " + account["description"]
                + ", from " + account["country"] + ".")
    points = 0
    game_is_over = False
    curr = random.choice(data)
    while not game_is_over:
        os.system("clear")
        print(logo)
        compare_against = random.choice(data)
        # Never pit an account against itself.
        while compare_against == curr:
            compare_against = random.choice(data)
        if points > 0:
            print(f"You're right! Current score: {points}.")
        print(card("Compare A: ", curr))
        print("\n" + vs + "\n")
        print(card("Against B: ", compare_against))
        picked_a = input("Who has more followers? Type 'A' or 'B': ") == "A"
        choice = curr if picked_a else compare_against
        points, game_is_over, curr = compare(
            choice, curr, compare_against, points, game_is_over)
    return points
print(f"{logo} \n Sorry that's wrong, Final Score: {points} \n")
| 0 | 0 | 0 |
434cc0fcd5f918d5f2bace5cb4ce3a78c86b0742 | 5,815 | py | Python | tests/ex/test_move.py | trishume/VintageousPlus | 1dd62435138234979fe5bb413e1731119b017daf | [
"MIT"
] | 6 | 2017-04-01T05:30:08.000Z | 2017-04-05T14:17:40.000Z | tests/ex/test_move.py | trishume/VintageousPlus | 1dd62435138234979fe5bb413e1731119b017daf | [
"MIT"
] | 1 | 2017-04-04T06:47:13.000Z | 2017-04-04T14:26:32.000Z | tests/ex/test_move.py | trishume/VintageousPlus | 1dd62435138234979fe5bb413e1731119b017daf | [
"MIT"
] | null | null | null | import unittest
from VintageousPlus.vi.utils import modes
from VintageousPlus.state import State
from VintageousPlus.tests import get_sel
from VintageousPlus.tests import first_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.ex_commands import CURRENT_LINE_RANGE
# TODO: test with multiple selections.
| 32.127072 | 86 | 0.622012 | import unittest
from VintageousPlus.vi.utils import modes
from VintageousPlus.state import State
from VintageousPlus.tests import get_sel
from VintageousPlus.tests import first_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.ex_commands import CURRENT_LINE_RANGE
class Test_ex_move_Moving_InNormalMode_SingleLine_DefaultStart(ViewTest):
    """Exercise the :move ex command with the default (caret-line) range.

    Each test writes a 4-line buffer, puts the caret on line 2 (the 'xxx'
    line), runs 'ex_move', and asserts the resulting buffer text.
    """
    def testCanMoveDefaultLineRange(self):
        # Move the caret line below line 3.
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move3'})
        actual = self.view.substr(self.R(0, self.view.size()))
        expected = 'abc\nabc\nxxx\nabc'
        self.assertEqual(expected, actual)
    def testCanMoveToEof(self):
        # Moving below the last line places the text at end of file.
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move4'})
        actual = self.view.substr(self.R(0, self.view.size()))
        expected = 'abc\nabc\nabc\nxxx'
        self.assertEqual(expected, actual)
    def testCanMoveToBof(self):
        # Address 0 means "above the first line".
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move0'})
        actual = self.view.substr(self.R(0, self.view.size()))
        expected = 'xxx\nabc\nabc\nabc'
        self.assertEqual(expected, actual)
    def testCanMoveToEmptyLine(self):
        # Moving below an empty line must keep the empty line intact.
        self.write('abc\nxxx\nabc\n\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move4'})
        actual = self.view.substr(self.R(0, self.view.size()))
        expected = 'abc\nabc\n\nxxx\nabc'
        self.assertEqual(expected, actual)
    def testCanMoveToSameLine(self):
        # Moving a line onto itself leaves the buffer unchanged.
        # NOTE(review): this call passes {'address': '2'} while the sibling
        # tests use {'command_line': 'moveN'} — TODO confirm which argument
        # form 'ex_move' actually accepts.
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'address': '2'})
        actual = self.view.substr(self.R(0, self.view.size()))
        expected = 'abc\nxxx\nabc\nabc'
        self.assertEqual(expected, actual)
class Test_ex_move_Moveing_InNormalMode_MultipleLines(ViewTest):
    """Exercise :move with a two-line range (.,.+1).

    Buffers contain two consecutive 'xxx' lines starting at line 2; each
    test moves that pair and asserts the resulting text.
    (Class name keeps the historical "Moveing" typo: renaming would change
    test discovery, i.e. the block's external interface.)
    """
    def setUp(self):
        super().setUp()
        # Explicit range dict for the current line plus the next one
        # (left ref '.', right ref '.' with offset 1).
        self.range = {'left_ref': '.','left_offset': 0, 'left_search_offsets': [],
                      'right_ref': '.', 'right_offset': 1, 'right_search_offsets': []}
    def testCanMoveDefaultLineRange(self):
        self.write('abc\nxxx\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move4'})
        expected = 'abc\nxxx\nabc\nxxx\nabc'
        actual = self.view.substr(self.R(0, self.view.size()))
        self.assertEqual(expected, actual)
    def testCanMoveToEof(self):
        self.write('aaa\nxxx\nxxx\naaa\naaa')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move5'})
        expected = 'aaa\nxxx\naaa\naaa\nxxx'
        actual = self.view.substr(self.R(0, self.view.size()))
        self.assertEqual(expected, actual)
    def testCanMoveToBof(self):
        self.write('abc\nxxx\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move0'})
        expected = 'xxx\nabc\nxxx\nabc\nabc'
        actual = self.view.substr(self.R(0, self.view.size()))
        self.assertEqual(expected, actual)
    def testCanMoveToEmptyLine(self):
        self.write('aaa\nxxx\nxxx\naaa\n\naaa')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move5'})
        expected = 'aaa\nxxx\naaa\n\nxxx\naaa'
        actual = self.view.substr(self.R(0, self.view.size()))
        self.assertEqual(expected, actual)
    @unittest.skip("Not implemented")
    def testCanMoveToSameLine(self):
        # Skipped: expected text implies duplication rather than a move —
        # presumably the intended semantics were never settled; verify
        # before enabling.
        self.write('abc\nxxx\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'address': '2', 'line_range': self.range})
        expected = 'abc\nxxx\nxxx\nxxx\nxxx\nabc\nabc'
        actual = self.view.substr(self.R(0, self.view.size()))
        self.assertEqual(expected, actual)
class Test_ex_move_InNormalMode_CaretPosition(ViewTest):
    """Verify that :move leaves the caret on the moved line."""
    def testCanRepositionCaret(self):
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_move', {'command_line': 'move3'})
        # After moving line 2 below line 3, the caret should sit at the
        # start of the moved line (row 2, col 0).
        actual = list(self.view.sel())
        expected = [self.R((2, 0), (2, 0))]
        self.assertEqual(expected, actual)
# TODO: test with multiple selections.
class Test_ex_move_ModeTransition(ViewTest):
    """Verify the vi mode after running :move from different start modes."""
    def testFromNormalModeToNormalMode(self):
        # Starting in normal mode, :move must leave the mode unchanged.
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        state = State(self.view)
        state.enter_normal_mode()
        self.view.run_command('vi_enter_normal_mode')
        prev_mode = state.mode
        self.view.run_command('ex_move', {'address': '3'})
        # Re-read state from the view: the command may have replaced it.
        state = State(self.view)
        new_mode = state.mode
        self.assertEqual(prev_mode, new_mode)
    def testFromVisualModeToNormalMode(self):
        # Starting in visual mode, :move must drop back to normal mode.
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 1)))
        state = State(self.view)
        state.enter_visual_mode()
        prev_mode = state.mode
        self.view.run_command('ex_move', {'command_line': 'move3'})
        state = State(self.view)
        new_mode = state.mode
        self.assertNotEqual(prev_mode, new_mode)
| 4,825 | 352 | 305 |
951d42bbe4c79186cb518c6c939950ac18334399 | 11,340 | py | Python | src/bot.py | plyalyut/Rocket-League-1v1-Agent | 92903b859dd5a1f1d8fe2b39b4b2df51c55a1eac | [
"MIT"
] | 1 | 2019-12-27T12:59:04.000Z | 2019-12-27T12:59:04.000Z | src/bot.py | plyalyut/Rocket-League-1v1-Agent | 92903b859dd5a1f1d8fe2b39b4b2df51c55a1eac | [
"MIT"
] | null | null | null | src/bot.py | plyalyut/Rocket-League-1v1-Agent | 92903b859dd5a1f1d8fe2b39b4b2df51c55a1eac | [
"MIT"
] | null | null | null | import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.orientation import Orientation
from util.vec import Vec3
import numpy as np
import os
| 38.835616 | 143 | 0.596296 | import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.orientation import Orientation
from util.vec import Vec3
import numpy as np
import os
class MyBot(BaseAgent):
    # Online reinforcement-learning Rocket League agent: an A2C
    # (advantage actor-critic) network is trained in-game, performing one
    # gradient update every `episode_length` ticks.
    def initialize_agent(self):
        '''
        Initializes the agent and sets up all hyperparamters
        :return: No return
        '''
        # Sets up the controller agent
        self.controller_state = SimpleControllerState()
        # Initializes all value lists
        self.reset_episode_data()
        # Sets up all the hyperparameters
        self.episode_length = 10
        # Creates the model
        # TensorFlow is imported lazily here (not at module top) and stashed
        # on self so later methods can reach it via self.tf.
        import tensorflow as tf
        self.tf = tf
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        class A2C_Continuous(self.tf.keras.Model):
            # Continuous-action actor-critic with a Gaussian policy.
            # NOTE(review): this class is defined but never instantiated --
            # only the discrete A2C class below is used (self.a2c = A2C()).
            def __init__(self):
                super(A2C_Continuous, self).__init__()
                # Actor Network
                self.hidden_sz = 100
                self.num_actions = 2
                self.critic_scale = 0.5
                self.act1 = tf.keras.layers.Dense(units=self.hidden_sz, input_shape=(4,), activation="relu")
                self.act2 = tf.keras.layers.Dense(units=self.hidden_sz, input_shape=(4,), activation='relu')
                self.mean = tf.keras.layers.Dense(self.num_actions, activation="tanh")
                self.std = tf.keras.layers.Dense(self.num_actions, activation='sigmoid')
                # Critic Network
                self.crit1 = tf.keras.layers.Dense(units= self.hidden_sz, activation="relu")
                self.hidden_crit = tf.keras.layers.Dense(units = self.hidden_sz/2, activation = 'relu')
                self.hidden_crit2 = tf.keras.layers.Dense(units = self.hidden_sz/10, activation = 'relu')
                self.crit2 = tf.keras.layers.Dense(1)
                # Create optimizer
                self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
            @tf.function
            def call(self, states):
                '''
                Computes the policies from the states
                :param states: np.array of (batch_size x num_states)
                :return: policies for the states of size (batch_size, num_actions)
                '''
                mean = self.mean(self.act1(states))
                std = self.std(self.act2(states))
                return mean, std
            @tf.function
            def critic(self, states):
                '''
                Computes the value at each of the states
                :param states: np.array of (batch size x num_states)
                :return: values at each of the states (batch_size, 1)
                '''
                return self.crit2( self.hidden_crit2(self.hidden_crit(self.crit1(states))))
            @tf.function
            def loss(self, states, actions, discounted_rewards):
                '''
                Computes the loss for a given reward
                :param states: a list of states (episode_length, num_states)
                :param actions: all the actions that were taken in the episode (episode_length, num_actions)
                :param discounted_rewards: A list of discounted rewards (episode_length, 1)
                :return: Loss of both the actor and critic
                '''
                advantage = tf.cast(tf.cast(tf.reshape(discounted_rewards, (-1, 1)), dtype=tf.float32) - self.critic(states), dtype=tf.float64)
                mean, std = self.call(states)
                mean = tf.cast(mean, dtype=tf.float64)
                std = tf.cast(std, dtype=tf.float64)
                actions = tf.squeeze(actions)
                # Continuous A2C model
                # Gaussian probability density of the taken actions under the
                # current policy (small epsilon keeps the log finite).
                pdf = tf.divide(1, tf.math.sqrt(2. * np.pi * tf.square(std))) * tf.exp(
                    -tf.divide(tf.square((actions - tf.cast(mean, dtype=tf.float64))), (2. * tf.square(std))))
                log_pdf = tf.math.log(pdf + 0.0000001)
                actor_loss = -tf.reduce_mean(log_pdf * tf.stop_gradient(advantage))
                # NOTE(review): the critic term is negated here, while the
                # discrete A2C class below uses a positive squared-advantage
                # loss -- presumably one of the two signs is wrong; confirm.
                critic_loss = -tf.reduce_mean(tf.square(advantage))
                return actor_loss + self.critic_scale * critic_loss
        class A2C(self.tf.keras.Model):
            # Discrete-action actor-critic over 4 actions; this is the model
            # actually instantiated and trained below.
            def __init__(self):
                super(A2C, self).__init__()
                # Actor Network
                self.hidden_sz = 32
                self.num_actions = 4
                self.critic_scale = 0.5
                self.act1 = tf.keras.layers.Dense(units=self.hidden_sz, input_shape=(2,), activation="relu")
                self.act2 = tf.keras.layers.Dense(self.num_actions, activation='softmax')
                # Critic Network
                self.crit1 = tf.keras.layers.Dense(units= self.hidden_sz, activation="relu")
                self.crit2 = tf.keras.layers.Dense(1)
                # Create optimizer
                self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
            @tf.function
            def call(self, states):
                '''
                Computes the policies from the states
                :param states: np.array of (batch_size x num_states)
                :return: policies for the states of size (batch_size, num_actions)
                '''
                return self.act2(self.act1(states))
            @tf.function
            def critic(self, states):
                '''
                Computes the value at each of the states
                :param states: np.array of (batch size x num_states)
                :return: values at each of the states (batch_size, 1)
                '''
                return self.crit2(self.crit1(states))
            def loss(self, states, actions, discounted_rewards):
                '''
                Computes the loss for a given reward
                :param states: a list of states (episode_length, num_states)
                :param actions: all the actions that were taken in the episode (episode_length, num_actions)
                :param discounted_rewards: A list of discounted rewards (episode_length, 1)
                :return: Loss of both the actor and critic
                '''
                state_values = self.critic(states)
                policy = self.call(states)
                advantage = tf.cast(discounted_rewards - state_values, dtype=tf.float32)
                # The gather_nd is to get the probability of each action that was actually taken in the episode
                log_P = tf.math.log(tf.gather_nd(policy, list(zip(np.arange(len(policy)), actions))))
                actor_loss = -tf.reduce_sum(log_P * tf.stop_gradient(advantage))
                critic_loss = tf.reduce_sum(tf.square(advantage))
                return actor_loss + .5 * critic_loss
        self.a2c = A2C()
        print('Model Successfully Initialized Without any Issue')
    def get_reward(self, ball_location, goal_location, player_location):
        '''
        Reward function for the given play. The active implementation is the
        inverse distance between the player and the opponent goal, so the
        reward grows as the car approaches the goal. A ball-to-goal term
        exists but is commented out, and `ball_location` is currently unused.
        :param ball_location: Vec3 location of the ball (unused at present)
        :param goal_location: location of the target goal
        :param player_location: Vec3 location of the agent's car
        :return: Reward, float
        '''
        #return 1.0/(1+(ball_location-goal_location).length()) + 1/(1+(player_location-goal_location).length())
        return 1/(1+(player_location-goal_location).length())
    def get_discounted_rewards(self, reward_list, discount_factor):
        '''
        Computes the discounted rewards for the episode by accumulating
        from the last step backwards.
        :param reward_list: list of rewards for the entire play
        :param discount_factor: per-step discount (gamma) applied backwards
        :return: List of discounted rewards
        '''
        prev = 0
        discounted_rewards = np.copy(reward_list).astype(np.float32)
        for i in range(1, len(discounted_rewards) + 1):
            discounted_rewards[-i] += prev * discount_factor
            prev = discounted_rewards[-i]
        return discounted_rewards
    def reset_episode_data(self):
        '''
        Creates new lists that would store all training values.
        :return: No return
        '''
        self.states = []
        self.actions = []
        self.rewards = []
    def convert_v3(self, vec3):
        '''
        Converts vec3 to a list, normalising by the half-extents of the
        field (4096 on x, 5120 on y). Note: despite the name, only the
        x and y components are returned.
        :param vec3: Vector representaiton
        :return: list of normalised x, y coordinates of a vector
        '''
        return [vec3.x/4096.0, vec3.y/5120.0]
    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        '''
        Runs the model and captures the states, rewards, actions taken for
        the episode; triggers one training step per completed episode.
        :param packet: GameTickPacket contains all information about boost and such
        :return: the controller state to apply this tick
        '''
        # Gets the agent
        agent = packet.game_cars[self.index]
        # Gets the ball location as a vector
        ball_location = Vec3(packet.game_ball.physics.location)
        agent_location = Vec3(agent.physics.location)
        # NOTE(review): this is the raw field-info location struct, not a
        # Vec3 -- get_reward subtracts it from a Vec3; confirm Vec3.__sub__
        # accepts it.
        target_goal_location = self.get_field_info().goals[1-agent.team].location
        # State of the ball and car
        state = []
        #state.extend(self.convert_v3(agent_location-target_goal_location))
        state.extend(self.convert_v3(agent_location))
        # Generate the action it should take
        # Sample one of 4 discrete actions from the policy distribution.
        prob_action = self.a2c.call(np.reshape(np.array(state), [1, 2]))
        action = np.random.choice(4, p=np.squeeze(prob_action))
        print(action)
        # Sets all the controller states
        # Low bit of the action selects throttle on/off; high bit selects
        # steering direction.
        if action%2:
            self.controller_state.throttle = 1
        else:
            self.controller_state.throttle = 0
        if int(action/2) == 0:
            self.controller_state.steer = 1
        else:
            self.controller_state.steer = -1
        #self.controller_state.throttle = np.clip(action[0][0], -1, 1)
        #self.controller_state.steer = np.clip(action[0][1], -1, 1)
        # Keep track of all usable information
        self.states.append(state)
        self.rewards.append(self.get_reward(ball_location, target_goal_location, agent_location))
        self.actions.append(action)
        # Once enough ticks have been collected, run a single A2C update.
        if len(self.states) >= self.episode_length:
            print('Training')
            self.states_copy = np.array(self.states)
            self.rewards_copy = np.array(self.rewards)
            self.actions_copy = np.array(self.actions)
            with self.tf.GradientTape() as tape:
                discounted_rewards = self.get_discounted_rewards(self.rewards_copy, 0.99)
                loss = self.a2c.loss(self.states_copy, self.actions_copy, discounted_rewards)
                draw_debug(self.renderer, np.sum(discounted_rewards), loss)
            gradients = tape.gradient(loss, self.a2c.trainable_variables)
            self.a2c.optimizer.apply_gradients(zip(gradients, self.a2c.trainable_variables))
            self.reset_episode_data()
        return self.controller_state
def draw_debug(renderer, reward, loss):
    """Overlay the current episode reward and training loss on screen."""
    renderer.begin_rendering()
    # Two stacked text lines in the top-left corner of the game window.
    renderer.draw_string_2d(0, 0, 2, 2, 'Reward: ' + str(reward), renderer.white())
    renderer.draw_string_2d(0, 30, 2, 2, 'Loss: ' + str(loss), renderer.white())
    renderer.end_rendering()
| 2,118 | 8,925 | 46 |
f08164d6c685cea83082af8d4b89da3f8184a2f9 | 6,262 | py | Python | tests/qtoggleserver/core/expressions/test_comparison.py | DigitEgal/qtoggleserver | 54b6ac53742af9529fd349d4fc207b0dc8a38d3b | [
"Apache-2.0"
] | 12 | 2020-07-26T05:49:25.000Z | 2022-01-08T21:50:44.000Z | tests/qtoggleserver/core/expressions/test_comparison.py | DigitEgal/qtoggleserver | 54b6ac53742af9529fd349d4fc207b0dc8a38d3b | [
"Apache-2.0"
] | 8 | 2020-04-30T18:40:18.000Z | 2020-11-08T21:09:35.000Z | tests/qtoggleserver/core/expressions/test_comparison.py | DigitEgal/qtoggleserver | 54b6ac53742af9529fd349d4fc207b0dc8a38d3b | [
"Apache-2.0"
] | 2 | 2020-02-14T02:52:13.000Z | 2021-04-21T05:13:07.000Z |
import pytest
from qtoggleserver.core.expressions import comparison, Function
from qtoggleserver.core.expressions import InvalidNumberOfArguments
| 31.626263 | 100 | 0.712233 |
import pytest
from qtoggleserver.core.expressions import comparison, Function
from qtoggleserver.core.expressions import InvalidNumberOfArguments
async def test_if_boolean(literal_false, literal_true, literal_one, literal_two):
    # A false condition yields the third argument, a true one the second.
    assert await comparison.IfFunction([literal_false, literal_one, literal_two]).eval(context={}) == 2
    assert await comparison.IfFunction([literal_true, literal_one, literal_two]).eval(context={}) == 1
async def test_if_number(literal_zero, literal_one, literal_two):
    # Numeric conditions follow truthiness: 0 selects the third argument.
    assert await comparison.IfFunction([literal_zero, literal_one, literal_two]).eval(context={}) == 2
    assert await comparison.IfFunction([literal_two, literal_one, literal_two]).eval(context={}) == 1
def test_if_parse():
    # 'IF(...)' must parse into an IfFunction node.
    parsed = Function.parse(None, 'IF(1, 2, 3)', 0)
    assert isinstance(parsed, comparison.IfFunction)
def test_if_num_args():
    # IF accepts exactly three arguments; fewer or more must be rejected.
    for expression in ('IF(1, 2)', 'IF(1, 2, 3, 4)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
async def test_eq_boolean(literal_false, literal_true):
    # Differing booleans are unequal; identical ones are equal.
    assert await comparison.EqFunction([literal_false, literal_true]).eval(context={}) == 0
    assert await comparison.EqFunction([literal_false, literal_false]).eval(context={}) == 1
async def test_eq_number(literal_one, literal_two):
    # Different numbers are unequal; equal numbers are equal.
    assert await comparison.EqFunction([literal_one, literal_two]).eval(context={}) == 0
    assert await comparison.EqFunction([literal_two, literal_two]).eval(context={}) == 1
def test_eq_parse():
    # 'EQ(...)' must parse into an EqFunction node.
    parsed = Function.parse(None, 'EQ(1, 2)', 0)
    assert isinstance(parsed, comparison.EqFunction)
def test_eq_num_args():
    # EQ accepts exactly two arguments.
    for expression in ('EQ(1)', 'EQ(1, 2, 3)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
async def test_gt_boolean(literal_false, literal_true):
    # true > false holds; false > false does not.
    assert await comparison.GTFunction([literal_true, literal_false]).eval(context={}) == 1
    assert await comparison.GTFunction([literal_false, literal_false]).eval(context={}) == 0
async def test_gt_number(literal_one, literal_two):
    # Strict greater-than: equal operands must yield 0.
    assert await comparison.GTFunction([literal_two, literal_one]).eval(context={}) == 1
    assert await comparison.GTFunction([literal_one, literal_two]).eval(context={}) == 0
    assert await comparison.GTFunction([literal_two, literal_two]).eval(context={}) == 0
def test_gt_parse():
    # 'GT(...)' must parse into a GTFunction node.
    parsed = Function.parse(None, 'GT(1, 2)', 0)
    assert isinstance(parsed, comparison.GTFunction)
def test_gt_num_args():
    # GT accepts exactly two arguments.
    for expression in ('GT(1)', 'GT(1, 2, 3)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
async def test_gte_boolean(literal_false, literal_true):
    # Greater-or-equal: equality counts, strict less does not.
    assert await comparison.GTEFunction([literal_true, literal_false]).eval(context={}) == 1
    assert await comparison.GTEFunction([literal_false, literal_false]).eval(context={}) == 1
    assert await comparison.GTEFunction([literal_false, literal_true]).eval(context={}) == 0
async def test_gte_number(literal_one, literal_two):
    # Greater-or-equal over numbers, including the equality case.
    assert await comparison.GTEFunction([literal_two, literal_one]).eval(context={}) == 1
    assert await comparison.GTEFunction([literal_one, literal_two]).eval(context={}) == 0
    assert await comparison.GTEFunction([literal_two, literal_two]).eval(context={}) == 1
def test_gte_parse():
    # 'GTE(...)' must parse into a GTEFunction node.
    parsed = Function.parse(None, 'GTE(1, 2)', 0)
    assert isinstance(parsed, comparison.GTEFunction)
def test_gte_num_args():
    # GTE accepts exactly two arguments.
    for expression in ('GTE(1)', 'GTE(1, 2, 3)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
async def test_lt_boolean(literal_false, literal_true):
    # false < true holds; false < false does not.
    assert await comparison.LTFunction([literal_false, literal_true]).eval(context={}) == 1
    assert await comparison.LTFunction([literal_false, literal_false]).eval(context={}) == 0
async def test_lt_number(literal_one, literal_two):
    # Strict less-than: equal operands must yield 0.
    assert await comparison.LTFunction([literal_one, literal_two]).eval(context={}) == 1
    assert await comparison.LTFunction([literal_two, literal_one]).eval(context={}) == 0
    assert await comparison.LTFunction([literal_two, literal_two]).eval(context={}) == 0
def test_lt_parse():
    # 'LT(...)' must parse into an LTFunction node.
    parsed = Function.parse(None, 'LT(1, 2)', 0)
    assert isinstance(parsed, comparison.LTFunction)
def test_lt_num_args():
    # LT accepts exactly two arguments.
    for expression in ('LT(1)', 'LT(1, 2, 3)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
async def test_lte_boolean(literal_false, literal_true):
    # Less-or-equal: equality counts, strict greater does not.
    assert await comparison.LTEFunction([literal_false, literal_true]).eval(context={}) == 1
    assert await comparison.LTEFunction([literal_false, literal_false]).eval(context={}) == 1
    assert await comparison.LTEFunction([literal_true, literal_false]).eval(context={}) == 0
async def test_lte_number(literal_one, literal_two):
    # Less-or-equal over numbers, including the equality case.
    assert await comparison.LTEFunction([literal_one, literal_two]).eval(context={}) == 1
    assert await comparison.LTEFunction([literal_two, literal_one]).eval(context={}) == 0
    assert await comparison.LTEFunction([literal_two, literal_two]).eval(context={}) == 1
def test_lte_parse():
    # 'LTE(...)' must parse into an LTEFunction node.
    parsed = Function.parse(None, 'LTE(1, 2)', 0)
    assert isinstance(parsed, comparison.LTEFunction)
def test_lte_num_args():
    # LTE accepts exactly two arguments.
    for expression in ('LTE(1)', 'LTE(1, 2, 3)'):
        with pytest.raises(InvalidNumberOfArguments):
            Function.parse(None, expression, 0)
| 5,538 | 0 | 552 |
40107f17e8133f5b16a5930391e91bf5c6691d3b | 14,793 | py | Python | src/python/WMCore/WMSpec/Steps/Templates/CMSSW.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2015-02-05T13:43:46.000Z | 2015-02-05T13:43:46.000Z | src/python/WMCore/WMSpec/Steps/Templates/CMSSW.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2016-10-13T14:57:35.000Z | 2016-10-13T14:57:35.000Z | src/python/WMCore/WMSpec/Steps/Templates/CMSSW.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
_CMSSW_
Template for a CMSSW Step
"""
import pickle
from WMCore.WMSpec.ConfigSectionTree import nodeName
from WMCore.WMSpec.Steps.Template import CoreHelper, Template
class CMSSWStepHelper(CoreHelper):
    """
    _CMSSWStepHelper_
    Add API calls and helper methods to the basic WMStepHelper to specialise
    for CMSSW tasks
    """
    def setAcqEra(self, acqEra):
        """
        _setAcqEra_
        Set the acquisition era attribute for this step.
        """
        self.data.output.acqEra = acqEra
    def setProcStr(self, procStr):
        """
        _setProcStr_
        Set the processing string attribute for this step.
        """
        self.data.output.procStr = procStr
    def setProcVer(self, procVer):
        """
        _setProcVer_
        Set the processing version era attribute for this step.
        """
        self.data.output.procVer = procVer
    def getAcqEra(self):
        """
        _getAcqEra_
        Retrieve the acquisition era for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'acqEra', None)
    def getProcStr(self):
        """
        _getProcStr_
        Retrieve the processing string for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'procStr', None)
    def getProcVer(self):
        """
        _getProcVer_
        Retrieve the processing version for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'procVer', None)
    def setPrepId(self, prepId):
        """
        _setPrepId_
        Set the prep_id attribute for this step.
        """
        self.data.output.prepId = prepId
    def getPrepId(self):
        """
        _getPrepId_
        Retrieve the prep_id for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'prepId', None)
    def addOutputModule(self, moduleName, **details):
        """
        _addOutputModule_
        Add in an output module settings, all default to None unless
        the value is provided in details
        """
        modules = self.data.output.modules
        # Fixed: identity comparison with None (was `== None`), per PEP 8.
        if getattr(modules, moduleName, None) is None:
            modules.section_(moduleName)
        module = getattr(modules, moduleName)
        for key, value in details.items():
            setattr(module, key, value)
        return
    def listOutputModules(self):
        """
        _listOutputModules_
        retrieve list of output module names
        """
        if hasattr(self.data.output, "modules"):
            return self.data.output.modules.dictionary_().keys()
        return []
    def getOutputModule(self, name):
        """
        _getOutputModule_
        retrieve the data structure for an output module by name
        None if not found
        """
        return getattr(self.data.output.modules, name, None)
    def setConfigCache(self, url, document, dbName="config_cache"):
        """
        _setConfigCache_
        Set the information required to retrieve a configuration from
        the config cache.
        url - base URL for the config cache instance
        document - GUID for the config document
        dbName - optional, name of the db instance in the couch server
        """
        self.data.application.configuration.configCacheUrl = url
        self.data.application.configuration.configId = document
        self.data.application.configuration.cacheName = dbName
        docUrl = "%s/%s/%s" % (url, dbName, document)
        self.data.application.configuration.configUrl = docUrl
        self.data.application.configuration.retrieveConfigUrl = "%s/configFile" % docUrl
    def setDataProcessingConfig(self, scenarioName, functionName, **args):
        """
        _setDataProcessingConfig_
        Set a configuration library to be used from the CMSSW Release
        DataProcessing package.
        """
        self.data.application.configuration.scenario = scenarioName
        self.data.application.configuration.function = functionName
        # assume if this crashes we are dealing with complex data
        # which is only supported in new agents that only look
        # at pickledarguments anyways
        try:
            self.data.application.configuration.section_('arguments')
            # Fixed idiom: plain loop instead of a side-effect-only list
            # comprehension; behavior is unchanged.
            for k, v in args.items():
                setattr(self.data.application.configuration.arguments, k, v)
        except Exception:
            pass
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
    def cmsswSetup(self, cmsswVersion, **options):
        """
        _cmsswSetup_
        Provide setup details for CMSSW.
        cmsswVersion - required - version of CMSSW to use
        Optional:
        scramCommand - defaults to scramv1
        scramProject - defaults to CMSSW
        scramArch - optional scram architecture, defaults to None
        buildArch - optional scram build architecture, defaults to None
        softwareEnvironment - setup command to bootstrap scram,defaults to None
        """
        self.data.application.setup.cmsswVersion = cmsswVersion
        for k, v in options.items():
            setattr(self.data.application.setup, k, v)
        return
    def getScramArch(self):
        """
        _getScramArch_
        Retrieve the scram architecture used for this step.
        """
        return self.data.application.setup.scramArch
    def getCMSSWVersion(self):
        """
        _getCMSSWVersion_
        Retrieve the version of the framework used for this step.
        """
        return self.data.application.setup.cmsswVersion
    def setGlobalTag(self, globalTag):
        """
        _setGlobalTag_
        Set the global tag.
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.globalTag = globalTag
        # Keep the pickled-arguments copy in sync for newer agents that only
        # read pickledarguments.
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['globalTag'] = globalTag
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
    def getGlobalTag(self):
        """
        _getGlobalTag_
        Retrieve the global tag.
        """
        if hasattr(self.data.application.configuration, "arguments"):
            if hasattr(self.data.application.configuration.arguments, "globalTag"):
                return self.data.application.configuration.arguments.globalTag
        return pickle.loads(self.data.application.configuration.pickledarguments)['globalTag']
    def setDatasetName(self, datasetName):
        """
        _setDatasetName_
        Set the dataset name in the pickled arguments
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.datasetName = datasetName
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['datasetName'] = datasetName
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
    def getDatasetName(self):
        """
        _getDatasetName_
        Retrieve the dataset name from the pickled arguments
        """
        if hasattr(self.data.application.configuration, "arguments"):
            if hasattr(self.data.application.configuration.arguments, "datasetName"):
                return self.data.application.configuration.arguments.datasetName
        return pickle.loads(self.data.application.configuration.pickledarguments).get('datasetName', None)
    def getScenario(self):
        """
        _getScenario_
        Retrieve the scenario from the pickled arguments, if any
        """
        if hasattr(self.data.application.configuration, "scenario"):
            return self.data.application.configuration.scenario
        return None
    def setUserSandbox(self, userSandbox):
        """
        _setUserSandbox_
        Sets the userSandbox. Eventually may have to move this to a proper
        list rather than a one element list
        """
        if userSandbox:
            self.data.user.inputSandboxes = [userSandbox]
        return
    def setUserFiles(self, userFiles):
        """
        _setUserFiles_
        Sets the list of extra files the user needs
        """
        if userFiles:
            self.data.user.userFiles = userFiles
        return
    def setUserLFNBase(self, lfnBase):
        """
        _setUserLFNBase_
        Sets the LFN base directory for user output
        """
        if lfnBase:
            self.data.user.lfnBase = lfnBase
        return
    def setupChainedProcessing(self, inputStepName, inputOutputModule):
        """
        _setupChainedProcessing_
        Set values to support chained CMSSW running.
        """
        self.data.input.chainedProcessing = True
        self.data.input.inputStepName = inputStepName
        self.data.input.inputOutputModule = inputOutputModule
    def keepOutput(self, keepOutput):
        """
        _keepOutput_
        Mark whether or not we should keep the output from this step. We don't
        want to keep the output from certain chained steps.
        """
        self.data.output.keep = keepOutput
        return
    def getPileup(self):
        """
        _getPileup_
        Retrieve the pileup config from this step.
        """
        return getattr(self.data, "pileup", None)
    def setupPileup(self, pileupConfig, dbsUrl):
        """
        include pileup input configuration into this step configuration.
        pileupConfig is initially specified as input to the workload
        (user input) and here is available as a dict.
        """
        # so, e.g. this {"cosmics": "/some/cosmics/dataset", "minbias": "/some/minbias/dataset"}
        # would translate into
        # self.data.pileup.comics.dataset = "/some/cosmics/dataset"
        # self.data.pileup.minbias.dataset = "/some/minbias/dataset"
        self.data.section_("pileup")
        for pileupType, dataset in pileupConfig.items():
            self.data.pileup.section_(pileupType)
            setattr(getattr(self.data.pileup, pileupType), "dataset", dataset)
        setattr(self.data, "dbsUrl", dbsUrl)
    def setOverrideCatalog(self, overrideCatalog):
        """
        _setOverrideCatalog_
        set the override catalog
        needed at least at CERN to use production castor pools
        """
        # Fixed: identity comparison with None (was `!= None`), per PEP 8.
        if overrideCatalog is not None:
            self.data.application.overrideCatalog = overrideCatalog
    def setEventsPerLumi(self, eventsPerLumi):
        """
        _setEventsPerLumi_
        Add event per lumi information to the step, so it can be added later
        to the process, this comes from user input
        """
        # Fixed: identity comparison with None (was `!= None`), per PEP 8.
        if eventsPerLumi is not None:
            setattr(self.data.application.configuration, "eventsPerLumi", eventsPerLumi)
    def getSkipBadFiles(self):
        """
        _getSkipBadFiles_
        Check if we can skip inexistent files instead of failing the job
        """
        return getattr(self.data.application.configuration, "skipBadFiles", False)
    def setSkipBadFiles(self, skipBadFiles):
        """
        _setSkipBadFiles_
        Add a flag to indicate the CMSSW process if we can
        skip inexistent files instead of failing the job
        """
        setattr(self.data.application.configuration, "skipBadFiles", skipBadFiles)
    def setNumberOfCores(self, ncores, nEventStreams=0):
        """
        _setNumberOfCores_
        Set the number of cores and event streams for CMSSW to run on
        """
        # if None is passed for EventStreams, then set it to 0
        nEventStreams = nEventStreams or 0
        self.data.application.multicore.numberOfCores = ncores
        self.data.application.multicore.eventStreams = nEventStreams
    def getNumberOfCores(self):
        """
        _getNumberOfCores_
        Get number of cores
        """
        return self.data.application.multicore.numberOfCores
    def getEventStreams(self):
        """
        _getEventStreams_
        Get number of event streams
        """
        return self.data.application.multicore.eventStreams
class CMSSW(Template):
    """
    _CMSSW_
    Tools for creating a template CMSSW Step
    """
    def install(self, step):
        """
        _install_
        Add the set of default fields to the step required for running
        a cmssw job
        """
        stepname = nodeName(step)
        step.stepType = "CMSSW"
        # Scram/release bootstrap settings; all release-specific fields start
        # out unset and are filled in later via CMSSWStepHelper.cmsswSetup().
        step.application.section_("setup")
        step.application.setup.scramCommand = "scramv1"
        step.application.setup.scramProject = "CMSSW"
        step.application.setup.cmsswVersion = None
        step.application.setup.scramArch = None
        step.application.setup.buildArch = None
        step.application.setup.softwareEnvironment = None
        # The cmsRun invocation: config file names and optional tweaks.
        step.application.section_("command")
        step.application.command.executable = "cmsRun"
        step.application.command.configuration = "PSet.py"
        step.application.command.configurationPickle = "PSet.pkl"
        step.application.command.configurationHash = None
        step.application.command.psetTweak = None
        step.application.command.arguments = ""
        # Per-step output artifacts; log file names embed the step name.
        step.output.jobReport = "FrameworkJobReport.xml"
        step.output.stdout = "%s-stdout.log" % stepname
        step.output.stderr = "%s-stderr.log" % stepname
        step.output.keep = True
        step.output.section_("modules")
        step.output.section_("analysisFiles")
        # Hook lists for scripts run before/after scram setup and execution.
        step.section_("runtime")
        step.runtime.preScripts = []
        step.runtime.scramPreScripts = []
        step.runtime.postScripts = []
        step.runtime.postScramScripts = []
        step.section_("debug")
        step.debug.verbosity = 0
        step.debug.keepLogs = False
        # User-analysis extras: sandboxes, custom script, extra files, LFN base.
        step.section_("user")
        step.user.inputSandboxes = []
        step.user.script = None
        step.user.outputFiles = []
        step.user.userFiles = []
        step.user.lfnBase = None
        step.section_("monitoring")
        # support for multicore cmssw running mode
        step.application.section_("multicore")
        step.application.multicore.numberOfCores = 1
        step.application.multicore.eventStreams = 0
    def helper(self, step):
        """
        _helper_
        Wrap the WMStep provided in the CMSSW helper class that
        includes the ability to add and manipulate the details
        of a CMSSW workflow step
        """
        return CMSSWStepHelper(step)
| 30.56405 | 106 | 0.63503 | #!/usr/bin/env python
"""
_CMSSW_
Template for a CMSSW Step
"""
import pickle
from WMCore.WMSpec.ConfigSectionTree import nodeName
from WMCore.WMSpec.Steps.Template import CoreHelper, Template
class CMSSWStepHelper(CoreHelper):
"""
_CMSSWStepHelper_
Add API calls and helper methods to the basic WMStepHelper to specialise
for CMSSW tasks
"""
def setAcqEra(self, acqEra):
"""
_setAcqEra_
Set the acquisition era attribute for this step.
"""
self.data.output.acqEra = acqEra
def setProcStr(self, procStr):
"""
_setProcStr_
Set the processing string attribute for this step.
"""
self.data.output.procStr = procStr
def setProcVer(self, procVer):
"""
_setProcVer_
Set the processing version era attribute for this step.
"""
self.data.output.procVer = procVer
def getAcqEra(self):
"""
_getAcqEra_
Retrieve the acquisition era for this step, or return None if non-existent.
"""
return getattr(self.data.output, 'acqEra', None)
def getProcStr(self):
"""
_getProcStr_
Retrieve the processing string for this step, or return None if non-existent.
"""
return getattr(self.data.output, 'procStr', None)
def getProcVer(self):
"""
_getProcVer_
Retrieve the processing version for this step, or return None if non-existent.
"""
return getattr(self.data.output, 'procVer', None)
def setPrepId(self, prepId):
"""
_setPrepId_
Set the prep_id attribute for this step.
"""
self.data.output.prepId = prepId
def getPrepId(self):
"""
_getPrepId_
Retrieve the prep_id for this step, or return None if non-existent.
"""
return getattr(self.data.output, 'prepId', None)
def addOutputModule(self, moduleName, **details):
"""
_addOutputModule_
Add in an output module settings, all default to None unless
the value is provided in details
"""
modules = self.data.output.modules
if getattr(modules, moduleName, None) == None:
modules.section_(moduleName)
module = getattr(modules, moduleName)
for key, value in details.items():
setattr(module, key, value)
return
def listOutputModules(self):
"""
_listOutputModules_
retrieve list of output module names
"""
if hasattr(self.data.output, "modules"):
return self.data.output.modules.dictionary_().keys()
return []
def getOutputModule(self, name):
"""
_getOutputModule_
retrieve the data structure for an output module by name
None if not found
"""
return getattr(self.data.output.modules, name, None)
def setConfigCache(self, url, document, dbName="config_cache"):
"""
_setConfigCache_
Set the information required to retrieve a configuration from
the config cache.
url - base URL for the config cache instance
document - GUID for the config document
dbName - optional, name of the db instance in the couch server
"""
self.data.application.configuration.configCacheUrl = url
self.data.application.configuration.configId = document
self.data.application.configuration.cacheName = dbName
docUrl = "%s/%s/%s" % (url, dbName, document)
self.data.application.configuration.configUrl = docUrl
self.data.application.configuration.retrieveConfigUrl = "%s/configFile" % docUrl
    def setDataProcessingConfig(self, scenarioName, functionName, **args):
        """
        _setDataProcessingConfig_

        Set a configuration library to be used from the CMSSW Release
        DataProcessing package.

        scenarioName - name of the DataProcessing scenario
        functionName - function inside the scenario module to invoke
        args         - keyword arguments forwarded to that function; they
                       are stored both as plain attributes and pickled.
        """
        self.data.application.configuration.scenario = scenarioName
        self.data.application.configuration.function = functionName
        # assume if this crashes we are dealing with complex data
        # which is only supported in new agents that only look
        # at pickledarguments anyways
        try:
            self.data.application.configuration.section_('arguments')
            # NOTE: list comprehension used purely for its setattr side effect
            [setattr(self.data.application.configuration.arguments, k, v) for k, v in args.items()]
        except Exception:
            pass
        # Pickled copy is the authoritative blob for newer agents.
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
def cmsswSetup(self, cmsswVersion, **options):
"""
_cmsswSetup_
Provide setup details for CMSSW.
cmsswVersion - required - version of CMSSW to use
Optional:
scramCommand - defaults to scramv1
scramProject - defaults to CMSSW
scramArch - optional scram architecture, defaults to None
buildArch - optional scram build architecture, defaults to None
softwareEnvironment - setup command to bootstrap scram,defaults to None
"""
self.data.application.setup.cmsswVersion = cmsswVersion
for k, v in options.items():
setattr(self.data.application.setup, k, v)
return
    def getScramArch(self):
        """
        _getScramArch_

        Retrieve the scram architecture used for this step.
        Set by cmsswSetup(); defaults to None on freshly installed steps.
        """
        return self.data.application.setup.scramArch
    def getCMSSWVersion(self):
        """
        _getCMSSWVersion_

        Retrieve the version of the framework used for this step.
        Set by cmsswSetup(); defaults to None on freshly installed steps.
        """
        return self.data.application.setup.cmsswVersion
    def setGlobalTag(self, globalTag):
        """
        _setGlobalTag_

        Set the global tag, both as a plain attribute on the arguments
        section and inside the pickled argument blob, so old and new
        consumers see the same value.
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.globalTag = globalTag
        # Keep the pickled copy in sync: load the existing blob (if any),
        # update the tag, and write it back.
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['globalTag'] = globalTag
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
def getGlobalTag(self):
"""
_getGlobalTag_
Retrieve the global tag.
"""
if hasattr(self.data.application.configuration, "arguments"):
if hasattr(self.data.application.configuration.arguments, "globalTag"):
return self.data.application.configuration.arguments.globalTag
return pickle.loads(self.data.application.configuration.pickledarguments)['globalTag']
    def setDatasetName(self, datasetName):
        """
        _setDatasetName_

        Set the dataset name both on the arguments section and inside the
        pickled arguments blob (kept in sync, same pattern as setGlobalTag).
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.datasetName = datasetName
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['datasetName'] = datasetName
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return
def getDatasetName(self):
"""
_getDatasetName_
Retrieve the dataset name from the pickled arguments
"""
if hasattr(self.data.application.configuration, "arguments"):
if hasattr(self.data.application.configuration.arguments, "datasetName"):
return self.data.application.configuration.arguments.datasetName
return pickle.loads(self.data.application.configuration.pickledarguments).get('datasetName', None)
def getScenario(self):
"""
_getScenario_
Retrieve the scenario from the pickled arguments, if any
"""
if hasattr(self.data.application.configuration, "scenario"):
return self.data.application.configuration.scenario
return None
def setUserSandbox(self, userSandbox):
"""
_setUserSandbox_
Sets the userSandbox. Eventually may have to move this to a proper
list rather than a one element list
"""
if userSandbox:
self.data.user.inputSandboxes = [userSandbox]
return
    def setUserFiles(self, userFiles):
        """
        _setUserFiles_

        Sets the list of extra files the user needs.
        Falsy values (None, empty list) leave the step untouched.
        """
        if userFiles:
            self.data.user.userFiles = userFiles
        return
    def setUserLFNBase(self, lfnBase):
        """
        _setUserLFNBase_

        Set the base LFN under which user output is stored.
        Falsy values leave the step untouched.
        (Docstring previously copy-pasted from setUserFiles.)
        """
        if lfnBase:
            self.data.user.lfnBase = lfnBase
        return
    def setupChainedProcessing(self, inputStepName, inputOutputModule):
        """
        _setupChainedProcessing_

        Set values to support chained CMSSW running: this step consumes
        output module *inputOutputModule* of step *inputStepName* instead
        of external input.
        """
        self.data.input.chainedProcessing = True
        self.data.input.inputStepName = inputStepName
        self.data.input.inputOutputModule = inputOutputModule
    def keepOutput(self, keepOutput):
        """
        _keepOutput_

        Mark whether or not we should keep the output from this step. We don't
        want to keep the output from certain chained steps.
        """
        self.data.output.keep = keepOutput
        return
def getPileup(self):
"""
_getPileup_
Retrieve the pileup config from this step.
"""
return getattr(self.data, "pileup", None)
    def setupPileup(self, pileupConfig, dbsUrl):
        """
        include pileup input configuration into this step configuration.
        pileupConfig is initially specified as input to the workload
        (user input) and here is available as a dict.

        dbsUrl - DBS instance URL stored alongside for dataset lookup.
        """
        # so, e.g. this {"cosmics": "/some/cosmics/dataset", "minbias": "/some/minbias/dataset"}
        # would translate into
        # self.data.pileup.cosmics.dataset = "/some/cosmics/dataset"
        # self.data.pileup.minbias.dataset = "/some/minbias/dataset"
        self.data.section_("pileup")
        for pileupType, dataset in pileupConfig.items():
            self.data.pileup.section_(pileupType)
            setattr(getattr(self.data.pileup, pileupType), "dataset", dataset)
        setattr(self.data, "dbsUrl", dbsUrl)
def setOverrideCatalog(self, overrideCatalog):
"""
_setOverrideCatalog_
set the override catalog
needed at least at CERN to use production castor pools
"""
if overrideCatalog != None:
self.data.application.overrideCatalog = overrideCatalog
def setEventsPerLumi(self, eventsPerLumi):
"""
_setEventsPerLumi_
Add event per lumi information to the step, so it can be added later
to the process, this comes from user input
"""
if eventsPerLumi != None:
setattr(self.data.application.configuration, "eventsPerLumi", eventsPerLumi)
    def getSkipBadFiles(self):
        """
        _getSkipBadFiles_

        Check if we can skip inexistent files instead of failing the job.
        Defaults to False when never set via setSkipBadFiles().
        """
        return getattr(self.data.application.configuration, "skipBadFiles", False)
    def setSkipBadFiles(self, skipBadFiles):
        """
        _setSkipBadFiles_

        Add a flag to indicate the CMSSW process if we can
        skip inexistent files instead of failing the job.
        """
        setattr(self.data.application.configuration, "skipBadFiles", skipBadFiles)
def setNumberOfCores(self, ncores, nEventStreams=0):
"""
_setNumberOfCores_
Set the number of cores and event streams for CMSSW to run on
"""
# if None is passed for EventStreams, then set it to 0
nEventStreams = nEventStreams or 0
self.data.application.multicore.numberOfCores = ncores
self.data.application.multicore.eventStreams = nEventStreams
    def getNumberOfCores(self):
        """
        _getNumberOfCores_

        Get number of cores configured via setNumberOfCores().
        """
        return self.data.application.multicore.numberOfCores
    def getEventStreams(self):
        """
        _getEventStreams_

        Get number of event streams configured via setNumberOfCores().
        """
        return self.data.application.multicore.eventStreams
class CMSSW(Template):
    """
    _CMSSW_

    Tools for creating a template CMSSW Step.
    install() seeds a bare WMStep with every field a cmsRun job needs;
    helper() wraps a step in the rich accessor class.
    """
    def install(self, step):
        """
        _install_

        Add the set of default fields to the step required for running
        a cmssw job.
        """
        stepname = nodeName(step)
        step.stepType = "CMSSW"
        # scram/CMSSW environment defaults; refined later via cmsswSetup()
        step.application.section_("setup")
        step.application.setup.scramCommand = "scramv1"
        step.application.setup.scramProject = "CMSSW"
        step.application.setup.cmsswVersion = None
        step.application.setup.scramArch = None
        step.application.setup.buildArch = None
        step.application.setup.softwareEnvironment = None
        # cmsRun invocation: config files, tweaks and CLI arguments
        step.application.section_("command")
        step.application.command.executable = "cmsRun"
        step.application.command.configuration = "PSet.py"
        step.application.command.configurationPickle = "PSet.pkl"
        step.application.command.configurationHash = None
        step.application.command.psetTweak = None
        step.application.command.arguments = ""
        # output artefacts: job report plus per-step log files
        step.output.jobReport = "FrameworkJobReport.xml"
        step.output.stdout = "%s-stdout.log" % stepname
        step.output.stderr = "%s-stderr.log" % stepname
        step.output.keep = True
        step.output.section_("modules")
        step.output.section_("analysisFiles")
        # runtime hook scripts executed around the scram/cmsRun phases
        step.section_("runtime")
        step.runtime.preScripts = []
        step.runtime.scramPreScripts = []
        step.runtime.postScripts = []
        step.runtime.postScramScripts = []
        step.section_("debug")
        step.debug.verbosity = 0
        step.debug.keepLogs = False
        # user-supplied sandboxes, scripts and extra files
        step.section_("user")
        step.user.inputSandboxes = []
        step.user.script = None
        step.user.outputFiles = []
        step.user.userFiles = []
        step.user.lfnBase = None
        step.section_("monitoring")
        # support for multicore cmssw running mode
        step.application.section_("multicore")
        step.application.multicore.numberOfCores = 1
        step.application.multicore.eventStreams = 0
    def helper(self, step):
        """
        _helper_

        Wrap the WMStep provided in the CMSSW helper class that
        includes the ability to add and manipulate the details
        of a CMSSW workflow step.
        """
        return CMSSWStepHelper(step)
| 0 | 0 | 0 |
4d3f64b1ed19e17959aa539a5c12c521c3c0fdee | 3,666 | py | Python | Algorithm1/duffing.py | bharatmonga/Supervised-learning-algorithms | de2ecf6bd608870a3ef2a799763ef33815a33c04 | [
"MIT"
] | null | null | null | Algorithm1/duffing.py | bharatmonga/Supervised-learning-algorithms | de2ecf6bd608870a3ef2a799763ef33815a33c04 | [
"MIT"
] | null | null | null | Algorithm1/duffing.py | bharatmonga/Supervised-learning-algorithms | de2ecf6bd608870a3ef2a799763ef33815a33c04 | [
"MIT"
] | 1 | 2021-01-25T13:26:49.000Z | 2021-01-25T13:26:49.000Z | import numpy as np
from numpy import linalg as LA
class Duffing:
"""
Create a duffing object by specifying it's parameter delta as input at initialization of the object
It is a bistable dynamical system with 2 stable steady states and one unstable steady state
"""
def reset(self):
"""
:return: randomly initialized state of the duffing object
"""
self.state = np.random.uniform(low=-4, high=4, size=(2,))
return self.state
def step(self, u):
"""
takes input u as the action/control to be applied
calculates next state by calling 4th order Runge-Kutta solver
returns state at the next time step
"""
y = self.state
self.control = u
new_y = self.rk4(y)
self.state = new_y
return self.state
def reward(self):
"""
:return: reward as the negative of the 2 norm between current state and the desired state
"""
return -LA.norm(self.state - self.desired_state, axis=0)
def bin_classifier(self):
"""
:return: binary control (0 or 1) based on the locally weighted binary classifier
"""
w = np.exp(-(LA.norm(self.state - self.X, axis=1)**2)/(2*self.tau))
w /= np.sum(w)
if np.dot(w, self.U) > 0.5:
return 1
else:
return 0
def dydt(self, y):
"""
:param y: current state of the duffing oscillator
:return: right hand side of duffing ODEs
"""
dy0 = y[1] + self.control
dy1 = y[0] - y[0]**3 - self.delta*y[1]
return dy0, dy1
def rk4(self, y0):
"""
:param y0: current state of the duffing object
:return: state y of the duffing object at next time step using 4th order Runge-Kutta method
"""
h = self.dt
f = self.dydt
k1 = h * np.asarray(f(y0))
k2 = h * np.asarray(f(y0 + k1 / 2))
k3 = h * np.asarray(f(y0 + k2 / 2))
k4 = h * np.asarray(f(y0 + k3))
y = y0 + (1 / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
return y
def trajectory(self, n):
"""
:param n: number of time steps in trajectory
:return: trajectory y at time steps t and control u
"""
y, u, t = np.zeros((n, 2)), np.zeros((n, 1)), np.zeros((n, 1))
y[0, :] = self.state
u[0, 0] = self.max_control * self.bin_classifier()
for i in range(1, n):
y[i, :] = self.step(u[i - 1, 0])
t[i, 0] = i * self.dt
u[i, 0] = self.max_control * self.bin_classifier()
return y, u, t
def trajectory_no_control(self, n):
"""
:param n: number of time steps in trajectory
:return: trajectory y at time steps t
"""
y, t = np.zeros((n, 2)), np.zeros((n, 1))
y[0, :] = self.state
for i in range(1, n):
y[i, :] = self.step(0)
t[i, 0] = i * self.dt
return y, t
| 32.732143 | 104 | 0.51773 | import numpy as np
from numpy import linalg as LA
class Duffing:
    """
    Create a duffing object by specifying its parameter delta as input at initialization of the object
    It is a bistable dynamical system with 2 stable steady states and one unstable steady state
    """
    def __init__(self, dl):
        self.delta = dl # duffing parameter (damping coefficient)
        self.tau = 0.1 # parameter for bin_classifier()
        self.dt = 0.001 # time step
        self.control = 0 # initialize control as 0
        self.max_control = 4 # magnitude of the bang-bang control action
        self.seed = np.random.seed(0) # NOTE: np.random.seed returns None; this seeds the global RNG
        self.state = None # current 2-d state [position, velocity]; set by reset()
        self.desired_state = [1.0, 0.0] # desired state, also a stable fixed point
        self.fp2 = [0.0, 0.0] # unstable fixed point
        self.fp3 = [-1.0, 0.0] # stable fixed point
        self.X = None # classifier training states; presumably (n, 2) array — TODO confirm
        self.U = None # classifier training labels (0/1), aligned with X — TODO confirm
    def reset(self):
        """
        :return: randomly initialized state of the duffing object
        """
        self.state = np.random.uniform(low=-4, high=4, size=(2,))
        return self.state
    def step(self, u):
        """
        takes input u as the action/control to be applied
        calculates next state by calling 4th order Runge-Kutta solver
        returns state at the next time step
        """
        y = self.state
        self.control = u
        new_y = self.rk4(y)
        self.state = new_y
        return self.state
    def reward(self):
        """
        :return: reward as the negative of the 2 norm between current state and the desired state
        """
        return -LA.norm(self.state - self.desired_state, axis=0)
    def bin_classifier(self):
        """
        :return: binary control (0 or 1) based on the locally weighted binary classifier
        """
        # Gaussian kernel weights over training states X, bandwidth tau
        w = np.exp(-(LA.norm(self.state - self.X, axis=1)**2)/(2*self.tau))
        w /= np.sum(w)
        # weighted vote of the binary labels U; threshold at 0.5
        if np.dot(w, self.U) > 0.5:
            return 1
        else:
            return 0
    def dydt(self, y):
        """
        :param y: current state of the duffing oscillator
        :return: right hand side of duffing ODEs
        """
        # NOTE(review): the control enters the position equation here — confirm intended
        dy0 = y[1] + self.control
        dy1 = y[0] - y[0]**3 - self.delta*y[1]
        return dy0, dy1
    def rk4(self, y0):
        """
        :param y0: current state of the duffing object
        :return: state y of the duffing object at next time step using 4th order Runge-Kutta method
        """
        h = self.dt
        f = self.dydt
        # classical RK4 stages
        k1 = h * np.asarray(f(y0))
        k2 = h * np.asarray(f(y0 + k1 / 2))
        k3 = h * np.asarray(f(y0 + k2 / 2))
        k4 = h * np.asarray(f(y0 + k3))
        y = y0 + (1 / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        return y
    def trajectory(self, n):
        """
        :param n: number of time steps in trajectory
        :return: trajectory y at time steps t and control u
        """
        y, u, t = np.zeros((n, 2)), np.zeros((n, 1)), np.zeros((n, 1))
        y[0, :] = self.state
        u[0, 0] = self.max_control * self.bin_classifier()
        for i in range(1, n):
            # apply the control chosen at the previous step, then re-classify
            y[i, :] = self.step(u[i - 1, 0])
            t[i, 0] = i * self.dt
            u[i, 0] = self.max_control * self.bin_classifier()
        return y, u, t
    def trajectory_no_control(self, n):
        """
        :param n: number of time steps in trajectory
        :return: trajectory y at time steps t
        """
        y, t = np.zeros((n, 2)), np.zeros((n, 1))
        y[0, :] = self.state
        for i in range(1, n):
            y[i, :] = self.step(0)
            t[i, 0] = i * self.dt
        return y, t
| 533 | 0 | 29 |
f4485ba2649f49ec2f52ff17cf8af227d1239414 | 4,669 | py | Python | src/python/make_toplevel.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | [
"Apache-2.0"
] | null | null | null | src/python/make_toplevel.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | [
"Apache-2.0"
] | null | null | null | src/python/make_toplevel.py | kf7lsu/RegfileCompiler-public | 0845f1458137cef06d584047bb4287a72c6afbab | [
"Apache-2.0"
] | null | null | null | #Matthew Trahms
#EE 526
#5/25/21
#This file serves as the toplevel generation script. The user will enter
#at least the number entries, bits, and reads, with options to specify
#that the regfile should be split into banks. Vertical banks (v_banks) means
#that the address space will be split between multiple banks. This comes in
#handy when creating a register file with a large number of entries.
#Horizontal banks (h_banks) means that the data being read/written is split
#across multiple register files. This comes in handy when creating a register
#file with lots of bits per entry in the register file.
from make_store_grid import make_store_grid as grid
from make_module_decl import make_module_decl as module
from make_io import make_io as io
from make_wr_data_latches import make_wr_data_latches as wr_latches
from make_wr_data_buffers import make_wr_data_buffers as wr_buff
from route_rdata import route_rdata as rdata
from make_decoder import make_decoder as decoder
from make_wr_addr_latches import make_wr_addr_latches as waddr_latch
from make_wr_en_latch import make_wr_en_latch as wen_latch
from connect_w_logic import connect_w_logic as w_logic
from buffer_clk import buffer_clk
from cell_map import low_widths, tristate_w
from size_buffers_latches import size_tristate, size_wr_data_latch
import math
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('entries',
help='the number of entries found in the finished register file',
type=int)
parser.add_argument('bits',
help='the number of bits per entry in the register file',
type=int)
parser.add_argument('reads',
help='the number of read ports in the register file', type=int)
parser.add_argument('word_banks',
help='splits the word among multiple banks to lower cap (default 1)',
type=int)
parser.add_argument('address_banks',
help='splits addr space among banks to lower cap (default 1)',
type=int)
args = parser.parse_args()
out_file = open('regfile.v', 'w')
pins_file = open('../src/apr/pin_config.txt', 'w')
entries = args.entries
bits = args.bits
reads = args.reads
h_banks = args.word_banks
v_banks = args.address_banks
make_toplevel(out_file, pins_file, entries, bits, reads, v_banks, h_banks)
out_file.close()
pins_file.close()
| 39.235294 | 99 | 0.765046 | #Matthew Trahms
#EE 526
#5/25/21
#This file serves as the toplevel generation script. The user will enter
#at least the number entries, bits, and reads, with options to specify
#that the regfile should be split into banks. Vertical banks (v_banks) means
#that the address space will be split between multiple banks. This comes in
#handy when creating a register file with a large number of entries.
#Horizontal banks (h_banks) means that the data being read/written is split
#across multiple register files. This comes in handy when creating a register
#file with lots of bits per entry in the register file.
from make_store_grid import make_store_grid as grid
from make_module_decl import make_module_decl as module
from make_io import make_io as io
from make_wr_data_latches import make_wr_data_latches as wr_latches
from make_wr_data_buffers import make_wr_data_buffers as wr_buff
from route_rdata import route_rdata as rdata
from make_decoder import make_decoder as decoder
from make_wr_addr_latches import make_wr_addr_latches as waddr_latch
from make_wr_en_latch import make_wr_en_latch as wen_latch
from connect_w_logic import connect_w_logic as w_logic
from buffer_clk import buffer_clk
from cell_map import low_widths, tristate_w
from size_buffers_latches import size_tristate, size_wr_data_latch
import math
import argparse
def make_toplevel(out_file, pins_file, entries, bits, reads, v_banks, h_banks):
    """Generate the toplevel Verilog for the register file.

    out_file  - open file handle receiving the generated Verilog
    pins_file - open file handle receiving the pin-config summary for APR
    entries   - requested number of entries (rounded up per banking)
    bits      - requested bits per entry (rounded up per banking)
    reads     - number of read ports
    v_banks   - vertical banks: address space split (capped at 4)
    h_banks   - horizontal banks: word split across banks
    """
    if v_banks > 4:
        print("WARNING: Only 4 vertical banks supported at this time")
        print("setting vertical banks to 4")
        v_banks = 4
    #figuring out the actual entries and bits based on regfile division
    #entries_per_regfile = int(math.ceil(entries/v_banks))
    #entries = int(entries_per_regfile * v_banks)
    entries_per_regfile = entries
    if v_banks > 1:
        # round each bank up to a power of two so the decoder is fully used
        approx_rf_entries = entries / v_banks
        rf_addr_bits = int(math.ceil(math.log(approx_rf_entries, 2)))
        entries_per_regfile = int(2**rf_addr_bits)
        entries = entries_per_regfile * v_banks
    bits_per_regfile = int(math.ceil(bits/h_banks))
    bits = int(bits_per_regfile * h_banks)
    #write the module header and io
    module(out_file, reads)
    io(out_file, reads, bits, entries)
    out_file.write('\n')
    #calculate the number of address bits
    addr_bits = int(math.ceil(math.log(entries, 2)))
    #calculate the number of regfiles to generate
    num_rf = v_banks * h_banks
    #calculate static cell widths
    tri_w = size_tristate(entries_per_regfile)
    wlatch_w = size_wr_data_latch(entries_per_regfile)
    #grabbing lowest latch width for unoptimized logic
    lowest_latch_w = low_widths[0]
    #start doing custom placement
    out_file.write('// START_CUSTOM_PLACE\n')
    for rf_idx in range(num_rf):
        grid(out_file, entries_per_regfile, bits_per_regfile, reads, tri_w, rf_idx)
        wr_latches(out_file, bits_per_regfile, wlatch_w, rf_idx)
    out_file.write('// END_CUSTOM_PLACE\n')
    #decoders, write logic, and signal routing
    wr_buff(out_file, num_rf, bits, bits_per_regfile, 1)
    rdata(out_file, reads, addr_bits, bits, bits_per_regfile, num_rf, 1)
    for rf_idx in range(num_rf):
        waddr_latch(out_file, rf_idx, entries_per_regfile, lowest_latch_w)
        wen_latch(out_file, rf_idx, bits_per_regfile, bits, entries_per_regfile, entries, lowest_latch_w)
        w_logic(out_file, rf_idx, entries_per_regfile, 1)
        #buffer_clk(out_file, rf_idx, 1)
        # decoder index -1 is the write decoder; 0..reads-1 are read decoders
        decoder(out_file, rf_idx, entries_per_regfile, -1, 1, 1)
        for i in range(reads):
            decoder(out_file, rf_idx, entries_per_regfile, i, 1, 1)
    out_file.write("endmodule")
    #creating pin information file for apr
    pins_file.write('bits: ' + str(bits) + '\n')
    pins_file.write('entries: ' + str(entries) + '\n')
    pins_file.write('reads: ' + str(reads))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('entries',
help='the number of entries found in the finished register file',
type=int)
parser.add_argument('bits',
help='the number of bits per entry in the register file',
type=int)
parser.add_argument('reads',
help='the number of read ports in the register file', type=int)
parser.add_argument('word_banks',
help='splits the word among multiple banks to lower cap (default 1)',
type=int)
parser.add_argument('address_banks',
help='splits addr space among banks to lower cap (default 1)',
type=int)
args = parser.parse_args()
out_file = open('regfile.v', 'w')
pins_file = open('../src/apr/pin_config.txt', 'w')
entries = args.entries
bits = args.bits
reads = args.reads
h_banks = args.word_banks
v_banks = args.address_banks
make_toplevel(out_file, pins_file, entries, bits, reads, v_banks, h_banks)
out_file.close()
pins_file.close()
| 2,333 | 0 | 23 |
feebba28c40b6e3359fcd65517e17e31597ad388 | 1,164 | py | Python | tools/python/test/test_point.py | babic95/dlib | 285f0255f6deef4e59e97f93023de112594c0741 | [
"BSL-1.0"
] | 11,719 | 2015-01-03T22:38:57.000Z | 2022-03-30T21:45:04.000Z | tools/python/test/test_point.py | KiLJ4EdeN/dlib | eb1f08ce6ab3ca6f9d10425d899103de3c0df56c | [
"BSL-1.0"
] | 2,518 | 2015-01-04T04:38:06.000Z | 2022-03-31T11:55:43.000Z | tools/python/test/test_point.py | KiLJ4EdeN/dlib | eb1f08ce6ab3ca6f9d10425d899103de3c0df56c | [
"BSL-1.0"
] | 3,308 | 2015-01-01T14:34:16.000Z | 2022-03-31T07:20:07.000Z | from dlib import point, points
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
| 20.421053 | 57 | 0.543814 | from dlib import point, points
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
def test_point():
    """Construct a dlib.point; check accessors, repr/str and pickle round-trip."""
    p = point(27, 42)
    assert repr(p) == "point(27, 42)"
    assert str(p) == "(27, 42)"
    assert p.x == 27
    assert p.y == 42
    # protocol 2 for Python 2/3 compatible pickles
    ser = pickle.dumps(p, 2)
    deser = pickle.loads(ser)
    assert deser.x == p.x
    assert deser.y == p.y
def test_point_assignment():
    """Coordinates of a dlib.point must be independently assignable."""
    p = point(27, 42)
    p.x = 16
    assert p.x == 16
    assert p.y == 42
    p.y = 31
    assert p.x == 16
    assert p.y == 31
def test_point_init_kwargs():
    """dlib.point must accept keyword arguments in any order."""
    p = point(y=27, x=42)
    assert repr(p) == "point(42, 27)"
    assert str(p) == "(42, 27)"
    assert p.x == 42
    assert p.y == 27
def test_points():
    """Exercise the dlib.points container: resize, clear, extend, pickle."""
    ps = points()
    ps.resize(5)
    assert len(ps) == 5
    # resize() default-initialises every element to (0, 0)
    for i in range(5):
        assert ps[i].x == 0
        assert ps[i].y == 0
    ps.clear()
    assert len(ps) == 0
    ps.extend([point(1, 2), point(3, 4)])
    assert len(ps) == 2
    ser = pickle.dumps(ps, 2)
    deser = pickle.loads(ser)
    assert deser[0].x == 1
    assert deser[0].y == 2
    assert deser[1].x == 3
    assert deser[1].y == 4
| 938 | 0 | 92 |
3e3949fe63731b232415468a7f16a08adf502c6a | 981 | py | Python | Offer_book_problems/008nextNode_in_BTree.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | Offer_book_problems/008nextNode_in_BTree.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | Offer_book_problems/008nextNode_in_BTree.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-01-23
Function: 二叉树的下一节点
"""
def getBTreeNextNode(pNode):
    """
    Given a node of a binary tree with parent links, return the node that
    follows it in an in-order traversal, or None if there is no successor.

    :param pNode: the node whose in-order successor is wanted
    :return: the successor node, or None
    """
    if pNode is None:
        return None
    if pNode.rchild is not None:
        # Successor is the leftmost node of the right subtree.
        tmpNode = pNode.rchild
        while tmpNode.lchild:
            tmpNode = tmpNode.lchild
        return tmpNode
    # No right subtree: climb until we come up from a left child; that
    # parent is the successor.  Reaching the root means no successor.
    if pNode.parent is None:
        return None
    elif pNode.parent.lchild == pNode:
        return pNode.parent
    else:
        while pNode.parent:
            if pNode.parent.lchild and pNode.parent.lchild == pNode:
                return pNode.parent
            pNode = pNode.parent
        return None
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-01-23
Function: 二叉树的下一节点
"""
class BTLinkNode(object):
    """A binary-tree node that also keeps a link to its parent."""
    def __init__(self, elem, lchild=None, rchild=None, parent=None):
        self.elem = elem        # payload stored in the node
        self.lchild = lchild    # left child node, or None
        self.rchild = rchild    # right child node, or None
        self.parent = parent    # parent node, or None for the root
def getBTreeNextNode(pNode):
    """
    Given a node of a binary tree with parent links, return the node that
    follows it in an in-order traversal, or None if there is no successor.

    :param pNode: the node whose in-order successor is wanted
    :return: the successor node, or None
    """
    if pNode is None:
        return None
    if pNode.rchild is not None:
        # Successor is the leftmost node of the right subtree.
        tmpNode = pNode.rchild
        while tmpNode.lchild:
            tmpNode = tmpNode.lchild
        return tmpNode
    # No right subtree: climb until we come up from a left child; that
    # parent is the successor.  Reaching the root means no successor.
    if pNode.parent is None:
        return None
    elif pNode.parent.lchild == pNode:
        return pNode.parent
    else:
        while pNode.parent:
            if pNode.parent.lchild and pNode.parent.lchild == pNode:
                return pNode.parent
            pNode = pNode.parent
        return None
591a344d3b4ce08a96e8f6be6e20ecc1f055d514 | 4,045 | py | Python | src/plugins/pcr/calender.py | cdlaimin/CoolQBot | eb77046dd9f8c53c4e7b2e8419d2e447261ade97 | [
"MIT"
] | 72 | 2019-10-23T08:07:58.000Z | 2022-03-31T12:02:08.000Z | src/plugins/pcr/calender.py | cdlaimin/CoolQBot | eb77046dd9f8c53c4e7b2e8419d2e447261ade97 | [
"MIT"
] | 87 | 2019-03-11T09:52:31.000Z | 2022-03-21T21:56:48.000Z | src/plugins/pcr/calender.py | cdlaimin/CoolQBot | eb77046dd9f8c53c4e7b2e8419d2e447261ade97 | [
"MIT"
] | 24 | 2019-03-08T08:15:17.000Z | 2021-12-24T05:25:58.000Z | """ 日程表
https://pcrbot.github.io/pcr-calendar/#cn
"""
from datetime import datetime, timedelta
from typing import Dict, Optional, Set
import httpx
from nonebot import get_bot
from nonebot.log import logger
from nonebot_plugin_apscheduler import scheduler
from .config import plugin_config
calender_obj = Calender()
| 30.413534 | 88 | 0.536712 | """ 日程表
https://pcrbot.github.io/pcr-calendar/#cn
"""
from datetime import datetime, timedelta
from typing import Dict, Optional, Set
import httpx
from nonebot import get_bot
from nonebot.log import logger
from nonebot_plugin_apscheduler import scheduler
from .config import plugin_config
class Calender:
    """In-memory PCR event calendar: fetches the schedule JSON, keeps a
    per-day timeline, and pushes/renders daily and weekly summaries."""
    def __init__(self):
        # URL of the auto-updated schedule JSON
        self._url = "https://pcrbot.github.io/calendar-updater-action/cn.json"
        # scheduled push job handle
        self._job = None
        # timeline: "YYYYMMDD" -> set of event names active that day
        self._timeline: Dict[str, Set[str]] = {}
        self._timeline_update_time: datetime = datetime.now()
        self.init()
    def init(self):
        """Register the daily automatic calendar push job."""
        logger.info("初始化 公主连结Re:Dive 日程推送")
        self._job = scheduler.add_job(
            self.push_calender,
            "cron",
            hour=plugin_config.calender_hour,
            minute=plugin_config.calender_minute,
            second=plugin_config.calender_second,
            id="push_calender",
        )
    async def refresh_calender(self) -> None:
        """Fetch the latest schedule and rebuild the timeline."""
        if self._timeline:
            # data refreshed within the last four hours is still fresh enough
            if self._timeline_update_time > datetime.now() - timedelta(hours=4):
                return None
        try:
            async with httpx.AsyncClient() as client:
                r = await client.get(self._url)
                if r.status_code != 200:
                    # any non-200 HTTP status means the fetch failed
                    return None
                # drop the previous timeline before rebuilding
                self._timeline.clear()
                for event in r.json():
                    start_time = datetime.strptime(
                        event["start_time"], "%Y/%m/%d %H:%M:%S"
                    )
                    end_time = datetime.strptime(event["end_time"], "%Y/%m/%d %H:%M:%S")
                    name = event["name"]
                    self.add_event(start_time, end_time, name)
                self._timeline_update_time = datetime.now()
                logger.info("公主连结Re:Dive 日程表刷新成功")
        except (httpx.HTTPError, KeyError) as e:
            logger.error(f"获取日程表出错,{e}")
            # any of the above exceptions means the refresh failed
            return None
    def add_event(self, start_time: datetime, end_time: datetime, name: str) -> None:
        """Add an event to the timeline for every day it spans (inclusive)."""
        t = start_time
        while t <= end_time:
            daystr = t.strftime("%Y%m%d")
            if daystr not in self._timeline:
                self._timeline[daystr] = set()
            self._timeline[daystr].add(name)
            t += timedelta(days=1)
    async def push_calender(self):
        """Push today's events to all configured groups."""
        # no groups configured -> nothing to push
        if not plugin_config.push_calender_group_id:
            return
        logger.info("推送今日 公主连结Re:Dive 日程")
        await self.refresh_calender()
        date = datetime.now()
        events = self._timeline.get(date.strftime("%Y%m%d"))
        if events is None:
            events_str = "无活动或无数据"
        else:
            events_str = "\n".join(events)
        try:
            bot = get_bot()
        except ValueError:
            bot = None
        reply = "公主连结Re:Dive 今日活动:\n{}".format(events_str)
        for group_id in plugin_config.push_calender_group_id:
            if bot:
                await bot.send_msg(
                    message_type="group", group_id=group_id, message=reply
                )
            else:
                logger.warning("no bot connected")
    async def get_week_events(self) -> str:
        """Render the next seven days of events as a text report."""
        await self.refresh_calender()
        reply = "一周日程:"
        date = datetime.now()
        for _ in range(7):
            events = self._timeline.get(date.strftime("%Y%m%d"), ())
            events_str = "\n⨠".join(sorted(events))
            if events_str == "":
                events_str = "没有记录"
            daystr = date.strftime("%Y%m%d")
            reply += f"\n======{daystr}======\n⨠{events_str}"
            date += timedelta(days=1)
        reply += "\n\n更多日程:https://pcrbot.github.io/pcr-calendar/#cn"
        return reply
calender_obj = Calender()
| 303 | 3,768 | 23 |
91ded89ee5895b5142e5d05120e0bf5d7092adf9 | 3,151 | py | Python | components/crud-web-apps/common/backend/kubeflow/kubeflow/crud_backend/helpers.py | umka1332/kubeflow | d831d05f8489270211eb547fb80d7adfaa7e1128 | [
"Apache-2.0"
] | 1 | 2021-05-27T15:00:26.000Z | 2021-05-27T15:00:26.000Z | components/crud-web-apps/common/backend/kubeflow/kubeflow/crud_backend/helpers.py | umka1332/kubeflow | d831d05f8489270211eb547fb80d7adfaa7e1128 | [
"Apache-2.0"
] | 60 | 2020-06-08T19:50:01.000Z | 2022-02-09T14:19:26.000Z | components/crud-web-apps/common/backend/kubeflow/kubeflow/crud_backend/helpers.py | umka1332/kubeflow | d831d05f8489270211eb547fb80d7adfaa7e1128 | [
"Apache-2.0"
] | 2 | 2020-06-19T18:51:19.000Z | 2021-08-03T15:11:55.000Z | """
Common helper functions for handling k8s objects information
"""
import datetime as dt
import logging
import os
import re
import yaml
from flask import current_app
log = logging.getLogger(__name__)
def get_prefixed_index_html():
    """
    The backend should modify the <base> element of the index.html file to
    align with the configured prefix the backend is listening
    """
    # normalise to "/<prefix>/" (trailing slash guaranteed by the empty part)
    prefix = os.path.join("/", current_app.config["PREFIX"], "")
    static_dir = current_app.config["STATIC_DIR"]

    log.info("Setting the <base> to reflect the prefix: %s", prefix)
    with open(os.path.join(static_dir, "index.html"), "r") as f:
        index_html = f.read()
        # rewrite whatever <base href="..."> is present with the prefix
        index_prefixed = re.sub(
            r"\<base href=\".*\"\>", '<base href="%s">' % prefix, index_html,
        )
        return index_prefixed
def load_yaml(f):
    """
    f: file path

    Load a yaml file and convert it to a python dict.
    Returns {} for an empty file and None when the file cannot be opened
    or does not parse as YAML.
    """
    try:
        with open(f, "r") as yaml_file:
            raw = yaml_file.read()
    except IOError:
        log.error(f"Error opening: {f}")
        return None

    try:
        parsed = yaml.safe_load(raw)
    except yaml.YAMLError:
        return None
    # YAML exists but is empty -> {}; otherwise the parsed contents
    return {} if parsed is None else parsed
def load_param_yaml(f, **kwargs):
    """
    f: file path

    Load a yaml file and convert it to a python dict. The yaml might have some
    `{var}` values which the user will have to format. For this we first read
    the yaml file and replace these variables and then convert the generated
    string to a dict via the yaml module.

    Returns {} for an empty file and None on open/parse failure.
    """
    c = None
    try:
        with open(f, "r") as yaml_file:
            # str.format substitutes the {var} placeholders before parsing
            c = yaml_file.read().format(**kwargs)
    except IOError:
        log.error(f"Error opening: {f}")
        return None

    try:
        contents = yaml.safe_load(c)
        if contents is None:
            # YAML exists but is empty
            return {}
        else:
            # YAML exists and is not empty
            return contents
    except yaml.YAMLError:
        return None
def get_uptime(then):
"""
then: datetime instance | string
Return a string that informs how much time has pasted from the provided
timestamp.
"""
if isinstance(then, str):
then = dt.datetime.strptime(then, "%Y-%m-%dT%H:%M:%SZ")
now = dt.datetime.now()
diff = now - then.replace(tzinfo=None)
days = diff.days
hours = int(diff.seconds / 3600)
mins = int((diff.seconds % 3600) / 60)
age = ""
if days > 0:
if days == 1:
age = str(days) + " day"
else:
age = str(days) + " days"
else:
if hours > 0:
if hours == 1:
age = str(hours) + " hour"
else:
age = str(hours) + " hours"
else:
if mins == 0:
return "just now"
if mins == 1:
age = str(mins) + " min"
else:
age = str(mins) + " mins"
return age + " ago"
| 25.208 | 78 | 0.554745 | """
Common helper functions for handling k8s objects information
"""
import datetime as dt
import logging
import os
import re
import yaml
from flask import current_app
log = logging.getLogger(__name__)
def get_prefixed_index_html():
"""
The backend should modify the <base> element of the index.html file to
align with the configured prefix the backend is listening
"""
prefix = os.path.join("/", current_app.config["PREFIX"], "")
static_dir = current_app.config["STATIC_DIR"]
log.info("Setting the <base> to reflect the prefix: %s", prefix)
with open(os.path.join(static_dir, "index.html"), "r") as f:
index_html = f.read()
index_prefixed = re.sub(
r"\<base href=\".*\"\>", '<base href="%s">' % prefix, index_html,
)
return index_prefixed
def load_yaml(f):
"""
f: file path
Load a yaml file and convert it to a python dict.
"""
c = None
try:
with open(f, "r") as yaml_file:
c = yaml_file.read()
except IOError:
log.error(f"Error opening: {f}")
return None
try:
contents = yaml.safe_load(c)
if contents is None:
# YAML exists but is empty
return {}
else:
# YAML exists and is not empty
return contents
except yaml.YAMLError:
return None
def load_param_yaml(f, **kwargs):
"""
f: file path
Load a yaml file and convert it to a python dict. The yaml might have some
`{var}` values which the user will have to format. For this we first read
the yaml file and replace these variables and then convert the generated
string to a dict via the yaml module.
"""
c = None
try:
with open(f, "r") as yaml_file:
c = yaml_file.read().format(**kwargs)
except IOError:
log.error(f"Error opening: {f}")
return None
try:
contents = yaml.safe_load(c)
if contents is None:
# YAML exists but is empty
return {}
else:
# YAML exists and is not empty
return contents
except yaml.YAMLError:
return None
def get_uptime(then):
"""
then: datetime instance | string
Return a string that informs how much time has pasted from the provided
timestamp.
"""
if isinstance(then, str):
then = dt.datetime.strptime(then, "%Y-%m-%dT%H:%M:%SZ")
now = dt.datetime.now()
diff = now - then.replace(tzinfo=None)
days = diff.days
hours = int(diff.seconds / 3600)
mins = int((diff.seconds % 3600) / 60)
age = ""
if days > 0:
if days == 1:
age = str(days) + " day"
else:
age = str(days) + " days"
else:
if hours > 0:
if hours == 1:
age = str(hours) + " hour"
else:
age = str(hours) + " hours"
else:
if mins == 0:
return "just now"
if mins == 1:
age = str(mins) + " min"
else:
age = str(mins) + " mins"
return age + " ago"
| 0 | 0 | 0 |
fca9a93153360695e0d8f4b6f9beece87a7f70e5 | 2,009 | py | Python | dist-packages/samba/tests/blackbox/ndrdump.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | [
"PSF-2.0"
] | 480 | 2015-02-03T11:59:43.000Z | 2022-03-21T13:23:29.000Z | lib/python2.7/site-packages/samba/tests/blackbox/ndrdump.py | brianwrf/pth-toolkit | 3641cdc76c0f52275315c9b18bf08b22521bd4d7 | [
"BSD-2-Clause"
] | 6 | 2015-02-03T14:06:12.000Z | 2021-05-11T12:07:02.000Z | lib/python2.7/site-packages/samba/tests/blackbox/ndrdump.py | brianwrf/pth-toolkit | 3641cdc76c0f52275315c9b18bf08b22521bd4d7 | [
"BSD-2-Clause"
] | 137 | 2015-02-05T13:31:57.000Z | 2022-02-23T09:44:18.000Z | # Blackbox tests for ndrdump
# Copyright (C) 2008 Andrew Tridgell <tridge@samba.org>
# Copyright (C) 2008 Andrew Bartlett <abartlet@samba.org>
# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
# based on test_smbclient.sh
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Blackbox tests for ndrdump."""
import os
from samba.tests import BlackboxTestCase
for p in [ "../../../../../source4/librpc/tests", "../../../../../librpc/tests"]:
data_path_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), p))
print data_path_dir
if os.path.exists(data_path_dir):
break
class NdrDumpTests(BlackboxTestCase):
"""Blackbox tests for ndrdump."""
| 40.18 | 167 | 0.72225 | # Blackbox tests for ndrdump
# Copyright (C) 2008 Andrew Tridgell <tridge@samba.org>
# Copyright (C) 2008 Andrew Bartlett <abartlet@samba.org>
# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
# based on test_smbclient.sh
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Blackbox tests for ndrdump."""
import os
from samba.tests import BlackboxTestCase
for p in [ "../../../../../source4/librpc/tests", "../../../../../librpc/tests"]:
data_path_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), p))
print data_path_dir
if os.path.exists(data_path_dir):
break
class NdrDumpTests(BlackboxTestCase):
"""Blackbox tests for ndrdump."""
def data_path(self, name):
return os.path.join(data_path_dir, name)
def test_ndrdump_with_in(self):
self.check_run("ndrdump samr samr_CreateUser in %s" % (self.data_path("samr-CreateUser-in.dat")))
def test_ndrdump_with_out(self):
self.check_run("ndrdump samr samr_CreateUser out %s" % (self.data_path("samr-CreateUser-out.dat")))
def test_ndrdump_context_file(self):
self.check_run("ndrdump --context-file %s samr samr_CreateUser out %s" % (self.data_path("samr-CreateUser-in.dat"), self.data_path("samr-CreateUser-out.dat")))
def test_ndrdump_with_validate(self):
self.check_run("ndrdump --validate samr samr_CreateUser in %s" % (self.data_path("samr-CreateUser-in.dat")))
| 605 | 0 | 135 |
81b24bed0ebe42d5f358871f7cea3fbf1f53bc73 | 837 | py | Python | esphome/components/rdm6300/binary_sensor.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | esphome/components/rdm6300/binary_sensor.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | esphome/components/rdm6300/binary_sensor.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor, rdm6300
from esphome.const import CONF_UID, CONF_ID
from . import rdm6300_ns
DEPENDENCIES = ['rdm6300']
CONF_RDM6300_ID = 'rdm6300_id'
RDM6300BinarySensor = rdm6300_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor)
CONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_id(RDM6300BinarySensor),
cv.GenerateID(CONF_RDM6300_ID): cv.use_id(rdm6300.RDM6300Component),
cv.Required(CONF_UID): cv.uint32_t,
})
| 32.192308 | 90 | 0.784946 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor, rdm6300
from esphome.const import CONF_UID, CONF_ID
from . import rdm6300_ns
DEPENDENCIES = ['rdm6300']
CONF_RDM6300_ID = 'rdm6300_id'
RDM6300BinarySensor = rdm6300_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor)
CONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_id(RDM6300BinarySensor),
cv.GenerateID(CONF_RDM6300_ID): cv.use_id(rdm6300.RDM6300Component),
cv.Required(CONF_UID): cv.uint32_t,
})
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield binary_sensor.register_binary_sensor(var, config)
hub = yield cg.get_variable(config[CONF_RDM6300_ID])
cg.add(hub.register_card(var))
cg.add(var.set_id(config[CONF_UID]))
| 237 | 0 | 23 |
09d52eb37970544a99d7c46d2209764658a577e5 | 4,963 | py | Python | checkm/taxonParser.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | checkm/taxonParser.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | checkm/taxonParser.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ###############################################################################
#
# taxonParser.py - parse taxonomic-specific marker sets
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
from collections import defaultdict
import checkm.prettytable as prettytable
from checkm.markerSets import BinMarkerSets, MarkerSet
from checkm.util.taxonomyUtils import taxonomicRanks, ranksByLevel, ranksByLabel
from checkm.defaultValues import DefaultValues
class TaxonParser():
"""Parse taxonomic-specific marker sets."""
def list(self, rankFilter='ALL'):
""" List all available marker sets from the specified rank."""
taxonMarkerSets = self.readMarkerSets()
header = ['Rank', 'Taxon', '# genomes', '# marker genes', '# marker sets']
pTable = prettytable.PrettyTable(header)
pTable.align = 'c'
pTable.align['Rank'] = 'l'
pTable.align['Taxon'] = 'l'
pTable.hrules = prettytable.FRAME
pTable.vrules = prettytable.NONE
for rank in taxonomicRanks:
if rankFilter == 'ALL' or rankFilter == rank:
for taxon in sorted(taxonMarkerSets[rank]):
markerSet = taxonMarkerSets[rank][taxon]
numMarkers, numMarkerSets = markerSet.size()
pTable.add_row([rank, taxon, markerSet.numGenomes, numMarkers, numMarkerSets])
print ''
print pTable.get_string()
def markerSet(self, rank, taxon, markerFile):
"""Obtain specified taxonomic-specific marker set."""
taxonMarkerSets = self.readMarkerSets()
if rank not in taxonMarkerSets:
self.logger.error(' Unrecognized taxonomic rank: ' + rank)
return False
elif taxon not in taxonMarkerSets[rank]:
self.logger.error(' Unrecognized taxon: %s (in rank %s): ' % (taxon, rank))
return False
markerSet = taxonMarkerSets[rank][taxon]
taxonomy = markerSet.lineageStr.split(';')[::-1]
binMarkerSets = BinMarkerSets(taxon, BinMarkerSets.TAXONOMIC_MARKER_SET)
for i, taxon in enumerate(taxonomy):
if rank != 'life':
rank = ranksByLevel[len(taxonomy) - i - 1]
if rank == 'species':
taxon = taxonomy[1] + ' ' + taxonomy[0]
markerSet = taxonMarkerSets[rank][taxon]
numMarkers, numMarkerSets = markerSet.size()
self.logger.info(' Marker set for %s contains %d marker genes arranged in %d sets.' % (taxon, numMarkers, numMarkerSets))
self.logger.info(' Marker set inferred from %d reference genomes.' % markerSet.numGenomes)
markerSet.lineageStr = taxon
binMarkerSets.addMarkerSet(markerSet)
fout = open(markerFile, 'w')
fout.write(DefaultValues.TAXON_MARKER_FILE_HEADER + '\n')
binMarkerSets.write(fout)
fout.close()
return True
| 42.784483 | 135 | 0.535563 | ###############################################################################
#
# taxonParser.py - parse taxonomic-specific marker sets
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
from collections import defaultdict
import checkm.prettytable as prettytable
from checkm.markerSets import BinMarkerSets, MarkerSet
from checkm.util.taxonomyUtils import taxonomicRanks, ranksByLevel, ranksByLabel
from checkm.defaultValues import DefaultValues
class TaxonParser():
"""Parse taxonomic-specific marker sets."""
def __init__(self):
self.logger = logging.getLogger()
def readMarkerSets(self):
taxonMarkerSets = defaultdict(dict)
for line in open(DefaultValues.TAXON_MARKER_SETS):
lineSplit = line.split('\t')
rank = lineSplit[0]
taxon = lineSplit[1]
lineage = lineSplit[2]
numGenomes = int(lineSplit[3])
markerSet = eval(lineSplit[6].rstrip())
ms = MarkerSet(ranksByLabel[rank], lineage, numGenomes, markerSet)
ms.removeMarkers(DefaultValues.MARKERS_TO_EXCLUDE)
taxonMarkerSets[rank][taxon] = ms
return taxonMarkerSets
def list(self, rankFilter='ALL'):
""" List all available marker sets from the specified rank."""
taxonMarkerSets = self.readMarkerSets()
header = ['Rank', 'Taxon', '# genomes', '# marker genes', '# marker sets']
pTable = prettytable.PrettyTable(header)
pTable.align = 'c'
pTable.align['Rank'] = 'l'
pTable.align['Taxon'] = 'l'
pTable.hrules = prettytable.FRAME
pTable.vrules = prettytable.NONE
for rank in taxonomicRanks:
if rankFilter == 'ALL' or rankFilter == rank:
for taxon in sorted(taxonMarkerSets[rank]):
markerSet = taxonMarkerSets[rank][taxon]
numMarkers, numMarkerSets = markerSet.size()
pTable.add_row([rank, taxon, markerSet.numGenomes, numMarkers, numMarkerSets])
print ''
print pTable.get_string()
def markerSet(self, rank, taxon, markerFile):
"""Obtain specified taxonomic-specific marker set."""
taxonMarkerSets = self.readMarkerSets()
if rank not in taxonMarkerSets:
self.logger.error(' Unrecognized taxonomic rank: ' + rank)
return False
elif taxon not in taxonMarkerSets[rank]:
self.logger.error(' Unrecognized taxon: %s (in rank %s): ' % (taxon, rank))
return False
markerSet = taxonMarkerSets[rank][taxon]
taxonomy = markerSet.lineageStr.split(';')[::-1]
binMarkerSets = BinMarkerSets(taxon, BinMarkerSets.TAXONOMIC_MARKER_SET)
for i, taxon in enumerate(taxonomy):
if rank != 'life':
rank = ranksByLevel[len(taxonomy) - i - 1]
if rank == 'species':
taxon = taxonomy[1] + ' ' + taxonomy[0]
markerSet = taxonMarkerSets[rank][taxon]
numMarkers, numMarkerSets = markerSet.size()
self.logger.info(' Marker set for %s contains %d marker genes arranged in %d sets.' % (taxon, numMarkers, numMarkerSets))
self.logger.info(' Marker set inferred from %d reference genomes.' % markerSet.numGenomes)
markerSet.lineageStr = taxon
binMarkerSets.addMarkerSet(markerSet)
fout = open(markerFile, 'w')
fout.write(DefaultValues.TAXON_MARKER_FILE_HEADER + '\n')
binMarkerSets.write(fout)
fout.close()
return True
| 633 | 0 | 56 |
43350b093630de1460d7a41f738dff3506dbfc5b | 5,368 | py | Python | code/util_db.py | chenyuheng/CTRAS | a755b966ddf32cb7035f93201ce74cf1075f444d | [
"MIT"
] | null | null | null | code/util_db.py | chenyuheng/CTRAS | a755b966ddf32cb7035f93201ce74cf1075f444d | [
"MIT"
] | null | null | null | code/util_db.py | chenyuheng/CTRAS | a755b966ddf32cb7035f93201ce74cf1075f444d | [
"MIT"
] | 1 | 2020-11-12T12:31:51.000Z | 2020-11-12T12:31:51.000Z | #-*- coding:utf-8 -*-
import sqlite3
# ---------------------------------------------------------------------------------------
# Description : Database Processor
# ---------------------------------------------------------------------------------------
# duplicate_tag is the group id.
# retrieve all different sentence candadite of a certain group. tag here is the group id | 30.5 | 214 | 0.670268 | #-*- coding:utf-8 -*-
import sqlite3
# ---------------------------------------------------------------------------------------
# Description : Database Processor
# ---------------------------------------------------------------------------------------
class Cluster:
cluster_id = None
reports = []
def __init__(self, cluster_id, reports):
self.clsuter_id = cluster_id
self.reports = reports
def __eq__(self, another):
if self.clsuter_id == another.get_cluster_id():
return True
return False
def get_cluster_id(self):
return self.clsuter_id
def get_reports(self):
return set(self.reports)
def connect_db():
db = sqlite3.connect("db.db")
# db = MySQLdb.connect(host = '',
# user = '',
# passwd = '',
# charset = 'utf8',
# db = ''
# )
return db
def close_db(db):
db.close()
# duplicate_tag is the group id.
def insert_diff_sentence_into_sql(app, duplicate_tag, diff_sentence, diff_sentence_index, report_id):
db = connect_db()
cur = db.cursor()
sql = "INSERT INTO diff_txt VALUES (?,?,?,?,?)"
l = (app, duplicate_tag, diff_sentence, diff_sentence_index, report_id)
cur.execute(sql, l)
db.commit()
close_db(db)
def insert_diff_img_into_sql(app, duplicate_tag, diff_img, report_id):
db = connect_db()
cur = db.cursor()
sql = "INSERT INTO diff_img VALUES (%s,%s,%s,%s)"
l = (app, duplicate_tag, diff_img, report_id)
cur.execute(sql, l)
db.commit()
close_db(db)
# retrieve all different sentence candadite of a certain group. tag here is the group id
def get_all_sentence_records(app, tag):
db = connect_db()
cur = db.cursor()
cur.execute("SELECT app, duplicate_tag, diff_sentence, " +
"diff_sentence_index, report_id FROM diff_txt " +
"WHERE app = '" + app + "' AND duplicate_tag = " + tag)
all_records = cur.fetchall()
close_db(db)
return all_records
def get_all_img_records(app, tag):
db = connect_db()
cur = db.cursor()
cur.execute("SELECT app, duplicate_tag, diff_img, " +
"report_id FROM diff_img " +
"WHERE app = '" + app + "' AND duplicate_tag = " + tag)
all_records = cur.fetchall()
close_db(db)
return all_records
def get_all_clusters(app, tag, table):
all_clusters = []
db = connect_db()
cur = db.cursor()
# get all cluster_id
cur.execute("SELECT DISTINCT cluster_id FROM " + table +
" WHERE app = '" + app + "' AND duplicate_tag = " + tag +
" ORDER BY cluster_id ASC")
results = cur.fetchall()
for record in results:
cluster_id = record[0]
# get all reports of this cluster
cur.execute("SELECT DISTINCT report_id FROM " + table +
" WHERE app = '" + app + "' AND duplicate_tag = " + tag +
" AND cluster_id = " + str(cluster_id) +
" ORDER BY report_id ASC")
reports = [x[0] for x in cur.fetchall()]
cluster_obj = Cluster(cluster_id, reports)
all_clusters.append(cluster_obj)
close_db(db)
return all_clusters
def select_cluster_combine_tag(group_id, app):#str,int,str,int,int
db = connect_db()
cur = db.cursor()
sql = "SELECT DISTINCT cluster_tag FROM cluster_combine WHERE duplicate_tag = " + str(group_id) + " AND app = '" + app + "' ORDER BY cluster_tag"
cur.execute(sql)
records = cur.fetchall()
close_db(db)
return records
def select_cluster_id_txt(cluster_combine_tag, group_id, app):#str,int,str,int,int
db = connect_db()
cur = db.cursor()
sql = "SELECT DISTINCT cluster_id_txt FROM cluster_combine WHERE cluster_tag = " + str(cluster_combine_tag) + " AND duplicate_tag = "+ str(group_id) + " AND app = '" + app + "' ORDER BY cluster_id_txt"
cur.execute(sql)
records = cur.fetchall()
close_db(db)
return records
def select_cluster_id_img(cluster_combine_tag, group_id, app):#str,int,str,int,int
db = connect_db()
cur = db.cursor()
sql = "SELECT DISTINCT cluster_id_img FROM cluster_combine WHERE cluster_tag = " + str(cluster_combine_tag) + " AND duplicate_tag = "+ str(group_id) + " AND app = '" + app + "' ORDER BY cluster_id_img"
cur.execute(sql)
records = cur.fetchall()
close_db(db)
return records
def select_cluster_txt_tag(cluster_id, group_id, app):#str,int,str,int,int
db = connect_db()
cur = db.cursor()
sql = "SELECT DISTINCT diff_sentence, report_id, diff_sentence_index FROM cluster_txt WHERE cluster_id = " + str(cluster_id) + " AND duplicate_tag = "+ str(group_id) + " AND app = '" + app + "' ORDER BY report_id"
cur.execute(sql)
records = cur.fetchall()
close_db(db)
return records
def select_cluster_img_tag(cluster_id, group_id, app):
db = connect_db()
cur = db.cursor()
sql = "SELECT DISTINCT diff_img, report_id FROM cluster_img WHERE cluster_id = " + str(cluster_id) + " AND duplicate_tag = "+ str(group_id) + " AND app = '" + app + "' ORDER BY report_id"
cur.execute(sql)
records = cur.fetchall()
close_db(db)
return records
def insert_top_txt_into_sql(app, duplicate_tag, cluster_tag, txts):#str,int,int,str
db = connect_db()
cur = db.cursor()
print(txts)
sent = []
for i in txts[1].split(" "):
sent.append(i.split("_")[0])
sql = "INSERT INTO top_txt VALUES (?, ?, ?, ?)"
l = (app, duplicate_tag, cluster_tag, " ".join(sent))
cur.execute(sql,l)
db.commit()
close_db(db)
def insert_top_img_into_sql(app, duplicate_tag, cluster_tag, imgs):#str,int,int,str
db = connect_db()
cur = db.cursor()
sql = "INSERT INTO top_img VALUES (?, ?, ?, ?)"
l = (app, duplicate_tag, cluster_tag, imgs)
cur.execute(sql,l)
db.commit()
close_db(db) | 4,526 | 122 | 343 |
f46172b67bc62f92888ec2a748e0705a3d5e3445 | 779 | py | Python | src/encryption.py | DigitalAgnost/pass | e3c333944f9fe6db92394e8cbb99b1f8dbc8cae2 | [
"MIT"
] | 1 | 2021-03-07T16:48:24.000Z | 2021-03-07T16:48:24.000Z | src/encryption.py | owolabioromidayo/pass | e3c333944f9fe6db92394e8cbb99b1f8dbc8cae2 | [
"MIT"
] | null | null | null | src/encryption.py | owolabioromidayo/pass | e3c333944f9fe6db92394e8cbb99b1f8dbc8cae2 | [
"MIT"
] | null | null | null | from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from config import ConfigService
| 35.409091 | 105 | 0.632863 | from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from config import ConfigService
class EncryptionService:
def __init__(self):
salt = b'\x12jk\xa3~\xfbc.1\xf7O\xea\xad\xeb\xb5\xbaT\x97\x08+$\x8fi)\x08\x0e;\x06\x07\xc0\xb9#'
self.iv = b'03h\x90\xa4\x8a\tTRu63Z\xed\x17\xa0'
self.config = ConfigService()
self.password=self.config.get('MASTER')
self.key = PBKDF2(self.password, salt, dkLen=32)
def encrypt(self, data):
cipher = AES.new(self.key, AES.MODE_CFB, iv=self.iv)
return cipher.encrypt(data.encode('utf-8'))
def decrypt(self, data):
cipher = AES.new(self.key, AES.MODE_CFB, iv=self.iv)
return cipher.decrypt(data).decode('utf-8')
| 548 | 3 | 113 |
a2d4f21eeb08c513b2b172867c62fd0a262e64ec | 120 | py | Python | run.py | ivanlmj/puppy | 172ea0af3bad9d930a428bf839b8699398a2e9d9 | [
"Apache-2.0"
] | null | null | null | run.py | ivanlmj/puppy | 172ea0af3bad9d930a428bf839b8699398a2e9d9 | [
"Apache-2.0"
] | 4 | 2018-12-03T16:10:53.000Z | 2018-12-06T20:21:16.000Z | run.py | ivanlmj/puppy | 172ea0af3bad9d930a428bf839b8699398a2e9d9 | [
"Apache-2.0"
] | 1 | 2018-10-20T16:54:48.000Z | 2018-10-20T16:54:48.000Z | #!/usr/bin/python3
from app import app
__author__ = "@ivanleoncz"
if __name__ == "__main__":
app.run(debug=True)
| 13.333333 | 26 | 0.683333 | #!/usr/bin/python3
from app import app
__author__ = "@ivanleoncz"
if __name__ == "__main__":
app.run(debug=True)
| 0 | 0 | 0 |
c3246c36b9dd0f9dfff43ac8a71493ef21a8d206 | 1,344 | py | Python | tests/extra/test_serializer.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | 14 | 2021-06-18T12:16:02.000Z | 2022-02-10T09:36:50.000Z | tests/extra/test_serializer.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | 37 | 2021-10-03T10:47:12.000Z | 2022-03-24T10:08:20.000Z | tests/extra/test_serializer.py | ChihweiLHBird/sanic-ext | f0193a0cc89650a43c50fe543b43d1832307896f | [
"MIT"
] | 7 | 2021-10-03T09:43:36.000Z | 2022-03-27T14:29:49.000Z | from sanic import text
from sanic.response import json
from sanic_ext import serializer
| 26.88 | 63 | 0.667411 | from sanic import text
from sanic.response import json
from sanic_ext import serializer
def test_serializer_with_builtin(app):
@app.get("/")
@serializer(text)
async def handler(request):
return "hello"
@app.get("/201")
@serializer(text, status=201)
async def handler_201(request):
return "hello"
_, response = app.test_client.get("/")
assert response.status_code == 200
assert response.text == "hello"
assert response.content_type == "text/plain; charset=utf-8"
_, response = app.test_client.get("/201")
assert response.status_code == 201
assert response.content_type == "text/plain; charset=utf-8"
def test_serializer_with_custom(app):
def custom(message, status):
return json({"message": message}, status=status)
@app.get("/")
@serializer(custom)
async def handler(request):
return "hello"
@app.get("/201")
@serializer(custom, status=201)
async def handler_201(request):
return "hello"
_, response = app.test_client.get("/")
assert response.status_code == 200
assert response.content_type == "application/json"
assert response.json["message"] == "hello"
_, response = app.test_client.get("/201")
assert response.status_code == 201
assert response.content_type == "application/json"
| 1,207 | 0 | 46 |
12871fa76af694cd535a0687e91ea2c9c06589f4 | 5,056 | py | Python | ClassifierDataPrepper.py | pierremtb/paragraph-vector-baselines | 9ad6855f632323a60bd4bcc82cd1a6371808c0ac | [
"MIT"
] | null | null | null | ClassifierDataPrepper.py | pierremtb/paragraph-vector-baselines | 9ad6855f632323a60bd4bcc82cd1a6371808c0ac | [
"MIT"
] | null | null | null | ClassifierDataPrepper.py | pierremtb/paragraph-vector-baselines | 9ad6855f632323a60bd4bcc82cd1a6371808c0ac | [
"MIT"
] | null | null | null | # COMP 551 Mini Project 4
# 2019-04-17
# Segev, Michael
# Jacquier, Pierre
# Han, Zhenze
# loads and formats data from stanford sentiment tree bank
import re
import numpy as np
# computes the fine-grained labels
| 34.630137 | 98 | 0.552017 | # COMP 551 Mini Project 4
# 2019-04-17
# Segev, Michael
# Jacquier, Pierre
# Han, Zhenze
# loads and formats data from stanford sentiment tree bank
import re
import numpy as np
class ClassifierDataPrepper:
def __init__(self, dataPath):
# read the sentences
with open(dataPath + "dictionary.txt") as file:
phrases = file.read().splitlines()
with open(dataPath + "sentiment_labels.txt") as file:
classes = file.read().splitlines()
classes = classes[1:] # remove first line
with open(dataPath + "datasetSentences.txt", encoding="utf8") as file:
# with open(dataPath + "datasetSentencesFixed.txt", encoding="utf8") as file:
sentences = file.read().splitlines()
sentences = sentences[1:] # remove first line
with open(dataPath + "datasetSplit.txt") as file:
splitCategories = file.read().splitlines()
splitCategories = splitCategories[1:] # remove first line
self.phrase2index = {} # returns the phrase index of input sentence string
# self.index2phrase = {}
self.phraseClassByPhraseIndex = {} # returns the sentiment of an input phrase Idx
self.sentenceIdx2SplitLabel = {}
self.X = {}
self.Y = {}
for sample_class in classes:
sample_class_split = sample_class.split("|")
sampleIdx = int(sample_class_split[0])
score = float(sample_class_split[1])
self.phraseClassByPhraseIndex[sampleIdx] = score
for sample_phrase in phrases:
sample_phrase_split = sample_phrase.split("|")
sampleIdx = int(sample_phrase_split[1])
phrase = sample_phrase_split[0]
# self.index2phrase[sampleIdx] = phrase
self.phrase2index[phrase] = sampleIdx
for sentence in sentences:
sentence_split = re.split(r'\t+', sentence)
sentenceIdx = int(sentence_split[0])
sentenceString = sentence_split[1].replace("-LRB-", '(').replace("-RRB-", ')')
self.X[sentenceIdx] = sentenceString
self.Y[sentenceIdx] = self.phraseClassByPhraseIndex[self.phrase2index[sentenceString]]
for splitCategory in splitCategories:
sentence_split = splitCategory.split(',')
sentenceIdx = int(sentence_split[0])
splitLabel = int(sentence_split[1])
self.sentenceIdx2SplitLabel[sentenceIdx] = splitLabel
def getXYlabeledBinary(self):
Y2Binary = {}
for k, v in self.Y.items():
if v > 0.5:
binClass = 1
else:
binClass = 0
Y2Binary[k] = binClass
x_train = []
y_train = []
x_valid = []
y_valid = []
x_test = []
y_test = []
# 1 = train
# 2 = test
# 3 = dev
for sentIdx, sentence in self.X.items():
if self.sentenceIdx2SplitLabel[sentIdx] == 1:
x_train.append(sentence)
y_train.append(Y2Binary[sentIdx])
elif self.sentenceIdx2SplitLabel[sentIdx] == 2:
x_test.append(sentence)
y_test.append(Y2Binary[sentIdx])
elif self.sentenceIdx2SplitLabel[sentIdx] == 3:
x_valid.append(sentence)
y_valid.append(Y2Binary[sentIdx])
else:
print("Error!")
return x_train, y_train, x_valid, y_valid, x_test, y_test
# computes the fine-grained labels
def getXYlabeledSplit(self):
Y2Split = {}
for k, v in self.Y.items():
if v <= 0.2:
binClass = 0
elif v <= 0.4:
binClass = 1
elif v <= 0.6:
binClass = 2
elif v <= 0.8:
binClass = 3
else:
binClass = 4
Y2Split[k] = binClass
x_train = []
y_train = []
x_valid = []
y_valid = []
x_test = []
y_test = []
# 1 = train
# 2 = test
# 3 = dev
for sentIdx, sentence in self.X.items():
if self.sentenceIdx2SplitLabel[sentIdx] == 1:
x_train.append(sentence)
y_train.append(Y2Split[sentIdx])
elif self.sentenceIdx2SplitLabel[sentIdx] == 2:
x_test.append(sentence)
y_test.append(Y2Split[sentIdx])
elif self.sentenceIdx2SplitLabel[sentIdx] == 3:
x_valid.append(sentence)
y_valid.append(Y2Split[sentIdx])
else:
print("Error!")
return x_train, y_train, x_valid, y_valid, x_test, y_test
def cleanhtml(self, raw_html):
cleanr = re.compile('<.*?>')
cleantext = cleanr.sub(' ', raw_html)
cleantext = re.sub(r"([\.\",\(\)!\?;:])", " \\1 ", cleantext)
re_nonletters = re.compile('[^a-zA-Z ]')
cleantext = re_nonletters.sub(' ', cleantext)
return cleantext
| 4,700 | 7 | 130 |
4ae352d11b5df568ad63367e6fa6d00dc70203c7 | 13,388 | py | Python | radon/tests/test_complexity_visitor.py | DolajoCZ/radon | 316965b27f1ee9228d8556abc08551381a9e569b | [
"MIT"
] | 943 | 2015-01-05T22:02:39.000Z | 2022-03-31T19:33:08.000Z | radon/tests/test_complexity_visitor.py | DolajoCZ/radon | 316965b27f1ee9228d8556abc08551381a9e569b | [
"MIT"
] | 165 | 2015-01-03T16:37:23.000Z | 2022-03-31T15:01:04.000Z | radon/tests/test_complexity_visitor.py | DolajoCZ/radon | 316965b27f1ee9228d8556abc08551381a9e569b | [
"MIT"
] | 108 | 2015-01-03T16:22:14.000Z | 2022-03-02T16:27:36.000Z | import sys
import textwrap
import pytest
from radon.visitors import *
dedent = lambda code: textwrap.dedent(code).strip()
SIMPLE_BLOCKS = [
(
'''
if a: pass
''',
2,
{},
),
(
'''
if a: pass
else: pass
''',
2,
{},
),
(
'''
if a: pass
elif b: pass
''',
3,
{},
),
(
'''
if a: pass
elif b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
''',
3,
{},
),
(
'''
if a and b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
elif c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c: pass
else: pass
''',
4,
{},
),
(
'''
for x in range(10): print(x)
''',
2,
{},
),
(
'''
for x in xrange(10): print(x)
else: pass
''',
3,
{},
),
(
'''
while a < 4: pass
''',
2,
{},
),
(
'''
while a < 4: pass
else: pass
''',
3,
{},
),
(
'''
while a < 4 and b < 42: pass
''',
3,
{},
),
(
'''
while a and b or c < 10: pass
else: pass
''',
5,
{},
),
# With and async-with statements no longer count towards CC, see #123
(
'''
with open('raw.py') as fobj: print(fobj.read())
''',
1,
{},
),
(
'''
[i for i in range(4)]
''',
2,
{},
),
(
'''
[i for i in range(4) if i&1]
''',
3,
{},
),
(
'''
(i for i in range(4))
''',
2,
{},
),
(
'''
(i for i in range(4) if i&1)
''',
3,
{},
),
(
'''
[i for i in range(42) if sum(k ** 2 for k in divisors(i)) & 1]
''',
4,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
''',
3,
{},
),
(
'''
try: raise TypeError
finally: pass
''',
1,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
finally: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
finally: pass
''',
3,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else:
pass
pass
finally: pass
''',
3,
{},
),
# Lambda are not counted anymore as per #68
(
'''
k = lambda a, b: k(b, a)
''',
1,
{},
),
(
'''
k = lambda a, b, c: c if a else b
''',
2,
{},
),
(
'''
v = a if b else c
''',
2,
{},
),
(
'''
v = a if sum(i for i in xrange(c)) < 10 else c
''',
3,
{},
),
(
'''
sum(i for i in range(12) for z in range(i ** 2) if i * z & 1)
''',
4,
{},
),
(
'''
sum(i for i in range(10) if i >= 2 and val and val2 or val3)
''',
6,
{},
),
(
'''
for i in range(10):
print(i)
else:
print('wah')
print('really not found')
print(3)
''',
3,
{},
),
(
'''
while True:
print(1)
else:
print(2)
print(1)
print(0)
print(-1)
''',
3,
{},
),
(
'''
assert i < 0
''',
2,
{},
),
(
'''
assert i < 0, "Fail"
''',
2,
{},
),
(
'''
assert i < 0
''',
1,
{'no_assert': True},
),
(
'''
def f():
assert 10 > 20
''',
1,
{'no_assert': True},
),
(
'''
class TestYo(object):
def test_yo(self):
assert self.n > 4
''',
1,
{'no_assert': True},
),
]
# These run only if Python version is >= 2.7
ADDITIONAL_BLOCKS = [
(
'''
{i for i in range(4)}
''',
2,
{},
),
(
'''
{i for i in range(4) if i&1}
''',
3,
{},
),
(
'''
{i:i**4 for i in range(4)}
''',
2,
{},
),
(
'''
{i:i**4 for i in range(4) if i&1}
''',
3,
{},
),
]
BLOCKS = SIMPLE_BLOCKS[:]
if sys.version_info[:2] >= (2, 7):
BLOCKS.extend(ADDITIONAL_BLOCKS)
@pytest.mark.parametrize('code,expected,kwargs', BLOCKS)
SINGLE_FUNCTIONS_CASES = [
(
'''
def f(a, b, c):
if a and b == 4:
return c ** c
elif a and not c:
return sum(i for i in range(41) if i&1)
return a + b
''',
(1, 7),
),
(
'''
if a and not b: pass
elif b or c: pass
else: pass
for i in range(4):
print(i)
def g(a, b):
while a < b:
b, a = a **2, b ** 2
return b
''',
(6, 2),
),
(
'''
def f(a, b):
while a**b:
a, b = b, a * (b - 1)
if a and b:
b = 0
else:
b = 1
return sum(i for i in range(b))
''',
(1, 5),
),
]
if sys.version_info[:2] >= (3, 5):
# With and async-with statements no longer count towards CC, see #123
SINGLE_FUNCTIONS_CASES.append(
(
'''
async def f(a, b):
async with open('blabla.log', 'w') as f:
async for i in range(100):
f.write(str(i) + '\\n')
''',
(1, 2),
),
)
@pytest.mark.parametrize('code,expected', SINGLE_FUNCTIONS_CASES)
FUNCTIONS_CASES = [
# With and async-with statements no longer count towards CC, see #123
(
'''
def f(a, b):
return a if b else 2
def g(a, b, c):
if a and b:
return a / b + b / a
elif b and c:
return b / c - c / b
return a + b + c
def h(a, b):
return 2 * (a + b)
''',
(2, 5, 1),
),
(
'''
def f(p, q):
while p:
p, q = q, p - q
if q < 1:
return 1 / q ** 2
elif q > 100:
return 1 / q ** .5
return 42 if not q else p
def g(a, b, c):
if a and b or a - b:
return a / b - c
elif b or c:
return 1
else:
k = 0
with open('results.txt', 'w') as fobj:
for i in range(b ** c):
k += sum(1 / j for j in range(i ** 2) if j > 2)
fobj.write(str(k))
return k - 1
''',
(5, 9),
),
]
@pytest.mark.parametrize('code,expected', FUNCTIONS_CASES)
CLASSES_CASES = [
(
'''
class A(object):
def m(self, a, b):
if not a or b:
return b - 1
try:
return a / b
except ZeroDivisionError:
return a
def n(self, k):
while self.m(k) < k:
k -= self.m(k ** 2 - min(self.m(j) for j in range(k ** 4)))
return k
''',
(8, 4, 3),
),
(
'''
class B(object):
ATTR = 9 if A().n(9) == 9 else 10
import sys
if sys.version_info >= (3, 3):
import os
AT = os.openat('/random/loc')
def __iter__(self):
return __import__('itertools').tee(B.__dict__)
def test(self, func):
a = func(self.ATTR, self.AT)
if a < self.ATTR:
yield self
elif a > self.ATTR ** 2:
yield self.__iter__()
yield iter(a)
''',
(7, 1, 3),
),
]
@pytest.mark.parametrize('code,expected', CLASSES_CASES)
GENERAL_CASES = [
(
'''
if a and b:
print
else:
print
a = sum(i for i in range(1000) if i % 3 == 0 and i % 5 == 0)
def f(n):
def inner(n):
return n ** 2
if n == 0:
return 1
elif n == 1:
return n
elif n < 5:
return (n - 1) ** 2
return n * pow(inner(n), f(n - 1), n - 3)
''',
(6, 3, 0, 9),
),
(
'''
try:
1 / 0
except ZeroDivisonError:
print
except TypeError:
pass
class J(object):
def aux(self, w):
if w == 0:
return 0
return w - 1 + sum(self.aux(w - 3 - i) for i in range(2))
def f(a, b):
def inner(n):
return n ** 2
if a < b:
b, a = a, inner(b)
return a, b
''',
(3, 1, 3, 7),
),
(
'''
class f(object):
class inner(object):
pass
''',
(1, 0, 0, 1),
),
]
@pytest.mark.parametrize('code,expected', GENERAL_CASES)
CLOSURES_CASES = [
(
'''
def f(n):
def g(l):
return l ** 4
def h(i):
return i ** 5 + 1 if i & 1 else 2
return sum(g(u + 4) / float(h(u)) for u in range(2, n))
''',
('g', 'h'),
(1, 2, 2),
),
(
'''
# will it work? :D
def memoize(func):
cache = {}
def aux(*args, **kwargs):
key = (args, kwargs)
if key in cache:
return cache[key]
cache[key] = res = func(*args, **kwargs)
return res
return aux
''',
('aux',),
(2, 1),
),
]
@pytest.mark.parametrize('code,closure_names,expected', CLOSURES_CASES)
CONTAINERS_CASES = [
(
('func', 12, 0, 18, False, None, [], 5),
('F', 'func', 'F 12:0->18 func - 5'),
),
(
('meth', 12, 0, 21, True, 'cls', [], 5),
('M', 'cls.meth', 'M 12:0->21 cls.meth - 5'),
),
(('cls', 12, 0, 15, [], [], 5), ('C', 'cls', 'C 12:0->15 cls - 5')),
(
('cls', 12, 0, 19, [object, object, object, object], [], 30),
('C', 'cls', 'C 12:0->19 cls - 8'),
),
]
@pytest.mark.parametrize('values,expected', CONTAINERS_CASES)
| 18.776999 | 76 | 0.412459 | import sys
import textwrap
import pytest
from radon.visitors import *
dedent = lambda code: textwrap.dedent(code).strip()
SIMPLE_BLOCKS = [
(
'''
if a: pass
''',
2,
{},
),
(
'''
if a: pass
else: pass
''',
2,
{},
),
(
'''
if a: pass
elif b: pass
''',
3,
{},
),
(
'''
if a: pass
elif b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
''',
3,
{},
),
(
'''
if a and b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
elif c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c: pass
else: pass
''',
4,
{},
),
(
'''
for x in range(10): print(x)
''',
2,
{},
),
(
'''
for x in xrange(10): print(x)
else: pass
''',
3,
{},
),
(
'''
while a < 4: pass
''',
2,
{},
),
(
'''
while a < 4: pass
else: pass
''',
3,
{},
),
(
'''
while a < 4 and b < 42: pass
''',
3,
{},
),
(
'''
while a and b or c < 10: pass
else: pass
''',
5,
{},
),
# With and async-with statements no longer count towards CC, see #123
(
'''
with open('raw.py') as fobj: print(fobj.read())
''',
1,
{},
),
(
'''
[i for i in range(4)]
''',
2,
{},
),
(
'''
[i for i in range(4) if i&1]
''',
3,
{},
),
(
'''
(i for i in range(4))
''',
2,
{},
),
(
'''
(i for i in range(4) if i&1)
''',
3,
{},
),
(
'''
[i for i in range(42) if sum(k ** 2 for k in divisors(i)) & 1]
''',
4,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
''',
3,
{},
),
(
'''
try: raise TypeError
finally: pass
''',
1,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
finally: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
finally: pass
''',
3,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else:
pass
pass
finally: pass
''',
3,
{},
),
# Lambda are not counted anymore as per #68
(
'''
k = lambda a, b: k(b, a)
''',
1,
{},
),
(
'''
k = lambda a, b, c: c if a else b
''',
2,
{},
),
(
'''
v = a if b else c
''',
2,
{},
),
(
'''
v = a if sum(i for i in xrange(c)) < 10 else c
''',
3,
{},
),
(
'''
sum(i for i in range(12) for z in range(i ** 2) if i * z & 1)
''',
4,
{},
),
(
'''
sum(i for i in range(10) if i >= 2 and val and val2 or val3)
''',
6,
{},
),
(
'''
for i in range(10):
print(i)
else:
print('wah')
print('really not found')
print(3)
''',
3,
{},
),
(
'''
while True:
print(1)
else:
print(2)
print(1)
print(0)
print(-1)
''',
3,
{},
),
(
'''
assert i < 0
''',
2,
{},
),
(
'''
assert i < 0, "Fail"
''',
2,
{},
),
(
'''
assert i < 0
''',
1,
{'no_assert': True},
),
(
'''
def f():
assert 10 > 20
''',
1,
{'no_assert': True},
),
(
'''
class TestYo(object):
def test_yo(self):
assert self.n > 4
''',
1,
{'no_assert': True},
),
]
# These run only if Python version is >= 2.7
ADDITIONAL_BLOCKS = [
(
'''
{i for i in range(4)}
''',
2,
{},
),
(
'''
{i for i in range(4) if i&1}
''',
3,
{},
),
(
'''
{i:i**4 for i in range(4)}
''',
2,
{},
),
(
'''
{i:i**4 for i in range(4) if i&1}
''',
3,
{},
),
]
BLOCKS = SIMPLE_BLOCKS[:]
if sys.version_info[:2] >= (2, 7):
BLOCKS.extend(ADDITIONAL_BLOCKS)
@pytest.mark.parametrize('code,expected,kwargs', BLOCKS)
def test_visitor_simple(code, expected, kwargs):
visitor = ComplexityVisitor.from_code(dedent(code), **kwargs)
assert visitor.complexity == expected
SINGLE_FUNCTIONS_CASES = [
(
'''
def f(a, b, c):
if a and b == 4:
return c ** c
elif a and not c:
return sum(i for i in range(41) if i&1)
return a + b
''',
(1, 7),
),
(
'''
if a and not b: pass
elif b or c: pass
else: pass
for i in range(4):
print(i)
def g(a, b):
while a < b:
b, a = a **2, b ** 2
return b
''',
(6, 2),
),
(
'''
def f(a, b):
while a**b:
a, b = b, a * (b - 1)
if a and b:
b = 0
else:
b = 1
return sum(i for i in range(b))
''',
(1, 5),
),
]
if sys.version_info[:2] >= (3, 5):
# With and async-with statements no longer count towards CC, see #123
SINGLE_FUNCTIONS_CASES.append(
(
'''
async def f(a, b):
async with open('blabla.log', 'w') as f:
async for i in range(100):
f.write(str(i) + '\\n')
''',
(1, 2),
),
)
@pytest.mark.parametrize('code,expected', SINGLE_FUNCTIONS_CASES)
def test_visitor_single_functions(code, expected):
visitor = ComplexityVisitor.from_code(dedent(code))
assert len(visitor.functions) == 1
assert (visitor.complexity, visitor.functions[0].complexity) == expected
FUNCTIONS_CASES = [
# With and async-with statements no longer count towards CC, see #123
(
'''
def f(a, b):
return a if b else 2
def g(a, b, c):
if a and b:
return a / b + b / a
elif b and c:
return b / c - c / b
return a + b + c
def h(a, b):
return 2 * (a + b)
''',
(2, 5, 1),
),
(
'''
def f(p, q):
while p:
p, q = q, p - q
if q < 1:
return 1 / q ** 2
elif q > 100:
return 1 / q ** .5
return 42 if not q else p
def g(a, b, c):
if a and b or a - b:
return a / b - c
elif b or c:
return 1
else:
k = 0
with open('results.txt', 'w') as fobj:
for i in range(b ** c):
k += sum(1 / j for j in range(i ** 2) if j > 2)
fobj.write(str(k))
return k - 1
''',
(5, 9),
),
]
@pytest.mark.parametrize('code,expected', FUNCTIONS_CASES)
def test_visitor_functions(code, expected):
visitor = ComplexityVisitor.from_code(dedent(code))
assert len(visitor.functions) == len(expected)
assert tuple(map(GET_COMPLEXITY, visitor.functions)) == expected
CLASSES_CASES = [
(
'''
class A(object):
def m(self, a, b):
if not a or b:
return b - 1
try:
return a / b
except ZeroDivisionError:
return a
def n(self, k):
while self.m(k) < k:
k -= self.m(k ** 2 - min(self.m(j) for j in range(k ** 4)))
return k
''',
(8, 4, 3),
),
(
'''
class B(object):
ATTR = 9 if A().n(9) == 9 else 10
import sys
if sys.version_info >= (3, 3):
import os
AT = os.openat('/random/loc')
def __iter__(self):
return __import__('itertools').tee(B.__dict__)
def test(self, func):
a = func(self.ATTR, self.AT)
if a < self.ATTR:
yield self
elif a > self.ATTR ** 2:
yield self.__iter__()
yield iter(a)
''',
(7, 1, 3),
),
]
@pytest.mark.parametrize('code,expected', CLASSES_CASES)
def test_visitor_classes(code, expected):
total_class_complexity = expected[0]
methods_complexity = expected[1:]
visitor = ComplexityVisitor.from_code(dedent(code))
assert len(visitor.classes) == 1
assert len(visitor.functions) == 0
cls = visitor.classes[0]
assert cls.real_complexity == total_class_complexity
assert tuple(map(GET_COMPLEXITY, cls.methods)) == methods_complexity
GENERAL_CASES = [
(
'''
if a and b:
print
else:
print
a = sum(i for i in range(1000) if i % 3 == 0 and i % 5 == 0)
def f(n):
def inner(n):
return n ** 2
if n == 0:
return 1
elif n == 1:
return n
elif n < 5:
return (n - 1) ** 2
return n * pow(inner(n), f(n - 1), n - 3)
''',
(6, 3, 0, 9),
),
(
'''
try:
1 / 0
except ZeroDivisonError:
print
except TypeError:
pass
class J(object):
def aux(self, w):
if w == 0:
return 0
return w - 1 + sum(self.aux(w - 3 - i) for i in range(2))
def f(a, b):
def inner(n):
return n ** 2
if a < b:
b, a = a, inner(b)
return a, b
''',
(3, 1, 3, 7),
),
(
'''
class f(object):
class inner(object):
pass
''',
(1, 0, 0, 1),
),
]
@pytest.mark.parametrize('code,expected', GENERAL_CASES)
def test_visitor_module(code, expected):
(
module_complexity,
functions_complexity,
classes_complexity,
total_complexity,
) = expected
visitor = ComplexityVisitor.from_code(dedent(code))
assert visitor.complexity, module_complexity
assert visitor.functions_complexity == functions_complexity
assert visitor.classes_complexity == classes_complexity
assert visitor.total_complexity == total_complexity
CLOSURES_CASES = [
(
'''
def f(n):
def g(l):
return l ** 4
def h(i):
return i ** 5 + 1 if i & 1 else 2
return sum(g(u + 4) / float(h(u)) for u in range(2, n))
''',
('g', 'h'),
(1, 2, 2),
),
(
'''
# will it work? :D
def memoize(func):
cache = {}
def aux(*args, **kwargs):
key = (args, kwargs)
if key in cache:
return cache[key]
cache[key] = res = func(*args, **kwargs)
return res
return aux
''',
('aux',),
(2, 1),
),
]
@pytest.mark.parametrize('code,closure_names,expected', CLOSURES_CASES)
def test_visitor_closures(code, closure_names, expected):
visitor = ComplexityVisitor.from_code(dedent(code))
func = visitor.functions[0]
closure_names = closure_names
expected_cs_cc = expected[:-1]
expected_total_cc = expected[-1]
assert len(visitor.functions) == 1
names = tuple(cs.name for cs in func.closures)
assert names == closure_names
cs_complexity = tuple(cs.complexity for cs in func.closures)
assert cs_complexity == expected_cs_cc
assert func.complexity == expected_total_cc
# There was a bug for which `blocks` increased while it got accessed
v = visitor
assert v.blocks == v.blocks == v.blocks
CONTAINERS_CASES = [
(
('func', 12, 0, 18, False, None, [], 5),
('F', 'func', 'F 12:0->18 func - 5'),
),
(
('meth', 12, 0, 21, True, 'cls', [], 5),
('M', 'cls.meth', 'M 12:0->21 cls.meth - 5'),
),
(('cls', 12, 0, 15, [], [], 5), ('C', 'cls', 'C 12:0->15 cls - 5')),
(
('cls', 12, 0, 19, [object, object, object, object], [], 30),
('C', 'cls', 'C 12:0->19 cls - 8'),
),
]
@pytest.mark.parametrize('values,expected', CONTAINERS_CASES)
def test_visitor_containers(values, expected):
expected_letter, expected_name, expected_str = expected
cls = Function if len(values) == 8 else Class
obj = cls(*values)
assert obj.letter == expected_letter
assert obj.fullname == expected_name
assert str(obj) == expected_str
| 2,287 | 0 | 154 |
4d927900145a4013c6fd9c6f27960c5b3f0b35e5 | 1,819 | py | Python | models/av_fusion.py | yang-zj1026/CS386_Project2 | 1a353086487e5fd4c4465566a1673c9495f5bb6c | [
"MIT"
] | null | null | null | models/av_fusion.py | yang-zj1026/CS386_Project2 | 1a353086487e5fd4c4465566a1673c9495f5bb6c | [
"MIT"
] | null | null | null | models/av_fusion.py | yang-zj1026/CS386_Project2 | 1a353086487e5fd4c4465566a1673c9495f5bb6c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from models.dsam_layers import center_crop
| 41.340909 | 116 | 0.640462 | import torch
import torch.nn as nn
from models.dsam_layers import center_crop
class av_module(nn.Module):
def __init__(self, rgb_nfilters, audio_nfilters, img_size, temp_size, hidden_layers):
super(av_module, self).__init__()
self.rgb_nfilters = rgb_nfilters
self.audio_nfilters = audio_nfilters
self.hidden_layers = hidden_layers
self.out_layers = 64
self.img_size = img_size
self.avgpool_rgb = nn.AvgPool3d((temp_size, 1, 1), stride=1)
# Make the layers numbers equal
self.relu = nn.ReLU()
self.affine_rgb = nn.Linear(rgb_nfilters, hidden_layers)
self.affine_audio = nn.Linear(audio_nfilters, hidden_layers)
self.w_a_rgb = nn.Bilinear(hidden_layers, hidden_layers, self.out_layers, bias=True)
self.upscale_ = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=False)
def forward(self, rgb, audio, crop_h, crop_w):
self.crop_w = crop_w
self.crop_h = crop_h
dgb = rgb[:, :, rgb.shape[2] // 2 - 1:rgb.shape[2] // 2 + 1, :, :]
rgb = self.avgpool_rgb(dgb).squeeze(2)
rgb = rgb.permute(0, 2, 3, 1)
rgb = rgb.view(rgb.size(0), -1, self.rgb_nfilters)
rgb = self.affine_rgb(rgb)
rgb = self.relu(rgb)
audio1 = self.affine_audio(audio[0].squeeze())
audio1 = self.relu(audio1)
a_rgb_B = self.w_a_rgb(rgb.contiguous(),
audio1.unsqueeze(1).expand(-1, self.img_size[0] * self.img_size[1], -1).contiguous())
sal_bilin = a_rgb_B
sal_bilin = sal_bilin.view(-1, self.img_size[0], self.img_size[1], self.out_layers)
sal_bilin = sal_bilin.permute(0, 3, 1, 2)
sal_bilin = center_crop(self.upscale_(sal_bilin), self.crop_h, self.crop_w)
return sal_bilin
| 1,657 | 6 | 77 |
fa66763cb6972a92806bcf929a87e7d65ee83a88 | 425 | py | Python | soustypes.py | geocot/Python_ArcGIS_Desktop | aef5d855d8ce3f564dd4fba80599be32b89fcb5b | [
"Apache-2.0"
] | null | null | null | soustypes.py | geocot/Python_ArcGIS_Desktop | aef5d855d8ce3f564dd4fba80599be32b89fcb5b | [
"Apache-2.0"
] | null | null | null | soustypes.py | geocot/Python_ArcGIS_Desktop | aef5d855d8ce3f564dd4fba80599be32b89fcb5b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import arcpy
arcpy.env.workspace = "C:/Temp/donnees.gdb"
soustypes = arcpy.da.ListSubtypes("villes")
for stcode, stdict in list(soustypes.items()):
print("code: ", stcode, " nom: ", stdict['Name'], " defaut: ", stdict['Default'])
"""
Retourne ceci:
('code: ', 0, ' nom: ', u'Villes', ' defaut: ', True)
('code: ', 1, ' nom: ', u'Grande ville', ' defaut: ', False)
"""
| 22.368421 | 86 | 0.592941 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import arcpy
arcpy.env.workspace = "C:/Temp/donnees.gdb"
soustypes = arcpy.da.ListSubtypes("villes")
for stcode, stdict in list(soustypes.items()):
print("code: ", stcode, " nom: ", stdict['Name'], " defaut: ", stdict['Default'])
"""
Retourne ceci:
('code: ', 0, ' nom: ', u'Villes', ' defaut: ', True)
('code: ', 1, ' nom: ', u'Grande ville', ' defaut: ', False)
"""
| 0 | 0 | 0 |
ed20f595ddcce0ce41c67515c8bef7687b70e532 | 2,379 | py | Python | src/site/cms/build/mdx_addheaderlinkcss.py | hpham/odftoolkit | cace01f463e49d2cb85579482605780ddf8a5e9e | [
"Apache-2.0"
] | 75 | 2018-12-13T16:05:55.000Z | 2022-02-08T18:42:56.000Z | src/site/cms/build/mdx_addheaderlinkcss.py | hpham/odftoolkit | cace01f463e49d2cb85579482605780ddf8a5e9e | [
"Apache-2.0"
] | 99 | 2019-02-14T11:09:45.000Z | 2022-03-02T19:05:11.000Z | src/site/cms/build/mdx_addheaderlinkcss.py | hpham/odftoolkit | cace01f463e49d2cb85579482605780ddf8a5e9e | [
"Apache-2.0"
] | 42 | 2019-02-15T11:06:34.000Z | 2022-02-09T02:35:36.000Z | #!
# coding:utf-8
'''
Add headerlink Extension for Python-Markdown
==========================================
This extension adds headerlink CSS to the output HTML in Python-Markdown.
This is intended for use with TocExtension(permalink=True) which generates the links
Simple Usage:
>>> import markdown
>>> markdown.markdown("Some text", ['addheaderlinkcss']) # doctest: +ELLIPSIS
u'<style...h1:hover > .headerlink {\\n display: inline;...</style>\\n<p>Some text</p>'
'''
import markdown
from markdown.util import etree
from markdown.util import isBlockLevel
# Global Vars
SECTIONLINK_PERMITTED_TAGS=set("h1 h2 h3 h4 h5 h6".split())
SECTIONLINK_CSS = r'''
/* The following code is added by mdx_addheaderlinkcss.py
It was originally lifted from http://subversion.apache.org/style/site.css */
/*
* Hide class="headerlink", except when an enclosing heading
* has the :hover property.
*/
.headerlink {
display: none;
}
'''
for tag in SECTIONLINK_PERMITTED_TAGS:
SECTIONLINK_CSS += '''\
%s:hover > .headerlink {
display: inline;
}
''' % tag
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
""" Add tableclass to Markdown. """
# https://pythonhosted.org/Markdown/extensions/api.html#makeextension says
# to use (**kwargs) only, but built-in extensions actually use (*args, **kwargs)
if __name__ == "__main__":
import doctest
# Test does not work currently because processing is disabled
doctest.testmod()
| 30.896104 | 91 | 0.654897 | #!
# coding:utf-8
'''
Add headerlink Extension for Python-Markdown
==========================================
This extension adds headerlink CSS to the output HTML in Python-Markdown.
This is intended for use with TocExtension(permalink=True) which generates the links
Simple Usage:
>>> import markdown
>>> markdown.markdown("Some text", ['addheaderlinkcss']) # doctest: +ELLIPSIS
u'<style...h1:hover > .headerlink {\\n display: inline;...</style>\\n<p>Some text</p>'
'''
import markdown
from markdown.util import etree
from markdown.util import isBlockLevel
# Global Vars
SECTIONLINK_PERMITTED_TAGS=set("h1 h2 h3 h4 h5 h6".split())
SECTIONLINK_CSS = r'''
/* The following code is added by mdx_addheaderlinkcss.py
It was originally lifted from http://subversion.apache.org/style/site.css */
/*
* Hide class="headerlink", except when an enclosing heading
* has the :hover property.
*/
.headerlink {
display: none;
}
'''
for tag in SECTIONLINK_PERMITTED_TAGS:
SECTIONLINK_CSS += '''\
%s:hover > .headerlink {
display: inline;
}
''' % tag
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
class AddHeaderlinkCssTreeProcessor(Treeprocessor):
def run(self, root):
if isBlockLevel(root.tag) and root.tag not in ['code', 'pre']:
child = etree.Element("style")
for k,v in {
'type': 'text/css',
}.iteritems():
child.set(k, v)
# Note upstream doc bug: it's not called markdown.AtomicString().
child.text = markdown.util.AtomicString(SECTIONLINK_CSS)
root.insert(0, child)
child.tail = root.text; root.text = None;
""" Add tableclass to Markdown. """
class AddHeaderlinkCssExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.treeprocessors.add('addheaderlinkcss',
AddHeaderlinkCssTreeProcessor(md.parser),
'_end')
# https://pythonhosted.org/Markdown/extensions/api.html#makeextension says
# to use (**kwargs) only, but built-in extensions actually use (*args, **kwargs)
def makeExtension(**kwargs):
return AddHeaderlinkCssExtension(**kwargs)
if __name__ == "__main__":
import doctest
# Test does not work currently because processing is disabled
doctest.testmod()
| 723 | 52 | 119 |
7d90044a4e3bb406aaeb8545599f0b25f2f9de09 | 3,545 | py | Python | maze_exploration/showmaze.py | TSchattschneider/MazeExploration | 19949457971241a0eff5894be4ea9aa0072eb6bd | [
"MIT"
] | 1 | 2021-03-18T17:25:08.000Z | 2021-03-18T17:25:08.000Z | maze_exploration/showmaze.py | TSchattschneider/MazeExploration | 19949457971241a0eff5894be4ea9aa0072eb6bd | [
"MIT"
] | null | null | null | maze_exploration/showmaze.py | TSchattschneider/MazeExploration | 19949457971241a0eff5894be4ea9aa0072eb6bd | [
"MIT"
] | null | null | null | import json
import sys
import turtle
from maze import Maze
def draw_path(filepath, pen, origin, sq_size):
""""Reads a path from a file and draws it on the maze."""
first = True
with open(filepath, 'r') as file_object:
for line in file_object:
x, y, visited, heading = json.loads(line)
if visited == 0:
color = 'gray'
elif visited == 1:
color = 'green yellow'
elif visited == 2:
color = 'gray'
elif visited == 3:
color = 'red'
else:
color = 'black'
if first:
pen.hideturtle()
pen.pensize(int(sq_size / 2))
pen.pencolor(color)
pen.setheading(90)
pen.goto(origin + sq_size / 2, origin + sq_size / 2)
pen.showturtle()
first = False
else:
draw_line(x, y, color, heading, pen, origin, sq_size)
def draw_line(x, y, color, heading, pen, origin, sq_size):
"""Draws a continuous line on the path."""
center_x = origin + sq_size * x + sq_size / 2
center_y = origin + sq_size * y + sq_size / 2
heading_dict = {"up": 90, "right": 0, "down": 270, "left": 180}
pen.setheading(heading_dict[heading])
pen.pendown()
pen.goto(center_x, center_y)
pen.penup()
pen.pencolor(color)
def draw_maze(maze, pen, origin, sq_size):
"""Draws the maze lines om screen."""
# iterate through squares one by one to decide where to draw walls
for x in range(maze.dim):
for y in range(maze.dim):
if not maze.is_permissible([x, y], 'up'):
pen.goto(origin + sq_size * x, origin + sq_size * (y + 1))
pen.setheading(0)
pen.pendown()
pen.forward(sq_size)
pen.penup()
if not maze.is_permissible([x, y], 'right'):
pen.goto(origin + sq_size * (x + 1), origin + sq_size * y)
pen.setheading(90)
pen.pendown()
pen.forward(sq_size)
pen.penup()
# only check bottom wall if on lowest row
if y == 0 and not maze.is_permissible([x, y], 'down'):
pen.goto(origin + sq_size * x, origin)
pen.setheading(0)
pen.pendown()
pen.forward(sq_size)
pen.penup()
# only check left wall if on leftmost column
if x == 0 and not maze.is_permissible([x, y], 'left'):
pen.goto(origin, origin + sq_size * y)
pen.setheading(90)
pen.pendown()
pen.forward(sq_size)
pen.penup()
if __name__ == '__main__':
'''
This script uses Python's turtle library to draw a picture of the maze
given as an argument when running the script.
'''
# Create a maze based on input argument on command line.
maze = Maze(str(sys.argv[1]))
# Initialize the window and drawing turtle.
window = turtle.Screen()
pen = turtle.Turtle()
pen.speed(0)
pen.penup()
# maze centered on (0,0), squares are 20 units in length.
sq_size = 20
origin = maze.dim * sq_size / -2
window.tracer(0)
draw_maze(maze, pen, origin, sq_size)
window.update()
window.tracer(1)
if len(sys.argv) == 3:
draw_path(str(sys.argv[2]), pen, origin, sq_size)
pen.hideturtle()
window.exitonclick()
| 30.299145 | 74 | 0.530606 | import json
import sys
import turtle
from maze import Maze
def draw_path(filepath, pen, origin, sq_size):
""""Reads a path from a file and draws it on the maze."""
first = True
with open(filepath, 'r') as file_object:
for line in file_object:
x, y, visited, heading = json.loads(line)
if visited == 0:
color = 'gray'
elif visited == 1:
color = 'green yellow'
elif visited == 2:
color = 'gray'
elif visited == 3:
color = 'red'
else:
color = 'black'
if first:
pen.hideturtle()
pen.pensize(int(sq_size / 2))
pen.pencolor(color)
pen.setheading(90)
pen.goto(origin + sq_size / 2, origin + sq_size / 2)
pen.showturtle()
first = False
else:
draw_line(x, y, color, heading, pen, origin, sq_size)
def draw_line(x, y, color, heading, pen, origin, sq_size):
"""Draws a continuous line on the path."""
center_x = origin + sq_size * x + sq_size / 2
center_y = origin + sq_size * y + sq_size / 2
heading_dict = {"up": 90, "right": 0, "down": 270, "left": 180}
pen.setheading(heading_dict[heading])
pen.pendown()
pen.goto(center_x, center_y)
pen.penup()
pen.pencolor(color)
def draw_maze(maze, pen, origin, sq_size):
"""Draws the maze lines om screen."""
# iterate through squares one by one to decide where to draw walls
for x in range(maze.dim):
for y in range(maze.dim):
if not maze.is_permissible([x, y], 'up'):
pen.goto(origin + sq_size * x, origin + sq_size * (y + 1))
pen.setheading(0)
pen.pendown()
pen.forward(sq_size)
pen.penup()
if not maze.is_permissible([x, y], 'right'):
pen.goto(origin + sq_size * (x + 1), origin + sq_size * y)
pen.setheading(90)
pen.pendown()
pen.forward(sq_size)
pen.penup()
# only check bottom wall if on lowest row
if y == 0 and not maze.is_permissible([x, y], 'down'):
pen.goto(origin + sq_size * x, origin)
pen.setheading(0)
pen.pendown()
pen.forward(sq_size)
pen.penup()
# only check left wall if on leftmost column
if x == 0 and not maze.is_permissible([x, y], 'left'):
pen.goto(origin, origin + sq_size * y)
pen.setheading(90)
pen.pendown()
pen.forward(sq_size)
pen.penup()
if __name__ == '__main__':
'''
This script uses Python's turtle library to draw a picture of the maze
given as an argument when running the script.
'''
# Create a maze based on input argument on command line.
maze = Maze(str(sys.argv[1]))
# Initialize the window and drawing turtle.
window = turtle.Screen()
pen = turtle.Turtle()
pen.speed(0)
pen.penup()
# maze centered on (0,0), squares are 20 units in length.
sq_size = 20
origin = maze.dim * sq_size / -2
window.tracer(0)
draw_maze(maze, pen, origin, sq_size)
window.update()
window.tracer(1)
if len(sys.argv) == 3:
draw_path(str(sys.argv[2]), pen, origin, sq_size)
pen.hideturtle()
window.exitonclick()
| 0 | 0 | 0 |
45996bd75386e35e2553f01df2c921844ec3804f | 394 | py | Python | SoftLayer/shell/routes.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 126 | 2015-01-05T05:09:22.000Z | 2021-07-02T00:16:35.000Z | SoftLayer/shell/routes.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 969 | 2015-01-05T15:55:31.000Z | 2022-03-31T19:55:20.000Z | SoftLayer/shell/routes.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | 176 | 2015-01-22T11:23:40.000Z | 2022-02-11T13:16:58.000Z | """
SoftLayer.CLI.routes
~~~~~~~~~~~~~~~~~~~
Routes for shell-specific commands
:license: MIT, see LICENSE for more details.
"""
ALL_ROUTES = [
('exit', 'SoftLayer.shell.cmd_exit:cli'),
('shell-help', 'SoftLayer.shell.cmd_help:cli'),
('env', 'SoftLayer.shell.cmd_env:cli'),
]
ALL_ALIASES = {
'?': 'shell-help',
'help': 'shell-help',
'quit': 'exit',
}
| 19.7 | 51 | 0.56599 | """
SoftLayer.CLI.routes
~~~~~~~~~~~~~~~~~~~
Routes for shell-specific commands
:license: MIT, see LICENSE for more details.
"""
ALL_ROUTES = [
('exit', 'SoftLayer.shell.cmd_exit:cli'),
('shell-help', 'SoftLayer.shell.cmd_help:cli'),
('env', 'SoftLayer.shell.cmd_env:cli'),
]
ALL_ALIASES = {
'?': 'shell-help',
'help': 'shell-help',
'quit': 'exit',
}
| 0 | 0 | 0 |
2c4ac8b7a30c10bdaf41728053de8eb250b5ee56 | 31,337 | py | Python | dist/gbif2estimateS.py | xjpalma/gbif2estimateS | b9e25b0de3996b1d2502d02738fa1e9e8d33f9b7 | [
"MIT"
] | null | null | null | dist/gbif2estimateS.py | xjpalma/gbif2estimateS | b9e25b0de3996b1d2502d02738fa1e9e8d33f9b7 | [
"MIT"
] | null | null | null | dist/gbif2estimateS.py | xjpalma/gbif2estimateS | b9e25b0de3996b1d2502d02738fa1e9e8d33f9b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Thu Oct 31 11:09:38 2019
@author: Jorge palma
MARETEC/Instituto Superior Técnico
Universidade de Lisboa
'''
import sys
import os
import gc
import argparse
import traceback
import time
import datetime
import pandas as pd
import json
import random
import string
## python-geohash: https://pypi.org/project/python-geohash/
import geohash
## pyshp: https://pythonhosted.org/Python%20Shapefile%20Library/
import shapefile
## https://shapely.readthedocs.io
from shapely.geometry import Point, Polygon, shape
sys.tracebacklimit=0
'''dev'''
sys.tracebacklimit=1
output_default_file = 'output.dat'
species_loc_file = 'speciesloc.dat'
grid_file = 'grid.dat'
class GeohashMaker(object):
    """Grid helper: enumerates the geohash tiles covering a region given
    as a bounding box, a shapefile polygon or a geojson polygon, and
    answers point-in-region queries.

    NOTE(review): this copy of the class references ``self.build_cell``
    (geohash encoding at a configured precision) which is not defined in
    the lines shown here -- presumably provided elsewhere; confirm before
    using this copy standalone.
    """
    '''bounding_box'''
    def _is_geohash_in_bounding_box(self, current_geohash, bbox_coordinates):
        '''Checks if the box of a geohash is inside the bounding box
        :param current_geohash: a geohash
        :param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
        :return: true if the center of the geohash is in the bounding box
        '''
        # decode return [latitude, longitude]
        (latitude, longitude) = geohash.decode(current_geohash)
        geohash_in_bounding_box = (bbox_coordinates[0] < longitude < bbox_coordinates[2]) and \
                                  (bbox_coordinates[1] < latitude < bbox_coordinates[3])
        return geohash_in_bounding_box
    def _is_coordinates_in_bounding_box(self, coordinates, bbox_coordinates):
        '''Checks if coordinates is inside the bounding box
        :param coordinates: [lon, lat]
        :param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
        :return: true if the coordinate is in the bounding box
        '''
        coordinates_in_bounding_box = (bbox_coordinates[0] < coordinates[0] < bbox_coordinates[2]) and \
                                      (bbox_coordinates[1] < coordinates[1] < bbox_coordinates[3])
        return coordinates_in_bounding_box
    def _buil_cell_tiles_from_bbox(self, bbox_coordinates):
        '''Computes all geohash tile in the given bounding box
        :param bbox_coordinates: the bounding box coordinates of the geohashes
        :return: a list of geohashes
        '''
        checked_geohashes = set()
        geohash_stack = set()
        geohashes = []
        '''get center of bounding box, assuming the earth is flat'''
        center_longitude = (bbox_coordinates[0] + bbox_coordinates[2]) / 2
        center_latitude = (bbox_coordinates[1] + bbox_coordinates[3]) / 2
        center_geohash = self.build_cell([center_longitude, center_latitude])
        geohashes.append(center_geohash)
        geohash_stack.add(center_geohash)
        checked_geohashes.add(center_geohash)
        # Flood-fill: expand outwards through geohash neighbours until no
        # unvisited neighbour falls inside the bounding box.
        while len(geohash_stack) > 0:
            current_geohash = geohash_stack.pop()
            neighbors = geohash.neighbors(current_geohash)
            for neighbor in neighbors:
                if neighbor not in checked_geohashes and self._is_geohash_in_bounding_box(neighbor, bbox_coordinates):
                    geohashes.append(neighbor)
                    geohash_stack.add(neighbor)
                    checked_geohashes.add(neighbor)
        geohashes.sort()
        return geohashes
    '''shapefile'''
    def _is_coordinates_in_shapefile(self, coordinates, shpfile):
        # Point-in-polygon test against the FIRST feature of the shapefile.
        ''' open shapefile'''
        sf = shapefile.Reader(shpfile)
        '''get features'''
        shapes = sf.shapes()
        first_shp = shapes[0]
        ''' get points coordinates for each point in the shape '''
        points = first_shp.points
        polygon = Polygon(points)
        point = Point(coordinates[0], coordinates[1])
        return polygon.contains(point)
    def _build_cell_tiles_from_shapefile(self, shpfile):
        '''Computes all geohash tiles in the given shapefile
        :param shapefile: shapefile
        :return: a list of geohashes
        '''
        ''' open shapefile'''
        sf = shapefile.Reader(shpfile)
        '''get features'''
        shapes = sf.shapes()
        if len(shapes) > 1:
            print("More than one feature was found. Only first will be selected.")
            input("Press Enter to continue...")
        '''only use first feature'''
        first_shp = shapes[0]
        ''' get shape type. only if shapetype is polygon'''
        shape_type = first_shp.shapeType
        if shape_type != 5:
            handle_error(msg='Shapefile feature be a polygon')
        ''' get points coordinates for each point in the shape '''
        points = first_shp.points
        polygon = Polygon(points)
        checked_geohashes = set()
        geohash_stack = set()
        geohashes = []
        '''get center of bounding box, assuming the earth is flat'''
        center_latitude = polygon.centroid.coords[0][1]
        center_longitude = polygon.centroid.coords[0][0]
        center_geohash = self.build_cell([center_longitude, center_latitude])
        geohashes.append(center_geohash)
        geohash_stack.add(center_geohash)
        checked_geohashes.add(center_geohash)
        while len(geohash_stack) > 0:
            current_geohash = geohash_stack.pop()
            neighbors = geohash.neighbors(current_geohash)
            for neighbor in neighbors:
                # geohash.decode gives (lat, lon); [::-1] flips to (lon, lat)
                point = Point(geohash.decode(neighbor)[::-1])
                if neighbor not in checked_geohashes and polygon.contains(point):
                    geohashes.append(neighbor)
                    geohash_stack.add(neighbor)
                    checked_geohashes.add(neighbor)
        geohashes.sort()
        return geohashes
    '''geojson'''
    def _is_coordinates_in_geojson(self, coordinates, jsonfile):
        '''Checks if coordinates is inside the polygon
        :param coordinates: [lon, lat]
        :geojson file with polygon
        :return: true if the coordinate is in polygon
        '''
        with open(jsonfile) as f:
            try:
                data = json.load(f)
                polygon = shape(data["geometry"])
                point = Point(coordinates[0], coordinates[1])
                return polygon.contains(point)
            except ValueError as e:
                handle_error(msg='Invalid GEOJSON format')
    def _build_cell_tiles_from_geojson(self, jsonfile):
        '''Computes all geohash tiles in the given geojson file
        :param jsonfile: geojson (polygon)
        :return: a list of geohashes
        '''
        with open(jsonfile) as f:
            try:
                data = json.load(f)
                polygon = shape(data["geometry"])
                geom_type = polygon.geom_type
                if geom_type != 'Polygon':
                    handle_error('SyntaxError', 'Invalid GEOJSON format: Must be a Polygon type')
                checked_geohashes = set()
                geohash_stack = set()
                geohashes = []
                '''get center of bounding box, assuming the earth is flat'''
                center_longitude = polygon.centroid.coords[0][0]
                center_latitude = polygon.centroid.coords[0][1]
                center_geohash = self.build_cell([center_longitude, center_latitude])
                geohashes.append(center_geohash)
                geohash_stack.add(center_geohash)
                checked_geohashes.add(center_geohash)
                while len(geohash_stack) > 0:
                    current_geohash = geohash_stack.pop()
                    neighbors = geohash.neighbors(current_geohash)
                    for neighbor in neighbors:
                        point = Point(geohash.decode(neighbor)[::-1])
                        if neighbor not in checked_geohashes and polygon.contains(point):
                            geohashes.append(neighbor)
                            geohash_stack.add(neighbor)
                            checked_geohashes.add(neighbor)
                geohashes.sort()
                return geohashes
            except ValueError as e:
                handle_error(msg='Invalid GEOJSON format')
def get_parser():
    '''Build and validate the command-line argument parser.

    Returns the parsed (and post-processed) argparse namespace:
    - args.outdir is normalised to '' or a path ending in '/'
    - missing dates default to 1900-01-01 / 2100-01-01
    - species keys are parsed as integers so they can be compared
      against the integer ``speciesKey`` column of the GBIF csv
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='read gbif and make input file to EstimateS')
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    parser.add_argument('-v', dest='verbose', help='verbose', action='store_true')
    parser.add_argument('-vv', dest='vverbose', help='more verbose', action='store_true')
    ## Create io files group
    subparser_io = parser.add_argument_group(title='IO group')
    subparser_io.add_argument('-i', dest='input', help='csv gbif results', required=True)
    subparser_io.add_argument('-s', dest='separator', help='csv separator', default='\t', required=False)
    subparser_io.add_argument('-o', dest='output', help='output file', default=output_default_file, required=False)
    ## Create time group
    subparser_time = parser.add_argument_group(title='time group')
    subparser_time.add_argument('-str', dest='strdate', help="the Start Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    subparser_time.add_argument('-end', dest='enddate', help="the End Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    ## Create grid group
    subparser_grid = parser.add_argument_group(title='grid group')
    subparser_grid.add_argument('-g', dest='grid_type', choices=['geohash'], default='geohash', required=False)
    subparser_grid.add_argument('-p', dest='precision', type=int, help='grid precision', default=5, required=False)
    subparser_grid_exclusive = subparser_grid.add_mutually_exclusive_group(required=True)
    subparser_grid_exclusive.add_argument('-shp', dest='shapefile', help='shapefile with polygon', required=False)
    subparser_grid_exclusive.add_argument('-geojson', dest='geojson', help='geojson file with polygon', required=False)
    subparser_grid_exclusive.add_argument('-bbox', dest='bbox', nargs='+', type=float, help='bounding box: x1 y1 x2 y2', required=False)
    ## Create species group
    subparser_specie = parser.add_argument_group(title='specie group')
    ## BUGFIX: parse species keys as int -- the main loop filters with
    ## ``speciekey in args.species`` where speciekey is an int, so string
    ## values from the command line could never match.
    subparser_specie.add_argument('-n', dest='species', nargs='+', type=int, default=[], help='species allowed', required=False)
    args = parser.parse_args()
    if args.vverbose:
        args.verbose = True
    if not os.path.isfile(args.input):
        raise IOError('No such file {}'.format(args.input))
    args.outdir = os.path.dirname(args.output)
    outfile = os.path.basename(args.output)
    ## verify if is a path and create it
    if args.outdir:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir)
        args.outdir = args.outdir + '/'
    ## if only a directory was given, fall back to the default file name
    ## (args.outdir already ends with '/', so do not add another one)
    if not outfile:
        args.output = args.outdir + output_default_file
    if not args.strdate:
        args.strdate = datetime.datetime.strptime('1900-01-01', '%Y-%m-%d')
    if not args.enddate:
        args.enddate = datetime.datetime.strptime('2100-01-01', '%Y-%m-%d')
    if args.shapefile:
        if not os.path.isfile(args.shapefile):
            handle_error('FileNotFoundError', 'Shapefile not found')
    if args.geojson:
        if not os.path.isfile(args.geojson):
            handle_error('FileNotFoundError', 'JSON file not found')
    return args
if __name__ == "__main__":
    start_time = time.time()
    args = get_parser()
    '''#### build grid'''
    print('1. build grid')
    grid = []
    if args.grid_type == 'geohash':
        grid_maker = GeohashMaker(args.precision, args.shapefile, args.geojson, args.bbox)
        grid = grid_maker.create_grid()
    else:
        handle_error(msg='Error: only accept geohash grid type')
    '''#### init big_data variable'''
    print('2. init big data')
    # big_data maps geohash cell -> {'sum', 'species', 'dates'}
    big_data = {}
    for cell in grid:
        big_data[cell] = {}
        '''how many species in cell'''
        big_data[cell]['sum'] = 0
        '''list of species in cell'''
        big_data[cell]['species'] = {}
        '''used to consider only one observation (specie and time) in cell'''
        big_data[cell]['dates'] = {}
    '''create localization.dat file'''
    f = open(args.outdir + species_loc_file, 'w+')
    f.write("latitude;longitude;species\n")
    '''#### read csv file'''
    print('3. read each gbif observation (be patient...)')
    # Counters for the end-of-run report.
    nobs_accepted = 0
    nobs_rejected = 0
    nobs_repeated = 0
    nobs_outside_grid_or_time = 0
    nobs_wrong_format = 0
    nobs = 0
    usecols = ['gbifID', 'decimalLatitude', 'decimalLongitude', 'speciesKey', 'year', 'month', 'day']
    chunksize = 10 ** 5
    filesize = os.path.getsize(args.input)
    # Rough average bytes per csv row -- only used to estimate progress
    # from the file size (assumption; TODO confirm for other exports).
    linesize = 820
    for df in pd.read_csv(args.input, sep=args.separator, chunksize=chunksize, engine='c', low_memory=False, usecols=usecols, skip_blank_lines=True):
        s_time = time.time()
        nlines = len(df.index)
        nobs += nlines
        ''' verify if all columns exist in header csv'''
        csv_columns = df.columns.tolist()
        # NOTE(review): the boolean result is discarded -- a missing column
        # only prints a warning here; confirm whether it should abort.
        test_csv_header(csv_columns, usecols)
        '''
        gbifID abstract accessRights accrualMethod accrualPeriodicity accrualPolicy alternative audience available bibliographicCitation conformsTo contributor coverage
        created creator date dateAccepted dateCopyrighted dateSubmitted description educationLevel extent format hasFormat hasPart hasVersion identifier instructionalMethod isFormatOf isPartOf
        isReferencedBy isReplacedBy isRequiredBy isVersionOf issued language license mediator medium modified provenance publisher references relation replaces requires rights
        rightsHolder source spatial subject tableOfContents temporal title type valid institutionID collectionID datasetID institutionCode collectionCode datasetName ownerInstitutionCode
        basisOfRecord informationWithheld dataGeneralizations dynamicProperties occurrenceID catalogNumber recordNumber recordedBy individualCount organismQuantity organismQuantityType
        sex lifeStage reproductiveCondition behavior establishmentMeansoccurrenceStatus preparations disposition associatedReferences associatedSequences associatedTaxa otherCatalogNumbers
        occurrenceRemarks organismIDorganismName organismScope associatedOccurrences associatedOrganisms previousIdentifications organismRemarks materialSampleID eventID parentEventID
        fieldNumber eventDate eventTime startDayOfYear endDayOfYear year month day verbatimEventDate habitat samplingProtocol samplingEffort sampleSizeValue sampleSizeUnit
        fieldNotes eventRemarks locationID higherGeographyID higherGeography continent waterBody islandGroupisland countryCode stateProvince county municipality locality
        verbatimLocality verbatimElevation verbatimDepth minimumDistanceAboveSurfaceInMeters maximumDistanceAboveSurfaceInMeters locationAccordingTo locationRemarks decimalLatitude
        decimalLongitude coordinateUncertaintyInMeters coordinatePrecision pointRadiusSpatialFit verbatimCoordinateSystem verbatimSRS footprintWKT footprintSRS
        footprintSpatialFit georeferencedBy georeferencedDate georeferenceProtocol georeferenceSources georeferenceVerificationStatus georeferenceRemarks geologicalContextID
        earliestEonOrLowestEonothemlatestEonOrHighestEonothem earliestEraOrLowestErathem latestEraOrHighestErathem earliestPeriodOrLowestSystem latestPeriodOrHighestSystem
        earliestEpochOrLowestSeries latestEpochOrHighestSeries earliestAgeOrLowestStage latestAgeOrHighestStage lowestBiostratigraphicZone highestBiostratigraphicZonelithostratigraphicTerms
        group formation member bed identificationID identificationQualifier typeStatus identifiedBy dateIdentified identificationReferences identificationVerificationStatus
        identificationRemarks taxonID scientificNameID acceptedNameUsageID parentNameUsageID originalNameUsageID nameAccordingToID namePublishedInID taxonConceptID scientificName
        acceptedNameUsage parentNameUsage originalNameUsage nameAccordingTo namePublishedIn namePublishedInYear higherClassification kingdom phylum class order family genus
        subgenus specificEpithet infraspecificEpithet taxonRank verbatimTaxonRank vernacularName nomenclaturalCode taxonomicStatus nomenclaturalStatus taxonRemarks
        datasetKey publishingCountry lastInterpreted elevation elevationAccuracy depth depthAccuracy distanceAboveSurface distanceAboveSurfaceAccuracy issue mediaType
        hasCoordinate hasGeospatialIssues taxonKey acceptedTaxonKey kingdomKey phylumKey classKey orderKey familyKey genusKey subgenusKey speciesKey species genericName acceptedScientificName
        typifiedName protocol lastParsed lastCrawled repatriated
        '''
        for index, row in df.iterrows():
            if args.verbose:
                if nlines < chunksize:
                    progress(index, nlines)
                else:
                    progress(index*linesize, filesize)
            '''get values'''
            try:
                gbifid = str(row['gbifID'])
                speciekey = int(float(row['speciesKey']))
                lon = round(float(row['decimalLongitude']), 6)
                lat = round(float(row['decimalLatitude']), 6)
                year = row['year']
                month = row['month']
                day = row['day']
                date_obj = datetime.datetime(int(year), int(month), int(day), 0, 0)
                date = date_obj.strftime("%Y-%m-%d")
                #print(index, gbifid, speciekey, lon, lat, date)
            except Exception as exception:
                # Rows with missing/unparseable coordinates or date.
                # traceback.print_exc()
                nobs_wrong_format += 1
                nobs_rejected += 1
                continue
            else:
                '''test if observation is in domain, in time and in species list'''
                if grid_maker.is_in_grid([lon, lat]) and date_obj >= args.strdate and date_obj <= args.enddate and (not args.species or speciekey in args.species):
                    cell = grid_maker.build_cell([lon, lat])
                    if not cell in grid:
                        nobs_outside_grid_or_time += 1
                        #print(cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        #handle_error(msg=cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        continue
                    '''
                    filter: only consider one observation per day, per grid
                    create key to save only one obs per day, per grid
                    '''
                    try:
                        if speciekey in big_data[cell]['dates'][date]:
                            # print('repeated: ' + cell + ' ' + date + ' ' + speciekey)
                            nobs_repeated += 1
                            nobs_rejected += 1
                            continue
                        else:
                            # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                            big_data[cell]['dates'][date].append(speciekey)
                            big_data[cell]['sum'] += 1
                            if speciekey in big_data[cell]['species']:
                                big_data[cell]['species'][speciekey]['count'] += 1
                            else:
                                big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                            nobs_accepted += 1
                            f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                    except KeyError:
                        # First observation of this cell on this date.
                        # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                        big_data[cell]['dates'][date] = []
                        big_data[cell]['dates'][date].append(speciekey)
                        big_data[cell]['sum'] += 1
                        if speciekey in big_data[cell]['species']:
                            big_data[cell]['species'][speciekey]['count'] += 1
                        else:
                            big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                        nobs_accepted += 1
                        f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                else:
                    # print('out of domain or out of time period: ' + cell + ' ' + str(lon) + ' ' + str(lat) + ' ' + date + ' ' + str(speciekey))
                    nobs_outside_grid_or_time += 1
                    nobs_rejected += 1
        if args.vverbose: elapsed_time([s_time, start_time])
        del df
        gc.collect()
    f.close()
    print()
    print('\tobservations outside grid, time or selected species: {0}'.format(nobs_outside_grid_or_time))
    print('\tobservations wrong format (no date): {0}'.format(nobs_wrong_format))
    print('\tobservations repeated: {0}'.format(nobs_repeated))
    print('\tobservations rejected: {0}'.format(nobs_rejected))
    print('\tobservations accepted: {0}'.format(nobs_accepted))
    print('\tobservations total: {0}'.format(nobs))
    print()
    ''' delete unecessary variables '''
    for c in big_data:
        del big_data[c]['dates']
    print('4. process big data and output results')
    '''open output files'''
    fout = open(args.output, 'w+')
    fgeo = open(args.outdir + grid_file, 'w+')
    fgeo.write("lat;lon;geohash;has_species\r\n")
    community_list = []
    community_accepted = 0
    units_count = 0
    for cell in big_data:
        (lat, lon) = coordinates = geohash.decode(cell)
        '''create new community'''
        community = BioCommunity(cell)
        community_unit_list = []
        for speciekey, v in big_data[cell]['species'].items():
            member = BioMember(speciekey)
            member_count = v['count']
            '''add member to community'''
            community.add_member(member, member_count)
            '''add member to member list'''
            # One list entry per observed unit, so a random draw below is
            # weighted by abundance.
            for i in range(member_count):
                community_unit_list.append(member.getid())
        community_units_count = community.get_units_count()
        richness = community.get_richness()
        '''only consider community with more than 2 observations'''
        if community_units_count > 2:
            community_accepted += 1
            units_count += community_units_count
            if args.vverbose: print("  There are {mc:} units in community {cell:} ({lat:}, {lon:}. The total diversity is {rich:} species)".format(mc=community_units_count, cell=cell, lat=lat, lon=lon, rich=community.get_richness()))
            '''add community to list'''
            community_list.append(community)
            '''print header
             "Cell eydsh (37.28759765625, -7.53662109375)" * SampleSet * 1 1 1
             8 8
              00001 00002 00003 00004 00005 00006 00007 00008
            '''
            fout.write("\"Cell {cell:} ({lat:}, {lon:})\"\t*SampleSet*\t1\t1\t1\r\n".format(cell=cell, lat=lat, lon=lon))
            fout.write("{r:}\t{uc:}\r\n".format(r=richness, uc=community_units_count))
            for i in range(1, community_units_count + 1):
                fout.write("\t{0:05d}".format(i))
            fout.write("\r\n")
            '''set matrix data for random get'''
            matrix = {}
            members = community.get_all_members()
            '''init matrix'''
            for speciekey in members:
                matrix[speciekey] = []
            # Assign each unit to a random column: every column holds
            # exactly one 1 (the drawn species), all other rows get 0.
            for i in range(community_units_count):
                '''get random member'''
                random_member = community_unit_list.pop(random.randrange(len(community_unit_list)))
                for speciekey in members:
                    if speciekey == random_member:
                        matrix[speciekey].append(1)
                    else:
                        matrix[speciekey].append(0)
            '''
            print matrix
            2474051 0 0 1 0 0 0 0 0
            2492606 0 0 0 0 0 0 0 1
            2492867 0 0 0 0 1 0 0 0
            2495000 0 0 0 0 0 1 0 0
            2498415 0 0 0 0 0 0 1 0
            5229493 1 0 0 0 0 0 0 0
            6092830 0 0 0 1 0 0 0 0
            9515886 0 1 0 0 0 0 0 0
            '''
            for speciekey in sorted(matrix):
                fout.write("{0:d}".format(speciekey))
                for i in range(community_units_count):
                    fout.write("\t{0:}".format(int(matrix[speciekey][i])))
                fout.write("\r\n")
            fgeo.write("{lat:};{lon:};{cell:};{uc:}\r\n".format(lat=lat, lon=lon, cell= cell, uc=community_units_count))
    fout.close()
    fgeo.close()
    '''add first line in output file'''
    first_line = "*MultipleSampleSets*\t{0:}\t\"PT Community with more then 2 members".format(community_accepted)
    if args.strdate.year != 1900:
        first_line += '; start year: ' + str(args.strdate.year)
    if args.enddate.year != 2100:
        first_line += '; end year: ' + str(args.enddate.year)
    first_line += "\"\r\n"
    line_prepender(args.output, first_line)
    '''print stat'''
    meta = BioCommunity('tmp')
    meta.add_communities(community_list)
    print("\n== Metacommunities with more then 2 individuals:")
    print("\t{0:} communities".format(len(community_list)))
    print("\t{0:} species".format(meta.get_richness()))
    print("\t{0:} individuals".format(units_count))
    '''the end'''
    elapsed_time(start_time)
| 39.717364 | 235 | 0.606695 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Thu Oct 31 11:09:38 2019
@author: Jorge palma
MARETEC/Instituto Superior Técnico
Universidade de Lisboa
'''
import sys
import os
import gc
import argparse
import traceback
import time
import datetime
import pandas as pd
import json
import random
import string
## python-geohash: https://pypi.org/project/python-geohash/
import geohash
## pyshp: https://pythonhosted.org/Python%20Shapefile%20Library/
import shapefile
## https://shapely.readthedocs.io
from shapely.geometry import Point, Polygon, shape
sys.tracebacklimit=0
'''dev'''
# NOTE(review): this dev override makes the assignment above a no-op;
# restore tracebacklimit=0 (suppress tracebacks) for release builds.
sys.tracebacklimit=1
# Output file names; the auxiliary files are written under args.outdir.
output_default_file = 'output.dat'
species_loc_file = 'speciesloc.dat'
grid_file = 'grid.dat'
class GeohashMaker(object):
    """Builds a geohash grid over a region given as a bounding box, a
    shapefile polygon or a geojson polygon, and answers point-in-region
    queries against it.
    """
    def __init__(self, precision, shapefile, geojson, bbox):
        # Exactly one of shapefile / geojson / bbox is expected to be set
        # (enforced by the mutually-exclusive argparse group in get_parser).
        self.precision = precision
        self.bbox = bbox
        self.shapefile = shapefile
        self.geojson = geojson
    def create_grid(self):
        '''Return the list of geohash tiles covering the configured region.'''
        if self.bbox:
            return self._buil_cell_tiles_from_bbox(self.bbox)
        elif self.shapefile:
            return self._build_cell_tiles_from_shapefile(self.shapefile)
        elif self.geojson:
            return self._build_cell_tiles_from_geojson(self.geojson)
    def is_in_grid(self, coordinates):
        '''True when [lon, lat] falls inside the configured region.'''
        if self.bbox:
            return self._is_coordinates_in_bounding_box(coordinates, self.bbox)
        elif self.shapefile:
            return self._is_coordinates_in_shapefile(coordinates, self.shapefile)
        elif self.geojson:
            return self._is_coordinates_in_geojson(coordinates, self.geojson)
    def build_cell(self, coordinates):
        '''Encode [lon, lat] into a geohash at the configured precision.'''
        return geohash.encode(coordinates[1], coordinates[0], precision=self.precision)
    def get_precision(self):
        '''Return the geohash precision (cell-name length).'''
        return self.precision
    '''bounding_box'''
    def _is_geohash_in_bounding_box(self, current_geohash, bbox_coordinates):
        '''Checks if the box of a geohash is inside the bounding box
        :param current_geohash: a geohash
        :param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
        :return: true if the center of the geohash is in the bounding box
        '''
        # decode return [latitude, longitude]
        (latitude, longitude) = geohash.decode(current_geohash)
        geohash_in_bounding_box = (bbox_coordinates[0] < longitude < bbox_coordinates[2]) and \
                                  (bbox_coordinates[1] < latitude < bbox_coordinates[3])
        return geohash_in_bounding_box
    def _is_coordinates_in_bounding_box(self, coordinates, bbox_coordinates):
        '''Checks if coordinates is inside the bounding box
        :param coordinates: [lon, lat]
        :param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
        :return: true if the coordinate is in the bounding box
        '''
        coordinates_in_bounding_box = (bbox_coordinates[0] < coordinates[0] < bbox_coordinates[2]) and \
                                      (bbox_coordinates[1] < coordinates[1] < bbox_coordinates[3])
        return coordinates_in_bounding_box
    def _buil_cell_tiles_from_bbox(self, bbox_coordinates):
        '''Computes all geohash tile in the given bounding box
        :param bbox_coordinates: the bounding box coordinates of the geohashes
        :return: a list of geohashes
        '''
        checked_geohashes = set()
        geohash_stack = set()
        geohashes = []
        '''get center of bounding box, assuming the earth is flat'''
        center_longitude = (bbox_coordinates[0] + bbox_coordinates[2]) / 2
        center_latitude = (bbox_coordinates[1] + bbox_coordinates[3]) / 2
        center_geohash = self.build_cell([center_longitude, center_latitude])
        geohashes.append(center_geohash)
        geohash_stack.add(center_geohash)
        checked_geohashes.add(center_geohash)
        # Flood-fill: expand outwards through geohash neighbours until no
        # unvisited neighbour falls inside the bounding box.
        while len(geohash_stack) > 0:
            current_geohash = geohash_stack.pop()
            neighbors = geohash.neighbors(current_geohash)
            for neighbor in neighbors:
                if neighbor not in checked_geohashes and self._is_geohash_in_bounding_box(neighbor, bbox_coordinates):
                    geohashes.append(neighbor)
                    geohash_stack.add(neighbor)
                    checked_geohashes.add(neighbor)
        geohashes.sort()
        return geohashes
    '''shapefile'''
    def _is_coordinates_in_shapefile(self, coordinates, shpfile):
        # Point-in-polygon test against the FIRST feature of the shapefile.
        ''' open shapefile'''
        sf = shapefile.Reader(shpfile)
        '''get features'''
        shapes = sf.shapes()
        first_shp = shapes[0]
        ''' get points coordinates for each point in the shape '''
        points = first_shp.points
        polygon = Polygon(points)
        point = Point(coordinates[0], coordinates[1])
        return polygon.contains(point)
    def _build_cell_tiles_from_shapefile(self, shpfile):
        '''Computes all geohash tiles in the given shapefile
        :param shapefile: shapefile
        :return: a list of geohashes
        '''
        ''' open shapefile'''
        sf = shapefile.Reader(shpfile)
        '''get features'''
        shapes = sf.shapes()
        if len(shapes) > 1:
            print("More than one feature was found. Only first will be selected.")
            input("Press Enter to continue...")
        '''only use first feature'''
        first_shp = shapes[0]
        ''' get shape type. only if shapetype is polygon'''
        shape_type = first_shp.shapeType
        if shape_type != 5:
            handle_error(msg='Shapefile feature be a polygon')
        ''' get points coordinates for each point in the shape '''
        points = first_shp.points
        polygon = Polygon(points)
        checked_geohashes = set()
        geohash_stack = set()
        geohashes = []
        '''get center of bounding box, assuming the earth is flat'''
        center_latitude = polygon.centroid.coords[0][1]
        center_longitude = polygon.centroid.coords[0][0]
        center_geohash = self.build_cell([center_longitude, center_latitude])
        geohashes.append(center_geohash)
        geohash_stack.add(center_geohash)
        checked_geohashes.add(center_geohash)
        while len(geohash_stack) > 0:
            current_geohash = geohash_stack.pop()
            neighbors = geohash.neighbors(current_geohash)
            for neighbor in neighbors:
                # geohash.decode gives (lat, lon); [::-1] flips to (lon, lat)
                point = Point(geohash.decode(neighbor)[::-1])
                if neighbor not in checked_geohashes and polygon.contains(point):
                    geohashes.append(neighbor)
                    geohash_stack.add(neighbor)
                    checked_geohashes.add(neighbor)
        geohashes.sort()
        return geohashes
    '''geojson'''
    def _is_coordinates_in_geojson(self, coordinates, jsonfile):
        '''Checks if coordinates is inside the polygon
        :param coordinates: [lon, lat]
        :geojson file with polygon
        :return: true if the coordinate is in polygon
        '''
        with open(jsonfile) as f:
            try:
                data = json.load(f)
                polygon = shape(data["geometry"])
                point = Point(coordinates[0], coordinates[1])
                return polygon.contains(point)
            except ValueError as e:
                handle_error(msg='Invalid GEOJSON format')
    def _build_cell_tiles_from_geojson(self, jsonfile):
        '''Computes all geohash tiles in the given geojson file
        :param jsonfile: geojson (polygon)
        :return: a list of geohashes
        '''
        with open(jsonfile) as f:
            try:
                data = json.load(f)
                polygon = shape(data["geometry"])
                geom_type = polygon.geom_type
                if geom_type != 'Polygon':
                    handle_error('SyntaxError', 'Invalid GEOJSON format: Must be a Polygon type')
                checked_geohashes = set()
                geohash_stack = set()
                geohashes = []
                '''get center of bounding box, assuming the earth is flat'''
                center_longitude = polygon.centroid.coords[0][0]
                center_latitude = polygon.centroid.coords[0][1]
                center_geohash = self.build_cell([center_longitude, center_latitude])
                geohashes.append(center_geohash)
                geohash_stack.add(center_geohash)
                checked_geohashes.add(center_geohash)
                while len(geohash_stack) > 0:
                    current_geohash = geohash_stack.pop()
                    neighbors = geohash.neighbors(current_geohash)
                    for neighbor in neighbors:
                        point = Point(geohash.decode(neighbor)[::-1])
                        if neighbor not in checked_geohashes and polygon.contains(point):
                            geohashes.append(neighbor)
                            geohash_stack.add(neighbor)
                            checked_geohashes.add(neighbor)
                geohashes.sort()
                return geohashes
            except ValueError as e:
                handle_error(msg='Invalid GEOJSON format')
class BioCommunity(object):
    '''A community of biological members (species) and their abundances.

    Per member id it stores the member object and the number of observed
    units (individuals); ``units_count`` is the running total across all
    members. (The old class-level mutable attributes were removed: they
    risked shared state between instances and ``reset()`` in ``__init__``
    always creates fresh instance attributes anyway.)
    '''
    def __init__(self, name):
        self.name = name
        self.reset()
    def add_member(self, member, count=1):
        '''Add *count* units of *member* to the community.

        Counts now accumulate when the member is already present -- the
        previous version silently overwrote the stored count while still
        increasing the total, leaving the two out of sync.
        :return: True (kept for backward compatibility)
        '''
        member_id = member.getid()
        if member_id in self.members:
            self.members[member_id]['count'] += count
        else:
            self.members[member_id] = {'member': member, 'count': count}
        self.units_count += count
        return True
    def remove_member(self, member, count=1):
        '''Remove up to *count* units of *member*.

        BUGFIX: the previous version indexed ``self.members[member_id][count]``
        (the integer ``count`` instead of the key ``'count'``) and the string
        literal ``'member_id'``, and never decremented ``units_count`` on a
        partial removal.
        :return: True when the member existed, False otherwise
        '''
        member_id = member.getid()
        try:
            stored = self.members[member_id]['count']
        except KeyError:
            return False
        if stored <= count:
            # Removing at least as many units as stored: drop the member.
            self.units_count -= stored
            del self.members[member_id]
        else:
            self.members[member_id]['count'] -= count
            self.units_count -= count
        return True
    def add_communities(self, communities):
        '''Merge the members of every community in *communities* into this one.'''
        for community in communities:
            members = community.get_all_members()
            for speciekey in members:
                member = BioMember(speciekey)
                self.add_member(member, members[speciekey]['count'])
    def get_units_count(self):
        '''Total number of observed units (individuals).'''
        return self.units_count
    def get_all_members(self):
        '''Mapping member_id -> {'member': BioMember, 'count': int}.'''
        return self.members
    def set_name(self, name):
        self.name = name
        return True
    def get_name(self):
        return self.name
    def get_richness(self):
        ''' Report the community richness or number of different types of members.
        This is a form of alpha diversity. '''
        return len(self.members)
    def reset(self):
        '''Re-initialize the community'''
        self.units_count = 0
        self.members = {}
        return True
class BioMember(object):
    '''A single community member (species), identified by its key.

    When no id is given, a random 6-character alphanumeric id is generated.
    '''
    def __init__(self, id=''):
        self.id = id
        if not id:
            self.id = self.randomStringDigits()
    def getid(self):
        '''Return the member id.'''
        return self.id
    def setid(self, id):
        '''Replace the member id.'''
        self.id = id
    def randomStringDigits(self, stringLength=6):
        '''Generate a random string of letters and digits.

        BUGFIX: the original definition was missing ``self``, so calling
        it through an instance (as ``__init__`` does) passed the instance
        as ``stringLength`` and crashed with a TypeError.
        '''
        lettersAndDigits = string.ascii_letters + string.digits
        return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def get_parser():
    '''Build and validate the command-line argument parser.

    Returns the parsed (and post-processed) argparse namespace:
    - args.outdir is normalised to '' or a path ending in '/'
    - missing dates default to 1900-01-01 / 2100-01-01
    - species keys are parsed as integers so they can be compared
      against the integer ``speciesKey`` column of the GBIF csv
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='read gbif and make input file to EstimateS')
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    parser.add_argument('-v', dest='verbose', help='verbose', action='store_true')
    parser.add_argument('-vv', dest='vverbose', help='more verbose', action='store_true')
    ## Create io files group
    subparser_io = parser.add_argument_group(title='IO group')
    subparser_io.add_argument('-i', dest='input', help='csv gbif results', required=True)
    subparser_io.add_argument('-s', dest='separator', help='csv separator', default='\t', required=False)
    subparser_io.add_argument('-o', dest='output', help='output file', default=output_default_file, required=False)
    ## Create time group
    subparser_time = parser.add_argument_group(title='time group')
    subparser_time.add_argument('-str', dest='strdate', help="the Start Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    subparser_time.add_argument('-end', dest='enddate', help="the End Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    ## Create grid group
    subparser_grid = parser.add_argument_group(title='grid group')
    subparser_grid.add_argument('-g', dest='grid_type', choices=['geohash'], default='geohash', required=False)
    subparser_grid.add_argument('-p', dest='precision', type=int, help='grid precision', default=5, required=False)
    subparser_grid_exclusive = subparser_grid.add_mutually_exclusive_group(required=True)
    subparser_grid_exclusive.add_argument('-shp', dest='shapefile', help='shapefile with polygon', required=False)
    subparser_grid_exclusive.add_argument('-geojson', dest='geojson', help='geojson file with polygon', required=False)
    subparser_grid_exclusive.add_argument('-bbox', dest='bbox', nargs='+', type=float, help='bounding box: x1 y1 x2 y2', required=False)
    ## Create species group
    subparser_specie = parser.add_argument_group(title='specie group')
    ## BUGFIX: parse species keys as int -- the main loop filters with
    ## ``speciekey in args.species`` where speciekey is an int, so string
    ## values from the command line could never match.
    subparser_specie.add_argument('-n', dest='species', nargs='+', type=int, default=[], help='species allowed', required=False)
    args = parser.parse_args()
    if args.vverbose:
        args.verbose = True
    if not os.path.isfile(args.input):
        raise IOError('No such file {}'.format(args.input))
    args.outdir = os.path.dirname(args.output)
    outfile = os.path.basename(args.output)
    ## verify if is a path and create it
    if args.outdir:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir)
        args.outdir = args.outdir + '/'
    ## if only a directory was given, fall back to the default file name
    ## (args.outdir already ends with '/', so do not add another one)
    if not outfile:
        args.output = args.outdir + output_default_file
    if not args.strdate:
        args.strdate = datetime.datetime.strptime('1900-01-01', '%Y-%m-%d')
    if not args.enddate:
        args.enddate = datetime.datetime.strptime('2100-01-01', '%Y-%m-%d')
    if args.shapefile:
        if not os.path.isfile(args.shapefile):
            handle_error('FileNotFoundError', 'Shapefile not found')
    if args.geojson:
        if not os.path.isfile(args.geojson):
            handle_error('FileNotFoundError', 'JSON file not found')
    return args
def handle_error(error='', msg=''):
    """Abort the program, either by raising a named builtin exception or exiting.

    error: name of a builtin exception class (e.g. 'FileNotFoundError'); when
        given, an instance of that class is raised with *msg* as its message.
    msg: plain message printed to stdout when no exception name is given.

    Always terminates: either the raised exception propagates, or sys.exit(1)
    runs after the message is printed.
    """
    print()
    if error:
        # Resolve the exception class by name instead of exec()-ing a built
        # 'raise' string: the old exec approach failed with SyntaxError
        # whenever msg contained a quote, and exec on formatted input is
        # unsafe. An unknown name raises AttributeError here (was NameError).
        import builtins
        raise getattr(builtins, error)(msg)
    elif msg:
        print(msg)
    else:
        print()
    sys.exit(1)
def test_csv_header(csv_columns, must_exist_columns):
not_exist_columns = set()
for elem in must_exist_columns:
if not elem in csv_columns:
not_exist_columns.add(elem)
if not_exist_columns:
print("Error: Missing columns in input file.\n Input file doesn\'t have " + \
'"{}"'.format('", "'.join(not_exist_columns)) + \
' columns')
return False
del not_exist_columns, csv_columns, must_exist_columns
return True;
def progress(count, total, status=''):
    """Draw a 60-character in-place text progress bar on stdout.

    count/total give the completed fraction; *status* is an optional trailing
    label. The line ends with '\\r' so each call overwrites the previous one.
    """
    width = 60
    done = int(round(width * count / float(total)))
    pct = round(100.0 * count / float(total), 0)
    meter = '=' * done + '-' * (width - done)
    sys.stdout.write('{:} {:}% ...{:}\r'.format(meter, pct, status))
    sys.stdout.flush()
def line_prepender(filename, line):
    """Insert *line* (normalized to end with a single '\\n') at the top of
    *filename*, keeping the existing contents below it.

    The whole file is read into memory, so this is only suitable for files
    that fit comfortably in RAM.
    """
    with open(filename, 'r+') as handle:
        body = handle.read()
        handle.seek(0)
        new_text = line.rstrip('\r\n') + '\n' + body
        handle.write(new_text)
def elapsed_time(start_time):
    """Print the wall-clock time elapsed since one or more start timestamps.

    start_time: a single time.time() value, or a list of them; each produces
    one "Time elapsed: H:MM:SS..." line, framed by blank lines.
    """
    now = time.time()
    # Normalize the scalar case to a one-element list; the printed output is
    # identical to handling the two cases separately.
    starts = start_time if isinstance(start_time, list) else [start_time]
    print()
    for begin in starts:
        delta = datetime.timedelta(seconds=(now - begin))
        print("Time elapsed: {}".format(str(delta)))
    print()
if __name__ == "__main__":
    # Pipeline: parse CLI args -> build a geohash grid -> stream the GBIF CSV
    # in chunks, keeping at most one observation per species/day/cell ->
    # write a *SampleSet* presence matrix plus grid and localization files.
    start_time = time.time()
    args = get_parser()
    '''#### build grid'''
    print('1. build grid')
    grid = []
    if args.grid_type == 'geohash':
        # Grid cells are geohash strings at the requested precision, limited
        # to the shapefile/geojson/bbox area given on the command line.
        grid_maker = GeohashMaker(args.precision, args.shapefile, args.geojson, args.bbox)
        grid = grid_maker.create_grid()
    else:
        handle_error(msg='Error: only accept geohash grid type')
    '''#### init big_data variable'''
    print('2. init big data')
    # big_data maps geohash cell -> {'sum': n_obs,
    #                                'species': {speciekey: {'speciekey', 'count'}},
    #                                'dates': {date: [speciekeys seen that day]}}
    big_data = {}
    for cell in grid:
        big_data[cell] = {}
        '''how many species in cell'''
        big_data[cell]['sum'] = 0
        '''list of species in cell'''
        big_data[cell]['species'] = {}
        '''used to consider only one observation (specie and time) in cell'''
        big_data[cell]['dates'] = {}
    '''create localization.dat file'''
    # species_loc_file is a module-level name defined outside this chunk.
    f = open(args.outdir + species_loc_file, 'w+')
    f.write("latitude;longitude;species\n")
    '''#### read csv file'''
    print('3. read each gbif observation (be patient...)')
    # Per-observation bookkeeping counters, reported after the read loop.
    nobs_accepted = 0
    nobs_rejected = 0
    nobs_repeated = 0
    nobs_outside_grid_or_time = 0
    nobs_wrong_format = 0
    nobs = 0
    usecols = ['gbifID', 'decimalLatitude', 'decimalLongitude', 'speciesKey', 'year', 'month', 'day']
    chunksize = 10 ** 5
    filesize = os.path.getsize(args.input)
    # Assumed average bytes per CSV row; only used to estimate progress from
    # the file size -- TODO confirm the 820 figure for other GBIF dumps.
    linesize = 820
    for df in pd.read_csv(args.input, sep=args.separator, chunksize=chunksize, engine='c', low_memory=False, usecols=usecols, skip_blank_lines=True):
        s_time = time.time()
        nlines = len(df.index)
        nobs += nlines
        ''' verify if all columns exist in header csv'''
        # NOTE(review): the boolean result is ignored, so a bad header only
        # prints a warning instead of aborting the run.
        csv_columns = df.columns.tolist()
        test_csv_header(csv_columns, usecols)
        '''
        gbifID abstract accessRights accrualMethod accrualPeriodicity accrualPolicy alternative audience available bibliographicCitation conformsTo contributor coverage
        created creator date dateAccepted dateCopyrighted dateSubmitted description educationLevel extent format hasFormat hasPart hasVersion identifier instructionalMethod isFormatOf isPartOf
        isReferencedBy isReplacedBy isRequiredBy isVersionOf issued language license mediator medium modified provenance publisher references relation replaces requires rights
        rightsHolder source spatial subject tableOfContents temporal title type valid institutionID collectionID datasetID institutionCode collectionCode datasetName ownerInstitutionCode
        basisOfRecord informationWithheld dataGeneralizations dynamicProperties occurrenceID catalogNumber recordNumber recordedBy individualCount organismQuantity organismQuantityType
        sex lifeStage reproductiveCondition behavior establishmentMeansoccurrenceStatus preparations disposition associatedReferences associatedSequences associatedTaxa otherCatalogNumbers
        occurrenceRemarks organismIDorganismName organismScope associatedOccurrences associatedOrganisms previousIdentifications organismRemarks materialSampleID eventID parentEventID
        fieldNumber eventDate eventTime startDayOfYear endDayOfYear year month day verbatimEventDate habitat samplingProtocol samplingEffort sampleSizeValue sampleSizeUnit
        fieldNotes eventRemarks locationID higherGeographyID higherGeography continent waterBody islandGroupisland countryCode stateProvince county municipality locality
        verbatimLocality verbatimElevation verbatimDepth minimumDistanceAboveSurfaceInMeters maximumDistanceAboveSurfaceInMeters locationAccordingTo locationRemarks decimalLatitude
        decimalLongitude coordinateUncertaintyInMeters coordinatePrecision pointRadiusSpatialFit verbatimCoordinateSystem verbatimSRS footprintWKT footprintSRS
        footprintSpatialFit georeferencedBy georeferencedDate georeferenceProtocol georeferenceSources georeferenceVerificationStatus georeferenceRemarks geologicalContextID
        earliestEonOrLowestEonothemlatestEonOrHighestEonothem earliestEraOrLowestErathem latestEraOrHighestErathem earliestPeriodOrLowestSystem latestPeriodOrHighestSystem
        earliestEpochOrLowestSeries latestEpochOrHighestSeries earliestAgeOrLowestStage latestAgeOrHighestStage lowestBiostratigraphicZone highestBiostratigraphicZonelithostratigraphicTerms
        group formation member bed identificationID identificationQualifier typeStatus identifiedBy dateIdentified identificationReferences identificationVerificationStatus
        identificationRemarks taxonID scientificNameID acceptedNameUsageID parentNameUsageID originalNameUsageID nameAccordingToID namePublishedInID taxonConceptID scientificName
        acceptedNameUsage parentNameUsage originalNameUsage nameAccordingTo namePublishedIn namePublishedInYear higherClassification kingdom phylum class order family genus
        subgenus specificEpithet infraspecificEpithet taxonRank verbatimTaxonRank vernacularName nomenclaturalCode taxonomicStatus nomenclaturalStatus taxonRemarks
        datasetKey publishingCountry lastInterpreted elevation elevationAccuracy depth depthAccuracy distanceAboveSurface distanceAboveSurfaceAccuracy issue mediaType
        hasCoordinate hasGeospatialIssues taxonKey acceptedTaxonKey kingdomKey phylumKey classKey orderKey familyKey genusKey subgenusKey speciesKey species genericName acceptedScientificName
        typifiedName protocol lastParsed lastCrawled repatriated
        '''
        for index, row in df.iterrows():
            if args.verbose:
                # Small chunk: exact progress; otherwise approximate by bytes.
                if nlines < chunksize:
                    progress(index, nlines)
                else:
                    progress(index*linesize, filesize)
            '''get values'''
            try:
                gbifid = str(row['gbifID'])
                speciekey = int(float(row['speciesKey']))
                lon = round(float(row['decimalLongitude']), 6)
                lat = round(float(row['decimalLatitude']), 6)
                year = row['year']
                month = row['month']
                day = row['day']
                date_obj = datetime.datetime(int(year), int(month), int(day), 0, 0)
                date = date_obj.strftime("%Y-%m-%d")
                #print(index, gbifid, speciekey, lon, lat, date)
            except Exception as exception:
                # traceback.print_exc()
                # Any missing/unparsable field (typically the date) rejects
                # the row as "wrong format".
                nobs_wrong_format += 1
                nobs_rejected += 1
                continue
            else:
                '''test if observation is in domain, in time and in species list'''
                if grid_maker.is_in_grid([lon, lat]) and date_obj >= args.strdate and date_obj <= args.enddate and (not args.species or speciekey in args.species):
                    cell = grid_maker.build_cell([lon, lat])
                    if not cell in grid:
                        # Inside the bounding area but not in a generated cell.
                        nobs_outside_grid_or_time += 1
                        #print(cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        #handle_error(msg=cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        continue
                    '''
                    filter: only consider one observation per day, per grid
                    create key to save only one obs per day, per grid
                    '''
                    try:
                        if speciekey in big_data[cell]['dates'][date]:
                            # print('repeated: ' + cell + ' ' + date + ' ' + speciekey)
                            nobs_repeated += 1
                            nobs_rejected += 1
                            continue
                        else:
                            # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                            big_data[cell]['dates'][date].append(speciekey)
                            big_data[cell]['sum'] += 1
                            if speciekey in big_data[cell]['species']:
                                big_data[cell]['species'][speciekey]['count'] += 1
                            else:
                                big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                            nobs_accepted += 1
                            f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                    except KeyError:
                        # First observation for this cell/date: create the
                        # per-day list, then record exactly as above.
                        # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                        big_data[cell]['dates'][date] = []
                        big_data[cell]['dates'][date].append(speciekey)
                        big_data[cell]['sum'] += 1
                        if speciekey in big_data[cell]['species']:
                            big_data[cell]['species'][speciekey]['count'] += 1
                        else:
                            big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                        nobs_accepted += 1
                        f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                else:
                    # print('out of domain or out of time period: ' + cell + ' ' + str(lon) + ' ' + str(lat) + ' ' + date + ' ' + str(speciekey))
                    nobs_outside_grid_or_time += 1
                    nobs_rejected += 1
        if args.vverbose: elapsed_time([s_time, start_time])
        # Free the chunk explicitly before reading the next one.
        del df
        gc.collect()
    f.close()
    print()
    print('\tobservations outside grid, time or selected species: {0}'.format(nobs_outside_grid_or_time))
    print('\tobservations wrong format (no date): {0}'.format(nobs_wrong_format))
    print('\tobservations repeated: {0}'.format(nobs_repeated))
    print('\tobservations rejected: {0}'.format(nobs_rejected))
    print('\tobservations accepted: {0}'.format(nobs_accepted))
    print('\tobservations total: {0}'.format(nobs))
    print()
    ''' delete unecessary variables '''
    # The per-day dedup lists are no longer needed once reading is done.
    for c in big_data:
        del big_data[c]['dates']
    print('4. process big data and output results')
    '''open output files'''
    # grid_file is a module-level name defined outside this chunk.
    fout = open(args.output, 'w+')
    fgeo = open(args.outdir + grid_file, 'w+')
    fgeo.write("lat;lon;geohash;has_species\r\n")
    community_list = []
    community_accepted = 0
    units_count = 0
    for cell in big_data:
        (lat, lon) = coordinates = geohash.decode(cell)
        '''create new community'''
        community = BioCommunity(cell)
        community_unit_list = []
        for speciekey, v in big_data[cell]['species'].items():
            member = BioMember(speciekey)
            member_count = v['count']
            '''add member to community'''
            community.add_member(member, member_count)
            '''add member to member list'''
            # One list entry per observed unit, used below for random draws.
            for i in range(member_count):
                community_unit_list.append(member.getid())
        community_units_count = community.get_units_count()
        richness = community.get_richness()
        '''only consider community with more than 2 observations'''
        if community_units_count > 2:
            community_accepted += 1
            units_count += community_units_count
            if args.vverbose: print(" There are {mc:} units in community {cell:} ({lat:}, {lon:}. The total diversity is {rich:} species)".format(mc=community_units_count, cell=cell, lat=lat, lon=lon, rich=community.get_richness()))
            '''add community to list'''
            community_list.append(community)
            '''print header
            "Cell eydsh (37.28759765625, -7.53662109375)" * SampleSet * 1 1 1
            8 8
            00001 00002 00003 00004 00005 00006 00007 00008
            '''
            fout.write("\"Cell {cell:} ({lat:}, {lon:})\"\t*SampleSet*\t1\t1\t1\r\n".format(cell=cell, lat=lat, lon=lon))
            fout.write("{r:}\t{uc:}\r\n".format(r=richness, uc=community_units_count))
            for i in range(1, community_units_count + 1):
                fout.write("\t{0:05d}".format(i))
            fout.write("\r\n")
            '''set matrix data for random get'''
            # matrix: speciekey -> 0/1 column per unit; each random draw marks
            # exactly one species with a 1 in that column.
            matrix = {}
            members = community.get_all_members()
            '''init matrix'''
            for speciekey in members:
                matrix[speciekey] = []
            for i in range(community_units_count):
                '''get random member'''
                random_member = community_unit_list.pop(random.randrange(len(community_unit_list)))
                for speciekey in members:
                    if speciekey == random_member:
                        matrix[speciekey].append(1)
                    else:
                        matrix[speciekey].append(0)
            '''
            print matrix
            2474051 0 0 1 0 0 0 0 0
            2492606 0 0 0 0 0 0 0 1
            2492867 0 0 0 0 1 0 0 0
            2495000 0 0 0 0 0 1 0 0
            2498415 0 0 0 0 0 0 1 0
            5229493 1 0 0 0 0 0 0 0
            6092830 0 0 0 1 0 0 0 0
            9515886 0 1 0 0 0 0 0 0
            '''
            for speciekey in sorted(matrix):
                fout.write("{0:d}".format(speciekey))
                for i in range(community_units_count):
                    fout.write("\t{0:}".format(int(matrix[speciekey][i])))
                fout.write("\r\n")
            # NOTE(review): indentation reconstructed -- the grid row appears
            # to be written only for accepted (>2 unit) communities; confirm.
            fgeo.write("{lat:};{lon:};{cell:};{uc:}\r\n".format(lat=lat, lon=lon, cell= cell, uc=community_units_count))
    fout.close()
    fgeo.close()
    '''add first line in output file'''
    first_line = "*MultipleSampleSets*\t{0:}\t\"PT Community with more then 2 members".format(community_accepted)
    # Only mention explicitly-restricted date bounds (defaults are 1900/2100).
    if args.strdate.year != 1900:
        first_line += '; start year: ' + str(args.strdate.year)
    if args.enddate.year != 2100:
        first_line += '; end year: ' + str(args.enddate.year)
    first_line += "\"\r\n"
    line_prepender(args.output, first_line)
    '''print stat'''
    # Merge every accepted community into one metacommunity for the summary.
    meta = BioCommunity('tmp')
    meta.add_communities(community_list)
    print("\n== Metacommunities with more then 2 individuals:")
    print("\t{0:} communities".format(len(community_list)))
    print("\t{0:} species".format(meta.get_richness()))
    print("\t{0:} individuals".format(units_count))
    '''the end'''
    elapsed_time(start_time)
| 3,743 | 887 | 295 |
b6f4ab43343140abcd5bc0b46526c1de04b55594 | 9,097 | py | Python | applications/twolevel_model_building.py | bcdarwin/pydpiper | 7ac70ecc1f2dc4609395e1136fb312b6fef40dcc | [
"BSD-3-Clause"
] | null | null | null | applications/twolevel_model_building.py | bcdarwin/pydpiper | 7ac70ecc1f2dc4609395e1136fb312b6fef40dcc | [
"BSD-3-Clause"
] | null | null | null | applications/twolevel_model_building.py | bcdarwin/pydpiper | 7ac70ecc1f2dc4609395e1136fb312b6fef40dcc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pydpiper.application import AbstractApplication
import atoms_and_modules.registration_functions as rf
import atoms_and_modules.minc_modules as mm
import atoms_and_modules.minc_parameters as mp
import atoms_and_modules.LSQ6 as lsq6
import atoms_and_modules.LSQ12 as lsq12
import atoms_and_modules.NLIN as nl
import atoms_and_modules.stats_tools as st
import atoms_and_modules.registration_file_handling as rfh
from os.path import split, splitext, abspath
import sys
import logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Build the two-level pipeline application and hand control to pydpiper.
    LongitudinalTwolevelNlin().start()
| 52.281609 | 119 | 0.572387 | #!/usr/bin/env python
from pydpiper.application import AbstractApplication
import atoms_and_modules.registration_functions as rf
import atoms_and_modules.minc_modules as mm
import atoms_and_modules.minc_parameters as mp
import atoms_and_modules.LSQ6 as lsq6
import atoms_and_modules.LSQ12 as lsq12
import atoms_and_modules.NLIN as nl
import atoms_and_modules.stats_tools as st
import atoms_and_modules.registration_file_handling as rfh
from os.path import split, splitext, abspath
import sys
import logging
logger = logging.getLogger(__name__)
class LongitudinalTwolevelNlin(AbstractApplication):
    """Two-level model building for longitudinal MINC data.

    Level 1 registers all timepoints of each subject into a per-subject
    consensus average; level 2 registers those per-subject averages into a
    common consensus. Note: this is Python 2 code (print statements).
    """
    def setup_options(self):
        """Register LSQ6/LSQ12/NLIN/stats option groups and the usage text."""
        helpString="""
        twolevel_model_building
        A pydpiper application designed to work with longitudinal data. LSQ12
        and nonlinear registration is used to create a consensus average of
        every subject. A second level of LSQ12 and nonlinear registrations is
        then used to bring all the consensus averages from each subject into
        their own consensus average.
        Some assumptions:
        * at least two timepoints per subject
        * future work should be able to extend this to allow single timepoint subjects
        * all images must be similar enough to allow registration
        The last point is particularly important: the consensus average building process
        aligns every image from each subject to every other image from that subject. Early
        developmental data or tumour data, where the first image in the series might not be
        alignable to the last image in the series, is thus not suited for this approach.
        Data is passed to the application through a CSV file. This file has one line per subject,
        with each scan per subject listed on the same line and separated by a comma.
        """
        # own options go here
        lsq6.addLSQ6OptionGroup(self.parser)
        lsq12.addLSQ12OptionGroup(self.parser)
        nl.addNlinRegOptionGroup(self.parser)
        rf.addGenRegOptionGroup(self.parser)
        st.addStatsOptions(self.parser)
        # TODO: better usage description (once I've figured out what the usage will be ...)
        self.parser.set_usage("%prog [options] input.csv")
        # set help - note that the format is messed up, something that can be fixed if we upgrade
        # from optparse to argparse.
        self.parser.set_description(helpString)
    def setup_appName(self):
        """Return the application name used by the pydpiper framework."""
        appName = "twolevel_model_building"
        return appName
    def run(self):
        """Build the full two-level pipeline from the input CSV (args[0])."""
        options = self.options
        args = self.args
        # Setup output directories for two-level model building:
        (subjectDirs, dirs) = rf.setupTwoLevelDirectories(args[0], self.outputDir, options.pipeline_name, module="ALL")
        # read in files from CSV
        subjects = rf.setupSubjectHash(args[0], subjectDirs, options.mask_dir)
        #firstlevelNlins stores per subject NLIN avgs, subjStats stores first level stats, to be resampled at the end
        firstlevelNlins = []
        subjStats = []
        ### first level of registrations: register within subject
        for i in range(len(subjects)):
            baseVol = subjects[i][0].getLastBasevol()
            subjBase = splitext(split(baseVol)[1])[0]
            # Native-space inputs first need a rigid (LSQ6) alignment plus
            # non-uniformity correction and intensity normalization.
            if options.input_space == "native":
                initModel, targetPipeFH = rf.setInitialTarget(options.init_model,
                                                              options.lsq6_target,
                                                              subjectDirs[i].lsq6Dir,
                                                              self.outputDir)
                #LSQ6 MODULE, NUC and INORM
                runLSQ6NucInorm = lsq6.LSQ6NUCInorm(subjects[i],
                                                    targetPipeFH,
                                                    initModel,
                                                    subjectDirs[i].lsq6Dir,
                                                    options)
                self.pipeline.addPipeline(runLSQ6NucInorm.p)
            if options.input_space=="native" or options.input_space=="lsq6":
                #LSQ12+NLIN (registration starts here or is run after LSQ6)
                if options.input_space == "lsq6":
                    initModel=None
                lsq12Nlin = mm.FullIterativeLSQ12Nlin(subjects[i],
                                                      subjectDirs[i],
                                                      options,
                                                      avgPrefix=subjBase,
                                                      initModel=initModel)
                self.pipeline.addPipeline(lsq12Nlin.p)
                finalNlin = lsq12Nlin.nlinFH
                #If no protocols are specified, use same lsq12 and nlin protocols as for first level registration
                if not options.lsq12_protocol:
                    options.lsq12_protocol = lsq12Nlin.lsq12Params
                if not options.nlin_protocol:
                    options.nlin_protocol = lsq12Nlin.nlinParams
            elif options.input_space=="lsq12":
                #If inputs in lsq12 space, run NLIN only
                lsq12AvgFile = abspath(subjectDirs[i].lsq12Dir) + "/" + subjBase + "-lsq12avg.mnc"
                nlinObj = nl.initializeAndRunNLIN(subjectDirs[i].lsq12Dir,
                                                 subjects[i],
                                                 subjectDirs[i].nlinDir,
                                                 avgPrefix=subjBase,
                                                 createAvg=True,
                                                 targetAvg=lsq12AvgFile,
                                                 targetMask=options.target_mask,
                                                 nlin_protocol=options.nlin_protocol,
                                                 reg_method=options.reg_method)
                self.pipeline.addPipeline(nlinObj.p)
                finalNlin = nlinObj.nlinAverages[-1]
                # If no protocols are specified, get lsq12 based on resolution of one of the existing input files.
                # Use same nlin protocol as the one we ran previously.
                if not options.lsq12_protocol:
                    if not options.lsq12_subject_matter:
                        fileRes = rf.returnFinestResolution(subjects[i][0])
                    options.lsq12_protocol = mp.setLSQ12MinctraccParams(fileRes,
                                                                        subject_matter=options.lsq12_subject_matter)
                if not options.nlin_protocol:
                    options.nlin_protocol = nlinObj.nlinParams
            else:
                print "--input-space can only be native, lsq6 or lsq12. You specified: " + str(options.input_space)
                sys.exit()
            # add the last NLIN average to the volumes that will proceed to step 2
            firstlevelNlins.append(finalNlin)
            if options.calc_stats:
                tmpStats=[]
                for s in subjects[i]:
                    stats = st.CalcStats(s, finalNlin, options.stats_kernels)
                    self.pipeline.addPipeline(stats.p)
                    tmpStats.append(stats)
                subjStats.append(tmpStats)
        # second level of registrations: register final averages from first level
        # TODO: Allow for LSQ6 reg first, or just NLIN. Right now, we allow LSQ12+NLIN only
        firstLevelNlinsNewFH = []
        for nlin in firstlevelNlins:
            nlinFH = rfh.RegistrationPipeFH(nlin.getLastBasevol(), mask=nlin.getMask(), basedir=dirs.processedDir)
            firstLevelNlinsNewFH.append(nlinFH)
        lsq12Nlin = mm.FullIterativeLSQ12Nlin(firstLevelNlinsNewFH, dirs, options, avgPrefix="second_level")
        self.pipeline.addPipeline(lsq12Nlin.p)
        finalNlin = lsq12Nlin.nlinFH
        initialTarget = lsq12Nlin.initialTarget
        if options.calc_stats:
            for s in firstLevelNlinsNewFH:
                stats = st.CalcStats(s, finalNlin, options.stats_kernels)
                self.pipeline.addPipeline(stats.p)
            # now resample the stats files from the first level registration to the common space
            # created by the second level of registration
            for i in range(len(subjects)):
                for s in range(len(subjects[i])):
                    # get the last xfm from the second level registrations
                    xfm = firstLevelNlinsNewFH[i].getLastXfm(finalNlin)
                    p = mm.resampleToCommon(xfm,
                                            subjects[i][s],
                                            subjStats[i][s].statsGroup,
                                            options.stats_kernels,
                                            initialTarget)
                    self.pipeline.addPipeline(p)
if __name__ == "__main__":
    # Instantiate the application and let pydpiper drive the pipeline.
    LongitudinalTwolevelNlin().start()
| 8,299 | 31 | 116 |
20652bad1ff1459dd7113b64293e10f2385a3694 | 442 | py | Python | compound-interest-calculator.py | ADcadia/Compound-Interest-Calculator | b57e5a4deab3090abc708fd8ad6556fc28d90f7b | [
"Unlicense"
] | null | null | null | compound-interest-calculator.py | ADcadia/Compound-Interest-Calculator | b57e5a4deab3090abc708fd8ad6556fc28d90f7b | [
"Unlicense"
] | null | null | null | compound-interest-calculator.py | ADcadia/Compound-Interest-Calculator | b57e5a4deab3090abc708fd8ad6556fc28d90f7b | [
"Unlicense"
] | null | null | null | # Enter the interest rate (%) as a float (ex. 4% = .04).
# Compound-interest calculator: A = P * (1 + r/n) ** (n*t), with every value
# read interactively (the rate must be entered as a decimal, e.g. 4% -> .04).
principal = int(input("What is the principal? \n \n"))
compoundings_per_year = int(input("How many compoundings per year? \n \n"))
rate = float(input("What is the interest rate? \n \n"))
years = int(input("How many years will your money be compounded for? \n \n"))
final_amount = principal * (1 + rate / compoundings_per_year) ** (compoundings_per_year * years)
print("The total amount of money you will have in", years, "years is :", final_amount)
| 40.181818 | 92 | 0.635747 | # Enter the interest rate (%) as a float (ex. 4% = .04).
# Compound-interest calculator: A = P * (1 + r/n) ** (n*t), with every value
# read interactively (the rate must be entered as a decimal, e.g. 4% -> .04).
principal = int(input("What is the principal? \n \n"))
compoundings_per_year = int(input("How many compoundings per year? \n \n"))
rate = float(input("What is the interest rate? \n \n"))
years = int(input("How many years will your money be compounded for? \n \n"))
final_amount = principal * (1 + rate / compoundings_per_year) ** (compoundings_per_year * years)
print("The total amount of money you will have in", years, "years is :", final_amount)
| 0 | 0 | 0 |
5d45181d4c435134e01529e46c42e91dbd3990da | 849 | py | Python | example/bert_to_wordvec.py | zhupengjia/simple_kbqa | 5a56bc27454442909275eb19d97a0e3a208257f5 | [
"Apache-2.0"
] | null | null | null | example/bert_to_wordvec.py | zhupengjia/simple_kbqa | 5a56bc27454442909275eb19d97a0e3a208257f5 | [
"Apache-2.0"
] | null | null | null | example/bert_to_wordvec.py | zhupengjia/simple_kbqa | 5a56bc27454442909275eb19d97a0e3a208257f5 | [
"Apache-2.0"
] | 1 | 2019-12-11T06:33:18.000Z | 2019-12-11T06:33:18.000Z | #!/usr/bin/env python
import sys, os, torch, h5py
from transformers import BertModel, BertTokenizer
from nlptools.utils import zdump
# Export BERT's input word-embedding matrix and its vocabulary lookup table.
# Output files are written under model_path (first CLI argument, default ".").
model_path = sys.argv[1] if len(sys.argv) > 1 else "."
model_name = "bert-base-uncased"
# Fix: dropped the unused 'vocab_name' path that was assigned but never used.
weight_path = os.path.join(model_path, '{}.h5py'.format(model_name))
word2idx_path = os.path.join(model_path, '{}.lookup'.format(model_name))
# Load the pretrained model and detach the embedding weights as a numpy array.
model = BertModel.from_pretrained(model_name)
weights = model.embeddings.word_embeddings.weight.detach().numpy()
tokenizer = BertTokenizer.from_pretrained(model_name)
word2idx = tokenizer.vocab
print(weights.shape)
print(len(tokenizer.vocab))
# Recreate the HDF5 weight file from scratch on every run.
if os.path.exists(weight_path):
    os.remove(weight_path)
with h5py.File(weight_path, 'w') as h5file:
    h5file.create_dataset("word2vec", data=weights)
# Persist the token -> index mapping next to the weights (zdump: nlptools helper).
zdump(word2idx, word2idx_path)
| 27.387097 | 72 | 0.765607 | #!/usr/bin/env python
import sys, os, torch, h5py
from transformers import BertModel, BertTokenizer
from nlptools.utils import zdump
# Export BERT's input word-embedding matrix and its vocabulary lookup table.
# Output files are written under model_path (first CLI argument, default ".").
model_path = sys.argv[1] if len(sys.argv) > 1 else "."
model_name = "bert-base-uncased"
# Fix: dropped the unused 'vocab_name' path that was assigned but never used.
weight_path = os.path.join(model_path, '{}.h5py'.format(model_name))
word2idx_path = os.path.join(model_path, '{}.lookup'.format(model_name))
# Load the pretrained model and detach the embedding weights as a numpy array.
model = BertModel.from_pretrained(model_name)
weights = model.embeddings.word_embeddings.weight.detach().numpy()
tokenizer = BertTokenizer.from_pretrained(model_name)
word2idx = tokenizer.vocab
print(weights.shape)
print(len(tokenizer.vocab))
# Recreate the HDF5 weight file from scratch on every run.
if os.path.exists(weight_path):
    os.remove(weight_path)
with h5py.File(weight_path, 'w') as h5file:
    h5file.create_dataset("word2vec", data=weights)
# Persist the token -> index mapping next to the weights (zdump: nlptools helper).
zdump(word2idx, word2idx_path)
| 0 | 0 | 0 |
c574eb814dd96cab84aba69f09fb9dd973154144 | 357 | py | Python | day1/part_two.py | hmart90/advent-of-code | b7bc05bc837a87c50cad6e1ed739704e49652f68 | [
"Apache-2.0"
] | null | null | null | day1/part_two.py | hmart90/advent-of-code | b7bc05bc837a87c50cad6e1ed739704e49652f68 | [
"Apache-2.0"
] | null | null | null | day1/part_two.py | hmart90/advent-of-code | b7bc05bc837a87c50cad6e1ed739704e49652f68 | [
"Apache-2.0"
] | null | null | null | FILENAME = './day1/data/input'
# Advent of Code day 1, part 2: count how many three-reading sliding-window
# sums are strictly larger than the previous window's sum.
with open(FILENAME) as source:
    readings = [int(raw.strip()) for raw in source]
increases = 0
for idx in range(3, len(readings)):
    current_window = readings[idx] + readings[idx - 1] + readings[idx - 2]
    previous_window = readings[idx - 1] + readings[idx - 2] + readings[idx - 3]
    if current_window > previous_window:
        increases += 1
print(increases)
| 22.3125 | 133 | 0.619048 | FILENAME = './day1/data/input'
# Advent of Code day 1, part 2: count how many three-reading sliding-window
# sums are strictly larger than the previous window's sum.
with open(FILENAME) as source:
    readings = [int(raw.strip()) for raw in source]
increases = 0
for idx in range(3, len(readings)):
    current_window = readings[idx] + readings[idx - 1] + readings[idx - 2]
    previous_window = readings[idx - 1] + readings[idx - 2] + readings[idx - 3]
    if current_window > previous_window:
        increases += 1
print(increases)
| 0 | 0 | 0 |
df61160f4bb8361f95ff1dcfb8e914e74180d926 | 6,879 | py | Python | tests/test_gyomu_data_schema.py | Yoshihisa-Matsumoto/Gyomu_Python | c79a262d65ac6e23535731e30c9cacd9698ad626 | [
"MIT"
] | null | null | null | tests/test_gyomu_data_schema.py | Yoshihisa-Matsumoto/Gyomu_Python | c79a262d65ac6e23535731e30c9cacd9698ad626 | [
"MIT"
] | null | null | null | tests/test_gyomu_data_schema.py | Yoshihisa-Matsumoto/Gyomu_Python | c79a262d65ac6e23535731e30c9cacd9698ad626 | [
"MIT"
] | null | null | null | import pytest
from gyomu.gyomu_db_model import *
from gyomu.gyomu_db_schema import *
from gyomu.user_factory import UserFactory
from gyomu.user import User
from gyomu.db_connection_factory import DbConnectionFactory
from gyomu.json import Json
from marshmallow import ValidationError
# Module-level schema singletons shared by the tests below. many=True variants
# (de)serialize lists; the *_load variants pass load_instance=True so load()
# returns model instances rather than dicts -- confirm against GyomuAppsSchema.
gyomuapps_schema = GyomuAppsSchema()
gyomuapps_total_list_schema = GyomuAppsSchema(many=True)
gyomuapps_schema_load = GyomuAppsSchema(load_instance=True)
gyomuapps_total_list_schema_load = GyomuAppsSchema(many=True, load_instance=True)
# Application ids reserved for tests. NOTE(review): the second constant's name
# contains a typo ("APPLICAIONT"); kept as-is since tests reference it.
TEST_APPLICATION_ID2 = 32651
TEST_APPLICAIONT_ID3 = 32652
| 62.536364 | 503 | 0.767553 | import pytest
from gyomu.gyomu_db_model import *
from gyomu.gyomu_db_schema import *
from gyomu.user_factory import UserFactory
from gyomu.user import User
from gyomu.db_connection_factory import DbConnectionFactory
from gyomu.json import Json
from marshmallow import ValidationError
# Module-level schema singletons shared by the tests below. many=True variants
# (de)serialize lists; the *_load variants pass load_instance=True so load()
# returns model instances rather than dicts -- confirm against GyomuAppsSchema.
gyomuapps_schema = GyomuAppsSchema()
gyomuapps_total_list_schema = GyomuAppsSchema(many=True)
gyomuapps_schema_load = GyomuAppsSchema(load_instance=True)
gyomuapps_total_list_schema_load = GyomuAppsSchema(many=True, load_instance=True)
# Application ids reserved for tests. NOTE(review): the second constant's name
# contains a typo ("APPLICAIONT"); kept as-is since tests reference it.
TEST_APPLICATION_ID2 = 32651
TEST_APPLICAIONT_ID3 = 32652
class TestSchema:
    """Round-trip tests for GyomuAppsSchema serialization and loading."""
    def test_gyomu_app(self):
        """Serialize a single record and a two-record list via Json helpers
        and check the round trip preserves identity/length."""
        app = GyomuAppsInfoCdtbl()
        app.application_id = TEST_APPLICATION_ID2
        app.description = "Test Application"
        user: User = UserFactory.get_current_user()
        app.mail_from_name = user.userid
        app.mail_from_address = "Test@test.com"
        json_data = Json.to_json(app, gyomuapps_schema)
        app_loaded = Json.deserialize(json_data, GyomuAppsInfoCdtbl, gyomuapps_schema)
        assert app.application_id == app_loaded.application_id
        app2 = GyomuAppsInfoCdtbl()
        app2.application_id = TEST_APPLICAIONT_ID3
        app2.description = "Testw Application"
        user: User = UserFactory.get_current_user()
        app2.mail_from_name = user.userid
        app2.mail_from_address = "Testq@test.com"
        app_list = [app,app2]
        json_data = Json.to_json(app_list, gyomuapps_total_list_schema)
        #json_data = gyomuapps_total_list_schema.dumps(app_list)
        #dictionary = gyomuapps_total_list_schema.loads(json_data=json_data)
        #assert len(dictionary)==2
        app_list_loaded = Json.deserialize(json_data, GyomuAppsInfoCdtbl, gyomuapps_total_list_schema)
        assert len(app_list_loaded)==2
    def test_gyomu_app_load(self,environment_setup):
        """Same round trip but through schema.load() with a live DB session
        (environment_setup is presumably a pytest fixture -- confirm)."""
        app = GyomuAppsInfoCdtbl()
        app.application_id = TEST_APPLICATION_ID2
        app.description = "Test Application"
        user: User = UserFactory.get_current_user()
        app.mail_from_name = user.userid
        app.mail_from_address = "Test@test.com"
        json_data = gyomuapps_schema_load.dumps(app)
        dictionary = gyomuapps_schema.loads(json_data=json_data)
        with DbConnectionFactory.get_gyomu_db_session() as session:
            app_loaded = gyomuapps_schema_load.load(dictionary,session=session)
        assert app.application_id == app_loaded.application_id
        app2 = GyomuAppsInfoCdtbl()
        app2.application_id = TEST_APPLICAIONT_ID3
        app2.description = "Testw Application"
        user: User = UserFactory.get_current_user()
        app2.mail_from_name = user.userid
        app2.mail_from_address = "Testq@test.com"
        app_list = [app,app2]
        json_data = gyomuapps_total_list_schema.dumps(app_list)
        dictionary = gyomuapps_total_list_schema.loads(json_data=json_data)
        with DbConnectionFactory.get_gyomu_db_session() as session:
            apps = gyomuapps_total_list_schema_load.load(dictionary,session=session)
        assert len(apps)==2
    def test_gyomu_app_validation(self):
        """Check that schema validation rejects a non-integer id, over-long
        name/description fields, and a malformed e-mail address."""
        app = GyomuAppsInfoCdtbl()
        app.application_id = TEST_APPLICATION_ID2
        app.description = "Test Application"
        user: User = UserFactory.get_current_user()
        app.mail_from_name = user.userid
        app.mail_from_address = "Test@test.com"
        json_data = Json.to_json(app, gyomuapps_schema)
        # Non-integer application_id must fail validation.
        json_data = '{"mail_from_name": "yoshm", "description": "Test Application", "application_id": "Test", "mail_from_address": "Test@test.com"}'
        with pytest.raises(ValidationError) as ve:
            app2 = Json.deserialize(json_data, GyomuAppsInfoCdtbl, gyomuapps_schema)
        assert 'application_id' in ve.value.messages
        # Over-long mail_from_name must fail validation.
        json_data = '{"mail_from_name": "yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789", ' \
                    '"description": "Test Application", "application_id": 32651, "mail_from_address": "Test@test.com"}'
        with pytest.raises(ValidationError) as ve:
            dictionary = gyomuapps_schema.loads(json_data=json_data)
        assert 'mail_from_name' in ve.value.messages
        # Over-long description must fail validation.
        json_data = '{"mail_from_name": "yoshm",' \
                    ' "description": "Test Application123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789Application123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789", ' \
                    '"application_id": 32651, "mail_from_address": "Test@test.com"}'
        with pytest.raises(ValidationError) as ve:
            dictionary = gyomuapps_schema.loads(json_data=json_data)
        assert 'description' in ve.value.messages
        # Both over-long fields at once: both keys must be reported.
        json_data = '{"mail_from_name": "yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789yoshma123456789123456789123456789123456789123456789123456789", ' \
                    ' "description": "Test Application123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789Application123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789123456789", ' \
                    '"application_id": 32651, "mail_from_address": "Test@test.com"}'
        with pytest.raises(ValidationError) as ve:
            dictionary = gyomuapps_schema.loads(json_data=json_data)
        assert 'description' in ve.value.messages
        assert 'mail_from_name' in ve.value.messages
        # Malformed e-mail address must fail validation.
        json_data = '{"mail_from_name": "yoshm", "description": "Test Application", "application_id": 35456, "mail_from_address": "Testt.com"}'
        with pytest.raises(ValidationError) as ve:
            dictionary = gyomuapps_schema.loads(json_data=json_data)
            pass
assert 'mail_from_address' in ve.value.messages | 6,199 | -4 | 103 |
16b99d957e1208254f4639e5e9bec9c8da6c4bbb | 8,001 | py | Python | _scrap/_naver_stock.py | hopelife/mp_scraper | b3e227573647ca49b3c6c4bd9995a5d4b0d18b20 | [
"MIT"
] | null | null | null | _scrap/_naver_stock.py | hopelife/mp_scraper | b3e227573647ca49b3c6c4bd9995a5d4b0d18b20 | [
"MIT"
] | null | null | null | _scrap/_naver_stock.py | hopelife/mp_scraper | b3e227573647ca49b3c6c4bd9995a5d4b0d18b20 | [
"MIT"
] | null | null | null | import os, sys
import math
import copy
import time
from datetime import datetime
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: 현재 디렉토리 기준 상대 경로 설정
from utils_basic import (
_create_folder,
_read_file,
_file_to_json,
_json_to_file,
_to_lists,
_to_digit,
_divide_list,
_fn
)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
from scrap_selenium import (
_selenium,
_source,
_wait,
_login,
)
# sys.path.append(os.path.join(os.path.abspath('../staff')))
# from ScrapBySelenium import ScrapBySelenium
_base_url = 'https://m.stock.naver.com'
TODAY = datetime.now().strftime("%Y%m%d")
##
##----------------------------------------------------------
def scrap_naver_total(shcode='336370'):
    """Scrape the mobile Naver finance "total" (overview) page for the stock
    identified by *shcode* and print its trading-trend-by-investor table.

    Drives a real browser through the selenium helpers (_selenium/_wait/
    _source) because the page is rendered client-side.
    Returns False when the page fails to load, otherwise None.
    """
    # url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total"
    url = f"https://m.stock.naver.com/index.html#/domestic/stock/{shcode}/total"
    # NOTE(review): headless=False opens a visible browser window — presumably
    # left on for debugging; confirm before running unattended.
    browser = _selenium(url=url, headless=False)
    button = _wait(xpath='.//*[@id="content"]//div[@class="VStockInfo_article__3dWiQ"]/a', driver=browser)
    if not button:
        print(f"페이지 로딩 실패")
        return False
    button.click() ## "show more stock info" button
    html = _source(driver=browser, xpath='.//*[@id="content"]')
    root = _root(html)
    # ## NOTE: Naver finance / domestic market / overview (kept for reference)
    # info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0]
    # values = {
    #     'keys': {
    #         'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong',
    #         'target': 'text',
    #     },
    #     'vals': {
    #         'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span',
    #         'target': 'text',
    #     },
    # }
    # r = _extract_values(info, values, _zip=None)
    # print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])})
    ## NOTE: Naver finance / domestic market / trading trends by investor type
    button = _wait(xpath='.//*[@id="content"]//div[@class="VTableTrend_boxMore__1EVMo"]/a[1]', driver=browser)
    if not button:
        print(f"페이지 로딩 실패")
        return False
    button.click() ## "show more trading trends" button
    info = root.xpath('.//div[@class="VTableTrend_inner__1Crkx"]')[0]
    values = {
        'keys': {
            'xpath': './table/thead/tr/th',
            'target': 'text'
        },
        'vals': {
            'xpath': './table/tbody/tr/td',
            'target': 'content'
        }
    }
    r = _extract_values(info, values, _zip=None)
    n = len(r['keys']) ## NOTE: number of columns in the table
    # Every n-th cell (column 0) is kept as a text label; the cell at column
    # n-2 is halved before parsing — presumably its text is duplicated by the
    # renderer (TODO confirm); all other cells are parsed as numbers.
    vals = [val if i%n == 0 else _to_digit(val[:len(val)//2]) if i%n==n-2 else _to_digit(val) for i, val in enumerate(r['vals'])]
    rows = [r['keys']] + _divide_list(vals, n)
    print(f"투자동향: {rows}")
# ## NOTE: 동일 업종 비교
# xpath = '//div[contains(@class, "compare")]/a'
# if s.wait(xpath, max_wait=3) != -1: # '동일 업종 비교'가 있는 경우
# upjong = s.attribute_value(xpath, "href").split('=')[-1]
# output['업종번호'] = upjong
# ## 컨센서스
# xpath = '//span[contains(@class, "data_lyr")]'
# if s.check_element(xpath): # NOTE: 컨센서스가 있는 경우
# trade_weight = s._convert_to_float(s.find_element(xpath).text) # NOTE: 매수.매도 점수
# goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text) # NOTE: 목표가
# output['매매추천'] = trade_weight
# output['목표주가'] = goal_price
# s.close() # NOTE: selenium browser close
# return output
# def scrap_naver_upjong():
# """
# 업종 상승률
# """
# url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong"
# s = ScrapBySelenium(url=url)
# # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# wait_xpath = '//span[@class="u_pg_total"]'
# s.wait(wait_xpath)
# total = s._convert_to_float(s.find_element_text(wait_xpath))
# wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# s.click(xpath=wait_xpath) # 버튼 펼치기
# output = []
# for i in range(0, total):
# gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]'
# name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]'
# no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]'
# # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">상세 목록 보기</a>
# name = s.find_element(name_xpath).text
# no = s.attribute_value(no_xpath, 'href').split('=')[-1]
# gap = s._convert_to_float(s.find_element(gap_xpath).text)
# print(f"{name}, {no}, {gap}")
# output.append({'업종명': name, '업종번호': no, '업종상승률': gap})
# s.close()
# return output
if __name__ == '__main__':
## NOTE: 테스트
scrap_naver_total(shcode='336370')
## NOTE: shcode의 종목에 대한 '종합/투자자별 매매동향/업종번호/'
# t = scrap_naver_total(shcode='336370')
# print(f"{t}")
## NOTE: 업종별 업종명/업종번호/상승률
# u = scrap_naver_upjong()
# print(f"{u}")
## NOTE: file
# path = './naver_sise_rise_table_bak.html'
# path = './naver_sise_rise_table.html'
# root = _tree_from_file(path=path)
# # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div')
# # text = _text_by_xpath(root, xpath='.//th')
# result = []
# for i in range(3, 13):
# texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
# # print(f"{[el.text for el in root.findall('.//country//rank')]}")
# ## NOTE: naver_stock_m_domestic_upper_kospi
# path = './naver_stock_m_domestic_upper_kospi.html'
# root = _tree_from_file(path=path)
# result = []
# for i in range(1, 10):
# texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
## TODO:
## naver 업종 코드(page serial)
# https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218
# # 네이버
# N증권 > 국내증시
# ### 종합
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/total
# 전일
# 시가
# 고가
# 저가
# 거래량
# 대금
# 시총
# 외인소진율
# 52주최고
# 52주최저
# PER
# EPS
# BPS
# 배당수익률
# 주당배당금
# ### 토론
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss
# ### 뉴스.공시
# #### 종목뉴스
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/news
# #### 공시정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice
# #### IR정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir
# ### 시세.호가
# #### 일별시세
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/price
# #### 5단계 호가
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask
# ### 재무
# #### 연간실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual
# #### 분기실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter
# #### 비재무정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance
# ## 홈
# ### 관심종목
# ### 트렌드 랭킹
# ## 시장지표
# ### 주요
# ### 환율
# ### 에너지
# ### 금속
# ### 금리
# ### 농축산물
# ## 국내
# ### 시가총액
# ### 업종
# ### 테마
# ### 그룹
# ### 인기검색
# ### 배당
# ### 거래상위
# ### 상한가
# ###
# 컨센서스
# 컨센서스
# 업종
# 테마
# 그룹
# 거래상위
# https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0
# 상한가
# 상승
# 하락
# 관리
| 24.393293 | 143 | 0.550681 | import os, sys
import math
import copy
import time
from datetime import datetime
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: 현재 디렉토리 기준 상대 경로 설정
from utils_basic import (
_create_folder,
_read_file,
_file_to_json,
_json_to_file,
_to_lists,
_to_digit,
_divide_list,
_fn
)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
from scrap_selenium import (
_selenium,
_source,
_wait,
_login,
)
# sys.path.append(os.path.join(os.path.abspath('../staff')))
# from ScrapBySelenium import ScrapBySelenium
_base_url = 'https://m.stock.naver.com'
TODAY = datetime.now().strftime("%Y%m%d")
##
##----------------------------------------------------------
def scrap_naver_total(shcode='336370'):
"""
shcode의 종목에 대한 '종합/투자자별 매매동향/...' 데이터 scrap
"""
# url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total"
url = f"https://m.stock.naver.com/index.html#/domestic/stock/{shcode}/total"
browser = _selenium(url=url, headless=False)
button = _wait(xpath='.//*[@id="content"]//div[@class="VStockInfo_article__3dWiQ"]/a', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 종목 정보 더보기
html = _source(driver=browser, xpath='.//*[@id="content"]')
root = _root(html)
# ## NOTE: N증권 / 국내증시 / 종합
# info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0]
# values = {
# 'keys': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong',
# 'target': 'text',
# },
# 'vals': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span',
# 'target': 'text',
# },
# }
# r = _extract_values(info, values, _zip=None)
# print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])})
## NOTE: N증권 / 국내증시 / 투자자별 매매동향
button = _wait(xpath='.//*[@id="content"]//div[@class="VTableTrend_boxMore__1EVMo"]/a[1]', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 매매동향 더보기
info = root.xpath('.//div[@class="VTableTrend_inner__1Crkx"]')[0]
values = {
'keys': {
'xpath': './table/thead/tr/th',
'target': 'text'
},
'vals': {
'xpath': './table/tbody/tr/td',
'target': 'content'
}
}
r = _extract_values(info, values, _zip=None)
n = len(r['keys']) ## NOTE: 열column 수
vals = [val if i%n == 0 else _to_digit(val[:len(val)//2]) if i%n==n-2 else _to_digit(val) for i, val in enumerate(r['vals'])]
rows = [r['keys']] + _divide_list(vals, n)
print(f"투자동향: {rows}")
# ## NOTE: 동일 업종 비교
# xpath = '//div[contains(@class, "compare")]/a'
# if s.wait(xpath, max_wait=3) != -1: # '동일 업종 비교'가 있는 경우
# upjong = s.attribute_value(xpath, "href").split('=')[-1]
# output['업종번호'] = upjong
# ## 컨센서스
# xpath = '//span[contains(@class, "data_lyr")]'
# if s.check_element(xpath): # NOTE: 컨센서스가 있는 경우
# trade_weight = s._convert_to_float(s.find_element(xpath).text) # NOTE: 매수.매도 점수
# goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text) # NOTE: 목표가
# output['매매추천'] = trade_weight
# output['목표주가'] = goal_price
# s.close() # NOTE: selenium browser close
# return output
# def scrap_naver_upjong():
# """
# 업종 상승률
# """
# url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong"
# s = ScrapBySelenium(url=url)
# # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# wait_xpath = '//span[@class="u_pg_total"]'
# s.wait(wait_xpath)
# total = s._convert_to_float(s.find_element_text(wait_xpath))
# wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# s.click(xpath=wait_xpath) # 버튼 펼치기
# output = []
# for i in range(0, total):
# gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]'
# name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]'
# no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]'
# # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">상세 목록 보기</a>
# name = s.find_element(name_xpath).text
# no = s.attribute_value(no_xpath, 'href').split('=')[-1]
# gap = s._convert_to_float(s.find_element(gap_xpath).text)
# print(f"{name}, {no}, {gap}")
# output.append({'업종명': name, '업종번호': no, '업종상승률': gap})
# s.close()
# return output
if __name__ == '__main__':
## NOTE: 테스트
scrap_naver_total(shcode='336370')
## NOTE: shcode의 종목에 대한 '종합/투자자별 매매동향/업종번호/'
# t = scrap_naver_total(shcode='336370')
# print(f"{t}")
## NOTE: 업종별 업종명/업종번호/상승률
# u = scrap_naver_upjong()
# print(f"{u}")
## NOTE: file
# path = './naver_sise_rise_table_bak.html'
# path = './naver_sise_rise_table.html'
# root = _tree_from_file(path=path)
# # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div')
# # text = _text_by_xpath(root, xpath='.//th')
# result = []
# for i in range(3, 13):
# texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
# # print(f"{[el.text for el in root.findall('.//country//rank')]}")
# ## NOTE: naver_stock_m_domestic_upper_kospi
# path = './naver_stock_m_domestic_upper_kospi.html'
# root = _tree_from_file(path=path)
# result = []
# for i in range(1, 10):
# texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
## TODO:
## naver 업종 코드(page serial)
# https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218
# # 네이버
# N증권 > 국내증시
# ### 종합
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/total
# 전일
# 시가
# 고가
# 저가
# 거래량
# 대금
# 시총
# 외인소진율
# 52주최고
# 52주최저
# PER
# EPS
# BPS
# 배당수익률
# 주당배당금
# ### 토론
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss
# ### 뉴스.공시
# #### 종목뉴스
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/news
# #### 공시정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice
# #### IR정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir
# ### 시세.호가
# #### 일별시세
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/price
# #### 5단계 호가
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask
# ### 재무
# #### 연간실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual
# #### 분기실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter
# #### 비재무정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance
# ## 홈
# ### 관심종목
# ### 트렌드 랭킹
# ## 시장지표
# ### 주요
# ### 환율
# ### 에너지
# ### 금속
# ### 금리
# ### 농축산물
# ## 국내
# ### 시가총액
# ### 업종
# ### 테마
# ### 그룹
# ### 인기검색
# ### 배당
# ### 거래상위
# ### 상한가
# ###
# 컨센서스
# 컨센서스
# 업종
# 테마
# 그룹
# 거래상위
# https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0
# 상한가
# 상승
# 하락
# 관리
| 0 | 0 | 0 |
bae360ebaf9d4ec0184c5fa64024a723f24ff831 | 982 | py | Python | static_data/main.py | numericalresearch/cloudrisk | 845c1882e48d5a80c1b9a8be336376aea99ba457 | [
"BSD-3-Clause"
] | null | null | null | static_data/main.py | numericalresearch/cloudrisk | 845c1882e48d5a80c1b9a8be336376aea99ba457 | [
"BSD-3-Clause"
] | null | null | null | static_data/main.py | numericalresearch/cloudrisk | 845c1882e48d5a80c1b9a8be336376aea99ba457 | [
"BSD-3-Clause"
] | null | null | null | from azure.cosmos import CosmosClient
import subprocess
RES_GROUP = "my-cosmsos-resource-group"
ACCT_NAME = "my-cosomso-account-name"
url = get_account_uri(RES_GROUP, ACCT_NAME)
key = get_key(RES_GROUP, ACCT_NAME)
print(url, key)
client = CosmosClient(url, credential=key)
| 29.757576 | 119 | 0.720978 | from azure.cosmos import CosmosClient
import subprocess
RES_GROUP = "my-cosmsos-resource-group"
ACCT_NAME = "my-cosomso-account-name"
def get_account_uri(res_group, acct_name):
    """Return the document endpoint URI of an Azure Cosmos DB account.

    Shells out to the Azure CLI (``az cosmosdb show``). *res_group* and
    *acct_name* identify the account.

    Raises:
        ValueError: if the CLI exits with a non-zero status (the CLI's
        stderr is included in the message).
    """
    completed = subprocess.run(
        # Argument list instead of a shell-interpolated string: avoids shell
        # injection through res_group/acct_name and quoting pitfalls.
        ["az", "cosmosdb", "show",
         "--resource-group", res_group,
         "--name", acct_name,
         "--query", "documentEndpoint",
         "--output", "tsv"],
        capture_output=True, text=True)
    if completed.returncode != 0:
        # Surface the CLI's own diagnostics instead of a bare message.
        raise ValueError(f"error getting URI: {completed.stderr.strip()}")
    # --output tsv appends a trailing newline; strip it so the value can be
    # passed straight to CosmosClient.
    return completed.stdout.strip()
def get_key(res_group, acct_name):
    """Return the primary master key of an Azure Cosmos DB account.

    Shells out to the Azure CLI (``az cosmosdb list-keys``). *res_group*
    and *acct_name* identify the account.

    Raises:
        ValueError: if the CLI exits with a non-zero status (the CLI's
        stderr is included in the message).
    """
    completed = subprocess.run(
        # Argument list instead of a shell-interpolated string: avoids shell
        # injection through res_group/acct_name and quoting pitfalls.
        ["az", "cosmosdb", "list-keys",
         "--resource-group", res_group,
         "--name", acct_name,
         "--query", "primaryMasterKey",
         "--output", "tsv"],
        capture_output=True, text=True)
    if completed.returncode != 0:
        # Surface the CLI's own diagnostics instead of a bare message.
        raise ValueError(f"error getting key: {completed.stderr.strip()}")
    # --output tsv appends a trailing newline; strip it so the key can be
    # passed straight to CosmosClient.
    return completed.stdout.strip()
# Resolve connection details via the Azure CLI, then build the client.
url = get_account_uri(RES_GROUP, ACCT_NAME)
key = get_key(RES_GROUP, ACCT_NAME)
# NOTE(review): this prints the primary master key (a secret) to stdout —
# consider removing before production use.
print(url, key)
client = CosmosClient(url, credential=key)
| 655 | 0 | 46 |
c3fcbaca838784223014c49db842628ad06159a3 | 944 | py | Python | logging/seqlog/log2seq.py | jgraber/PythonFriday | 879f10934dc6949785e5a799bfc3ca9a2a4434d4 | [
"MIT"
] | 5 | 2021-02-22T08:39:55.000Z | 2022-03-14T03:54:36.000Z | logging/seqlog/log2seq.py | jgraber/PythonFriday | 879f10934dc6949785e5a799bfc3ca9a2a4434d4 | [
"MIT"
] | null | null | null | logging/seqlog/log2seq.py | jgraber/PythonFriday | 879f10934dc6949785e5a799bfc3ca9a2a4434d4 | [
"MIT"
] | null | null | null | import logging
import seqlog
import time
seqlog.log_to_seq(
server_url="http://127.0.0.1:5341/",
api_key="RK2UCFPEIY7dsttQJA9F",
level=logging.NOTSET,
batch_size=10,
auto_flush_timeout=1, # seconds
override_root_logger=True,
# json_encoder_class=json.encoder.JSONEncoder # Optional; only specify this if you want to use a custom JSON encoder
)
logging.debug("A log message in level debug")
logging.info("A log message in level info")
logging.warning("A log message in level warning")
logging.error("A log message in level error")
logging.critical("A log message in level critical")
logging.info("Hello, {name}!", name="World")
logging.info("Processed order {orderId} by {customer}",
orderId = 15, customer = "Johnny")
try:
result = 2 / 0
except Exception as exception:
logging.exception("We got an exception")
time.sleep(2) # sleep for 2 seconds to give seqlog time to write to Seq | 30.451613 | 120 | 0.717161 | import logging
import seqlog
import time
# Route records from Python's standard logging to a local Seq server.
# (The matching `import logging` appears on the preceding line of this file.)
seqlog.log_to_seq(
    server_url="http://127.0.0.1:5341/",
    api_key="RK2UCFPEIY7dsttQJA9F",
    level=logging.NOTSET,  # forward every record; no level filtering here
    batch_size=10,         # records are shipped in batches of up to 10
    auto_flush_timeout=1, # seconds
    override_root_logger=True,
    # json_encoder_class=json.encoder.JSONEncoder # Optional; only specify this if you want to use a custom JSON encoder
)
# One sample record per standard severity level.
logging.debug("A log message in level debug")
logging.info("A log message in level info")
logging.warning("A log message in level warning")
logging.error("A log message in level error")
logging.critical("A log message in level critical")
# seqlog supports Seq-style named-hole message templates; the keyword
# arguments become structured properties on the event.
logging.info("Hello, {name}!", name="World")
logging.info("Processed order {orderId} by {customer}",
    orderId = 15, customer = "Johnny")
# logging.exception attaches the active traceback to the logged event.
try:
    result = 2 / 0
except Exception as exception:
    logging.exception("We got an exception")
time.sleep(2) # sleep for 2 seconds to give seqlog time to write to Seq
d874044a2a4ab9b81bb785cf2015faeb757b6be2 | 157 | py | Python | test/gcs.py | blake-education/docker-registry | 225bb3ea1ce0bfe0f602486e0743c981352a970b | [
"Apache-2.0"
] | 1 | 2016-03-12T08:59:03.000Z | 2016-03-12T08:59:03.000Z | test/gcs.py | blake-education/docker-registry | 225bb3ea1ce0bfe0f602486e0743c981352a970b | [
"Apache-2.0"
] | 2 | 2021-03-26T00:50:23.000Z | 2021-04-30T21:42:56.000Z | test/gcs.py | blake-education/docker-registry | 225bb3ea1ce0bfe0f602486e0743c981352a970b | [
"Apache-2.0"
] | 1 | 2019-01-02T04:37:14.000Z | 2019-01-02T04:37:14.000Z |
import test_storage
import storage
| 14.272727 | 51 | 0.732484 |
import test_storage
import storage
class TestGSStorage(test_storage.TestLocalStorage):
    """Re-run the shared local-storage test suite against the 'gcs' backend.

    All test methods are inherited unchanged; only the storage driver is
    swapped in setUp.
    """
    def setUp(self):
        # Load the 'gcs' driver in place of the one used by the base class.
        self._storage = storage.load('gcs')
| 39 | 30 | 50 |
13eb3536e7ae4e6d1a87ac835080ececff092202 | 1,161 | py | Python | tests/dup_reads.py | robertDT/dt-snap | 5c4b7886336d72d8170371caea09ccd177961aa8 | [
"ECL-2.0",
"Apache-2.0"
] | 219 | 2015-01-18T00:28:42.000Z | 2022-02-10T09:00:10.000Z | tests/dup_reads.py | mateiz/snap | fe0fd2983195e145d0a2330eeda23333df832f56 | [
"ECL-2.0",
"Apache-2.0"
] | 126 | 2015-01-01T08:24:59.000Z | 2022-03-17T23:03:40.000Z | tests/dup_reads.py | mateiz/snap | fe0fd2983195e145d0a2330eeda23333df832f56 | [
"ECL-2.0",
"Apache-2.0"
] | 61 | 2015-01-25T06:28:18.000Z | 2022-02-24T12:01:04.000Z | # dup_reads.py
#
# create duplicate reads
#
import sys
import random
if (len(sys.argv) < 4 or len(sys.argv) > 5):
print "usage: %s <# of duplicate reads> <max duplication> read1.fq [read2.fq]" % sys.argv[p]
exit(1)
dupcount = int(sys.argv[1])
maxdup = int(sys.argv[2])
in1 = open(sys.argv[3], "r")
out1 = open("dup_" + sys.argv[3], "w")
paired = len(sys.argv) >= 5
if paired:
in2 = open(sys.argv[4], "r")
out2 = open("dup_" + sys.argv[4], "w")
for i in range(0, dupcount):
r1 = readread(in1)
if paired:
r2 = readread(in2)
ndup = random.randint(2,maxdup)
for j in range(0, ndup):
writeread(out1, ["@dup%d_%s" % (j, r1[0][1:]), r1[1], r1[2], r1[3]])
if paired:
writeread(out2, ["@dup%d_%s" % (j, r2[0][1:]), r2[1], r2[2], r2[3]])
| 27 | 104 | 0.55814 | # dup_reads.py
#
# create duplicate reads
#
import sys
import random
def readread(f):
    """Read one FASTQ record (4 raw lines) from file object *f*.

    Returns a list of the four lines with their newlines kept. At EOF the
    first element is '' and the record should be treated as empty by the
    caller. Exits the program on a structurally invalid record.
    """
    result = [f.readline(),f.readline(),f.readline(),f.readline()]
    # A valid record is '@name', sequence, '+', quality; the sequence and
    # quality lines must have the same length.
    if (result[0] and (result[0][0] != "@" or result[2][0] != "+" or len(result[1]) != len(result[3]))):
        # Bug fix: the files being parsed are FASTQ, not FASTA.
        sys.stderr.write("invalid fastq file near %s" % (result[0]))
        exit(1)
    return result
def writeread(f, r):
    """Write the four lines of FASTQ record *r* to file object *f*."""
    header, sequence, separator, quality = r[0], r[1], r[2], r[3]
    f.write(header)
    f.write(sequence)
    f.write(separator)
    f.write(quality)
if (len(sys.argv) < 4 or len(sys.argv) > 5):
print "usage: %s <# of duplicate reads> <max duplication> read1.fq [read2.fq]" % sys.argv[p]
exit(1)
dupcount = int(sys.argv[1])
maxdup = int(sys.argv[2])
in1 = open(sys.argv[3], "r")
out1 = open("dup_" + sys.argv[3], "w")
paired = len(sys.argv) >= 5
if paired:
in2 = open(sys.argv[4], "r")
out2 = open("dup_" + sys.argv[4], "w")
for i in range(0, dupcount):
r1 = readread(in1)
if paired:
r2 = readread(in2)
ndup = random.randint(2,maxdup)
for j in range(0, ndup):
writeread(out1, ["@dup%d_%s" % (j, r1[0][1:]), r1[1], r1[2], r1[3]])
if paired:
writeread(out2, ["@dup%d_%s" % (j, r2[0][1:]), r2[1], r2[2], r2[3]])
| 314 | 0 | 46 |
a0ec4f9cd65c0753cc42ec21a3270f48a3086e12 | 791 | py | Python | chd/pipelines.py | mikhailsidorov/chd | 854e15fe32b2c431e194283f480503b7304702a7 | [
"MIT"
] | null | null | null | chd/pipelines.py | mikhailsidorov/chd | 854e15fe32b2c431e194283f480503b7304702a7 | [
"MIT"
] | null | null | null | chd/pipelines.py | mikhailsidorov/chd | 854e15fe32b2c431e194283f480503b7304702a7 | [
"MIT"
] | null | null | null | import os
from scrapy import Request
from scrapy.pipelines.files import FilesPipeline
from scrapy.exceptions import DropItem
from .items import Course, Lesson
from . import settings
| 30.423077 | 83 | 0.685209 | import os
from scrapy import Request
from scrapy.pipelines.files import FilesPipeline
from scrapy.exceptions import DropItem
from .items import Course, Lesson
from . import settings
class CleanFileNamesPipeline(object):
    """Scrapy item pipeline that strips '!' characters from an item's first
    filename entry.

    Items without a 'filename' field pass through untouched.
    """
    def process_item(self, item, spider):
        """Return *item* with ``item['filename'][0]`` cleaned of '!' characters."""
        # Idiom: membership test on the mapping itself instead of .keys().
        if 'filename' in item:
            item['filename'][0] = item['filename'][0].replace('!', '')
        return item
class CustomNamingFilesPipeline(FilesPipeline):
    """FilesPipeline variant that stores each downloaded file under the name
    carried in the item's ``filename`` field.

    get_media_requests attaches the item's first filename entry to the
    download Request's meta; file_path later reads it back to decide the
    storage path.
    """
    def file_path(self, request, response=None, info=None):
        # Falls back to '' when the request carried no filename in its meta.
        return request.meta.get('filename', '')
    def get_media_requests(self, item, info):
        filename = item.get('filename', None)
        # Only attach meta when the item actually provides a filename.
        meta = {'filename': filename[0]} if filename else {}
        return [Request(x, meta=meta) for x in item.get(self.files_urls_field, [])]
| 438 | 42 | 125 |
94a3544d98c823a23f88e60788185db1788ce63c | 87 | py | Python | radiorepo/radios/apps.py | AAkrout/radio-django | 75c0f80e4bbad25e103a121f32da7a1799798727 | [
"Apache-2.0"
] | null | null | null | radiorepo/radios/apps.py | AAkrout/radio-django | 75c0f80e4bbad25e103a121f32da7a1799798727 | [
"Apache-2.0"
] | null | null | null | radiorepo/radios/apps.py | AAkrout/radio-django | 75c0f80e4bbad25e103a121f32da7a1799798727 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 14.5 | 33 | 0.747126 | from django.apps import AppConfig
class RadiosConfig(AppConfig):
    """Django application configuration for the ``radios`` app."""
    name = 'radios'
| 0 | 29 | 23 |
af8ceb7e971c9b1e516b864c1368d1deeaaef04d | 16,821 | py | Python | openid_connect_op/views/token_request_view.py | rerobins/django-openid-op | 96952a6d413718d0ed4c656265248b6f0d0c2e4e | [
"MIT"
] | 2 | 2020-02-15T20:20:36.000Z | 2020-05-12T02:39:42.000Z | openid_connect_op/views/token_request_view.py | rerobins/django-openid-op | 96952a6d413718d0ed4c656265248b6f0d0c2e4e | [
"MIT"
] | 3 | 2021-11-24T21:28:52.000Z | 2021-11-24T23:07:57.000Z | openid_connect_op/views/token_request_view.py | rerobins/django-openid-op | 96952a6d413718d0ed4c656265248b6f0d0c2e4e | [
"MIT"
] | 2 | 2019-11-25T07:40:28.000Z | 2021-11-29T16:06:59.000Z | import base64
import hashlib
import traceback
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from jwcrypto.common import base64url_encode
from ratelimit.mixins import RatelimitMixin
from openid_connect_op.models import OpenIDClient
from openid_connect_op.signals import access_token_start, access_token_finish
from openid_connect_op.utils.jwt import JWTTools
from . import OAuthRequestMixin
from .errors import OAuthError
from .parameters import AuthenticationParameters, TokenParameters
from ..models import OpenIDToken
# section 4.1.3 of OAUTH 2.0
# https://tools.ietf.org/pdf/draft-hunt-oauth-chain-01.pdf
| 44.975936 | 117 | 0.643957 | import base64
import hashlib
import traceback
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from jwcrypto.common import base64url_encode
from ratelimit.mixins import RatelimitMixin
from openid_connect_op.models import OpenIDClient
from openid_connect_op.signals import access_token_start, access_token_finish
from openid_connect_op.utils.jwt import JWTTools
from . import OAuthRequestMixin
from .errors import OAuthError
from .parameters import AuthenticationParameters, TokenParameters
from ..models import OpenIDToken
# section 4.1.3 of OAUTH 2.0
# https://tools.ietf.org/pdf/draft-hunt-oauth-chain-01.pdf
class TokenRequestView(OAuthRequestMixin, RatelimitMixin, View):
ratelimit_key = 'ip'
ratelimit_rate = '10/m'
ratelimit_block = True
ratelimit_method = 'ALL'
use_redirect_uri = False
attribute_parsing_error = 'invalid_request'
    # noinspection PyAttributeOutsideInit
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        """Token-endpoint entry point: parse parameters, authenticate the
        calling client and route to the handler for the requested grant type.

        OAuthError (and, as a last resort, any other exception) is converted
        to an OAuth-style JSON error answer instead of propagating.
        """
        if request.method not in ('GET', 'POST'):
            return HttpResponseBadRequest('Only GET or POST are supported on OpenID endpoint')
        # noinspection PyBroadException
        try:
            self.parse_request_parameters(request, TokenParameters)
            try:
                self.request_parameters.check_errors()
            except AttributeError as e:
                # Parameter validation problems are reported with the view's
                # configured error code ('invalid_request').
                raise OAuthError(error=self.attribute_parsing_error, error_description=str(e))
            client, user, token = self.authenticate_client(request)
            # grant_type is parsed into a set; dispatch on its single value.
            if self.request_parameters.grant_type == {'authorization_code'}:
                return self.process_authorization_code_grant_type(request, client)
            elif self.request_parameters.grant_type == {'refresh_token'}:
                return self.process_refresh_token(request, client)
            elif self.request_parameters.grant_type == {'http://oauth.net/grant_type/chain'}:
                return self.process_chain_token(request, client, user, token)
            else:
                raise OAuthError(error='invalid_request',
                                 error_description='Invalid grant type %s' % self.request_parameters.grant_type)
        except OAuthError as err:
            return self.oauth_send_answer(request, {
                'error': err.error,
                'error_description': err.error_description
            })
        except BaseException as err:
            # Catch-all boundary: never leak a traceback to the caller.
            traceback.print_exc()
            if settings.DEBUG:
                return self.oauth_send_answer(request, {
                    'error': 'unknown_error',
                    'error_description': 'Unknown error: %s' % traceback.format_exc()
                })
            else:
                return self.oauth_send_answer(request, {
                    'error': 'unknown_error',
                    'error_description': 'Unknown error occurred at %s, check the logs' % timezone.now()
                })
    def process_authorization_code_grant_type(self, request, client):
        """Exchange an authorization code (or delegated chain code) for tokens.

        Looks up the single-use code issued by the /authorize endpoint (or by
        the chain grant), deletes it so it cannot be replayed, and responds
        with access/refresh/id tokens.
        """
        access_token_start.send('auth', request=request, openid_client=client)
        if not self.request_parameters.code:
            raise OAuthError(error='invalid_request',
                             error_description='Required parameter with name "code" is not present')
        # The code is stored hashed; both plain auth codes and delegated
        # (chain) codes issued for this client are accepted.
        authorization_token = OpenIDToken.objects.filter(
            token_hash=OpenIDToken.get_token_hash(self.request_parameters.code),
            client=client,
            token_type__in=(OpenIDToken.TOKEN_TYPE_AUTH, OpenIDToken.TOKEN_TYPE_DELEGATE)).first()
        if not authorization_token:
            raise OAuthError(error='unauthorized_client',
                             error_description='Authorization token not found')
        if authorization_token.expired:
            raise OAuthError(error='unauthorized_client',
                             error_description='Authorization token expired')
        authentication_parameters = AuthenticationParameters(authorization_token.token_data)
        # prevent reusing: authorization codes are single-use
        authorization_token.delete()
        delegated = authorization_token.token_type == OpenIDToken.TOKEN_TYPE_DELEGATE
        if not delegated:
            # redirect_uri consistency only applies to the regular code flow
            self.validate_redirect_uri(authentication_parameters)
        return self.generate_tokens_and_oauth_response(authentication_parameters, client, request, delegated)
    def process_refresh_token(self, request, client):
        """Issue a fresh set of tokens from a previously issued refresh token."""
        if not self.request_parameters.refresh_token:
            raise OAuthError(error='invalid_request',
                             error_description='Required parameter with name "refresh_token" is not present')
        try:
            refresh_token = OpenIDToken.objects.get(
                token_hash=OpenIDToken.get_token_hash(self.request_parameters.refresh_token),
                token_type=OpenIDToken.TOKEN_TYPE_REFRESH_TOKEN,
                client=client)
            if refresh_token.expired:
                raise OAuthError(error='invalid_grant', error_description='Refresh token expired')
        except OpenIDToken.DoesNotExist:
            raise OAuthError(error='invalid_grant', error_description='No such token was found')
        # Reuse the authentication parameters of the access token this
        # refresh token was minted for (its root token).
        original_access_token = refresh_token.root_token
        authentication_parameters = AuthenticationParameters(original_access_token.token_data)
        # noinspection PyTypeChecker
        return self.generate_tokens_and_oauth_response(authentication_parameters, client, request)
    def process_chain_token(self, request, client, user, access_token):
        """Handle the draft OAuth 'chain' grant: mint a delegated code that
        the client named by the request's client_id can later exchange for
        its own tokens on behalf of *user*.
        """
        try:
            target_client = OpenIDClient.objects.get(client_code=self.request_parameters.client_id)
        except OpenIDClient.DoesNotExist:
            raise OAuthError(error='invalid_request',
                             error_description='Requested client id not found')
        def ld(param, default):
            # Request-parameter lookup helper; POST takes precedence over GET.
            return request.POST.get(param, request.GET.get(param, default))
        token_data = {
            'username': user.username,
            'scope': [x.strip() for x in ld('scope', 'openid').split(',')],
            'claims': [x.strip() for x in ld('claims', '').split(',')],
            'nonce': ld('nonce', None)
        }
        if not token_data['nonce']:
            # Deterministic fallback nonce derived from the target client id.
            token_data['nonce'] = hashlib.sha256(
                target_client.client_id.encode('utf-8')).hexdigest()
        # The delegated code is rooted at the caller's access token.
        chain_token, chain_db_token = OpenIDToken.create_token(
            target_client, OpenIDToken.TOKEN_TYPE_DELEGATE, token_data, 10000,
            user, root_db_token=access_token)
        return self.oauth_send_answer(request, {
            'token': chain_token,
            'token_type': 'delegate',
            'expires': chain_db_token.expiration.isoformat(),
        })
    def generate_tokens_and_oauth_response(self, authentication_parameters, client,
                                           request, delegated=False):
        """Create access (+optional refresh) and id tokens and build the
        OAuth token response.

        When *delegated* is True (chain grant) no refresh token is issued.
        """
        # TTLs are configurable through Django settings, with defaults of
        # one hour (access) and ten hours (refresh).
        access_token_ttl = getattr(settings, 'OPENID_DEFAULT_ACCESS_TOKEN_TTL', 3600)
        refresh_token_ttl = getattr(settings, 'OPENID_DEFAULT_REFRESH_TOKEN_TTL', 3600 * 10)
        user = User.objects.get(username=authentication_parameters.username)
        access_token, db_access_token = OpenIDToken.create_token(
            client,
            OpenIDToken.TOKEN_TYPE_ACCESS_BEARER_TOKEN,
            authentication_parameters.to_dict(),
            access_token_ttl,
            user,
        )
        if not delegated:
            # The refresh token is rooted at the access token so a later
            # refresh can recover the original authentication parameters.
            refresh_token, refresh_db_token = OpenIDToken.create_token(
                client,
                OpenIDToken.TOKEN_TYPE_REFRESH_TOKEN,
                {},
                refresh_token_ttl,
                user,
                root_db_token=db_access_token
            )
        else:
            refresh_token = None
        id_token = self.create_id_token(request, client, authentication_parameters, db_access_token, user,
                                        access_token=access_token)
        access_token_finish.send('auth', request=request, openid_client=client, access_token=access_token,
                                 refresh_token=refresh_token, id_token=id_token)
        ret = {
            'access_token': access_token,
            'token_type': 'Bearer',
            'expires_in': access_token_ttl,
            'id_token': id_token
        }
        if refresh_token:
            ret['refresh_token'] = refresh_token
        return self.oauth_send_answer(request, ret)
def validate_redirect_uri(self, authentication_parameters):
auth_redirect_uri = authentication_parameters.redirect_uri
token_redirect_uri = self.request_parameters.redirect_uri
if auth_redirect_uri and auth_redirect_uri != token_redirect_uri:
raise OAuthError(error='invalid_request',
error_description='redirect_uri does not match the one used in /authorize endpoint')
if not auth_redirect_uri and token_redirect_uri:
raise OAuthError(error='invalid_request',
error_description='redirect_uri not used in authentication but passed for token')
    def authenticate_client(self, request):
        """Resolve the requesting client, trying authentication methods in order.

        Order tried: HTTP Basic header, Bearer token header, JWT-bearer
        client assertion, client_secret in the request body, and finally
        "none" authentication for clients configured without credentials.

        Returns a (client, user, token) triple; user and token are only
        populated for Bearer authentication. Raises OAuthError when no
        method succeeds.
        """
        auth_header = request.META.get('HTTP_AUTHORIZATION')
        if auth_header:
            if auth_header.startswith('Basic '):
                # only client is authenticated
                return self.authenticate_with_http_basic(auth_header), None, None
            if auth_header.startswith('Bearer '):
                client, user, token = self.authenticate_with_bearer(auth_header)
                if client:
                    return client, user, token
                # No matching live token: fall through to the other methods.
        if self.request_parameters.client_assertion_type == 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer':
            if not self.request_parameters.client_assertion:
                raise OAuthError(error='unsupported_authentication_method',
                                 error_description='Need client_assertion if client_assertion_type is jwt-bearer')
            return self.authenticate_with_jwt_bearer(), None, None
        if self.request_parameters.client_secret:
            return self.authenticate_with_client_secret(), None, None
        if self.request_parameters.client_id:
            client = self.try_null_authentication()
            if client:
                return client, None, None
        raise OAuthError(error='unsupported_authentication_method',
                         error_description='Only HTTP Basic auth or client_secret is supported')
    def authenticate_with_http_basic(self, auth_header):
        """Authenticate a client from an ``Authorization: Basic`` header.

        Decodes the base64 "client_id:client_secret" pair, verifies the
        secret, checks consistency with any client_id sent in the request
        body, and requires the client to be configured for Basic auth.
        Returns the OpenIDClient or raises OAuthError; an unknown client and
        a bad secret deliberately produce the same error message.
        """
        # Strip the "Basic " prefix (6 chars) before decoding.
        username, password = base64.b64decode(auth_header[6:].strip()).decode('utf-8').split(':', 1)
        try:
            client = OpenIDClient.objects.get(client_id=username)
            if not client.check_client_secret(password):
                raise OAuthError(error='unauthorized_client', error_description='Bad username or password')
            if self.request_parameters.client_id and client.client_id != self.request_parameters.client_id:
                raise OAuthError(error='invalid_request',
                                 error_description='client_id does not match with authorized client')
            if client.client_auth_type != client.CLIENT_AUTH_TYPE_BASIC:
                raise OAuthError(error='invalid_request',
                                 error_description='Used HTTP Basic but client configured to different auth')
            return client
        except OpenIDClient.DoesNotExist:
            raise OAuthError(error='unauthorized_client', error_description='Bad username or password')
def authenticate_with_bearer(self, auth_header):
bearer_token = auth_header[7:].strip()
token = OpenIDToken.objects.filter(
token_hash=OpenIDToken.get_token_hash(bearer_token),
expiration__gte=timezone.now()
).first()
if not token:
return None, None, None
return token.client, token.user, token
    def authenticate_with_client_secret(self):
        """Authenticate via client_id + client_secret sent in the POST body.

        Requires client_id; the client must be configured for POST
        authentication and the secret must match. Returns the OpenIDClient or
        raises OAuthError; an unknown client and a wrong secret deliberately
        yield the same error.
        """
        if not self.request_parameters.client_id:
            raise OAuthError(error='invalid_request',
                             error_description='Need client_id when using client_secret')
        try:
            client = OpenIDClient.objects.get(client_id=self.request_parameters.client_id)
            if client.client_auth_type != client.CLIENT_AUTH_TYPE_POST:
                raise OAuthError(error='invalid_request',
                                 error_description='Client not configured to use POST authentication')
            if client.check_client_secret(self.request_parameters.client_secret):
                return client
        except OpenIDClient.DoesNotExist:
            pass
        # Reached on unknown client_id or a non-matching secret.
        raise OAuthError(error='unauthorized_client', error_description='Bad client_id or client_secret')
    def authenticate_with_jwt_bearer(self):
        """Authenticate the client from a signed JWT assertion.

        Checks the unverified payload for required claims (sub, iss, aud,
        jti, exp), requires iss == sub, requires this endpoint's absolute URI
        to be among the audiences, resolves the issuer to a registered
        client, and finally verifies the JWT signature against that client.
        Returns the OpenIDClient or raises OAuthError.
        """
        assertion = self.request_parameters.client_assertion
        payload = JWTTools.unverified_jwt_payload(assertion)
        for req in ('sub', 'iss', 'aud', 'jti', 'exp'):
            if req not in payload:
                raise OAuthError(error='invalid_request',
                                 error_description='The assertion token must contain %s field' % req)
        if payload['iss'] != payload['sub']:
            raise OAuthError(error='invalid_request',
                             error_description='The assertion token\'s iss and sub fields differ')
        auri = self.request.build_absolute_uri(self.request.path)
        for aud in payload['aud']:
            if auri == aud:
                break
        else:
            # for/else: runs only when no audience matched this endpoint.
            raise OAuthError(error='invalid_request',
                             error_description='The assertion token is for audience %s, I am %s' % (
                                 payload['aud'], auri))
        client = OpenIDClient.objects.filter(client_id=payload['iss']).first()
        if not client:
            raise OAuthError(error='invalid_request',
                             error_description='Client with id %s is not registered on this server' % payload['iss'])
        try:
            JWTTools.validate_jwt(assertion, client)
        except Exception as e:
            # NOTE(review): traceback/print debug output left on the failure
            # path; consider replacing with proper logging.
            traceback.print_exc()
            print("debug: Client auth method", client.client_auth_type)
            raise OAuthError(error='invalid_request',
                             error_description='JWT validation failed: %s' % e)
        return client
def try_null_authentication(self):
if not self.request_parameters.client_id:
return None
try:
client = OpenIDClient.objects.get(client_id=self.request_parameters.client_id)
if client.client_auth_type != client.CLIENT_AUTH_TYPE_NONE:
return None
return client
except OpenIDClient.DoesNotExist:
pass
    @staticmethod
    def create_id_token(request, client, authentication_parameters, db_access_token, user, access_token=None):
        """Build, sign and persist an OIDC id_token tied to *db_access_token*.

        The id_token expires together with the access token. When
        *access_token* is given, its at_hash is embedded; when the client has
        userinfo_in_id_token set, basic profile claims are inlined. Returns
        the signed JWT string.
        """
        id_token = {
            "iss": request.build_absolute_uri('/'),
            "sub": client.make_sub(settings.OPENID_SUB_PROVIDER(user, client)),
            "aud": [client.client_id],
            "exp": int(db_access_token.expiration.timestamp()),
            # the time at which user was authenticated - we do not have this stored anywhere ...
            # "auth_time": 1311280969,
            # level of trustability of the login
            # "acr": "urn:mace:incommon:iap:silver",
            # names of authentication methods that were used to login this user
            # "amr": None,
            # the audience is the same as the acceptor of this token, so omitting the azp
            # "azp": None
        }
        if authentication_parameters.nonce:
            id_token['nonce'] = authentication_parameters.nonce
        if access_token:
            id_token['at_hash'] = make_access_token_hash(access_token)
        if client.userinfo_in_id_token:
            id_token.update({
                'email': user.email,
                'family_name': user.last_name,
                'given_name': user.first_name
            })
        token = JWTTools.generate_jwt(id_token)
        # save the token to the database
        OpenIDToken.create_token(client, OpenIDToken.TOKEN_TYPE_ID_TOKEN, {
            'token': token
        }, db_access_token.expiration, user, db_access_token, token=token)
        return token
def make_access_token_hash(access_token):
    """Compute the OIDC ``at_hash``: base64url of the left half of SHA-256(access_token)."""
    digest = hashlib.sha256(access_token.encode('ascii')).digest()
    left_half = digest[:len(digest) // 2]
    return base64url_encode(left_half)
| 15,241 | 675 | 45 |
ea7566a7ca97c2ade1f25e7bf6792259a199be6a | 4,895 | py | Python | accident/models/models.py | ElNahoko/HSE_ARNOSH | 1a8661db454e6a9e7f775a3ffd58a3936a43bb59 | [
"Apache-2.0"
] | 1 | 2019-08-10T17:57:58.000Z | 2019-08-10T17:57:58.000Z | accident/models/models.py | ElNahoko/HSE_ARNOSH | 1a8661db454e6a9e7f775a3ffd58a3936a43bb59 | [
"Apache-2.0"
] | null | null | null | accident/models/models.py | ElNahoko/HSE_ARNOSH | 1a8661db454e6a9e7f775a3ffd58a3936a43bb59 | [
"Apache-2.0"
] | 2 | 2019-08-14T18:08:04.000Z | 2019-09-04T19:01:08.000Z | # -*- coding: utf-8 -*-
import datetime
from odoo import models, fields, api , _
| 36.529851 | 129 | 0.539939 | # -*- coding: utf-8 -*-
import datetime
from odoo import models, fields, api , _
class res_partner(models.Model):
    """Inheritance hook for ``res.partner``; adds no fields or behaviour here."""
    _inherit = 'res.partner'
class regle(models.Model):
    """Safety rule (règle de sécurité) referenced by accident/incident reports."""
    _name = 'reg.reg'
    _rec_name = 'nom'
    # Rule name; used as the record's display name via _rec_name.
    nom = fields.Char(string="Règle", required=False, )
    icon = fields.Binary(string="Icone", )
    desc = fields.Char(string="Description", required=False, )
class accident(models.Model):
    """Accident / incident (SOR) report.

    Carries a kanban-style workflow (draft -> normal -> done/blocked) plus
    helpers to print a report, spawn a follow-up record in the 'action'
    model, and e-mail an urgent alert. Chatter is enabled via mail.thread.
    """
    _name = 'acc.acc'
    _rec_name = 'type'
    _inherit = ['mail.thread']
    # Reporting agent; defaults to the current user and is read-only in the UI.
    id_soumetteur = fields.Many2one(
        'res.users',
        string='Agent',
        default=lambda s: s.env.user,
        readonly=True)
    # Workflow state driving the kanban view; not copied on duplication.
    state = fields.Selection([
        ('draft', 'Brouillon'),
        ('normal', 'En Progrès'),
        ('done', 'Resolu'),
        ('blocked', 'Refuser')],
        string='Kanban State',
        copy=False,
        default='draft',
        required=True)
    # Free-text damage descriptions: human, material, environmental.
    grav = fields.Char(string="Dégât Humain", required=False, )
    Mat = fields.Char(string="Dégât Matériel", required=False, )
    enviro = fields.Char(string="Dégât Environnemental", required=False, )
    lieu = fields.Char(string="Lieu", required=False, )
    temoin = fields.Char(string="Témoin", required=False, )
    type = fields.Selection(string="Type", selection=[('Accident', 'Accident'), ('Incident', 'Incident'), ], required=False, )
    consq = fields.Char(string="Conséquences" ,required=True, )
    cause = fields.Char(string="Causes Propables" , required=True,)
    regle = fields.Many2one('reg.reg', string="Règle de sécurité", required=False, )
    # Report timestamp; defaults to creation time and is not editable.
    date = fields.Datetime(string="Date",default=lambda s: fields.Datetime.now(),invisible=False,readonly=True, required=False, )
    descaccident = fields.Char(string="Déscription", required=False, )
    img = fields.Binary(string="Photo", )
    @api.model
    def create(self, vals):
        """Assign a sequence reference and move the fresh record to 'normal'.

        NOTE(review): no 'reference' field is declared on this model — confirm
        it exists (e.g. inherited) before relying on this assignment.
        """
        if vals:
            vals['reference'] = self.env['ir.sequence'].next_by_code('observation.hse') or _('New')
        res = super(accident, self).create(vals)
        res.state = 'normal'
        return res
    @api.multi
    def print_accident(self):
        """Render the report defined by accident.action_report_accident."""
        return self.env.ref('accident.action_report_accident').report_action(self)
    @api.multi
    def create_action(self):
        """Create a follow-up record in the 'action' model from this report,
        mark the report as done, and open the new action in a form view.

        NOTE(review): the dict keys with spaces/accents ('Dégâts Humains',
        ...) are unusual for ORM field names — verify they match the 'action'
        model's fields. Also, if rec.type is neither 'Accident' nor
        'Incident', action_id is never bound and the return would raise.
        """
        action_obj = self.env["action"]
        for rec in self:
            action_sor_accident = {
                'source': 'Accident',
                'originateur': rec.id_soumetteur.name,
                'Dégâts Humains': rec.grav,
                'Dégâts Matériels': rec.Mat,
                'Dégâts Environnementales': rec.enviro,
                'etat': 'Ouvert',
                'date_creation': datetime.datetime.now()
            }
            action_sor_incident = {
                'source': 'Incident',
                'originateur': rec.id_soumetteur.name,
                'Dégâts Humains': rec.grav,
                'Dégâts Matériels': rec.Mat,
                'Dégâts Environnementales': rec.enviro,
                'etat': 'Ouvert',
                'date_creation': datetime.datetime.now()
            }
            if rec.type == 'Incident':
                action_idinc = action_obj.create(action_sor_incident)
                action_id = action_idinc.id
            if rec.type == 'Accident':
                action_idacc = action_obj.create(action_sor_accident)
                action_id = action_idacc.id
            view_id = self.env.ref('action.action_form_view').id
            self.write({'state': 'done'})
            return {
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'action',
                'view_id': view_id,
                'type': 'ir.actions.act_window',
                'name': _('SOR Action'),
                'res_id': action_id
            }
    @api.multi
    def refuser_observation(self):
        """Reject the observation by moving it to the 'blocked' state."""
        return self.write({'state': 'blocked'})
    @api.multi
    def informer_responsable(self):
        """E-mail an urgent-observation alert built from this record.

        NOTE(review): the recipient address is hard-coded.
        """
        message_body = "Bonjour " + self.id_soumetteur.name + "," + \
                       "<br>Vous avez recu un input Urgent " + \
                       "<br>Dégâts Humains : " + self.grav + \
                       "<br>Dégâts Matériels : " + self.Mat + \
                       "<br>Dégâts Environnementales : " + self.enviro + \
                       "<br>Date : " + str(self.date) + \
                       '<br><br>Cordialement'
        to = "hamza@gmail.com"
        data = {
            'subject': 'Observation Urgente de '+self.type,
            'body_html': message_body,
            'email_from': self.env.user.company_id.email,
            'email_to': to
        }
        template_id = self.env['mail.mail'].create(data)
        if self.env['mail.mail'].send(template_id):
            print("5/5")
        else:
            print("0/5")
| 2,905 | 1,875 | 69 |
4623747cd5c86ee16b6435c58553a66988fc2a22 | 4,646 | py | Python | utils/bot.py | DevotedExuDevelopersTeam/ClanManager | 554eec3a011f8ea8d88b05b930ef5b07901f454b | [
"MIT"
] | 1 | 2022-02-15T20:30:42.000Z | 2022-02-15T20:30:42.000Z | utils/bot.py | DevotedExuDevelopersTeam/ClanManager | 554eec3a011f8ea8d88b05b930ef5b07901f454b | [
"MIT"
] | 4 | 2022-02-21T18:19:01.000Z | 2022-03-07T06:32:31.000Z | utils/bot.py | DevotedExuDevelopersTeam/ClanManager | 554eec3a011f8ea8d88b05b930ef5b07901f454b | [
"MIT"
] | null | null | null | import os
from datetime import datetime, timedelta
import aiosqlite
import disnake
from aiosqlite import connect
from disnake.ext import commands
from dotenv import load_dotenv
from exencolorlogs import Logger
from utils.constants import *
| 34.671642 | 93 | 0.647439 | import os
from datetime import datetime, timedelta
import aiosqlite
import disnake
from aiosqlite import connect
from disnake.ext import commands
from dotenv import load_dotenv
from exencolorlogs import Logger
from utils.constants import *
class Bot(commands.Bot):
    """ClanManager bot: a disnake ``commands.Bot`` bound to a single guild,
    backed by an aiosqlite database, with guild role/channel references
    cached once the gateway is ready (see :meth:`on_ready`)."""

    def __init__(self):
        super().__init__(
            "cm!",
            intents=disnake.Intents.all(),
            strip_after_prefix=True,
            case_insensitive=True,
            test_guilds=[GUILD_ID],
        )
        self.log = Logger()
        self.add_check(self.global_check)
        self.add_app_command_check(self.slash_global_check, slash_commands=True)
        # In-memory queues of users whose applications / verification
        # requests are awaiting review.
        self.pending_applicants = []
        self.pending_verification_requests = []

    async def execute(self, query: str, *args) -> aiosqlite.Cursor:
        """Run a parametrized SQL statement, commit, and return the cursor."""
        cur = await self.db.execute(query, args)
        await self.db.commit()
        return cur

    async def create_database(self):
        """Apply the SQL schema from config.sql to the open database."""
        with open("config.sql", "r") as f:
            await self.db.executescript(f.read())

    def run(self):
        """Prepare working directories, load extensions and start the bot."""
        if not os.path.exists(".tmp"):
            os.mkdir(".tmp")
        if not os.path.exists("data"):
            os.mkdir("data")
        load_dotenv()
        try:
            self.load_extensions("ext")
            super().run(os.getenv("TOKEN"))
        except disnake.LoginFailure:
            self.log.critical(
                "Improper token has been parsed. Please enter new token and restart the bot."
            )
            # NOTE(review): write_new_token is not defined on this class —
            # confirm it is provided elsewhere or this line raises AttributeError.
            self.write_new_token()

    async def start(self, *args, **kwargs):
        """Open the database connection and ensure the schema before starting."""
        self.log.info("Connecting to database...")
        self.db = await connect("data/database.db")
        self.log.info("Database connection established.")
        await self.create_database()
        await super().start(*args, **kwargs)

    async def close(self):
        """Close the database connection, then shut the bot down."""
        self.log.info("Closing database connection...")
        await self.db.close()
        self.log.info("Database connection closed.")
        await super().close()

    async def on_ready(self):
        """Cache guild, role and channel objects once the gateway is ready."""
        self.log.info("Bot is ready!")
        # server definition
        self.server = self.get_guild(GUILD_ID)
        # roles definition
        self.staff = self.get_role(STAFF_ROLE_ID)
        self.unverified = self.get_role(UNVERIFIED_ROLE_ID)
        self.applicant = self.get_role(APPLICANT_ROLE_ID)
        self.officer = self.get_role(OFFICER_ROLE_ID)
        self.verified = self.get_role(VERIFIED_ROLE_ID)
        self.clan_member = self.get_role(CLAN_MEMBER_ROLE_ID)
        # channels definition
        self.get_started = self.get_channel(GET_STARTED_CHANNEL_ID)
        self.join_clan = self.get_channel(JOIN_CLAN_CHANNEL_ID)
        self.verify = self.get_channel(VERIFY_CHANNEL_ID)
        self.discussions = self.get_channel(DISCUSSIONS_CATEGORY_ID)
        self.pending_verification = self.get_channel(PENDING_VERIFICATIONS_CHANNEL_ID)
        self.admin = self.get_channel(LOG_CHANNEL_ID)
        # application channels definition
        self.pending_applications = self.get_channel(PENDING_APPLICATIONS_CHANNEL_ID)
        self.accepted_applications = self.get_channel(ACCEPTED_APPLICATIONS_CHANNEL_ID)
        self.denied_applications = self.get_channel(DENIED_APPLICATIONS_CHANNEL_ID)

    @staticmethod
    def global_check(ctx: commands.Context):
        """Reject prefix commands sent in DMs."""
        return ctx.channel.type != disnake.ChannelType.private

    @staticmethod
    def slash_global_check(inter: disnake.ApplicationCommandInteraction):
        """Reject slash commands sent in DMs."""
        return inter.channel.type != disnake.ChannelType.private

    async def add_ban_record(self, user: disnake.Member, time: timedelta):
        """Record (or overwrite) a ban for *user* expiring *time* from now."""
        await self.execute(
            "INSERT OR REPLACE INTO bans VALUES (?, ?)",
            user.id,
            (datetime.now() + time).timestamp(),
        )

    def get_role(self, role_id: int):
        """Resolve a role id within the configured guild."""
        return self.server.get_role(role_id)

    def get_member(self, member_id: int):
        """Resolve a member id within the configured guild."""
        return self.server.get_member(member_id)

    async def set_pg_id(self, id: int, pg_id: int):
        """Store (or replace) the pg_id mapped to a Discord user id."""
        await self.execute("INSERT OR REPLACE INTO ids VALUES (?, ?)", id, pg_id)

    async def get_pg_id(self, id: int) -> int | None:
        """Return the pg_id mapped to a Discord user id, or None if unmapped.

        Bug fix: ``fetchone()`` returns None when no row matches, so the old
        ``(await cur.fetchone())[0]`` raised TypeError, which the previous
        ``except (KeyError, ValueError)`` did not catch. Check for None.
        """
        cur = await self.execute("SELECT pg_id FROM ids WHERE id = ?", id)
        row = await cur.fetchone()
        return None if row is None else row[0]

    async def get_member_by_pg_id(self, pg_id: int) -> disnake.Member | None:
        """Return the guild member mapped to *pg_id*, or None if unmapped.

        Bug fix: ``fetchone()`` yields None (not an empty sequence) when no
        row matches, so the old ``len(res) == 0`` raised TypeError; test for
        None explicitly instead.
        """
        cur = await self.execute("SELECT id FROM ids WHERE pg_id = ?", pg_id)
        row = await cur.fetchone()
        if row is None:
            return None
        return await self.server.get_or_fetch_member(row[0])
| 3,937 | 443 | 23 |
50f944da55c2a06a0752b38244ab878b2db49ffd | 273 | py | Python | pyRoovit/pyRoovit.extension/pyRoovit.tab/About.Panel/Sources.stack/pyRevit.pushbutton/script.py | jmcouffin/pyRoovit | acafa74fb6310a1042c703d791b0d53804c1882a | [
"BSD-3-Clause"
] | 5 | 2022-03-14T01:58:34.000Z | 2022-03-21T19:51:09.000Z | pyRoovit/pyRoovit.extension/pyRoovit.tab/About.Panel/Sources.stack/pyRevit.pushbutton/script.py | jmcouffin/pyRoovit | acafa74fb6310a1042c703d791b0d53804c1882a | [
"BSD-3-Clause"
] | 1 | 2022-03-14T10:18:40.000Z | 2022-03-14T15:52:25.000Z | pyRoovit/pyRoovit.extension/pyRoovit.tab/About.Panel/Sources.stack/pyRevit.pushbutton/script.py | jmcouffin/pyRoovit | acafa74fb6310a1042c703d791b0d53804c1882a | [
"BSD-3-Clause"
] | 1 | 2022-03-14T10:15:22.000Z | 2022-03-14T10:15:22.000Z | # import libraries
import clr
import os
# Get and build the pyrevit path
userProfile = os.environ.get("USERPROFILE")
prvPath = userProfile + '\\AppData\\Roaming\\pyRevit-Master\\'
# Load the path
try:
os.startfile(prvPath)
except:
print('The path was not found.') | 21 | 62 | 0.721612 | # import libraries
import clr
import os
# Get and build the pyrevit path
userProfile = os.environ.get("USERPROFILE")
prvPath = userProfile + '\\AppData\\Roaming\\pyRevit-Master\\'
# Load the path
try:
os.startfile(prvPath)
except:
print('The path was not found.') | 0 | 0 | 0 |
f7d1e2dc6d984322b052c5f2fa923962ff65e554 | 3,620 | py | Python | preprocess_xsum.py | tdtrinh11/tas-cus | 3383a63ccba18cce437617e67d0650a9c72d2a48 | [
"Apache-2.0"
] | 3 | 2021-09-19T07:15:40.000Z | 2021-11-19T14:13:51.000Z | preprocess_xsum.py | tdtrinh11/tas-cus | 3383a63ccba18cce437617e67d0650a9c72d2a48 | [
"Apache-2.0"
] | 1 | 2022-01-13T07:53:48.000Z | 2022-01-13T07:53:48.000Z | preprocess_xsum.py | tdtrinh11/tas-cus | 3383a63ccba18cce437617e67d0650a9c72d2a48 | [
"Apache-2.0"
] | 1 | 2022-02-02T11:29:50.000Z | 2022-02-02T11:29:50.000Z | """
Preprocess the XSUM dataset
There are several noisy training instances which do not contain any words in pre-defined vocabulary of NTM.
We remove these instances.
Here are the details about these removed instance:
- instance #37993:
input: Here are our favourites:
target: On Monday, we asked for you to send us your favourite shop pun names.
- instance #47104:
input: Here are some of the Ethiopian runner's greatest feats.
target: Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.
- instance #71767:
input: JANUARYFEBRUARYMARCHAPRILMAYJUNE
target: As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.
- instance #94109:
input: Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim
target: FOOTBALL LEAGUE RESULTS
- instance #95592:
input: KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM
target: Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.
"""
import os
train_input, train_target = [], []
hardcoded_delete_input = ['Here are our favourites:\n', "Here are some of the Ethiopian runner's greatest feats.\n",
'JANUARYFEBRUARYMARCHAPRILMAYJUNE\n',
'Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim\n',
'KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM\n']
hardcoded_delete_target = ['On Monday, we asked for you to send us your favourite shop pun names.\n',
'Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.\n',
'As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.\n',
'FOOTBALL LEAGUE RESULTS\n',
'Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.\n']
with open(f"data/xsum/train.source", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_input:
train_input.append(line)
with open(f"data/xsum/train.target", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_target:
train_target.append(line)
print(f"there are {len(train_input)} in the new source file")
print(f"there are {len(train_target)} in the new target file")
if os.path.exists("data/xsum/train.source"):
os.remove("data/xsum/train.source")
if os.path.exists("data/xsum/train.target"):
os.remove("data/xsum/train.target")
with open(f"data/xsum/train.source", "w", encoding='utf8') as f:
for item in train_input:
f.write(item)
with open(f"data/xsum/train.target", "w", encoding='utf8') as f:
for item in train_target:
f.write(item)
| 55.692308 | 247 | 0.699724 | """
Preprocess the XSUM dataset
There are several noisy training instances which do not contain any words in pre-defined vocabulary of NTM.
We remove these instances.
Here are the details about these removed instance:
- instance #37993:
input: Here are our favourites:
target: On Monday, we asked for you to send us your favourite shop pun names.
- instance #47104:
input: Here are some of the Ethiopian runner's greatest feats.
target: Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.
- instance #71767:
input: JANUARYFEBRUARYMARCHAPRILMAYJUNE
target: As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.
- instance #94109:
input: Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim
target: FOOTBALL LEAGUE RESULTS
- instance #95592:
input: KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM
target: Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.
"""
import os
train_input, train_target = [], []
hardcoded_delete_input = ['Here are our favourites:\n', "Here are some of the Ethiopian runner's greatest feats.\n",
'JANUARYFEBRUARYMARCHAPRILMAYJUNE\n',
'Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim\n',
'KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM\n']
hardcoded_delete_target = ['On Monday, we asked for you to send us your favourite shop pun names.\n',
'Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.\n',
'As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.\n',
'FOOTBALL LEAGUE RESULTS\n',
'Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.\n']
with open(f"data/xsum/train.source", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_input:
train_input.append(line)
with open(f"data/xsum/train.target", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_target:
train_target.append(line)
print(f"there are {len(train_input)} in the new source file")
print(f"there are {len(train_target)} in the new target file")
if os.path.exists("data/xsum/train.source"):
os.remove("data/xsum/train.source")
if os.path.exists("data/xsum/train.target"):
os.remove("data/xsum/train.target")
with open(f"data/xsum/train.source", "w", encoding='utf8') as f:
for item in train_input:
f.write(item)
with open(f"data/xsum/train.target", "w", encoding='utf8') as f:
for item in train_target:
f.write(item)
| 0 | 0 | 0 |
d247519e5c1cf58a5939d0ce0c852ed2f4c4d179 | 6,026 | py | Python | behavioral/model_comparison.py | bobaseb/neural_link_SV_iDE | ff45ec4850dd40e2bdfc153efa45575871cedbd9 | [
"MIT"
] | 3 | 2020-02-19T07:42:54.000Z | 2021-12-29T13:28:07.000Z | behavioral/model_comparison.py | bobaseb/neural_link_SV_iDE | ff45ec4850dd40e2bdfc153efa45575871cedbd9 | [
"MIT"
] | null | null | null | behavioral/model_comparison.py | bobaseb/neural_link_SV_iDE | ff45ec4850dd40e2bdfc153efa45575871cedbd9 | [
"MIT"
] | 3 | 2020-04-02T11:00:45.000Z | 2021-12-29T13:28:35.000Z | import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from statsmodels.tools.eval_measures import bic
import time
from scipy.stats import pearsonr
parent_dir = '/media/seb/HD_Numba_Juan/Dropbox/postdoc/NARPS/preprint1'
data_fn = parent_dir + '/participants_and_model.csv'
df = pd.read_csv(data_fn)
#list(df.columns.values)
#Check if the p_accept aligns with 4 levels of DV
DV_4levels_all = df[['participant_response','p_accept']]
DV_4levels_all_mn = DV_4levels_all.groupby(['participant_response']).mean()['p_accept']
DV_4levels_all_std = DV_4levels_all.groupby(['participant_response']).std()['p_accept']
DV_4levels_all_ranks = np.argsort(np.argsort(DV_4levels_all_mn.values)) #==[3,0,2,1]
DV_4levels_per_sub_mn = df.groupby(['participant_response','ID']).mean()['p_accept'].unstack(level=0)
DV_4levels_per_sub_mn = DV_4levels_per_sub_mn.drop([13,25,30,56])
bic_score_full = 0
bic_score_intercept = 0
bic_score_gain = 0
bic_score_loss = 0
num_subs = 0
good_ranks = 0
all_coefs = []
bic_all = []
bic_ranks = []
bad_subs_full_model = []
bad_bic_full_model = []
bad_rank_subs = []
bad_ranks = []
bad_probs = []
for sub in np.unique(df.ID):
if sub == 13 or sub == 25 or sub==30 or sub==56:
print('sub: ', sub, 'excluded')
continue
sub_df = df[df.ID==sub]
#Check if the p_accept aligns with 4 levels of DV
DV_vals = DV_4levels_per_sub_mn.loc[sub].values
nan_idx = np.where(np.isnan(DV_vals))[0]
DV_vals2 = [x for x in DV_vals if str(x) != 'nan']
DV_4levels_sub_ranks = np.argsort(np.argsort(DV_vals2))
DV_4levels_all_ranks2 = np.argsort(np.argsort(np.delete(DV_4levels_all_mn.values, nan_idx)))
num_subs += 1
if (DV_4levels_sub_ranks==DV_4levels_all_ranks2).all():
good_ranks += 1
else:
bad_rank_subs.append(sub)
bad_ranks.append(DV_4levels_sub_ranks)
bad_probs.append(DV_vals2)
#Run the logistic regressions
X = sub_df[['gain','loss']]
X['intercept'] = 1.0
y = sub_df.accept
#Run the full model
model_full = sm.Logit(y, X, missing='drop')
result_full = model_full.fit()
#result.summary()
coefficients_full = np.array(result_full.params)
all_coefs.append(coefficients_full)
bic_score_full += bic(result_full.llf,len(y),len(coefficients_full))
#Run the intercept only
model_intercept = sm.Logit(y, X['intercept'], missing='drop')
result_intercept = model_intercept.fit()
bic_score_intercept += bic(result_intercept.llf,len(y),1)
#Run intercept & gain
model_gain = sm.Logit(y, X[['gain', 'intercept']], missing='drop')
result_gain = model_gain.fit()
bic_score_gain += bic(result_gain.llf,len(y),2)
#Run intercept & loss
model_loss = sm.Logit(y, X[['loss', 'intercept']], missing='drop')
result_loss = model_loss.fit()
bic_score_loss += bic(result_loss.llf,len(y),2)
bic_per_sub = [bic(result_full.llf,len(y),len(coefficients_full)), bic(result_intercept.llf,len(y),1),
bic(result_gain.llf,len(y),2), bic(result_loss.llf,len(y),2)]
bic_all.append(bic_per_sub)
bic_ranks.append(np.argmin(bic_per_sub))
if np.argmin(bic_per_sub)!=0: #0th index is the full model
bad_subs_full_model.append(sub)
bad_bic_full_model.append(bic_per_sub)
print('proportion of good ranks: ', good_ranks/float(num_subs)) #just 2 subs have strongly rejected inverted with weakly rejected
print('full, gain, loss, intercept')
print(bic_score_full, bic_score_gain, bic_score_loss, bic_score_intercept)
#full model wins for everyone
print('correlation between loss and gains coefficients:')
# Bug fix: at this point all_coefs is still a plain Python list (np.vstack
# only happens later, in the plotting section), so all_coefs[:,0] raised
# TypeError. Stack into an array locally before computing the correlation.
_coef_mat = np.vstack(all_coefs)
print(pearsonr(_coef_mat[:, 0], _coef_mat[:, 1]))
print('DV levels of p_accept:')
print(DV_4levels_all_mn)
print(DV_4levels_all_std)
#Time for plotting
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["figure.figsize"] = (20,10)
offset_points=(-5, -5)
fs = 18
#fig.canvas.draw()
#Plot BICs
plt.subplot(1, 3, 1)
bic_all = np.vstack(bic_all)
bic_all2 = np.hstack([bic_all[:,1:],bic_all[:,0].reshape(len(bic_all[:,0]),1)])
#bic_labels = np.tile(['Gain & Loss','Baseline','Gain only', 'Loss only'],len(bic_all))
bic_labels = np.tile(['Baseline\n (Intercept only)','Gain', 'Loss','Full\n (Gain & Loss)'],len(bic_all2))
sns.set_palette(sns.color_palette("PuBu"))
sns.stripplot(bic_labels, bic_all2.flatten(), jitter=True)
sns.despine()
#plt.plot(bic_labels, bic_all2.flatten(), '.')
plt.xlabel('Behavioral model', fontsize=fs)
plt.ylabel('Bayesian Information Criterion\n (BIC)', fontsize=fs)
plt.annotate('a', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)
#Plot the gain/loss coefficients
plt.subplot(1, 3, 2)
all_coefs = np.vstack(all_coefs)
plt.plot(all_coefs[:,0], all_coefs[:,1], 'k.')
plt.xlabel('Gain Coefficient\n (Full model)', fontsize=fs)
plt.ylabel('Loss Coefficient\n (Full model)', fontsize=fs)
plt.annotate('b', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)
#Plot DV levels
plt.subplot(1, 3, 3)
DV_4levels_for_plot = DV_4levels_per_sub_mn.values[:,[0,2,3,1]]
#DV_labels = np.tile(['Strongly accept','Strongly reject','Weakly accept', 'Weakly reject'],len(DV_4levels_per_sub_mn.values))
DV_labels = np.tile(['Strongly\n accept', 'Weakly\n accept', 'Weakly\n reject', 'Strongly\n reject'],len(DV_4levels_per_sub_mn.values))
#sns.palplot(sns.color_palette("RdGy_r"))
sns.set_palette(sns.color_palette("RdYlGn_r"))
sns.stripplot(DV_labels, DV_4levels_for_plot.flatten(), jitter=True)
sns.despine()
#plt.plot(DV_labels, DV_4levels_for_plot.flatten(), '.')
plt.xlabel('Participant response', fontsize=fs)
plt.ylabel('Mean probability of accepting gamble\n (Full model)', fontsize=fs)
plt.annotate('c', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)
#plt.savefig(parent_dir + '/figs/behavioral_model.png', bbox_inches='tight', dpi=300)
plt.show()
| 40.716216 | 146 | 0.724859 | import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from statsmodels.tools.eval_measures import bic
import time
from scipy.stats import pearsonr
parent_dir = '/media/seb/HD_Numba_Juan/Dropbox/postdoc/NARPS/preprint1'
data_fn = parent_dir + '/participants_and_model.csv'
df = pd.read_csv(data_fn)
#list(df.columns.values)
# ---------------------------------------------------------------------------
# Behavioral model comparison.
# For each subject, fit four logistic regressions of gamble acceptance
# (full: gain + loss + intercept; intercept-only; gain-only; loss-only),
# compare them with BIC, and check that each subject's mean p_accept
# respects the group-level ordering of the four response categories.
# Depends on `df`, `bic`, `sm` (statsmodels) and `pearsonr`, defined
# earlier in this file.
# ---------------------------------------------------------------------------

# Subjects excluded from all analyses (single source of truth; this list was
# previously hard-coded in two separate places).
EXCLUDED_SUBJECTS = (13, 25, 30, 56)

# Check if the p_accept aligns with 4 levels of DV (participant_response)
DV_4levels_all = df[['participant_response', 'p_accept']]
DV_4levels_all_mn = DV_4levels_all.groupby(['participant_response']).mean()['p_accept']
DV_4levels_all_std = DV_4levels_all.groupby(['participant_response']).std()['p_accept']
# Double argsort -> rank of each category's mean p_accept (expected [3, 0, 2, 1]).
DV_4levels_all_ranks = np.argsort(np.argsort(DV_4levels_all_mn.values))
DV_4levels_per_sub_mn = df.groupby(['participant_response', 'ID']).mean()['p_accept'].unstack(level=0)
DV_4levels_per_sub_mn = DV_4levels_per_sub_mn.drop(list(EXCLUDED_SUBJECTS))

# Summed BIC scores across subjects, one per candidate model.
bic_score_full = 0
bic_score_intercept = 0
bic_score_gain = 0
bic_score_loss = 0
num_subs = 0
good_ranks = 0            # subjects whose category ranking matches the group's
all_coefs = []            # per-subject [gain, loss, intercept] coefficients
bic_all = []              # per-subject BICs: [full, intercept, gain, loss]
bic_ranks = []            # index of the winning (lowest-BIC) model per subject
bad_subs_full_model = []  # subjects for whom the full model did NOT win
bad_bic_full_model = []
bad_rank_subs = []        # subjects whose category ranking deviates
bad_ranks = []
bad_probs = []
for sub in np.unique(df.ID):
    if sub in EXCLUDED_SUBJECTS:
        print('sub: ', sub, 'excluded')
        continue
    sub_df = df[df.ID == sub]
    # Check if the p_accept aligns with 4 levels of DV for this subject,
    # dropping categories the subject never used (NaN means).
    DV_vals = DV_4levels_per_sub_mn.loc[sub].values
    nan_idx = np.where(np.isnan(DV_vals))[0]
    DV_vals2 = [x for x in DV_vals if str(x) != 'nan']
    DV_4levels_sub_ranks = np.argsort(np.argsort(DV_vals2))
    DV_4levels_all_ranks2 = np.argsort(np.argsort(np.delete(DV_4levels_all_mn.values, nan_idx)))
    num_subs += 1
    if (DV_4levels_sub_ranks == DV_4levels_all_ranks2).all():
        good_ranks += 1
    else:
        bad_rank_subs.append(sub)
        bad_ranks.append(DV_4levels_sub_ranks)
        bad_probs.append(DV_vals2)
    # Run the logistic regressions.  .copy() avoids pandas'
    # SettingWithCopyWarning when the intercept column is added below.
    X = sub_df[['gain', 'loss']].copy()
    X['intercept'] = 1.0
    y = sub_df.accept
    # Run the full model (gain + loss + intercept)
    model_full = sm.Logit(y, X, missing='drop')
    result_full = model_full.fit()
    coefficients_full = np.array(result_full.params)
    all_coefs.append(coefficients_full)
    bic_score_full += bic(result_full.llf, len(y), len(coefficients_full))
    # Run the intercept only
    model_intercept = sm.Logit(y, X['intercept'], missing='drop')
    result_intercept = model_intercept.fit()
    bic_score_intercept += bic(result_intercept.llf, len(y), 1)
    # Run intercept & gain
    model_gain = sm.Logit(y, X[['gain', 'intercept']], missing='drop')
    result_gain = model_gain.fit()
    bic_score_gain += bic(result_gain.llf, len(y), 2)
    # Run intercept & loss
    model_loss = sm.Logit(y, X[['loss', 'intercept']], missing='drop')
    result_loss = model_loss.fit()
    bic_score_loss += bic(result_loss.llf, len(y), 2)
    bic_per_sub = [bic(result_full.llf, len(y), len(coefficients_full)), bic(result_intercept.llf, len(y), 1),
                   bic(result_gain.llf, len(y), 2), bic(result_loss.llf, len(y), 2)]
    bic_all.append(bic_per_sub)
    bic_ranks.append(np.argmin(bic_per_sub))
    if np.argmin(bic_per_sub) != 0:  # 0th index is the full model
        bad_subs_full_model.append(sub)
        bad_bic_full_model.append(bic_per_sub)

# BUG FIX: all_coefs must be stacked into an (n_subs, 3) ndarray *before* the
# pearsonr call below; the original code applied all_coefs[:, 0] to the raw
# Python list, which raises a TypeError.
all_coefs = np.vstack(all_coefs)

print('proportion of good ranks: ', good_ranks/float(num_subs))  # just 2 subs have strongly rejected inverted with weakly rejected
print('full, gain, loss, intercept')
print(bic_score_full, bic_score_gain, bic_score_loss, bic_score_intercept)
# full model wins for everyone
print('correlation between loss and gains coefficients:')
print(pearsonr(all_coefs[:, 0], all_coefs[:, 1]))
print('DV levels of p_accept:')
print(DV_4levels_all_mn)
print(DV_4levels_all_std)

# --- Plotting --------------------------------------------------------------
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["figure.figsize"] = (20, 10)
offset_points = (-5, -5)  # panel-letter offset from the axes corner, in points
fs = 18                   # font size for axis labels and panel letters

# Panel a: per-subject BICs of the four models
plt.subplot(1, 3, 1)
bic_all = np.vstack(bic_all)
# Reorder columns from [full, intercept, gain, loss] to
# [intercept, gain, loss, full] for display.
bic_all2 = np.hstack([bic_all[:, 1:], bic_all[:, 0].reshape(len(bic_all[:, 0]), 1)])
bic_labels = np.tile(['Baseline\n (Intercept only)', 'Gain', 'Loss', 'Full\n (Gain & Loss)'], len(bic_all2))
sns.set_palette(sns.color_palette("PuBu"))
sns.stripplot(bic_labels, bic_all2.flatten(), jitter=True)
sns.despine()
plt.xlabel('Behavioral model', fontsize=fs)
plt.ylabel('Bayesian Information Criterion\n (BIC)', fontsize=fs)
plt.annotate('a', (1, 1), xytext=offset_points, xycoords='axes fraction', textcoords='offset points', ha='right', va='top', weight="bold", fontsize=fs)

# Panel b: gain vs. loss coefficients from the full model
plt.subplot(1, 3, 2)
plt.plot(all_coefs[:, 0], all_coefs[:, 1], 'k.')
plt.xlabel('Gain Coefficient\n (Full model)', fontsize=fs)
plt.ylabel('Loss Coefficient\n (Full model)', fontsize=fs)
plt.annotate('b', (1, 1), xytext=offset_points, xycoords='axes fraction', textcoords='offset points', ha='right', va='top', weight="bold", fontsize=fs)

# Panel c: mean p_accept per response category
plt.subplot(1, 3, 3)
# Reorder columns to strongly-accept, weakly-accept, weakly-reject,
# strongly-reject for display.
DV_4levels_for_plot = DV_4levels_per_sub_mn.values[:, [0, 2, 3, 1]]
DV_labels = np.tile(['Strongly\n accept', 'Weakly\n accept', 'Weakly\n reject', 'Strongly\n reject'], len(DV_4levels_per_sub_mn.values))
sns.set_palette(sns.color_palette("RdYlGn_r"))
sns.stripplot(DV_labels, DV_4levels_for_plot.flatten(), jitter=True)
sns.despine()
plt.xlabel('Participant response', fontsize=fs)
plt.ylabel('Mean probability of accepting gamble\n (Full model)', fontsize=fs)
plt.annotate('c', (1, 1), xytext=offset_points, xycoords='axes fraction', textcoords='offset points', ha='right', va='top', weight="bold", fontsize=fs)
# plt.savefig(parent_dir + '/figs/behavioral_model.png', bbox_inches='tight', dpi=300)
plt.show()
| 0 | 0 | 0 |
ecebcbe1583f36069a540a75d483c4308fc1b4e4 | 595 | py | Python | code/chapter-1/exercise1_20.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-1/exercise1_20.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-1/exercise1_20.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | import turtle
turtle.forward(300)
turtle.right(120)
turtle.forward(75)
turtle.right(60)
turtle.forward(300)
turtle.right(120)
turtle.forward(75)
turtle.right(150)
turtle.forward(100)
turtle.right(30)
turtle.forward(75)
turtle.left(120)
turtle.forward(300)
turtle.left(60)
turtle.forward(75)
turtle.left(120)
turtle.forward(300)
turtle.left(60)
turtle.forward(75)
turtle.right(150)
turtle.forward(100)
turtle.back(100)
turtle.right(90)
turtle.forward(300)
turtle.left(90)
turtle.forward(100)
turtle.back(100)
turtle.right(30)
turtle.forward(75)
turtle.left(30)
turtle.forward(100)
turtle.done()
| 17 | 19 | 0.779832 | import turtle
# Duplicate copy of the turtle-drawing exercise above (the `import turtle`
# for this copy sits on the preceding dataset-metadata line).  Sequence of
# forward/back moves (pixels) and left/right turns (degrees) tracing a shape.
turtle.forward(300)
turtle.right(120)
turtle.forward(75)
turtle.right(60)
turtle.forward(300)
turtle.right(120)
turtle.forward(75)
turtle.right(150)
turtle.forward(100)
turtle.right(30)
turtle.forward(75)
turtle.left(120)
turtle.forward(300)
turtle.left(60)
turtle.forward(75)
turtle.left(120)
turtle.forward(300)
turtle.left(60)
turtle.forward(75)
turtle.right(150)
turtle.forward(100)
turtle.back(100)
turtle.right(90)
turtle.forward(300)
turtle.left(90)
turtle.forward(100)
turtle.back(100)
turtle.right(30)
turtle.forward(75)
turtle.left(30)
turtle.forward(100)
# Keep the drawing window open until the user closes it.
turtle.done()
| 0 | 0 | 0 |