Dataset schema (39 columns; ⌀ marks columns that can be null). Each record below lists these fields in order, with `content` holding the file source:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
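The trailing `avg_line_length`, `max_line_length`, and `alphanum_fraction` columns are simple functions of `content`, while the `count_*`/`score_*` pairs count language constructs and documentation. A rough, non-authoritative sketch of what the three layout columns likely mean (the treatment of trailing newlines is an assumption, and the scoring recipe for the other columns is not reproduced here):

```python
def layout_stats(content: str) -> dict:
    """Recompute the simple layout columns for one record's `content` field."""
    lines = content.split("\n")
    # `content` is non-empty per the schema (minimum length 5), so the divisions are safe.
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }
```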
c0008a22baca00584adf6ac2a3f849b58fa45823 | 2,147 | py | Python
| lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | ["Apache-2.0"] | null | null | null
| lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | ["Apache-2.0"] | null | null | null
| lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | ["Apache-2.0"] | null | null | null |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2015
"""Create sources table
Revision ID: 22d887e4ec0a
Revises: 1a80adff031a
Create Date: 2015-03-30 11:37:20.737582
"""
from alembic import context, op
import sqlalchemy as sa
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
revision = '22d887e4ec0a'
down_revision = '1a80adff031a'
def upgrade():
op.create_table('sources',
sa.Column('request_id', GUID()),
sa.Column('scope', sa.String(25)),
sa.Column('name', sa.String(255)),
sa.Column('rse_id', GUID()),
sa.Column('dest_rse_id', GUID()),
sa.Column('url', sa.String(2048)),
sa.Column('ranking', sa.Integer),
sa.Column('bytes', sa.BigInteger),
sa.Column('updated_at', sa.DateTime),
sa.Column('created_at', sa.DateTime))
if context.get_context().dialect.name != 'sqlite':
op.create_primary_key('SOURCES_PK', 'sources', ['request_id', 'rse_id', 'scope', 'name'])
op.create_foreign_key('SOURCES_REQ_ID_FK', 'sources', 'requests', ['request_id'], ['id'])
op.create_foreign_key('SOURCES_REPLICAS_FK', 'sources', 'replicas', ['scope', 'name', 'rse_id'], ['scope', 'name', 'rse_id'])
op.create_foreign_key('SOURCES_RSES_FK', 'sources', 'rses', ['rse_id'], ['id'])
op.create_foreign_key('SOURCES_DST_RSES_FK', 'sources', 'rses', ['dest_rse_id'], ['id'])
op.create_check_constraint('SOURCES_CREATED_NN', 'sources', 'created_at is not null')
op.create_check_constraint('SOURCES_UPDATED_NN', 'sources', 'updated_at is not null')
op.create_index('SOURCES_SRC_DST_IDX', 'sources', ['rse_id', 'dest_rse_id'])
def downgrade():
op.drop_table('sources')
| 39.759259 | 133 | 0.633442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.505822 |
c000ce6357216e513a1258f8b804cf4a615522f7 | 4,024 | py | Python
| algorithms/FdGars/FdGars_main.py | ss1004124654/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | ["Apache-2.0"] | 51 | 2021-05-24T08:38:52.000Z | 2022-03-28T13:14:21.000Z
| algorithms/FdGars/FdGars_main.py | aqeelferoze/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | ["Apache-2.0"] | 6 | 2021-06-20T05:21:19.000Z | 2022-02-26T21:58:25.000Z
| algorithms/FdGars/FdGars_main.py | aqeelferoze/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | ["Apache-2.0"] | 18 | 2021-06-01T12:36:51.000Z | 2022-03-30T15:18:34.000Z |
"""
This code is attributed to Yingtong Dou (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)
https://github.com/safe-graph/DGFraud-TF2
"""
import argparse
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras import optimizers
from algorithms.FdGars.FdGars import FdGars
from utils.data_loader import load_data_dblp
from utils.utils import preprocess_adj, preprocess_feature, sample_mask
# init the common args, expect the model specific args
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=123, help='random seed')
parser.add_argument('--epochs', type=int, default=200,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=512,
help='batch size')
parser.add_argument('--train_size', type=float, default=0.2,
help='training set percentage')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
parser.add_argument('--weight_decay', type=float, default=0.001,
help='weight decay')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--nhid', type=int, default=64,
help='number of hidden units in GCN')
args = parser.parse_args()
# set seed
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
def FdGars_main(support: list,
features: tf.SparseTensor,
label: tf.Tensor, masks: list,
args: argparse.ArgumentParser().parse_args()) -> None:
"""
Main function to train, val and test the model
:param support: a list of the sparse adjacency matrices
:param features: node feature tuple for all nodes {coords, values, shape}
:param label: the label tensor for all nodes
:param masks: a list of mask tensors to obtain the train, val, test data
:param args: additional parameters
"""
model = FdGars(args.input_dim, args.nhid, args.output_dim, args)
optimizer = optimizers.Adam(lr=args.lr)
# train
for epoch in tqdm(range(args.epochs)):
with tf.GradientTape() as tape:
train_loss, train_acc = model([support, features, label, masks[0]])
grads = tape.gradient(train_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
val_loss, val_acc = model([support, features, label, masks[1]],
training=False)
if epoch % 10 == 0:
print(
f"train_loss: {train_loss:.4f}, "
f"train_acc: {train_acc:.4f},"
f"val_loss: {val_loss:.4f},"
f"val_acc: {val_acc:.4f}")
# test
_, test_acc = model([support, features, label, masks[2]], training=False)
print(f"Test acc: {test_acc:.4f}")
if __name__ == "__main__":
# load the data
adj_list, features, [idx_train, _, idx_val, _, idx_test, _], y = \
load_data_dblp(meta=False, train_size=args.train_size)
# convert to dense tensors
train_mask = tf.convert_to_tensor(sample_mask(idx_train, y.shape[0]))
val_mask = tf.convert_to_tensor(sample_mask(idx_val, y.shape[0]))
test_mask = tf.convert_to_tensor(sample_mask(idx_test, y.shape[0]))
label = tf.convert_to_tensor(y, dtype=tf.float32)
# normalize the adj matrix and feature matrix
features = preprocess_feature(features)
support = preprocess_adj(adj_list[0])
# initialize the model parameters
args.input_dim = features[2][1]
args.output_dim = y.shape[1]
args.train_size = len(idx_train)
args.num_features_nonzero = features[1].shape
# cast sparse matrix tuples to sparse tensors
features = tf.cast(tf.SparseTensor(*features), dtype=tf.float32)
support = [tf.cast(tf.SparseTensor(*support), dtype=tf.float32)]
FdGars_main(support, features, label,
[train_mask, val_mask, test_mask], args)
| 36.917431 | 79 | 0.671223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,194 | 0.29672 |
c0014ae28f168834414c0db2df301fe99732fb5d | 849 | py | Python
| problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | ["MIT"] | null | null | null
| problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | ["MIT"] | null | null | null
| problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | ["MIT"] | null | null | null |
""" find the Schwarzschild radius of the Sun in m using pint"""
import pint
class Sun:
""" Class to describe a star based on its mass in terms of solar masses """
def __init__(self, mass):
self.ureg = pint.UnitRegistry()
self.ureg.define("Msolar = 1.98855*10**30 * kilogram")
self.mass = mass * self.ureg.Msolar
def schwarz(self):
""" Find the Schwarzchild radius for the class """
g_newt = self.ureg.newtonian_constant_of_gravitation
msun = self.mass
r_sch = 2 * g_newt * msun / self.ureg.speed_of_light**2
return r_sch.to_base_units()
def schwarz_rad(mass):
""" Given a mass, find the Schwarzschild radius """
star = Sun(mass)
radius = star.schwarz()
return radius
if __name__ == "__main__":
MASS = 1.0
RAD = schwarz_rad(MASS)
print(RAD)
| 29.275862 | 79 | 0.63722 | 537 | 0.632509 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.335689 |
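As a quick sanity check on the record above (not part of the original file): the formula it implements is r_s = 2GM/c^2, which for one solar mass comes out near 2.95 km. A minimal pint-free sketch using the same solar-mass value the `Sun` class registers; the constants here are standard reference values, not taken from the file:

```python
# Rough cross-check of the expected output of problem3a.py.
G = 6.674e-11          # gravitational constant, m^3 kg^-1 s^-2
M_SUN = 1.98855e30     # kg, same Msolar value the Sun class defines
C = 299_792_458.0      # speed of light, m/s

print(2 * G * M_SUN / C**2)   # ~2953 m, i.e. about 2.95 km
```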
c00233a09c18a5f027b1634d9d3dd63a23d04cbb | 1,009 | py | Python
| morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | ["Apache-2.0"] | 8 | 2018-12-08T01:41:58.000Z | 2020-12-21T15:30:12.000Z
| morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | ["Apache-2.0"] | 17 | 2019-02-05T15:01:32.000Z | 2020-04-28T16:17:42.000Z
| morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | ["Apache-2.0"] | 2 | 2018-12-08T05:03:37.000Z | 2019-03-20T07:15:21.000Z |
from ....crud.rulesprovider.base import RulesProvider
from .. import exc
from ..app import App
from ..utils import has_role
from .model import UserCollection, UserModel
class UserRulesProvider(RulesProvider):
context: UserModel
def change_password(self, password: str, new_password: str, secure: bool = True):
context = self.context
if secure and not has_role(self.request, "administrator"):
if not context.validate(password, check_state=False):
raise exc.InvalidPasswordError(context.userid)
context.storage.change_password(context, context.identity.userid, new_password)
def validate(self, password: str, check_state=True) -> bool:
context = self.context
if check_state and context.data["state"] != "active":
return False
return context.storage.validate(context, context.userid, password)
@App.rulesprovider(model=UserModel)
def get_user_rulesprovider(context):
return UserRulesProvider(context)
| 34.793103 | 87 | 0.718533 | 724 | 0.717542 | 0 | 0 | 110 | 0.109019 | 0 | 0 | 30 | 0.029732 |
c003ee8ec790d27dd3cd5b33bab3613edcd51ffa | 1,523 | py | Python
| dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | ["Unlicense"] | 1 | 2020-08-24T00:26:08.000Z | 2020-08-24T00:26:08.000Z
| dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | ["Unlicense"] | null | null | null
| dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | ["Unlicense"] | null | null | null |
from dsa.parsing.line_parsing import line_parser
from dsa.parsing.token_parsing import make_parser
_parser = line_parser(
'Huffman table entry',
make_parser(
'Huffman table entry data',
('integer', 'encoded bit sequence'),
('hexdump', 'decoded bytes')
)
)
class HuffmanTable:
def __init__(self, decode, encode):
self._decode = decode
self._encode = encode
def _decode_gen(self, stream):
read_byte = stream.read(1)[0]
bit_offset = 0
value = 1
while True:
if value in self._decode:
encoded = self._decode[value]
yield encoded
if encoded[-1] == 0:
return
value = 1 # clear composed value
# append a bit to the composed value
value = (value << 1) | ((read_byte >> bit_offset) & 1)
bit_offset += 1
if bit_offset == 8:
bit_offset = 0
read_byte = stream.read(1)[0]
def decode(self, stream):
return b''.join(self._decode_gen(stream))
class Loader:
def __init__(self):
self._decode = {}
self._encode = {}
def line(self, tokens):
compressed, uncompressed = _parser(tokens)[0]
self._decode[compressed] = uncompressed
self._encode[uncompressed] = compressed
def result(self):
return HuffmanTable(self._decode, self._encode)
| 26.258621 | 67 | 0.54432 | 1,205 | 0.791202 | 625 | 0.410374 | 0 | 0 | 0 | 0 | 165 | 0.108339 |
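The decoder in the record above composes bits least-significant-first into `value` behind a leading 1 bit, and stops at an entry whose decoded bytes end in 0. A hypothetical standalone usage sketch, assuming the `HuffmanTable` class is in scope; the table dict is built by hand here rather than loaded through `Loader`, since the `dsa` parsing helpers are not available in this dump:

```python
import io

# Two hand-made entries: bit string "0" -> b"A", bit string "1" -> a 0-byte terminator.
# Keys carry the same leading 1 bit that _decode_gen builds into `value`.
table = HuffmanTable({0b10: b"A", 0b11: b"\x00"}, {})

# Bits come least-significant-first from each byte, so 0b00000010 yields
# bit 0 (decode "A"), then bit 1 (decode the terminator and stop).
print(table.decode(io.BytesIO(bytes([0b00000010]))))   # b'A\x00'
```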
c005b37d484c98345b8c20c5521de817a98327d9 | 891 | py | Python
| src/fake_news_detector/core/data_process/exploration.py | elena20ruiz/FNC | 163cabc245a645e4b39fc328e988cfc765082a22 | ["Apache-2.0"] | 4 | 2018-10-04T13:10:40.000Z | 2020-10-02T09:12:50.000Z
| src/fake_news_detector/core/data_process/exploration.py | elena20ruiz/FNC | 163cabc245a645e4b39fc328e988cfc765082a22 | ["Apache-2.0"] | 15 | 2018-12-14T20:08:28.000Z | 2019-01-07T22:52:43.000Z
| src/fake_news_detector/core/data_process/exploration.py | FNClassificator/FNC | 163cabc245a645e4b39fc328e988cfc765082a22 | ["Apache-2.0"] | null | null | null |
def split_in_three(data_real, data_fake):
min_v = min(data_fake.min(), data_real.min())
max_v = max(data_fake.max(), data_real.max())
tercio = (max_v - min_v) / 3
# Calculate 1/3
th_one = min_v + tercio
# Calculate 2/3
th_two = max_v - tercio
first_f, second_f, third_f = split_data(th_one, th_two, data_fake)
first_r, second_r, third_r = split_data(th_one, th_two, data_real)
total_f = len(data_fake)
fake = [first_f/total_f, second_f/total_f, third_f/total_f]
total_r = len(data_real)
real = [first_r/total_r, second_r/total_r, third_r/total_r]
return fake, real
def split_data(th_one, th_two, data):
first = 0
second = 0
third = 0
for i in data:
if i <= th_one:
third += 1
elif i >= th_two:
first += 1
else:
second +=1
return first, second, third
| 28.741935 | 70 | 0.613917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.03367 |
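Worth noting about the record above: `split_data` puts values at or below the lower threshold into the *third* bucket and values at or above the upper threshold into the *first*, so each returned triple reads from the high end down. A small illustrative call (numpy arrays are an assumption; any object with `min()`/`max()` works):

```python
import numpy as np

fake = np.array([0.1, 0.2, 0.9])
real = np.array([0.4, 0.5, 0.6])

# Thresholds span the combined min..max; each triple is [top third, middle, bottom third].
print(split_in_three(real, fake))   # (~[1/3, 0.0, 2/3], [0.0, 1.0, 0.0])
```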
c006629acd80aec892cca41444a68366c027a55b | 6,486 | py | Python
| evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | ["MIT"] | null | null | null
| evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | ["MIT"] | null | null | null
| evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | ["MIT"] | null | null | null |
from unittest.mock import patch
import urllib
from django.urls import reverse
from django.core import mail
from django.conf import settings
from django.test import override_settings
from model_bakery import baker
from evap.evaluation import auth
from evap.evaluation.models import Contribution, Evaluation, UserProfile
from evap.evaluation.tests.tools import WebTest
class LoginTests(WebTest):
csrf_checks = False
@classmethod
def setUpTestData(cls):
cls.external_user = baker.make(UserProfile, email="extern@extern.com")
cls.external_user.ensure_valid_login_key()
cls.inactive_external_user = baker.make(UserProfile, email="inactive@extern.com", is_active=False)
cls.inactive_external_user.ensure_valid_login_key()
evaluation = baker.make(Evaluation, state='published')
baker.make(
Contribution,
evaluation=evaluation,
contributor=cls.external_user,
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
baker.make(
Contribution,
evaluation=evaluation,
contributor=cls.inactive_external_user,
role=Contribution.Role.EDITOR,
textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
)
@override_settings(PAGE_URL='https://example.com')
def test_login_url_generation(self):
generated_url = self.external_user.login_url
self.assertEqual(generated_url, 'https://example.com/key/{}'.format(self.external_user.login_key))
reversed_url = reverse('evaluation:login_key_authentication', args=[self.external_user.login_key])
self.assertEqual(reversed_url, '/key/{}'.format(self.external_user.login_key))
def test_login_url_works(self):
self.assertRedirects(self.app.get(reverse("contributor:index")), "/?next=/contributor/")
url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
old_login_key = self.external_user.login_key
old_login_key_valid_until = self.external_user.login_key_valid_until
page = self.app.get(url_with_key)
self.external_user.refresh_from_db()
self.assertEqual(old_login_key, self.external_user.login_key)
self.assertEqual(old_login_key_valid_until, self.external_user.login_key_valid_until)
self.assertContains(page, 'Login')
self.assertContains(page, self.external_user.full_name)
page = self.app.post(url_with_key).follow().follow()
self.assertContains(page, 'Logout')
self.assertContains(page, self.external_user.full_name)
def test_login_key_valid_only_once(self):
page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.external_user.login_key]))
self.assertContains(page, self.external_user.full_name)
url_with_key = reverse("evaluation:login_key_authentication", args=[self.external_user.login_key])
page = self.app.post(url_with_key).follow().follow()
self.assertContains(page, 'Logout')
page = self.app.get(reverse("django-auth-logout")).follow()
self.assertNotContains(page, 'Logout')
page = self.app.get(url_with_key).follow()
self.assertContains(page, 'The login URL is not valid anymore.')
self.assertEqual(len(mail.outbox), 1) # a new login key was sent
new_key = UserProfile.objects.get(id=self.external_user.id).login_key
page = self.app.post(reverse("evaluation:login_key_authentication", args=[new_key])).follow().follow()
self.assertContains(page, self.external_user.full_name)
def test_inactive_external_users_can_not_login(self):
page = self.app.get(reverse("evaluation:login_key_authentication", args=[self.inactive_external_user.login_key])).follow()
self.assertContains(page, "Inactive users are not allowed to login")
self.assertNotContains(page, "Logout")
def test_login_key_resend_if_still_valid(self):
old_key = self.external_user.login_key
page = self.app.post("/", params={"submit_type": "new_key", "email": self.external_user.email}).follow()
new_key = UserProfile.objects.get(id=self.external_user.id).login_key
self.assertEqual(old_key, new_key)
self.assertEqual(len(mail.outbox), 1) # a login key was sent
self.assertContains(page, "We sent you an email with a one-time login URL. Please check your inbox.")
@override_settings(
OIDC_OP_AUTHORIZATION_ENDPOINT='https://oidc.example.com/auth',
ACTIVATE_OPEN_ID_LOGIN=True,
)
def test_oidc_login(self):
# This should send them to /oidc/authenticate
page = self.app.get("/").click("Login")
# which should then redirect them to OIDC_OP_AUTHORIZATION_ENDPOINT
location = page.headers['location']
self.assertIn(settings.OIDC_OP_AUTHORIZATION_ENDPOINT, location)
parse_result = urllib.parse.urlparse(location)
parsed_query = urllib.parse.parse_qs(parse_result.query)
self.assertIn("email", parsed_query["scope"][0].split(" "))
self.assertIn("/oidc/callback/", parsed_query["redirect_uri"][0])
state = parsed_query["state"][0]
user = baker.make(UserProfile)
# usually, the browser would now open that page and login. Then, they'd be redirected to /oidc/callback
with patch.object(auth.OIDCAuthenticationBackend, 'authenticate', return_value=user, __name__='authenticate'):
page = self.app.get(f"/oidc/callback/?code=secret-code&state={state}")
# The oidc module will now send a request to the oidc provider, asking whether the code is valid.
# We've mocked the method that does that and will just return a UserProfile.
# Thus, at this point, the user should be logged in and be redirected back to the start page.
location = page.headers['location']
parse_result = urllib.parse.urlparse(location)
self.assertEqual(parse_result.path, "/")
page = self.app.get(location)
# A GET here should then redirect to the users real start page.
# This should be a 403 since the user is external and has no course participation
page = page.follow(expect_errors=True)
# user should see the Logout button then.
self.assertIn('Logout', page.body.decode())
| 47 | 130 | 0.707216 | 6,113 | 0.942492 | 0 | 0 | 3,310 | 0.51033 | 0 | 0 | 1,534 | 0.236509 |
c006bcd2ec1c5a47b7a93378891b836502179c96 | 1,827 | py | Python
| gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | ["Apache-2.0"] | 136 | 2020-08-24T08:18:16.000Z | 2022-03-31T13:45:12.000Z
| gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | ["Apache-2.0"] | 5 | 2020-10-07T08:44:36.000Z | 2021-12-17T06:00:57.000Z
| gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | ["Apache-2.0"] | 28 | 2020-08-24T11:07:07.000Z | 2022-01-01T13:07:54.000Z |
import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--image-pattern", default="/data/SPIKE_images/*jpg")
parser.add_argument("--annotation-root", default="/data/SPIKE_annotations")
parser.add_argument("--kaggle_output_path", default="/data/spike.csv")
parser.add_argument("--coco_output_path", default="/data/coco_spike.json")
return parser.parse_args()
def main():
args = parse_args()
img_paths = glob(args.image_pattern)
annotations = []
for img_path in tqdm(img_paths):
ann_path = osp.join(args.annotation_root, (osp.basename(img_path.replace("jpg", "bboxes.tsv"))))
ann = pd.read_csv(ann_path, sep="\t", names=["x_min", "y_min", "x_max", "y_max"])
h, w = cv2.imread(img_path).shape[:2]
ann[["x_min", "x_max"]] = ann[["x_min", "x_max"]].clip(0, w)
ann[["y_min", "y_max"]] = ann[["y_min", "y_max"]].clip(0, h)
ann["height"] = h
ann["width"] = w
ann["bbox_width"] = ann["x_max"] - ann["x_min"]
ann["bbox_height"] = ann["y_max"] - ann["y_min"]
ann = ann[(ann["bbox_width"] > 0) & (ann["bbox_height"] > 0)].copy()
ann["bbox"] = ann[["x_min", "y_min", "bbox_width", "bbox_height"]].values.tolist()
ann["image_id"] = osp.basename(img_path).split(".")[0]
annotations.append(ann)
annotations = pd.concat(annotations)
annotations["source"] = "spike"
print(annotations.head())
annotations[["image_id", "source", "width", "height", "bbox"]].to_csv(args.kaggle_output_path, index=False)
kaggle2coco.main(args.kaggle_output_path, args.coco_output_path)
if __name__ == "__main__":
main()
| 38.0625 | 111 | 0.642036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.267105 |
c008713b35128a47b245f9ad063e4cc7dcc2e046 | 5,090 | py | Python
| shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | 2 | 2017-06-26T18:06:58.000Z | 2017-10-11T21:45:29.000Z
| shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | null | null | null
| shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | null | null | null |
import json
from shuttl import app
from shuttl.tests import testbase
from shuttl.Models.Reseller import Reseller
from shuttl.Models.organization import Organization, OrganizationDoesNotExistException
class OrganizationViewTest(testbase.BaseTest):
def _setUp(self):
pass
def test_index(self):
# rv = self.app.get('/')
# assert 'Shuttl' in rv.data.decode('utf-8')
pass
def test_login(self):
rv = self.login('test')
pass
def login(self, organization):
return self.app.post('/login', data=dict(
organization=organization
), follow_redirects=True)
def test_creation(self):
results = self.app.post("/organization/", data = dict(name="testOrg"))
self.assertEqual(results.status_code, 201)
results2 = self.app.post("/organization/", data = dict(name="testOrg"))
self.assertEqual(results2.status_code, 409)
results = json.loads(results.data.decode())
expected = {
'reseller': {
'directory': '',
'name': 'shuttl',
'_url': 'shuttl.com',
'subdir': '',
'id': 1,
'admins': [],
'organizations': [],
'_price': 10.0
},
'id': 1,
'name': 'testOrg',
'websites': [],
'users': []
}
self.assertEqual(len(Organization.query.all()), 1)
self.assertEqual(len(list(self.reseller.organizations.all())), 1)
self.assertEqual(results, expected)
pass
def test_getAll(self):
self.app.post("/organization/", data = dict(name="testOrg"))
self.app.post("/organization/", data = dict(name="testOrg2"))
self.app.post("/organization/", data = dict(name="testOrg3"))
expected = [
Organization.query.filter(Organization.name == "testOrg").first().serialize(),
Organization.query.filter(Organization.name == "testOrg2").first().serialize(),
Organization.query.filter(Organization.name == "testOrg3").first().serialize()
]
results = self.app.get("/organization/")
results = json.loads(results.data.decode())
self.assertEqual(len(results), 3)
self.assertEqual(expected, results)
pass
def test_get(self):
results = self.app.post("/organization/", data = dict(name="testOrg"))
results_dict = json.loads(results.data.decode())
id = results_dict["id"]
results = self.app.get("/organization/{0}".format(id))
self.assertEqual(results.status_code, 200)
results = json.loads(results.data.decode())
self.assertEqual(results_dict, results)
results = self.app.get("/organization/1234")
self.assertEqual(results.status_code, 404)
pass
def test_patch(self):
results = self.app.post("/organization/", data = dict(name="testOrg"))
reseller = Reseller.Create(name="test3", _url="shuttl2.com")
results_dict = json.loads(results.data.decode())
results = self.app.patch("/organization/{0}".format(results_dict["id"]), data=dict(name="testOrg4"))
self.assertEqual(results.status_code, 200)
self.assertRaises(OrganizationDoesNotExistException, Organization.Get, name="testOrg", vendor=self.reseller)
results = json.loads(results.data.decode())
self.assertEqual(results["name"], "testOrg4")
org = Organization.Get(name="testOrg4", vendor=self.reseller)
self.assertEqual(org.serialize(), results)
results = self.app.patch("/organization/{0}".format(results_dict["id"]), data=dict(vendor=reseller.id))
self.assertRaises(OrganizationDoesNotExistException, Organization.Get, name="testOrg4", vendor=self.reseller)
results = json.loads(results.data.decode())
org = Organization.Get(name="testOrg4", vendor=reseller)
self.assertEqual(org.serialize(), results)
self.assertEqual(len(list(self.reseller.organizations.all())), 0)
self.assertEqual(len(list(reseller.organizations.all())), 1)
results = self.app.patch("/organization/1234", data=dict(vendor=reseller.id))
self.assertEqual(results.status_code, 404)
results = self.app.patch("/organization/", data=dict(vendor=reseller.id))
self.assertEqual(results.status_code, 405)
pass
def test_delete(self):
results = self.app.post("/organization/", data = dict(name="testOrg"))
results2 = self.app.post("/organization/", data = dict(name="testOrg2"))
results = json.loads(results.data.decode())
res3 = self.app.delete("/organization/{0}".format(results["id"]))
self.assertEqual(res3.status_code, 200)
self.assertEqual(len(list(self.reseller.organizations.all())), 1)
res3 = self.app.delete("/organization/")
self.assertEqual(res3.status_code, 405)
res3 = self.app.delete("/organization/1234")
self.assertEqual(res3.status_code, 404)
pass
| 40.72 | 117 | 0.621022 | 4,881 | 0.958939 | 0 | 0 | 0 | 0 | 0 | 0 | 760 | 0.149312 |
c00b0d921904cae0f3219c2a2df1410ec3c0ae18 | 3,477 | py | Python
| emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | ["MIT"] | 193 | 2015-06-20T23:46:05.000Z | 2021-02-16T14:04:29.000Z
| emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | ["MIT"] | 4 | 2015-08-23T15:25:55.000Z | 2016-01-06T11:29:20.000Z
| emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | ["MIT"] | 21 | 2015-07-05T12:20:06.000Z | 2019-07-12T08:07:46.000Z |
# This file contains functions designed for
# loading cron tables and storing new feeds.
from emissary import db
from sqlalchemy import and_
from emissary.controllers.utils import spaceparse
from emissary.controllers.cron import parse_timings
from emissary.models import APIKey, Feed, FeedGroup
def create_feed(log, db, key, group, feed):
"""
Takes a key object, a group name and a dictionary
describing a feed ({name:,url:,schedule:,active:})
and reliably attaches a newly created feed to the key
and group.
"""
if not type(feed) == dict:
log('Unexpected type when creating feed for API key "%s"' % key.name)
return
for i in ['name', 'schedule', 'active', 'url']:
if not i in feed.keys():
log('%s: Error creating feed. Missing "%s" field from feed definition.' % (key.name, i))
return
f = Feed.query.filter(and_(Feed.key == key, Feed.name == feed['name'])).first()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == group)).first()
if f:
if f.group:
log('%s: Error creating feed "%s" in group "%s", feed already exists in group "%s".' % \
(key.name, feed['name'], group, f.group.name))
return
elif fg:
log('%s: %s: Adding feed "%s"' % (key.name, fg.name, f.name))
fg.append(f)
db.session.add(fg)
db.session.add(f)
db.session.commit()
return
if not fg:
log('%s: Creating feed group %s.' % (key.name, group))
fg = FeedGroup(name=group)
key.feedgroups.append(fg)
try:
parse_timings(feed['schedule'])
except Exception, e:
log('%s: %s: Error creating "%s": %s' % \
(key.name, fg.name, feed['name'], e.message))
log('%s: %s: Creating feed "%s"' % (key.name, fg.name, feed['name']))
f = Feed(
name=feed['name'],
url=feed['url'],
active=feed['active'],
schedule=feed['schedule']
)
fg.feeds.append(f)
key.feeds.append(f)
db.session.add(key)
db.session.add(fg)
db.session.add(f)
db.session.commit()
def parse_crontab(filename):
"""
Get a file descriptor on filename and
create feeds and groups for API keys therein.
"""
def log(message):
print message
# read filename into a string named crontab
try:
fd = open(filename, "r")
except OSError:
print "Error opening %s" % filename
raise SystemExit
crontab = fd.read()
fd.close()
# keep a resident api key on hand
key = None
for i, line in enumerate(crontab.split('\n')):
# Set the APIKey we're working with when we find a line starting
# with apikey:
if line.startswith("apikey:"):
if ' ' in line:
key_str = line.split()[1]
key = APIKey.query.filter(APIKey.key == key_str).first()
if not key:
print 'Malformed or unknown API key at line %i in %s: %s' % (i+1, filename, line)
raise SystemExit
else:
print 'Using API key "%s".' % key.name
if line.startswith("http"):
feed = {'active': True}
# Grab the URL and set the string to the remainder
feed['url'] = line.split().pop(0)
line = ' '.join(line.split()[1:])
# Grab names and groups
names = spaceparse(line)
if not names:
print "Error parsing feed or group name at line %i in %s: %s" % (i+1, filename, line)
continue
feed['name'], group = names[:2]
# The schedule should be the last five items
schedule = line.split()[-5:]
try:
parse_timings(schedule)
except Exception, e:
print "Error parsing schedule at line %i in %s: %s" % (i+1, filename, e.message)
continue
feed['schedule'] = ' '.join(schedule)
create_feed(log, db, key, group, feed)
| 27.816 | 91 | 0.6543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,288 | 0.370434 |
c00f497cdfab4a0df082b81815ffb6293fc4eaf2 | 7,661 | py | Python
| vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | ["MIT"] | null | null | null
| vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | ["MIT"] | 10 | 2021-11-30T22:01:30.000Z | 2022-03-18T14:50:08.000Z
| vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | ["MIT"] | null | null | null |
"""Vodgen app"""
from msilib.schema import Directory
import sys
import json
import re
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox,
QFileDialog, QLabel, QLineEdit, QMainWindow, QPlainTextEdit, QPushButton, QVBoxLayout, QWidget)
from videocutter import create_video
from thumbnail import Thumbnail, Player, Config, ImageInfo, MatchInfo
import sys
from os.path import exists
#sys.stdout = open("vodgen.log", "w")
import logging
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.WARNING, filename="./vodgen.log")
if not exists("./characterinfo.json"):
logging.error("characterinfo.json could not be found!")
exit()
if not exists("./config.json"):
logging.error("config.json could not be found!")
exit()
class Error(Exception):
pass
class InvalidRoundName(Error):
pass
class MissingPlayer1Character(Error):
pass
class MissingPlayer2Character(Error):
pass
def formatTitle(title):
game_info = title.split(": ")[1].split(" - ")[0]
tournament_round = ' '.join(game_info.split(' ')[-2:])
#gameRound = gameInfo.split(' ', 2)
game_name = game_info.split(' ')[0]
if "Winners" in game_info:
game_name = game_info.split(' Winners')[0]
elif "Losers" in game_info:
game_name = game_info.split(' Losers')[0]
elif "Grand Finals" in game_info:
game_name = game_info.split(' Grand')[0]
else:
raise InvalidRoundName()
player_info = title.split("-")[1]
team1 = player_info.split("vs")[0].strip()
team1_players = team1.split("(")[0].split(" + ")
team1_characters_search = re.search(r"\(([A-Za-z0-9_, .é+]+)\)", team1)
if team1_characters_search == None:
raise MissingPlayer1Character()
team1_characters = team1_characters_search.group(1).split(", ")[0].split(" + ")
team2 = player_info.split("vs")[1].strip()
team2_players = team2.split("(")[0].split(" + ")
team2_characters_search = re.search(r"\(([A-Za-z0-9_, .é+]+)\)", team2)
if team2_characters_search == None:
raise MissingPlayer2Character
team2_characters = team2_characters_search.group(1).split(", ")[0].split(" + ")
player_names = team1_players + team2_players
player_characters = team1_characters + team2_characters
player_list = []
for x in range(len(player_names)):
if len(player_names) / 2 > x:
team_num = 0
else:
team_num = 1
player_list.append(Player(player_names[x], player_characters[x], team_num, x+1))
return player_list, tournament_round, game_name
class MainWindow(QMainWindow):
"""Main UI window"""
def __init__(self):
super().__init__()
self.setWindowTitle("Vodgen")
layout = QVBoxLayout()
self.choose_stream = QPushButton("Choose stream file")
self.choose_stream.clicked.connect(self.choose_video_file)
self.choose_region = QComboBox()
self.choose_game = QComboBox()
self.choose_banner = QPushButton("Choose Banner")
self.choose_banner.clicked.connect(self.choose_banner_file)
# Adds regions from config to dropdown menu
with open('config.json', encoding="utf-8") as file:
config = json.load(file)
for attribute, _ in config["tournament"].items():
self.choose_region.addItem(attribute)
for attribute, _ in config["game"].items():
self.choose_game.addItem(attribute)
self.only_thumbnails = QCheckBox("Only thumbnails")
self.create_videos_button = QPushButton("Generate VoDs and thumbnails")
self.create_videos_button.clicked.connect(self.create_all)
self.textbox = QPlainTextEdit()
self.textbox.resize(280,40)
self.choose_codec = QComboBox()
self.choose_codec.addItem("")
self.choose_codec.addItem("h264_nvenc")
self.choose_codec.addItem("AMF")
self.choose_stream_label = QLabel("")
self.choose_banner_label = QLabel("")
layout.addWidget(self.choose_region)
layout.addWidget(self.choose_game)
layout.addWidget(self.choose_stream)
layout.addWidget(self.choose_stream_label)
layout.addWidget(self.choose_banner)
layout.addWidget(self.choose_banner_label)
layout.addWidget(self.textbox)
layout.addWidget(self.only_thumbnails)
layout.addWidget(self.choose_codec)
layout.addWidget(self.create_videos_button)
widget = QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
def choose_video_file(self):
"""Choose file helper method"""
self.video_path = QFileDialog.getOpenFileName(self, "Select File", filter="MP4 (*.mp4)")
self.choose_stream_label.setText(self.video_path[0])
def choose_banner_file(self):
"""Choose file helper method"""
self.banner_path = QFileDialog.getOpenFileName(self, "Select File", filter="PNG (*.png)")
self.choose_banner_label.setText(self.banner_path[0])
def choose_dir(self):
"""Choose directory helper method"""
return QFileDialog.getExistingDirectory(self, "Select Directory")
def create_all(self):
if self.textbox.toPlainText() == "":
logging.warning("Input is empty")
return
user_input = self.textbox.toPlainText().split("\n")
for line in user_input:
logging.info(f"Started work on line: {line}")
try:
title = line.split(" ", 2)[2]
start_time = line.split(" ")[0]
end_time = line.split(" ")[1]
except IndexError:
logging.warning(f"Invalid line: {line}")
return 0
try:
player_list, tournament_round, game_name, = formatTitle(title)
except InvalidRoundName:
logging.warning("Invalid tournament round name on line: " + line )
return 0
except MissingPlayer1Character:
logging.warning("Missing player 1 character name on line: " + line)
return 0
except MissingPlayer2Character:
logging.warning("Missing player 2 character name on line: " + line)
return 0
match = MatchInfo(str(self.choose_game.currentText()), tournament_round)
image_info = ImageInfo()
config = Config(str(self.choose_game.currentText()), str(self.choose_region.currentText()))
windows_title = title.replace("|", "¤")
windows_title = windows_title.replace(":", "#")
new_thumbnail = Thumbnail(player_list, match, image_info, config, windows_title)
new_thumbnail.create_thumbnail(self.banner_path[0])
logging.info(f"Thumbnail created for line: {line}")
results_directory = ""
with open('config.json', encoding="utf-8") as file:
config = json.load(file)
results_directory = config["tournament"][str(self.choose_region.currentText())]["output_dir"]
if not exists(results_directory):
logging.warning("Output directory could not be found in filesystem")
logging.info("Creating output directory...")
os.mkdir(results_directory)
create_video(self.video_path[0], start_time, end_time, f"{results_directory}/" + windows_title + ".mp4", self.choose_region.currentText())
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec()
#sys.stdout.close()
| 38.497487 | 150 | 0.638951 | 5,109 | 0.666623 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.1631 |
c01048af256422693f245a8c170084866f81cf42 | 1,733 | py | Python
| bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | ["Apache-2.0"] | 2 | 2018-02-09T08:12:34.000Z | 2020-06-16T17:45:33.000Z
| bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | ["Apache-2.0"] | 2 | 2018-02-09T07:54:32.000Z | 2018-03-09T08:51:31.000Z
| bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | ["Apache-2.0"] | 6 | 2018-03-08T11:12:22.000Z | 2019-12-05T12:36:47.000Z |
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import plptools as plp
class PkgDep(plp.PkgDep):
def __init__(self, *kargs, **kwargs):
super(PkgDep, self).__init__(*kargs, **kwargs)
class Package(plp.Package):
def __init__(self, *kargs, **kwargs):
super(Package, self).__init__(*kargs, **kwargs)
class ArtifactoryServer(object):
def __init__(self, name, url, ssl_verify=True):
self.name = name
self.url = url
self.ssl_verify = ssl_verify
class Module(plp.Module):
def __init__(self, *kargs, **kwargs):
super(Module, self).__init__(*kargs, **kwargs)
class BuildStep(object):
def __init__(self, name, command):
self.name = name
self.command = command
class Group(plp.Group):
def __init__(self, *kargs, **kwargs):
super(Group, self).__init__(*kargs, **kwargs)
class BuildStepMap(object):
def __init__(self, name, stepList):
self.name = name
self.stepList = stepList
class BuildSteps(object):
def __init__(self, stepList):
self.stepList = stepList
self.steps = {}
for step in stepList:
self.steps[step.name] = step
def get(self, name): return self.steps.get(name).stepList
| 23.739726 | 74 | 0.705713 | 1,078 | 0.622043 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.342181 |
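A small, hypothetical illustration of how the build-step containers in the record above fit together; the names and commands are made up, since the pulp-tools callers are not shown in this dump:

```python
# Group named step lists under BuildSteps and look one up by name.
compile_steps = [BuildStep("configure", "cmake .."), BuildStep("compile", "make -j4")]
steps = BuildSteps([BuildStepMap("compile", compile_steps)])

for step in steps.get("compile"):
    print(step.name, "->", step.command)   # configure -> cmake .. / compile -> make -j4
```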
c011388324be1ffe30d4e8f33cf11884af58be55 | 300 | py | Python
| leetcode/268_missing_number/268_missing_number.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | ["Apache-2.0"] | null | null | null
| leetcode/268_missing_number/268_missing_number.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | ["Apache-2.0"] | null | null | null
| leetcode/268_missing_number/268_missing_number.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | ["Apache-2.0"] | null | null | null |
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums_set = set(nums)
full_length = len(nums) + 1
for num in range(full_length):
if num not in nums_set:
return num
| 25 | 38 | 0.503333 | 300 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.216667 |
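The record above finds the gap with a set lookup; the same problem is commonly solved with the arithmetic-series sum, which avoids building the set. A brief sketch of that alternative (not part of the original file):

```python
def missing_number_by_sum(nums):
    # 0..n sums to n*(n+1)/2; subtracting the actual sum leaves the missing value.
    n = len(nums)
    return n * (n + 1) // 2 - sum(nums)

assert missing_number_by_sum([3, 0, 1]) == 2
```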
c0115ad71776b57663adb5064185c84d654f136a | 5,177 | py | Python
| tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | ["Apache-2.0"] | 65 | 2015-04-21T10:41:18.000Z | 2022-01-02T16:25:40.000Z
| tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | ["Apache-2.0"] | 85 | 2015-01-20T14:23:52.000Z | 2022-02-19T17:08:46.000Z
| tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | ["Apache-2.0"] | 32 | 2015-03-13T07:43:05.000Z | 2020-04-24T07:56:53.000Z |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the shell console
:author: Thomas Calmant
"""
# Pelix
from pelix.utilities import to_str, to_bytes
# Standard library
import random
import string
import sys
import threading
import time
# Tests
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
try:
import subprocess
except ImportError:
# Can't run the test if we can't start another process
pass
else:
class ShellStandaloneTest(unittest.TestCase):
"""
Tests the console shell when started as a script
"""
@staticmethod
def random_str():
"""
Generates a random string
:return: A random string
"""
data = list(string.ascii_letters)
random.shuffle(data)
return ''.join(data)
def test_echo(self):
"""
Tests the console shell 'echo' method
"""
# Get shell PS1 (static method)
import pelix.shell.core
ps1 = pelix.shell.core._ShellService.get_ps1()
# Start the shell process
process = subprocess.Popen(
[sys.executable, '-m', 'pelix.shell'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Avoid being blocked...
timer = threading.Timer(5, process.terminate)
timer.start()
# Wait for prompt
got = ""
while ps1 not in got:
char = to_str(process.stdout.read(1))
if not char:
if sys.version_info[0] == 2:
self.skipTest("Shell console test doesn't work on "
"Python 2.7 with Travis")
else:
if process.poll():
output = to_str(process.stdout.read())
else:
output = "<no output>"
self.fail("Can't read from stdout (rc={})\n{}"
.format(process.returncode, output))
else:
got += char
# We should be good
timer.cancel()
try:
# Try echoing
data = self.random_str()
# Write command
process.stdin.write(to_bytes("echo {}\n".format(data)))
process.stdin.flush()
# Read result
last_line = to_str(process.stdout.readline()).rstrip()
self.assertEqual(last_line, data, "Wrong output")
# Stop the process
process.stdin.write(to_bytes("exit\n"))
process.stdin.flush()
# Wait for the process to stop (1 second max)
delta = 0
start = time.time()
while delta <= 1:
delta = time.time() - start
if process.poll() is not None:
break
time.sleep(.1)
else:
self.fail("Process took too long to stop")
finally:
try:
# Kill it in any case
process.terminate()
except OSError:
# Process was already stopped
pass
def test_properties(self):
"""
Tests the console shell properties parameter
"""
# Prepare some properties
key1 = self.random_str()[:5]
key2 = self.random_str()[:5]
val1 = self.random_str()
val2 = self.random_str()
# Start the shell process
process = subprocess.Popen(
[sys.executable, '-m', 'pelix.shell',
'-D', '{}={}'.format(key1, val1), '{}={}'.format(key2, val2)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
try:
# List properties, stop and get output
output = to_str(process.communicate(to_bytes("properties"))[0])
found = 0
for line in output.splitlines(False):
if key1 in line:
self.assertIn(val1, line)
found += 1
elif key2 in line:
self.assertIn(val2, line)
found += 1
self.assertEqual(found, 2, "Wrong number of properties")
finally:
try:
# Kill it in any case
process.terminate()
except OSError:
# Process was already stopped
pass
| 30.633136 | 80 | 0.459533 | 4,388 | 0.847595 | 0 | 0 | 259 | 0.050029 | 0 | 0 | 1,398 | 0.270041 |
c01227c807be8c1f87a3e23c71237c6860b77b30 | 708 | py | Python
| src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | ["Apache-2.0"] | 3 | 2015-11-07T14:45:20.000Z | 2018-01-27T13:06:25.000Z
| src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | ["Apache-2.0"] | null | null | null
| src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | ["Apache-2.0"] | null | null | null |
import os
import math
def dotProduct(v1, v2):
return sum(x * y for x, y in zip(v1, v2))
def sigmoid(x):
return 1.0 / (1.0 + math.exp(-x))
def getResourcesPath():
return os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'resources'))
def readTrainModel(filePath, numOutputs = 1):
f = open(filePath, 'r')
res = []
for line in f:
sLine = list(map(float, line.strip().split(" ")))
res.append(((sLine[:-numOutputs]), sLine[-numOutputs:]))
return res
def readMatrix(filePath):
f = open(filePath, 'r')
res = []
for line in f:
res.append(list(map(float, line.strip().split(" "))))
return res
| 27.230769 | 94 | 0.577684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.032486 |
c0126570af4c13122b92f578f7e7cd7fb226531b | 3,530 | py | Python
| scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | ["MIT"] | null | null | null
| scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | ["MIT"] | null | null | null
| scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Juan Quintana
# @Date: 2018-09-26 10:01:02
# @Last Modified by: Juan Quintana
# @Last Modified time: 2018-09-26 16:04:24
"""
XGBOOST Regressor with Bayesian tuning: OPTION 3
In this case it will be used hyperopt-sklearn and his native algorithm
"xgboost_regression".
NOTE: scikit-learn tools is not working for this estimator.
Reference: https://github.com/hyperopt/hyperopt-sklearn
"""
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import sys
sys.path.append('../../')
from datasets import solar
from tools.reader import get_dcol
from preprocessing.scalers.normalization import Scaler
from models.metrics import metrics_regression
from tools.timer import *
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
import time
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
from sklearn.metrics import r2_score, mean_absolute_error
import os
os.environ['OMP_NUM_THREADS'] = str(2)
def main():
# init timer
t = Timer()
t.add('test')
""" DATA PREPARATION """
# load data
data, dcol = solar.load()
# select data
ly = ['y']
lx = ['doy', 'hour', 'LCDC267', 'MCDC267', 'HCDC267', 'TCDC267', 'logAPCP267', 'RH267', 'TMP267', 'DSWRF267']
data = data[lx + ly]
dcol = get_dcol(data, ltarget=ly)
# select one hour data
hour = 11
idata = data[data.hour == hour]
idata.drop('hour', axis=1, inplace=True)
idcol = get_dcol(idata, ltarget=['y'])
# clean
del(data)
del(dcol)
# filtering outliers (ghi vs power)
from preprocessing.outliers import median2D
isoutlier = median2D.launch(idata['DSWRF267'].values, idata.y.values, percent=20.)
idata['isoutlier'] = isoutlier
idata = idata[idata.isoutlier == False]
idata.drop('isoutlier', axis=1, inplace=True)
# prepare data
X = idata[idcol['lx']].values
scaler = Scaler()
y = scaler.fit_transform(idata[idcol['ly']].values).ravel()
print('Prepared data: X: %s y: %s' % (X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print('Prepared data: X_train: %s y_train: %s' % (X_train.shape, y_train.shape))
print('Prepared data: X_test: %s y_test: %s' % (X_test.shape, y_test.shape))
# replace training dataset
X = X_train
y = y_train
""" ESTIMATOR WITH BAYESIAN TUNING """
from hpsklearn import HyperoptEstimator, xgboost_regression
from hyperopt import tpe
# Instantiate a HyperoptEstimator with the search space and number of evaluations
clf = HyperoptEstimator(regressor=xgboost_regression('my_clf'),
preprocessing=[],
algo=tpe.suggest,
max_evals=250,
trial_timeout=300)
clf.fit(X, y)
print(clf.best_model())
y_hat = clf.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('test')
print('\nBayesian tuning -test: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
(dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
# training
y_hat = clf.predict(X)
dscores = metrics_regression(y, y_hat, X.shape[1])
print('Bayesian tuning - train: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
(dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
if __name__ == '__main__':
main()
| 32.990654 | 113 | 0.654958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,200 | 0.339943 |
c0128df7aa9cde7c55a1d29edac835d168b71fd9 | 719 | py | Python
| EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | ["MIT"] | null | null | null
| EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | ["MIT"] | null | null | null
| EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | ["MIT"] | null | null | null |
# The State Department of Meteorology hired you to develop a program that reads an arbitrary number of temperatures and, at the end, reports the lowest and highest temperatures entered as well as the average temperature.
temperaturas = []
while True:
graus = float(input("Digite a temperatura em graus (tecle 0 para parar): "))
temperaturas.append(graus)
media = sum(temperaturas) / len(temperaturas)
if graus == 0:
temperaturas.pop()
print("A maior temperatura registrada: {}°C".format(max(temperaturas)))
print("A menor temperatura registrada: {}°C".format(min(temperaturas)))
print("A temperatura média registrada: {}°C".format(media))
break
| 47.933333 | 236 | 0.709318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.564917 |
c012b837e3e30a6eafa5b481e3b7199fb7fec744 | 369 | py | Python
| src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | ["Apache-2.0"] | null | null | null
| src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | ["Apache-2.0"] | null | null | null
| src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/7.
Description: general-purpose utility functions
Changelog: all notable changes to this file will be documented
"""
import hashlib
def md5_encryption(string: str) -> str:
"""
MD5-hash the given string.
:param string: the string to hash
:return:
"""
m = hashlib.md5()
m.update(string.encode("utf-8"))
return m.hexdigest()
| 18.45 | 66 | 0.631436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.652812 |
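A one-line usage check for the record above, using the RFC 1321 test vector for MD5:

```python
# md5_encryption returns the hex digest of the UTF-8 encoded input.
assert md5_encryption("abc") == "900150983cd24fb0d6963f7d28e17f72"
```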
c0145129a570eee9c990a840954e93502103b3c8 | 14,612 | py | Python
| anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | ["Apache-2.0"] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z
| anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | ["Apache-2.0"] | 10 | 2020-05-11T20:29:28.000Z | 2022-01-13T01:41:27.000Z
| anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | ["Apache-2.0"] | 1 | 2020-01-25T20:08:59.000Z | 2020-01-25T20:08:59.000Z |
"""
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
"""
import re
from parso import ParserSyntaxError, parse
from jedi._compatibility import force_unicode
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typing import TypeVar, LazyGenericClass, \
AbstractAnnotatedClass
from jedi.evaluate.gradual.typing import GenericClass
from jedi.evaluate.helpers import is_string
from jedi.evaluate.compiled import builtin_from_name
from jedi import debug
from jedi import parser_utils
def eval_annotation(context, annotation):
"""
Evaluates an annotation node. This means that it evaluates the part of
`int` here:
foo: int = 3
Also checks for forward references (strings)
"""
context_set = context.eval_node(annotation)
if len(context_set) != 1:
debug.warning("Eval'ed typing index %s should lead to 1 object, "
" not %s" % (annotation, context_set))
return context_set
evaled_context = list(context_set)[0]
if is_string(evaled_context):
result = _get_forward_reference_node(context, evaled_context.get_safe_value())
if result is not None:
return context.eval_node(result)
return context_set
def _evaluate_annotation_string(context, string, index=None):
node = _get_forward_reference_node(context, string)
if node is None:
return NO_CONTEXTS
context_set = context.eval_node(node)
if index is not None:
context_set = context_set.filter(
lambda context: context.array_type == u'tuple' # noqa
and len(list(context.py__iter__())) >= index
).py__simple_getitem__(index)
return context_set
def _get_forward_reference_node(context, string):
try:
new_node = context.evaluator.grammar.parse(
force_unicode(string),
start_symbol='eval_input',
error_recovery=False
)
except ParserSyntaxError:
debug.warning('Annotation not parsed: %s' % string)
return None
else:
module = context.tree_node.get_root_node()
parser_utils.move(new_node, module.end_pos[0])
new_node.parent = context.tree_node
return new_node
def _split_comment_param_declaration(decl_text):
"""
Split decl_text on commas, but group generic expressions
together.
For example, given "foo, Bar[baz, biz]" we return
['foo', 'Bar[baz, biz]'].
"""
try:
node = parse(decl_text, error_recovery=False).children[0]
except ParserSyntaxError:
debug.warning('Comment annotation is not valid Python: %s' % decl_text)
return []
if node.type == 'name':
return [node.get_code().strip()]
params = []
try:
children = node.children
except AttributeError:
return []
else:
for child in children:
if child.type in ['name', 'atom_expr', 'power']:
params.append(child.get_code().strip())
return params
@evaluator_method_cache()
def infer_param(execution_context, param):
contexts = _infer_param(execution_context, param)
evaluator = execution_context.evaluator
if param.star_count == 1:
tuple_ = builtin_from_name(evaluator, 'tuple')
return ContextSet([GenericClass(
tuple_,
generics=(contexts,),
) for c in contexts])
elif param.star_count == 2:
dct = builtin_from_name(evaluator, 'dict')
return ContextSet([GenericClass(
dct,
generics=(ContextSet([builtin_from_name(evaluator, 'str')]), contexts),
) for c in contexts])
return contexts
def _infer_param(execution_context, param):
"""
Infers the type of a function parameter, using type annotations.
"""
annotation = param.annotation
if annotation is None:
# If no Python 3-style annotation, look for a Python 2-style comment
# annotation.
# Identify parameters to function in the same sequence as they would
# appear in a type comment.
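        # For example (hypothetical function, shown only for illustration):
        #     def foo(a, b):  # type: (int, str) -> bool
        #         ...
        # The parenthesised argument list is matched below and split into one
        # declaration per parameter.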
all_params = [child for child in param.parent.children
if child.type == 'param']
node = param.parent.parent
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return NO_CONTEXTS
match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
if not match:
return NO_CONTEXTS
params_comments = _split_comment_param_declaration(match.group(1))
# Find the specific param being investigated
index = all_params.index(param)
# If the number of parameters doesn't match length of type comment,
# ignore first parameter (assume it's self).
if len(params_comments) != len(all_params):
debug.warning(
"Comments length != Params length %s %s",
params_comments, all_params
)
from jedi.evaluate.context.instance import InstanceArguments
if isinstance(execution_context.var_args, InstanceArguments):
if index == 0:
# Assume it's self, which is already handled
return NO_CONTEXTS
index -= 1
if index >= len(params_comments):
return NO_CONTEXTS
param_comment = params_comments[index]
return _evaluate_annotation_string(
execution_context.function_context.get_default_param_context(),
param_comment
)
# Annotations are like default params and resolve in the same way.
context = execution_context.function_context.get_default_param_context()
return eval_annotation(context, annotation)
def py__annotations__(funcdef):
dct = {}
for function_param in funcdef.get_params():
param_annotation = function_param.annotation
if param_annotation is not None:
dct[function_param.name.value] = param_annotation
return_annotation = funcdef.annotation
if return_annotation:
dct['return'] = return_annotation
return dct
@evaluator_method_cache()
def infer_return_types(function_execution_context):
"""
Infers the type of a function's return value,
according to type annotations.
"""
all_annotations = py__annotations__(function_execution_context.tree_node)
annotation = all_annotations.get("return", None)
if annotation is None:
# If there is no Python 3-type annotation, look for a Python 2-type annotation
node = function_execution_context.tree_node
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return NO_CONTEXTS
match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
if not match:
return NO_CONTEXTS
return _evaluate_annotation_string(
function_execution_context.function_context.get_default_param_context(),
match.group(1).strip()
).execute_annotation()
if annotation is None:
return NO_CONTEXTS
context = function_execution_context.function_context.get_default_param_context()
unknown_type_vars = list(find_unknown_type_vars(context, annotation))
annotation_contexts = eval_annotation(context, annotation)
if not unknown_type_vars:
return annotation_contexts.execute_annotation()
type_var_dict = infer_type_vars_for_execution(function_execution_context, all_annotations)
return ContextSet.from_sets(
ann.define_generics(type_var_dict)
if isinstance(ann, (AbstractAnnotatedClass, TypeVar)) else ContextSet({ann})
for ann in annotation_contexts
).execute_annotation()
def infer_type_vars_for_execution(execution_context, annotation_dict):
"""
Some functions use type vars that are not defined by the class, but rather
only defined in the function. See for example `iter`. In those cases we
want to:
1. Search for undefined type vars.
2. Infer type vars with the execution state we have.
3. Return the union of all type vars that have been found.
"""
context = execution_context.function_context.get_default_param_context()
annotation_variable_results = {}
executed_params, _ = execution_context.get_executed_params_and_issues()
for executed_param in executed_params:
try:
annotation_node = annotation_dict[executed_param.string_name]
except KeyError:
continue
annotation_variables = find_unknown_type_vars(context, annotation_node)
if annotation_variables:
# Infer unknown type var
annotation_context_set = context.eval_node(annotation_node)
star_count = executed_param._param_node.star_count
actual_context_set = executed_param.infer(use_hints=False)
if star_count == 1:
actual_context_set = actual_context_set.merge_types_of_iterate()
elif star_count == 2:
# TODO _dict_values is not public.
actual_context_set = actual_context_set.try_merge('_dict_values')
for ann in annotation_context_set:
_merge_type_var_dicts(
annotation_variable_results,
_infer_type_vars(ann, actual_context_set),
)
return annotation_variable_results
def _merge_type_var_dicts(base_dict, new_dict):
for type_var_name, contexts in new_dict.items():
try:
base_dict[type_var_name] |= contexts
except KeyError:
base_dict[type_var_name] = contexts
def _infer_type_vars(annotation_context, context_set):
"""
This function tries to find information about undefined type vars and
returns a dict from type var name to context set.
This is for example important to understand what `iter([1])` returns.
According to typeshed, `iter` returns an `Iterator[_T]`:
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
This functions would generate `int` for `_T` in this case, because it
unpacks the `Iterable`.
"""
type_var_dict = {}
if isinstance(annotation_context, TypeVar):
return {annotation_context.py__name__(): context_set.py__class__()}
elif isinstance(annotation_context, LazyGenericClass):
name = annotation_context.py__name__()
if name == 'Iterable':
given = annotation_context.get_generics()
if given:
for nested_annotation_context in given[0]:
_merge_type_var_dicts(
type_var_dict,
_infer_type_vars(
nested_annotation_context,
context_set.merge_types_of_iterate()
)
)
elif name == 'Mapping':
given = annotation_context.get_generics()
if len(given) == 2:
for context in context_set:
try:
method = context.get_mapping_item_contexts
except AttributeError:
continue
key_contexts, value_contexts = method()
for nested_annotation_context in given[0]:
_merge_type_var_dicts(
type_var_dict,
_infer_type_vars(
nested_annotation_context,
key_contexts,
)
)
for nested_annotation_context in given[1]:
_merge_type_var_dicts(
type_var_dict,
_infer_type_vars(
nested_annotation_context,
value_contexts,
)
)
return type_var_dict
def find_type_from_comment_hint_for(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[1], name)
def find_type_from_comment_hint_with(context, node, name):
assert len(node.children[1].children) == 3, \
"Can only be here when children[1] is 'foo() as f'"
varlist = node.children[1].children[2]
return _find_type_from_comment_hint(context, node, varlist, name)
def find_type_from_comment_hint_assign(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[0], name)
def _find_type_from_comment_hint(context, node, varlist, name):
index = None
if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
# something like "a, b = 1, 2"
index = 0
for child in varlist.children:
if child == name:
break
if child.type == "operator":
continue
index += 1
else:
return []
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return []
match = re.match(r"^#\s*type:\s*([^#]*)", comment)
if match is None:
return []
return _evaluate_annotation_string(
context, match.group(1).strip(), index
).execute_annotation()
def find_unknown_type_vars(context, node):
def check_node(node):
if node.type in ('atom_expr', 'power'):
trailer = node.children[-1]
if trailer.type == 'trailer' and trailer.children[0] == '[':
for subscript_node in _unpack_subscriptlist(trailer.children[1]):
check_node(subscript_node)
else:
type_var_set = context.eval_node(node)
for type_var in type_var_set:
if isinstance(type_var, TypeVar) and type_var not in found:
found.append(type_var)
found = [] # We're not using a set, because the order matters.
check_node(node)
return found
def _unpack_subscriptlist(subscriptlist):
if subscriptlist.type == 'subscriptlist':
for subscript in subscriptlist.children[::2]:
if subscript.type != 'subscript':
yield subscript
else:
if subscriptlist.type != 'subscript':
yield subscriptlist
| 35.990148
| 94
| 0.638653
| 0
| 0
| 307
| 0.02101
| 2,289
| 0.156652
| 0
| 0
| 2,797
| 0.191418
|
c01466f2b1b58f8291be4e054c30cb12aa407427
| 326
|
py
|
Python
|
string_30.py
|
Technicoryx/python_strings_inbuilt_functions
|
78892d043c6c6d65affe3bd4906ba0162c5d6604
|
[
"MIT"
] | null | null | null |
string_30.py
|
Technicoryx/python_strings_inbuilt_functions
|
78892d043c6c6d65affe3bd4906ba0162c5d6604
|
[
"MIT"
] | null | null | null |
string_30.py
|
Technicoryx/python_strings_inbuilt_functions
|
78892d043c6c6d65affe3bd4906ba0162c5d6604
|
[
"MIT"
] | null | null | null |
"""Below Python Programme demonstrate rpartition
functions in a string"""
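# Note (added for clarity): str.rpartition(sep) returns a 3-tuple
# (head, sep, tail) split at the LAST occurrence of sep; if sep is not
# found, it returns ('', '', original_string).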
string = "Python is fun"
# 'is' separator is found
print(string.rpartition('is '))
# 'not' separator is not found
print(string.rpartition('not '))
string = "Python is fun, isn't it"
# splits at the last occurrence of 'is'
print(string.rpartition('is'))
| 21.733333
| 48
| 0.717791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.665644
|
c014e0fef503433734848ae3b6b9307338d2ae08
| 4,583
|
py
|
Python
|
env/lib/python3.8/site-packages/unidecode/x054.py
|
avdhari/enigma
|
b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c
|
[
"MIT"
] | 48
|
2021-11-20T08:17:53.000Z
|
2022-03-19T13:57:15.000Z
|
venv/lib/python3.6/site-packages/unidecode/x054.py
|
mrsaicharan1/iiita-updates
|
a22a0157b90d29b946d0f020e5f76744f73a6bff
|
[
"Apache-2.0"
] | 392
|
2015-07-30T14:37:05.000Z
|
2022-03-21T16:56:09.000Z
|
venv/lib/python3.6/site-packages/unidecode/x054.py
|
mrsaicharan1/iiita-updates
|
a22a0157b90d29b946d0f020e5f76744f73a6bff
|
[
"Apache-2.0"
] | 15
|
2015-10-01T21:31:08.000Z
|
2020-05-05T00:03:27.000Z
|
data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
| 17.694981
| 19
| 0.382064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,036
| 0.662448
|
c0176451a4af477e1653a56580ea468230721ad1
| 2,023
|
py
|
Python
|
merge_sort.py
|
BCLaird/refreshers
|
135e21fdb4396d7b2c558cb08b7e9abf9db7c768
|
[
"Unlicense"
] | null | null | null |
merge_sort.py
|
BCLaird/refreshers
|
135e21fdb4396d7b2c558cb08b7e9abf9db7c768
|
[
"Unlicense"
] | null | null | null |
merge_sort.py
|
BCLaird/refreshers
|
135e21fdb4396d7b2c558cb08b7e9abf9db7c768
|
[
"Unlicense"
] | null | null | null |
import sys
import unittest
def merge(nums1, nums2):
"""
:param nums1: Sorted list of numbers.
:param nums2: Sorted list of numbers.
:return: Combined sorted list of numbers.
"""
merged = list()
while len(nums1) != 0 and len(nums2) != 0:
if nums1[0] <= nums2[0]:
merged.append(nums1.pop(0))
else:
merged.append(nums2.pop(0))
while len(nums1) != 0:
merged.append(nums1.pop(0))
while len(nums2) != 0:
merged.append(nums2.pop(0))
return merged
def merge_sort(nums):
"""
:param nums: List of numbers to sort.
:return: Sorted list of numbers.
"""
    if len(nums) > 1:
        # use integer (floor) division so the slice indices are ints (required in Python 3)
        nums1 = merge_sort(nums[:(len(nums) // 2)])
        nums2 = merge_sort(nums[(len(nums) // 2):])
sorted_nums = merge(nums1, nums2)
return sorted_nums
else:
        # Nothing to do for a list of length 0 or 1.
return nums
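# Illustrative examples (added; values are arbitrary):
#     merge([1, 3], [2, 4])   -> [1, 2, 3, 4]
#     merge_sort([3, 1, 2])   -> [1, 2, 3]
# Note that merge() consumes its inputs via pop(0), so pass copies if the
# original lists are still needed.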
class TestMergeSort(unittest.TestCase):
def test_one(self):
self.assertEqual([1], merge_sort([1]))
def test_two(self):
unsorted = [2, 1]
sorted = [1, 2]
self.assertEqual(sorted, merge_sort(unsorted))
def test_three(self):
unsorted = [2, 3, 1]
sorted = [1, 2, 3]
self.assertEqual(sorted, merge_sort(unsorted))
def test_reversed(self):
unsorted = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
sorted = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(sorted, merge_sort(unsorted))
def test_front_half_sorted(self):
unsorted = [1, 2, 3, 4, 5, 10, 9, 8, 7, 6]
sorted = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(sorted, merge_sort(unsorted))
def test_back_half_sorted(self):
unsorted = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10]
sorted = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(sorted, merge_sort(unsorted))
if __name__ == "__main__":
sys.stdout.write("Bryan Laird merge_sort module. Test mode.\n")
sys.exit(unittest.main())
| 25.2875
| 68
| 0.565991
| 955
| 0.472071
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.161641
|
c017dec4951ea873a5632989e93e1321faa87a5d
| 1,548
|
py
|
Python
|
tests/test_utils/test_textio.py
|
hongxuenong/mmocr
|
e8e3a059f8f2e4fca96af37751c33563fc48e2ba
|
[
"Apache-2.0"
] | 2,261
|
2021-04-08T03:45:41.000Z
|
2022-03-31T23:37:46.000Z
|
tests/test_utils/test_textio.py
|
hongxuenong/mmocr
|
e8e3a059f8f2e4fca96af37751c33563fc48e2ba
|
[
"Apache-2.0"
] | 789
|
2021-04-08T05:40:13.000Z
|
2022-03-31T09:42:39.000Z
|
tests/test_utils/test_textio.py
|
hongxuenong/mmocr
|
e8e3a059f8f2e4fca96af37751c33563fc48e2ba
|
[
"Apache-2.0"
] | 432
|
2021-04-08T03:56:16.000Z
|
2022-03-30T18:44:43.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from mmocr.utils import list_from_file, list_to_file
lists = [
[],
[' '],
['\t'],
['a'],
[1],
[1.],
['a', 'b'],
['a', 1, 1.],
[1, 1., 'a'],
['啊', '啊啊'],
['選択', 'noël', 'Информацией', 'ÄÆä'],
]
def test_list_to_file():
with tempfile.TemporaryDirectory() as tmpdirname:
for i, lines in enumerate(lists):
filename = f'{tmpdirname}/{i}.txt'
list_to_file(filename, lines)
lines2 = [
line.rstrip('\r\n')
for line in open(filename, 'r', encoding='utf-8').readlines()
]
lines = list(map(str, lines))
assert len(lines) == len(lines2)
assert all(line1 == line2 for line1, line2 in zip(lines, lines2))
def test_list_from_file():
with tempfile.TemporaryDirectory() as tmpdirname:
for encoding in ['utf-8', 'utf-8-sig']:
for lineend in ['\n', '\r\n']:
for i, lines in enumerate(lists):
filename = f'{tmpdirname}/{i}.txt'
with open(filename, 'w', encoding=encoding) as f:
f.writelines(f'{line}{lineend}' for line in lines)
lines2 = list_from_file(filename, encoding=encoding)
lines = list(map(str, lines))
assert len(lines) == len(lines2)
assert all(line1 == line2
for line1, line2 in zip(lines, lines2))
| 32.25
| 77
| 0.505168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.152575
|
c01a0bc60c407d254aecde1ca9086a30bc750870
| 2,638
|
py
|
Python
|
License Plate Detection.py
|
jairajsahgal/License_Plate_and_Face_Recognition
|
6a9762f2ca90730e828b3d256418b073b9e80cb0
|
[
"Apache-2.0"
] | null | null | null |
License Plate Detection.py
|
jairajsahgal/License_Plate_and_Face_Recognition
|
6a9762f2ca90730e828b3d256418b073b9e80cb0
|
[
"Apache-2.0"
] | null | null | null |
License Plate Detection.py
|
jairajsahgal/License_Plate_and_Face_Recognition
|
6a9762f2ca90730e828b3d256418b073b9e80cb0
|
[
"Apache-2.0"
] | null | null | null |
import cv2
from Text_Detection import detect_characters, detect_string, detect_words
import re
from live_recognition import facial_recognition
#
def rescale_frame(frame, percent=75):
width = int(frame.shape[1] * percent/ 100)
height = int(frame.shape[0] * percent/ 100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation =cv2.INTER_AREA)
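# Example (added for illustration): rescale_frame on a 640x480 frame with the
# default percent=75 yields a 480x360 frame.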
####################################################
frameWidth = 640
frameHeight = 480
nPlateCascade = cv2.CascadeClassifier("../../Resources/haarcascade_russian_plate_number.xml")
minArea=500
color=(255,0,255)
name=None
# count = 0
state_codes = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA', 'GJ', 'HR', 'HP', 'JH', 'KA', 'KL', 'MP', 'MH', 'MN', 'ML', 'MZ', 'NL', 'OD', 'PB', 'RJ', 'SK', 'TN', 'TR', 'UP', 'WB', 'TS','ap', 'ar', 'as', 'br', 'cg', 'ga', 'gj', 'hr', 'hp', 'jh', 'ka', 'kl', 'mp', 'mh', 'mn', 'ml', 'mz', 'nl', 'od', 'pb', 'rj', 'sk', 'tn', 'tr', 'up', 'wb', 'ts']
######################################################
# cap = cv2.VideoCapture("C:\\Users\\jaira\\PycharmProjects\\opencv_tutorial\\Resources\\test.mp4")
cap=cv2.VideoCapture(0,cv2.CAP_DSHOW)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10,150)
success, img = cap.read()
while success:
success, img = cap.read()
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 4)
for (x, y, w, h) in numberPlates:
area = w*h
if area > minArea:
cv2.rectangle(img=img,pt1=(x,y),pt2=(x+w,y+h),
color=color,thickness=2)
# cv2.putText(img=img,text="Number Plate",org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
imgRoi=img[y:y+h,x:x+w]
cv2.moveWindow("ROI",40,30)
cv2.imshow(winname="ROI",mat=imgRoi)
temp=detect_words(imgRoi)
for i in state_codes:
if i in temp:
temp2 = ''.join(ch for ch in temp if ch.isalnum() and ch!="." and ch!="_")
if temp[-2:].isnumeric() and temp[2:4].isnumeric() and len(temp)==10:
cv2.putText(img=img,text=temp,org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
print(temp)
                        if name is None:
name,face_img=facial_recognition(img)
cv2.imshow("Face Recognition",face_img)
cv2.imshow("Result", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# except:
# break
cv2.destroyAllWindows()
| 41.873016
| 338
| 0.559515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.26232
|
c01a696f221a5d8d2da0a1df4899941c79bacd5a
| 17,268
|
py
|
Python
|
LogParser/LTEV2.py
|
a22057916w/python_advance
|
c964ad3237b503f5ef83e1add12d8007113690b1
|
[
"MIT"
] | null | null | null |
LogParser/LTEV2.py
|
a22057916w/python_advance
|
c964ad3237b503f5ef83e1add12d8007113690b1
|
[
"MIT"
] | null | null | null |
LogParser/LTEV2.py
|
a22057916w/python_advance
|
c964ad3237b503f5ef83e1add12d8007113690b1
|
[
"MIT"
] | null | null | null |
##! python3
##==============================================================================
## Copyright (c) 2021 COMPAL Electronic Inc. All rights reserved.
## This program contains proprietary and confidential information.
## All rights reserved except as may be permitted by prior written consent.
##
## Compal STiD NPSD Test Program Release Notification.
##
## ModuleName:
## LTE.py (Log to Excel)
##
## Abstract:
##     Parsing log info to an Excel workbook with 4 sheets.
## 1. Read log file: parse -> store (a list of dict)
## 2. Read the INI threshold data: store as dict
## 3. New excel workbook: by openpyxl
## 4. Set worksheet according to Step 1: by dict and DataFrame
## 5. Set condition formating for each sheet
## according to Step 2: by dict
## 6. Save the workbook to xlsx file
##
## Author:
## 25-Oct-2021 Willy Chen
##
## Revision History:
## Rev 1.0.0.1 25-Oct-2021 Willy
## First create.
##==============================================================================
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"
#[ParseLogPath]
g_strLogDir = "./Log/Pass"
class cLogParser:
listKey = ["Power_dBm_CH15", "Power_dBm_CH21", "Power_dBm_CH24", "Current_mA_CH15", "Current_mA_CH21", "Current_mA_CH24", "dBm_LNA_ON", "dBm_LNA_Off",
"Current_mA_3G_CH9750", "Current_mA_3G_CH2787", "Current_mA_2G_CH124", "dBm_CH9750", "dBm_CH2787", "dBm_2G_CH124", "dBm_CH124"]
listInfo, listLTE, listZigbee = [], [], []
def __init__(self):
# get directory names of TryingLog (first layer)
listSN = os.listdir(g_strLogDir)
# iterate through log files in a SN folder (second layer)
self.parseLog(listSN)
# merge data from two different log files
self.mergeLogs()
def parseLog(self, listSN):
printLog("[I][parseLog] ------- Start Parsing Log -------")
strLTEName, strZigbeeName = "GFI20_RF_LTE.log", "GFI20_RF_Zigbee.log"
try:
for strSN in listSN:
dictLTE = {
"SN" : strSN,
"dBm_CH9750" : None,
"dBm_CH2787" : None,
"dBm_2G_CH124" : None,
"Current_mA_3G_CH9750" : None,
"Current_mA_3G_CH2787" : None,
"Current_mA_2G_CH124" : None,
"dBm_CH124" : None }
dictZigbee = {
"SN" : strSN,
"Power_dBm_CH15" : None,
"Power_dBm_CH21" : None,
"Power_dBm_CH24" : None,
"dBm_LNA_ON" : None,
"dBm_LNA_Off" : None,
"Current_mA_CH15" : None,
"Current_mA_CH21" : None,
"Current_mA_CH24" : None }
b_hasLTE, b_hasZigbee = False, False # flag for checking if the target log exists
strSNLog = os.path.join(g_strLogDir, strSN) # set abspath for SN logs
for strLogName in os.listdir(strSNLog):
strLogPath = os.path.join(strSNLog, strLogName)
# check GFI20_RF_LTE.log exists. If not, flag = False and parse only SN.
reMatch = re.fullmatch("^.*RF_LTE\.log", strLogName)
if(reMatch != None):
self.parseLTE(dictLTE, strLogPath, strSN)
b_hasLTE = True
# parse GFI20_RF_Zigbee.log files
reMatch = re.fullmatch("^.*RF_Zigbee\.log", strLogName)
if(reMatch != None):
self.parseZigbee(dictZigbee, strLogPath, strSN)
b_hasZigbee = True
                # if a log was missing, the corresponding dict keeps its initial (None) values
self.listLTE.append(dictLTE)
self.listZigbee.append(dictZigbee)
# if there is no target log file in the folder, parse only SN
if not b_hasLTE:
#listLTE.append({"SN": strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strLTEName))
if not b_hasZigbee:
#listZigbee.append({"SN" : strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strZigbeeName))
printLog("[I][parseLog] ------- Finish Parsing Log -------")
except Exception as e:
printLog("[E][parseLog] Unexpected Error: " + str(e))
def parseLTE(self, dictLTE, strLTEPath, strSN):
printLog("[I][parseLTE] Parse LTE log: %s" % strLTEPath)
try:
listPostfix = [" \n", " A\n", " dBm\n"]
with open(strLTEPath, encoding='big5') as log: # big5 for windows
content = log.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]*"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ LTE_3G Freq 897.4 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[11], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[8], listPostfix[1], 1000, False)
if re.search("-+ LTE_3G Freq 1950 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[12], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[9], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 914.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[13], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[10], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 959.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_RX_RSSI, self.listKey[14], listPostfix[2], 1, True)
except Exception as e:
printLog("[E][parseLTE] Unexpected Error: " + str(e))
def parseZigbee(self, dictZigbee, strZigBeePath, strSN):
printLog("[I][parseZigbee] Parse Zigbee log: %s" % strZigBeePath)
try:
listPostfix = ["dBm\n", " A\n", " dBm\n"]
with open(strZigBeePath, encoding="big5") as Zigbee: # big5 for windows
content = Zigbee.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]* dBm"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ ZIGBEE_2450 Freq 2425 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[0], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[3], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2455 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[1], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[4], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2470 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[2], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[5], listPostfix[1], 1000, False)
if re.search("-+ LNA ON -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[6], listPostfix[2], 1, False)
if re.search("-+ LNA OFF -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[7], listPostfix[2], 1, False)
except Exception as e:
printLog("[E][parseZigbee] Unexpected Error: " + str(e))
def get_log_value(self, cut_content, dictInfo, re_target, strKey, strPostfix, nUnit, b_getMulti):
for line in cut_content:
# search pattern like "Power: (int/float) dBm"
if re.search(re_target, line) != None:
# get the figure of the line like "Power: 8.817 dBm\n"
fValue = eval(line.split(": ")[1].strip(strPostfix))
dictInfo[strKey] = fValue * nUnit
if not b_getMulti:
                    break
    # merge the two lists of dicts into a single list of dicts
def mergeLogs(self):
try:
printLog("[I][mergeLogs] ------- Merging two Log data -------")
            # listLTE and listZigbee both have the same length
self.listInfo = [None] * len(self.listLTE)
for i in range (0, len(self.listLTE)):
self.listLTE[i].update(self.listZigbee[i]) # merge two dict
self.listInfo[i] = self.listLTE[i]
printLog("[I][mergeLogs] ------- Merged two Log data -------")
except Exception as e:
printLog("[E][mergeLogs] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of parsing log to excel |#
#\====================================================================/#
def log_to_excel(self):
printLog("[I][log_to_excel] ------- Parsing Log to Excel -------")
        dictThreshold = {} # store INI threshold data for setting conditional formatting
try:
# ========== get the threshold data from INI ==========
printLog("[I][log_to_excel] ----- INI reading -----")
for key in self.listKey:
dictThreshold[key] = self.readINI(key)
printLog("[I][log_to_excel] ----- INI read -----")
# ========== New Excel workbook and sheets ==========
df_logInfo = pd.DataFrame(self.listInfo) # listInfo -> list of dict
listSheetName = ["Zigbee_Power_Current", "Zigbee_LAN", "LTE_Current", "LTE_dBm"]
listCol = [self.listKey[:6], self.listKey[6:8], self.listKey[8:11], self.listKey[11:15]] # columns for each sheet above
            wb = openpyxl.Workbook() # create a new Excel workbook
wb.remove(wb['Sheet']) # remove the default sheet when start a workbook
printLog("[I][log_to_excel] ----- Excel Sheet Creating -----")
for i in range(0, len(listSheetName)):
self.newSheet(wb, listSheetName[i], df_logInfo[["SN"] + listCol[i]])
printLog("[I][log_to_excel] ----- Excel Sheet Created -----")
            # modify cell font color according to the thresholds parsed from the INI
self.set_threshold_to_excel(wb, dictThreshold)
wb.save('LTEV2.xlsx') # save the worksheet as excel file
printLog("[I][log_to_excel] ------- Parsed Log to Excel -------")
except Exception as e:
printLog("[E][log_to_excel] Unexpected Error: " + str(e))
    # read INI values key by key and store them in dictThreshold
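    # A matching INI section might look like this (hypothetical key and values,
    # shown only to illustrate the "<low>,<high>" format validated below):
    #     [Method1]
    #     Power_dBm_CH15 = -5.0,5.0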
def readINI(self, strKey):
try:
config = configparser.ConfigParser()
config.read(g_strINIPath)
strMethod = 'Method%s' % g_nMethodIndex
strValue = config.get(strMethod, strKey)
# search pattern like "+-(int/float),+-(int/float)"
if re.fullmatch("[+-]?[0-9]+\.?[0-9]*,[+-]?[0-9]+\.?[0-9]*", strValue):
printLog("[I][readINI] %s = %s" % (strKey, strValue))
return strValue
else:
printLog("[W][readINI] Read %s Fail !!" % strKey)
sys.exit("Read %s Fail !!" % strKey)
except Exception as e:
printLog("[E][readINI] Error: %s" % str(e))
sys.exit("Error: %s" % str(e))
# new worksheets by DataFrame
def newSheet(self, workbook, strSheetName, df_SheetCol):
try:
workbook.create_sheet(strSheetName)
for row in dataframe_to_rows(df_SheetCol, index=False, header=True):
workbook[strSheetName].append(row)
printLog("[I][newSheet] Sheet: %s Created" % strSheetName)
except Exception as e:
printLog("[E][newSheet] Unexpected Error: " + str(e))
    # set conditional formatting for sheets using the dictionary containing threshold data
def set_threshold_to_excel(self, workbook, dictThreshold):
try:
printLog("[I][set_threshold_to_excel] ----- threshold setting -----")
# iterate through every worksheet to set conditional formatting
for ws in workbook.worksheets:
printLog("[I][set_threshold_to_excel] setting worksheet: %s" % ws.title)
# iterate from Col 2 since Col 1 is the Serial Number(SN)
for col in ws.iter_cols(min_row=1, max_row=ws.max_row, min_col=2, max_col=ws.max_column):
strStart, strEnd = None, None # set the test range for cell e.g. A1:A10
                    listInterval = [] # set the threshold range for the formula below
# check the column is not empty, col[0] is column name
if len(col) > 1:
                        strStart = col[1].coordinate # set starting cell for threshold testing
strEnd = col[-1].coordinate # set ending cell
                        # get the threshold and store it as an interval for the formula below
                        strThreshold = dictThreshold[col[0].value] # get the test threshold by the column name (col[0])
listInterval = strThreshold.split(",")
red_text = Font(color="9C0006") # font-color: RED
range_string = "%s:%s" % (strStart, strEnd) # the value would be like A1:A10
ws.conditional_formatting.add(range_string,
CellIsRule(operator='notBetween', formula=listInterval, stopIfTrue=True, font=red_text))
printLog("[I][set_threshold_to_excel] ----- threshold set -----")
except Exception as e:
printLog("[E][set_threshold_to_excel] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of printing log of LTE.py |#
#\====================================================================/#
def getDateTimeFormat():
strDateTime = "[%s]" % (time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
return strDateTime
def printLog(strPrintLine):
    fileLog = codecs.open(g_strFileName + ".log", 'a', "utf-8")
print(strPrintLine)
fileLog.write("%s%s\r\n" % (getDateTimeFormat(), strPrintLine))
fileLog.close()
if __name__ == "__main__":
global g_strFileName, g_strINIPath, g_nMethodIndex
g_strFileName = os.path.basename(__file__).split('.')[0]
g_strINIPath = os.path.join(os.getcwd(), g_strFileName + ".ini")
g_nMethodIndex = 1
printLog("========== Start ==========")
printLog("[I][main] Python " + sys.version)
printLog("[I][main] %s.py %s" % (g_strFileName, g_strVersion))
# ------------ find the target file --------------
try:
LogParser = cLogParser()
LogParser.log_to_excel()
except Exception as e:
printLog("[E][main] Unexpected Error: " + str(e))
printLog("========== End ==========")
| 47.180328
| 154
| 0.527044
| 14,502
| 0.83943
| 0
| 0
| 0
| 0
| 0
| 0
| 6,085
| 0.352223
|
c01a9c714b265a55e25bf66dffd00ac40d13d9db
| 1,504
|
py
|
Python
|
cnn/test2.py
|
INFINITSY/darts
|
684f97e407ee044a14c375f4a3078398a4b802bc
|
[
"Apache-2.0"
] | null | null | null |
cnn/test2.py
|
INFINITSY/darts
|
684f97e407ee044a14c375f4a3078398a4b802bc
|
[
"Apache-2.0"
] | null | null | null |
cnn/test2.py
|
INFINITSY/darts
|
684f97e407ee044a14c375f4a3078398a4b802bc
|
[
"Apache-2.0"
] | null | null | null |
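# Plots the number of skip-connections selected over search epochs for DARTS and
# MiLeNAS variants at different learning rates / beta values (descriptive comment
# added; interpretation inferred from the axis labels and legend below).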
import matplotlib.pyplot as plt
import numpy as np
# darts_025 = [0, 0, 0, 0, 2, 5, 6, 7, 8]
darts_025 = [0, 0, 0, 2, 3, 5, 7, 8]
darts_05 = [0, 0, 3, 3, 4, 4, 5, 7, 7]
adas_025_9 = [0, 0, 0, 0, 3, 5, 7]
adas_05_9 = [0, 0, 1, 4, 5, 6, 6, 7, 7, 7, 7]
adas_05_95 = []
adas_05_97 = [0, 0, 0, 2, 4, 4, 4, 4, 4, 6, 8]
mile = [0, 0, 0, 2, 4, 4, 4, 3, 4, 4, 4]
mile_adas_025_9 = [0, 0, 0, 0, 3, 4, 5, 5, 6, 6, 6]
mile_adas_05_9 = [0, 0, 0, 3, 4, 5, 5, 5, 5, 6, 6]
mile_adas_05_95 = [0, 0, 0, 0, 1, 1, 5, 5, 6, 6, 6]
mile_adas_05_97 = [0, 0, 0, 0, 0, 3, 3, 4, 4, 4, 4]
plt.plot(range(0, 36, 5), darts_025, '-o', label='DARTS, lr: 0.025')
# plt.plot(range(0, 41, 5), darts_05, '-o', label='DARTS, lr: 0.05')
#
# # plt.plot(range(0, 31, 5), adas_025_9, '-o', label='DARTS+Adas, lr: 0.025, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_9, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_97, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.97')
plt.plot(range(0, 51, 5), mile, '--o', label='MiLeNAS, lr: 0.025')
plt.plot(range(0, 51, 5), mile_adas_025_9, '--o', label='MiLeNAS+Adas, lr: 0.025, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_9, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_95, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.95')
plt.plot(range(0, 51, 5), mile_adas_05_97, '--o', linewidth=3.0, label='MiLeNAS+Adas, lr: 0.05, beta: 0.97')
plt.xlabel('Epoch')
plt.ylabel('#Skip-connection')
plt.legend()
plt.show()
| 44.235294
| 108
| 0.571809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 610
| 0.405585
|
c01b7158b50aafc1ed3b64cfb1feeaebd488a0fb
| 21,040
|
py
|
Python
|
great_international/panels/capital_invest.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
great_international/panels/capital_invest.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
great_international/panels/capital_invest.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
from wagtail.admin.edit_handlers import (
InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel,
PageChooserPanel,
)
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from core.helpers import make_translated_interface
from core.panels import SearchEngineOptimisationPanel
class InternationalCapitalInvestLandingPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
ImageChooserPanel('hero_image'),
FieldPanel('hero_title'),
FieldPanel('hero_subheading'),
FieldPanel('hero_subtitle'),
FieldPanel('hero_cta_text'),
FieldPanel('hero_cta_link'),
]
),
MultiFieldPanel(
heading="Reason to invest in the UK section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Reason to Invest Title, Reason to Invest Content'),
FieldPanel('reason_to_invest_section_title'),
FieldPanel('reason_to_invest_section_intro'),
FieldPanel('reason_to_invest_section_content'),
ImageChooserPanel('reason_to_invest_section_image'),
FieldPanel('how_we_help_title'),
FieldPanel('how_we_help_intro'),
HelpPanel('Each icon requires corresponding text to show '
'on page'),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('how_we_help_one_icon'),
FieldPanel('how_we_help_one_text'),
]),
MultiFieldPanel([
ImageChooserPanel('how_we_help_two_icon'),
FieldPanel('how_we_help_two_text'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('how_we_help_three_icon'),
FieldPanel('how_we_help_three_text'),
]),
MultiFieldPanel([
ImageChooserPanel('how_we_help_four_icon'),
FieldPanel('how_we_help_four_text'),
]),
]),
FieldPanel('how_we_help_cta_text'),
FieldPanel('how_we_help_cta_link'),
]
),
MultiFieldPanel(
heading="Investment Opportunities by regions",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Region Opportunity Title, 1 Related Region'),
FieldPanel('region_ops_section_title'),
FieldPanel('region_ops_section_intro'),
InlinePanel(
'added_region_card_fields',
label="Region card fields"
),
]
),
MultiFieldPanel(
heading="Informative banner",
children=[
FieldPanel('banner_information')
],
),
MultiFieldPanel(
heading="Related region pages",
classname='collapsible collapsed',
children=[
HelpPanel('Please use this to link to a related region, '
'rather than adding in manually the region title, '
'image and text in the above section when the '
'capital invest region pages are available'),
InlinePanel(
'added_regions',
label="Related Regions"
),
]
),
MultiFieldPanel(
heading="Energy Sector",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Energy Sector Title, Energy Sector Content'),
FieldPanel('energy_sector_title'),
FieldPanel('energy_sector_content'),
ImageChooserPanel('energy_sector_image'),
                HelpPanel('CTA requires text and PDF to show on the page.'),
FieldPanel('energy_sector_cta_text'),
DocumentChooserPanel('energy_sector_pdf_document'),
]
),
MultiFieldPanel(
heading="Homes in England Section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Homes In England Section Title, Title and PDF '
'for each card'),
FieldPanel('homes_in_england_section_title'),
InlinePanel(
'added_homes_in_england_card_fields',
label="Homes In England cards"
)
]
),
MultiFieldPanel(
heading="Contact Section",
classname='collapsible collapsed',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_section_title'),
FieldPanel('contact_section_text'),
FieldPanel('contact_section_cta_text')
]
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestRegionPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
FieldPanel('hero_title'),
ImageChooserPanel('hero_image'),
],
),
FieldPanel('featured_description'),
MultiFieldPanel(
heading="Region summary",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Region Summary Section Content'),
ImageChooserPanel('region_summary_section_image'),
FieldPanel('region_summary_section_intro'),
FieldPanel('region_summary_section_content'),
],
),
MultiFieldPanel(
heading="Investment opportunities",
classname='collapsible collapsed',
children=[
FieldPanel('investment_opps_title'),
FieldPanel('investment_opps_intro'),
]
),
MultiFieldPanel(
heading="Economics Statistics",
classname='collapsible',
children=[
HelpPanel('Required: at least 4 statistics for the section to show'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('economics_stat_1_heading'),
FieldPanel('economics_stat_1_number'),
FieldPanel('economics_stat_1_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_2_heading'),
FieldPanel('economics_stat_2_number'),
FieldPanel('economics_stat_2_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_3_heading'),
FieldPanel('economics_stat_3_number'),
FieldPanel('economics_stat_3_smallprint'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('economics_stat_4_heading'),
FieldPanel('economics_stat_4_number'),
FieldPanel('economics_stat_4_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_5_heading'),
FieldPanel('economics_stat_5_number'),
FieldPanel('economics_stat_5_smallprint'),
]),
MultiFieldPanel([
FieldPanel('economics_stat_6_heading'),
FieldPanel('economics_stat_6_number'),
FieldPanel('economics_stat_6_smallprint'),
]),
]),
],
),
MultiFieldPanel(
heading="Location Statistics",
classname='collapsible',
children=[
HelpPanel('Required: at least 4 statistics for the section to show'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('location_stat_1_heading'),
FieldPanel('location_stat_1_number'),
FieldPanel('location_stat_1_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_2_heading'),
FieldPanel('location_stat_2_number'),
FieldPanel('location_stat_2_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_3_heading'),
FieldPanel('location_stat_3_number'),
FieldPanel('location_stat_3_smallprint'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('location_stat_4_heading'),
FieldPanel('location_stat_4_number'),
FieldPanel('location_stat_4_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_5_heading'),
FieldPanel('location_stat_5_number'),
FieldPanel('location_stat_5_smallprint'),
]),
MultiFieldPanel([
FieldPanel('location_stat_6_heading'),
FieldPanel('location_stat_6_number'),
FieldPanel('location_stat_6_smallprint'),
]),
]),
],
),
MultiFieldPanel(
heading="Extra optional Property and Infrastructure section",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Property and Infrastructure Section Title, '
'Property and Infrastructure Section Content'),
ImageChooserPanel('property_and_infrastructure_section_image'),
FieldPanel('property_and_infrastructure_section_title'),
FieldPanel('property_and_infrastructure_section_content'),
],
),
MultiFieldPanel(
heading="Accordions subsections",
classname='collapsible collapsed',
children=[
HelpPanel('Required: subsections title and at least one title and content for an accordion to show'),
FieldPanel('subsections_title'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('sub_section_one_title'),
ImageChooserPanel('sub_section_one_icon'),
FieldPanel('sub_section_one_content')
]),
MultiFieldPanel([
FieldPanel('sub_section_two_title'),
ImageChooserPanel('sub_section_two_icon'),
FieldPanel('sub_section_two_content')
]),
MultiFieldPanel([
FieldPanel('sub_section_three_title'),
ImageChooserPanel('sub_section_three_icon'),
FieldPanel('sub_section_three_content')
]),
]),
]
),
MultiFieldPanel(
heading="Case study",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Case Study Image, Case Study Title'),
ImageChooserPanel('case_study_image'),
FieldPanel('case_study_title'),
FieldPanel('case_study_text'),
HelpPanel('Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('case_study_cta_text'),
FieldPanel('case_study_cta_link'),
],
),
MultiFieldPanel(
heading="Contact",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_title'),
FieldPanel('contact_text'),
FieldPanel('contact_cta_text'),
FieldPanel('contact_cta_link'),
],
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestOpportunityListingPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
FieldPanel('search_results_title'),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels
)
class CapitalInvestOpportunityPagePanels:
content_panels = [
FieldPanel('title'),
MultiFieldPanel(
heading="Related sector",
classname='collapsible collapsed',
children=[
InlinePanel('related_sectors', label="Related Sectors"),
],
),
MultiFieldPanel(
heading="Related region",
classname='collapsible collapsed',
children=[
PageChooserPanel(
'related_region',
[
'great_international.'
'AboutUkRegionPage'
]
),
],
),
FieldPanel('breadcrumbs_label'),
MultiFieldPanel(
heading="Hero",
children=[
ImageChooserPanel('hero_image'),
FieldPanel('hero_title'),
],
),
MultiFieldPanel(
heading="Opportunity summary",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Opportunity Summary Intro'),
FieldPanel('opportunity_summary_intro'),
FieldPanel('opportunity_summary_content'),
ImageChooserPanel('opportunity_summary_image'),
],
),
MultiFieldPanel(
heading="Opportunity Details",
classname='collapsible',
children=[
HelpPanel('Icons require the corresponding text to show on '
'page'),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('location_icon'),
FieldPanel('location_heading'),
FieldPanel('location'),
]),
MultiFieldPanel([
ImageChooserPanel('project_promoter_icon'),
FieldPanel('project_promoter_heading'),
FieldPanel('project_promoter'),
]),
MultiFieldPanel([
ImageChooserPanel('scale_icon'),
FieldPanel('scale_heading'),
FieldPanel('scale'),
FieldPanel('scale_value'),
]),
]),
FieldRowPanel([
MultiFieldPanel([
ImageChooserPanel('sector_icon'),
FieldPanel('sector_heading'),
InlinePanel('related_sub_sectors',
label="Related Sectors"),
]),
MultiFieldPanel([
ImageChooserPanel('investment_type_icon'),
FieldPanel('investment_type_heading'),
FieldPanel('investment_type'),
]),
MultiFieldPanel([
ImageChooserPanel('planning_status_icon'),
FieldPanel('planning_status_heading'),
FieldPanel('planning_status'),
]),
]),
],
),
MultiFieldPanel(
heading="Project Details",
classname='collapsible',
children=[
HelpPanel('Title requires corresponding text to show on page'),
FieldPanel('project_background_title'),
FieldPanel('project_background_intro'),
FieldRowPanel([
MultiFieldPanel([
FieldPanel('project_description_title'),
FieldPanel('project_description_content'),
]),
MultiFieldPanel([
FieldPanel('project_promoter_title'),
FieldPanel('project_promoter_content'),
]),
]),
ImageChooserPanel('project_image')
],
),
MultiFieldPanel(
heading="Similar projects",
classname='collapsible',
children=[
HelpPanel('Section shows if there are opportunities with the same related sector. '
'They are chosen randomly. Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('similar_projects_cta_text'),
FieldPanel('similar_projects_cta_link'),
],
),
MultiFieldPanel(
heading="Case study",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Case Study Image, Case Study Title'),
ImageChooserPanel('case_study_image'),
FieldPanel('case_study_title'),
FieldPanel('case_study_text'),
HelpPanel('Cta\'s require both text and a link to show '
'on page. '),
FieldPanel('case_study_cta_text'),
FieldPanel('case_study_cta_link'),
],
),
MultiFieldPanel(
heading="Contact",
classname='collapsible',
children=[
HelpPanel('Required fields for section to show: '
'Contact Title, Contact Text'),
FieldPanel('contact_title'),
FieldPanel('contact_text'),
],
),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
class CapitalInvestContactFormPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('breadcrumbs_label'),
FieldPanel('heading'),
FieldPanel('intro'),
FieldPanel('comment'),
FieldPanel('cta_text'),
SearchEngineOptimisationPanel()
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
class CapitalInvestContactFormSuccessPagePanels:
content_panels = [
FieldPanel('title'),
FieldPanel('message_box_heading'),
FieldPanel('message_box_description'),
FieldPanel('what_happens_next_description')
]
settings_panels = [
FieldPanel('slug'),
]
edit_handler = make_translated_interface(
content_panels=content_panels,
settings_panels=settings_panels,
)
| 37.841727
| 117
| 0.506369
| 20,654
| 0.981654
| 0
| 0
| 0
| 0
| 0
| 0
| 6,469
| 0.307462
|
c01b9112e0f0afc9d0edfc412d08f777b3d1b9d7
| 137
|
py
|
Python
|
_6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
_6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py
|
YordanPetrovDS/Python_Fundamentals
|
81163054cd3ac780697eaa43f099cc455f253a0c
|
[
"MIT"
] | null | null | null |
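# Prints the largest multiple of `divisor` that is less than or equal to `bound`,
# if any (e.g. divisor=3, bound=10 prints 9). Descriptive comment added for clarity.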
divisor = int(input())
bound = int(input())
for num in range(bound, 0, -1):
if num % divisor == 0:
print(num)
break
| 17.125
| 31
| 0.540146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c01c83fcecb0cd27f766c4572561b04ffed5866c
| 595
|
py
|
Python
|
Task1C.py
|
benkw26/IA-Flood-Warning-Project
|
ded20ebd52f4d328d7437682cffebf6843d26aa3
|
[
"MIT"
] | null | null | null |
Task1C.py
|
benkw26/IA-Flood-Warning-Project
|
ded20ebd52f4d328d7437682cffebf6843d26aa3
|
[
"MIT"
] | null | null | null |
Task1C.py
|
benkw26/IA-Flood-Warning-Project
|
ded20ebd52f4d328d7437682cffebf6843d26aa3
|
[
"MIT"
] | null | null | null |
from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
def run():
"""Requirements for Task 1C"""
# Build list of stations
stations = build_station_list()
# Store the coordinates of Cambridge City Centre
CambCoord = (52.2053, 0.1218)
    # Store the radius value
radius = 10
near_cambstations = stations_within_radius(stations, CambCoord, radius)
print(sorted([station.name for station in near_cambstations]))
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run()
| 31.315789
| 75
| 0.719328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.314286
|
c01ee7097d54131caabca59efa7cd0ede8253fd5
| 2,072
|
py
|
Python
|
fixture/project.py
|
Sashatq/bugtrack
|
ce8bcac2b9041b5ea34de30a10a3431fc62ec21a
|
[
"Apache-2.0"
] | null | null | null |
fixture/project.py
|
Sashatq/bugtrack
|
ce8bcac2b9041b5ea34de30a10a3431fc62ec21a
|
[
"Apache-2.0"
] | null | null | null |
fixture/project.py
|
Sashatq/bugtrack
|
ce8bcac2b9041b5ea34de30a10a3431fc62ec21a
|
[
"Apache-2.0"
] | null | null | null |
from model.objects import Objects
import time
class ProjectHelper:
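    # Selenium page-object helper for creating, listing and deleting projects
    # through the web UI (descriptive comment added; based on the methods below).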
def __init__(self, app):
self.app = app
project_cache = None
def get_project_list(self):
if self.project_cache is None:
wd = self.app.wd
self.open_manage_project_page()
self.project_cache = []
for row in wd.find_elements_by_xpath("//div[@id='content']/div[2]/table/tbody/tr"):
cells = row.find_elements_by_tag_name("td")
pname = cells[0].text
description = cells[4].text
self.project_cache.append(Objects(pname=pname, description=description))
return list(self.project_cache)
def open_manage_project_page(self):
wd = self.app.wd
wd.find_element_by_link_text("Manage").click()
wd.find_element_by_link_text("Manage Projects").click()
def create(self, objects):
wd = self.app.wd
self.open_manage_project_page()
wd.find_element_by_xpath("//input[@value='Create New Project']").click()
self.fill_form(objects)
wd.find_element_by_xpath("//input[@value='Add Project']").click()
time.sleep(4)
self.project_cache = None
def fill_form(self, objects):
self.change_field_value("name", objects.pname)
self.change_field_value("description", objects.description)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def delete_project(self):
wd = self.app.wd
wd.find_element_by_xpath("//div[@id='sidebar']/ul/li[7]/a/i").click()
wd.find_element_by_link_text("Manage Projects").click()
wd.find_element_by_css_selector("td > a").click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
wd.find_element_by_xpath("//input[@value='Delete Project']").click()
| 37
| 95
| 0.639479
| 2,023
| 0.976351
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.139479
|
c0200d7fd7135bdc552ee8dcbd7eedcc4a90fd2d
| 4,597
|
py
|
Python
|
john_doe/cities/hungary.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/hungary.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/hungary.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
cities = [
'Budapest',
'Debrecen',
'Miskolc',
'Szeged',
'Pecs',
'Zuglo',
'Gyor',
'Nyiregyhaza',
'Kecskemet',
'Szekesfehervar',
'Szombathely',
'Jozsefvaros',
'Paradsasvar',
'Szolnok',
'Tatabanya',
'Kaposvar',
'Bekescsaba',
'Erd',
'Veszprem',
'Erzsebetvaros',
'Zalaegerszeg',
'Kispest',
'Sopron',
'Eger',
'Nagykanizsa',
'Dunaujvaros',
'Hodmezovasarhely',
'Salgotarjan',
'Cegled',
'Ozd',
'Baja',
'Vac',
'Szekszard',
'Papa',
'Gyongyos',
'Kazincbarcika',
'Godollo',
'Gyula',
'Hajduboszormeny',
'Kiskunfelegyhaza',
'Ajka',
'Oroshaza',
'Mosonmagyarovar',
'Dunakeszi',
'Kiskunhalas',
'Esztergom',
'Jaszbereny',
'Komlo',
'Nagykoros',
'Mako',
'Budaors',
'Szigetszentmiklos',
'Tata',
'Szentendre',
'Hajduszoboszlo',
'Siofok',
'Torokszentmiklos',
'Hatvan',
'Karcag',
'Gyal',
'Monor',
'Keszthely',
'Varpalota',
'Bekes',
'Dombovar',
'Paks',
'Oroszlany',
'Komarom',
'Vecses',
'Mezotur',
'Mateszalka',
'Mohacs',
'Csongrad',
'Kalocsa',
'Kisvarda',
'Szarvas',
'Satoraljaujhely',
'Hajdunanas',
'Balmazujvaros',
'Mezokovesd',
'Tapolca',
'Szazhalombatta',
'Balassagyarmat',
'Tiszaujvaros',
'Dunaharaszti',
'Fot',
'Dabas',
'Abony',
'Berettyoujfalu',
'Puspokladany',
'God',
'Sarvar',
'Gyomaendrod',
'Kiskoros',
'Pomaz',
'Mor',
'Sarospatak',
'Batonyterenye',
'Bonyhad',
'Gyomro',
'Tiszavasvari',
'Ujfeherto',
'Nyirbator',
'Sarbogard',
'Nagykata',
'Budakeszi',
'Pecel',
'Pilisvorosvar',
'Sajoszentpeter',
'Szigethalom',
'Balatonfured',
'Hajduhadhaz',
'Kisujszallas',
'Dorog',
'Kormend',
'Marcali',
'Barcs',
'Tolna',
'Tiszafured',
'Kiskunmajsa',
'Tiszafoldvar',
'Albertirsa',
'Nagyatad',
'Tiszakecske',
'Toeroekbalint',
'Koszeg',
'Celldomolk',
'Heves',
'Mezobereny',
'Szigetvar',
'Pilis',
'Veresegyhaz',
'Bicske',
'Edeleny',
'Lajosmizse',
'Kistarcsa',
'Hajdusamson',
'Csorna',
'Nagykallo',
'Isaszeg',
'Sarkad',
'Kapuvar',
'Ullo',
'Siklos',
'Toekoel',
'Maglod',
'Paszto',
'Szerencs',
'Turkeve',
'Szeghalom',
'Kerepes',
'Jaszapati',
'Janoshalma',
'Tamasi',
'Kunszentmarton',
'Hajdudorog',
'Vasarosnameny',
'Solymar',
'Rackeve',
'Derecske',
'Kecel',
'Nadudvar',
'Ocsa',
'Dunafoldvar',
'Fehergyarmat',
'Kiskunlachaza',
'Kunszentmiklos',
'Szentgotthard',
'Devavanya',
'Biatorbagy',
'Kunhegyes',
'Lenti',
'Ercsi',
'Balatonalmadi',
'Polgar',
'Tura',
'Suelysap',
'Fuzesabony',
'Jaszarokszallas',
'Gardony',
'Tarnok',
'Nyiradony',
'Zalaszentgrot',
'Sandorfalva',
'Soltvadkert',
'Nyergesujfalu',
'Bacsalmas',
'Csomor',
'Putnok',
'Veszto',
'Kistelek',
'Zirc',
'Halasztelek',
'Mindszent',
'Acs',
'Enying',
'Letavertes',
'Nyirtelek',
'Szentlorinc',
'Felsozsolca',
'Solt',
'Fegyvernek',
'Nagyecsed',
'Encs',
'Ibrany',
'Mezokovacshaza',
'Ujszasz',
'Bataszek',
'Balkany',
'Sumeg',
'Tapioszecso',
'Szabadszallas',
'Battonya',
'Polgardi',
'Mezocsat',
'Totkomlos',
'Piliscsaba',
'Szecseny',
'Fuzesgyarmat',
'Kaba',
'Pusztaszabolcs',
'Teglas',
'Mezohegyes',
'Jaszladany',
'Tapioszele',
'Aszod',
'Diosd',
'Taksony',
'Tiszalok',
'Izsak',
'Komadi',
'Lorinci',
'Alsozsolca',
'Kartal',
'Dunavarsany',
'Erdokertes',
'Janossomorja',
'Kerekegyhaza',
'Balatonboglar',
'Szikszo',
'Domsod',
'Nagyhalasz',
'Kisber',
'Kunmadaras',
'Berhida',
'Kondoros',
'Melykut',
'Jaszkiser',
'Csurgo',
'Csorvas',
'Nagyszenas',
'Ujkigyos',
'Tapioszentmarton',
'Tat',
'Egyek',
'Tiszaluc',
'Orbottyan',
'Rakoczifalva',
'Hosszupalyi',
'Paty',
'Elek',
'Vamospercs',
'Morahalom',
'Bugyi',
'Emod',
'Labatlan',
'Csakvar',
'Algyo',
'Kenderes',
'Csenger',
'Fonyod',
'Rakamaz',
'Martonvasar',
'Devecser',
'Orkeny',
'Tokaj',
'Tiszaalpar',
'Kemecse',
'Korosladany'
]
| 16.301418
| 24
| 0.513161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,911
| 0.633239
|
c02010efda9ce4c421135232fa4f140efb168b1f
| 969
|
py
|
Python
|
carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py
|
Carbon0-Games/carbon0-web-app
|
068a7223b2717d602944ec561adcde39930cba85
|
[
"MIT"
] | 2
|
2020-10-30T15:07:28.000Z
|
2020-12-22T04:29:50.000Z
|
carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py
|
Carbon0-Games/carbon0-web-app
|
068a7223b2717d602944ec561adcde39930cba85
|
[
"MIT"
] | 45
|
2020-09-22T12:47:55.000Z
|
2022-03-12T00:48:18.000Z
|
carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py
|
Carbon0-Games/carbon0-web-app
|
068a7223b2717d602944ec561adcde39930cba85
|
[
"MIT"
] | 1
|
2020-09-08T15:48:13.000Z
|
2020-09-08T15:48:13.000Z
|
# Generated by Django 3.1.1 on 2020-09-09 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("carbon_quiz", "0009_auto_20200908_2201"),
]
operations = [
migrations.RemoveField(
model_name="mission",
name="description",
),
migrations.RemoveField(
model_name="mission",
name="status",
),
migrations.AddField(
model_name="mission",
name="action",
field=models.CharField(
help_text="Describes what the user needs to do.",
max_length=500,
null=True,
),
),
migrations.AddField(
model_name="mission",
name="clicks_needed",
field=models.IntegerField(
default=1, help_text="Number of the links user needs to click."
),
),
]
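For orientation, a hedged sketch of how the Mission model could look once this migration has been applied. The two field definitions are copied from the AddField operations above; the model body itself is an assumption, not the project's actual models.py.
from django.db import models
class Mission(models.Model):
    # Fields as added by migration 0010; the remaining fields of the real model are unknown.
    action = models.CharField(
        help_text="Describes what the user needs to do.",
        max_length=500,
        null=True,
    )
    clicks_needed = models.IntegerField(
        default=1, help_text="Number of the links user needs to click."
    )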
| 25.5
| 79
| 0.522188
| 876
| 0.904025
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.252838
|
c02051ed0ef783ea63f4159e47ac37ce14107e5a
| 353
|
py
|
Python
|
wstack/cli/input.py
|
CCSGroupInternational/wstack
|
3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e
|
[
"Apache-2.0"
] | null | null | null |
wstack/cli/input.py
|
CCSGroupInternational/wstack
|
3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e
|
[
"Apache-2.0"
] | null | null | null |
wstack/cli/input.py
|
CCSGroupInternational/wstack
|
3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e
|
[
"Apache-2.0"
] | null | null | null |
import json
from ..webstack import run as webstack_run
def process(json_file_list):
for json_filename in json_file_list:
with open(json_filename) as json_file:
json_data = json.load(json_file)
webstack_data = json_data.get('webstack', None)
if webstack_data:
webstack_run(webstack_data)
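A hedged usage sketch of process() above: it takes a list of JSON file paths and forwards the 'webstack' object found in each file to webstack_run. The payload below is a placeholder, since the real webstack schema is not shown here.
import json
import tempfile
payload = {"webstack": {"name": "example-stack"}}  # placeholder content
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(payload, fh)
    json_path = fh.name
# process([json_path])  # would call webstack_run(payload["webstack"])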
| 29.416667
| 59
| 0.668555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.028329
|
c02268e8d6e0cd77362adcfa474291e89d12f983
| 4,396
|
py
|
Python
|
imgtags.py
|
Donearm/scripts
|
ad3429dc4b69e6108f538bf1656216c7a192c9fd
|
[
"OML"
] | 25
|
2015-02-23T00:07:14.000Z
|
2022-03-27T01:57:41.000Z
|
imgtags.py
|
Donearm/scripts
|
ad3429dc4b69e6108f538bf1656216c7a192c9fd
|
[
"OML"
] | null | null | null |
imgtags.py
|
Donearm/scripts
|
ad3429dc4b69e6108f538bf1656216c7a192c9fd
|
[
"OML"
] | 7
|
2015-11-25T22:04:37.000Z
|
2020-02-18T22:11:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2011-2019, Gianluca Fiore
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
###############################################################################
#
# Requirements: Python 3.7 or later, Py3exiv
#
__author__ = "Gianluca Fiore"
__license__ = "GPL"
__version__ = "0.2"
__date__ = "20190912"
__email__ = "forod.g@gmail.com"
__status__ = "beta"
import sys
import argparse
import os.path
import pyexiv2  # provided by the py3exiv2 package; the code below uses it as pyexiv2
def argument_parser():
"""Argument parser"""
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument("-f", "--force",
action="store_true",
help="force writing of tags regardless of them being already present",
dest="force")
cli_parser.add_argument("-i", "--image",
required=True,
action="store",
help="the image",
dest="image")
cli_parser.add_argument("-d", "--delete",
action="store_true",
help="delete all tags present in an image",
dest="delete")
cli_parser.add_argument(action="store",
nargs="*",
help="the tags to be written into the file",
dest="tags")
options = cli_parser.parse_args()
return options
def write_tags(image, key, tags):
"""Write each tags into the iptc key inside an image. Tags must be a list"""
image[key] = pyexiv2.IptcTag(key, tags)
image.write()
def delete_tags(metadata, key):
"""Delete any tags present inside an image"""
try:
metadata.__delitem__(key)
except KeyError:
print(("There's not a %s tag in this image, exiting..." % key))
return 1
def main ():
"""main loop"""
options = argument_parser()
image = os.path.abspath(options.image)
if os.path.isfile(image) and image.endswith(('jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG', 'tiff', 'TIFF')):
m = pyexiv2.ImageMetadata(image)
m.read()
iptckeys = m.iptc_keys
xmpkeys = m.xmp_keys
exifkeys = m.exif_keys
if options.delete:
# delete all tags
try:
k = m['Iptc.Application2.Keywords']
delete_tags(m, 'Iptc.Application2.Keywords')
print("Deleting tags")
m.write()
return 0
except KeyError:
# there are already no tags, skip...
print(("%s has no tags, nothing to delete" % options.image))
return 0
if not options.tags:
# without tags given perhaps the user wants just see the already
# presents tags (if any)
try:
k = m['Iptc.Application2.Keywords']
print(("%s is already tagged with %s " % (options.image, k.value)))
return 0
except:
print(("%s has no tags set" % options.image))
return 0
else:
try:
k = m['Iptc.Application2.Keywords']
if options.force:
# Force switch enabled, write tags without questions
write_tags(m, 'Iptc.Application2.Keywords', options.tags)
else:
print("There are already these tags present:\n")
for t in k.value:
print(t)
s = input("\nDo you want to overwrite them with %s ? [y/n] " % options.tags)
if s == 'y' or s == 'Y':
print("Writing tags")
write_tags(m, 'Iptc.Application2.Keywords', options.tags)
else:
print("Exiting...")
sys.exit(0)
except KeyError:
# there is no previously set tag with this name, pyexiv2 throws KeyError
print("Writing tags")
write_tags(m, 'Iptc.Application2.Keywords', options.tags)
else:
print("No image given")
if __name__ == '__main__':
status = main()
sys.exit(status)
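A hedged usage sketch; the image path and tag values are placeholders. The command lines mirror the argparse options defined above, and the programmatic call reuses write_tags() directly (requires the py3exiv2 bindings and an existing JPEG).
#   python imgtags.py -i photo.jpg holiday beach     # write tags
#   python imgtags.py -i photo.jpg --delete          # remove all keywords
import pyexiv2
meta = pyexiv2.ImageMetadata('photo.jpg')  # placeholder path
meta.read()
write_tags(meta, 'Iptc.Application2.Keywords', ['holiday', 'beach'])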
| 33.30303
| 110
| 0.526615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,841
| 0.41879
|
c024a36ba5d1c4863b44768937e8d76c18a7d61a
| 4,344
|
py
|
Python
|
notes-to-self/trace.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | 4,681
|
2017-03-10T22:38:41.000Z
|
2022-03-31T11:47:44.000Z
|
notes-to-self/trace.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | 2,143
|
2017-03-11T05:58:32.000Z
|
2022-03-31T10:29:00.000Z
|
notes-to-self/trace.py
|
guilledk/trio
|
d09c21df3ffe401ee4314d869d82a886bd776e3c
|
[
"Apache-2.0",
"MIT"
] | 313
|
2017-03-11T05:24:33.000Z
|
2022-03-23T18:26:02.000Z
|
import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work – I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
class Trace(trio.abc.Instrument):
def __init__(self, out):
self.out = out
self.out.write("[\n")
self.ids = count()
self._task_metadata(-1, "I/O manager")
def _write(self, **ev):
ev.setdefault("pid", os.getpid())
if ev["ph"] != "M":
ev.setdefault("ts", trio.current_time() * 1e6)
self.out.write(json.dumps(ev))
self.out.write(",\n")
def _task_metadata(self, tid, name):
self._write(
name="thread_name",
ph="M",
tid=tid,
args={"name": name},
)
self._write(
name="thread_sort_index",
ph="M",
tid=tid,
args={"sort_index": tid},
)
def task_spawned(self, task):
self._task_metadata(task._counter, task.name)
self._write(
name="task lifetime",
ph="B",
tid=task._counter,
)
def task_exited(self, task):
self._write(
name="task lifetime",
ph="E",
tid=task._counter,
)
def before_task_step(self, task):
self._write(
name="running",
ph="B",
tid=task._counter,
)
def after_task_step(self, task):
self._write(
name="running",
ph="E",
tid=task._counter,
)
def task_scheduled(self, task):
try:
waker = trio.lowlevel.current_task()
except RuntimeError:
pass
else:
id = next(self.ids)
self._write(
ph="s",
cat="wakeup",
id=id,
tid=waker._counter,
)
self._write(
cat="wakeup",
ph="f",
id=id,
tid=task._counter,
)
def before_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="B",
tid=-1,
)
def after_io_wait(self, timeout):
self._write(
name=f"I/O wait",
ph="E",
tid=-1,
)
async def child1():
print(" child1: started! sleeping now...")
await trio.sleep(1)
print(" child1: exiting!")
async def child2():
print(" child2: started! sleeping now...")
await trio.sleep(1)
print(" child2: exiting!")
async def parent():
print("parent: started!")
async with trio.open_nursery() as nursery:
print("parent: spawning child1...")
nursery.start_soon(child1)
print("parent: spawning child2...")
nursery.start_soon(child2)
print("parent: waiting for children to finish...")
# -- we exit the nursery block here --
print("parent: all done!")
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
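A hedged sketch for inspecting the emitted trace: the writer above opens the JSON array but never closes it (Chrome's trace viewer tolerates that), so the tail is patched before parsing with the standard json module.
import json
with open("/tmp/t.json") as fh:
    text = fh.read().rstrip().rstrip(",") + "\n]"
events = json.loads(text)
# The first records are the thread metadata events written in Trace.__init__.
print(len(events), "events; first:", events[0]["name"])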
| 29.154362
| 109
| 0.570672
| 2,170
| 0.49931
| 0
| 0
| 0
| 0
| 639
| 0.147032
| 1,876
| 0.431661
|
c024d083863172cd08c0e34544cc15c4d39eca0b
| 345
|
py
|
Python
|
common/data_refinery_common/models/__init__.py
|
dongbohu/ccdl_test
|
be50b7ca35fba28676b594ba3f003b0b581abcb7
|
[
"BSD-3-Clause"
] | null | null | null |
common/data_refinery_common/models/__init__.py
|
dongbohu/ccdl_test
|
be50b7ca35fba28676b594ba3f003b0b581abcb7
|
[
"BSD-3-Clause"
] | 3
|
2020-06-05T17:18:10.000Z
|
2021-06-10T20:55:12.000Z
|
common/data_refinery_common/models/__init__.py
|
dongbohu/ccdl_test
|
be50b7ca35fba28676b594ba3f003b0b581abcb7
|
[
"BSD-3-Clause"
] | null | null | null |
from data_refinery_common.models.surveys import SurveyJob, SurveyJobKeyValue
from data_refinery_common.models.batches import (
BatchStatuses,
Batch,
BatchKeyValue,
File
)
from data_refinery_common.models.jobs import (
WorkerJob,
DownloaderJob,
ProcessorJob
)
from data_refinery_common.models.organism import Organism
| 24.642857
| 76
| 0.791304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c0263e8b4a30de418c75ebe0717861acba376145
| 1,144
|
py
|
Python
|
src/client_sample.py
|
ryoutoku/gunicorn-soap
|
5b7c6bedb7fda1486eb4402114276bdc7fd0e77c
|
[
"MIT"
] | null | null | null |
src/client_sample.py
|
ryoutoku/gunicorn-soap
|
5b7c6bedb7fda1486eb4402114276bdc7fd0e77c
|
[
"MIT"
] | null | null | null |
src/client_sample.py
|
ryoutoku/gunicorn-soap
|
5b7c6bedb7fda1486eb4402114276bdc7fd0e77c
|
[
"MIT"
] | null | null | null |
from zeep import Client
from models import RequestParameter
class Caller:
def __init__(self):
wsdl_url = "http://0.0.0.0:8080/?wsdl"
self._name = "dummy_name"
self._times = 3
self._client = Client(wsdl_url)
def call_say_hello_1(self):
result = self._client.service.say_hello_1(
self._name,
self._times)
print(result)
def call_say_hello_2(self):
result = self._client.service.say_hello_2(
{
"name": self._name,
"times": self._times
}
)
print(result)
def call_say_hello_3(self):
param = RequestParameter(
name=self._name,
times=self._times
)
result = self._client.service.say_hello_3(param.as_dict())
print(result)
print(type(result))
def main():
caller = Caller()
caller.call_say_hello_1()
print("=====================")
caller.call_say_hello_2()
print("=====================")
caller.call_say_hello_3()
print("=====================")
if __name__ == '__main__':
main()
| 22
| 66
| 0.532343
| 808
| 0.706294
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.11451
|
c028333a0436a3c88c477f3244b6bd0fca21d64d
| 1,600
|
py
|
Python
|
rest_api/views.py
|
vikash98k/django-rest-api
|
51c83a5d9c65f03b4b790ac965cd2222c6326752
|
[
"MIT"
] | 1
|
2021-11-15T03:29:24.000Z
|
2021-11-15T03:29:24.000Z
|
rest_api/views.py
|
vikash98k/django-rest-api
|
51c83a5d9c65f03b4b790ac965cd2222c6326752
|
[
"MIT"
] | null | null | null |
rest_api/views.py
|
vikash98k/django-rest-api
|
51c83a5d9c65f03b4b790ac965cd2222c6326752
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from .permissions import IsOwner
from .serializers import BucketlistSerializer, UserSerializer
from .models import Bucketlist
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
class CreateView(generics.ListCreateAPIView):
"""This class handles the GET and POSt requests of our rest api."""
queryset = Bucketlist.objects.all()
serializer_class = BucketlistSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [SessionAuthentication]
def perform_create(self, serializer):
"""Save the post data when creating a new bucketlist."""
serializer.save(owner=self.request.user)
class DetailsView(generics.RetrieveUpdateDestroyAPIView):
"""This class handles GET, PUT, PATCH and DELETE requests."""
queryset = Bucketlist.objects.all()
serializer_class = BucketlistSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [SessionAuthentication]
class UserView(generics.ListAPIView):
"""View to list the user queryset."""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [SessionAuthentication]
class UserDetailsView(generics.RetrieveAPIView):
"""View to retrieve a user instance."""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [SessionAuthentication]
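For context, a hedged sketch of how these views might be wired in a urls.py; the route strings and names are assumptions, only the view classes come from this module.
from django.urls import path
from .views import CreateView, DetailsView, UserView, UserDetailsView
urlpatterns = [
    path('bucketlists/', CreateView.as_view(), name='bucketlist-list'),
    path('bucketlists/<int:pk>/', DetailsView.as_view(), name='bucketlist-detail'),
    path('users/', UserView.as_view(), name='user-list'),
    path('users/<int:pk>/', UserDetailsView.as_view(), name='user-detail'),
]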
| 39.02439
| 71
| 0.775625
| 1,267
| 0.791875
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.1625
|
c028ccbb3fd75e9d2a67753c98bec97a1ad49fb6
| 7,113
|
py
|
Python
|
darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | 1
|
2020-06-25T03:12:58.000Z
|
2020-06-25T03:12:58.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateIPSecTunnelBgpSessionDetails(object):
"""
CreateIPSecTunnelBgpSessionDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateIPSecTunnelBgpSessionDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param oracle_interface_ip:
The value to assign to the oracle_interface_ip property of this CreateIPSecTunnelBgpSessionDetails.
:type oracle_interface_ip: str
:param customer_interface_ip:
The value to assign to the customer_interface_ip property of this CreateIPSecTunnelBgpSessionDetails.
:type customer_interface_ip: str
:param customer_bgp_asn:
The value to assign to the customer_bgp_asn property of this CreateIPSecTunnelBgpSessionDetails.
:type customer_bgp_asn: str
"""
self.swagger_types = {
'oracle_interface_ip': 'str',
'customer_interface_ip': 'str',
'customer_bgp_asn': 'str'
}
self.attribute_map = {
'oracle_interface_ip': 'oracleInterfaceIp',
'customer_interface_ip': 'customerInterfaceIp',
'customer_bgp_asn': 'customerBgpAsn'
}
self._oracle_interface_ip = None
self._customer_interface_ip = None
self._customer_bgp_asn = None
@property
def oracle_interface_ip(self):
"""
Gets the oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
The IP address for the Oracle end of the inside tunnel interface.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this IP address
is required and used for the tunnel's BGP session.
If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
address to troubleshoot or monitor the tunnel.
The value must be a /30 or /31.
Example: `10.0.0.4/31`
:return: The oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
:rtype: str
"""
return self._oracle_interface_ip
@oracle_interface_ip.setter
def oracle_interface_ip(self, oracle_interface_ip):
"""
Sets the oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
The IP address for the Oracle end of the inside tunnel interface.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this IP address
is required and used for the tunnel's BGP session.
If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
address to troubleshoot or monitor the tunnel.
The value must be a /30 or /31.
Example: `10.0.0.4/31`
:param oracle_interface_ip: The oracle_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
:type: str
"""
self._oracle_interface_ip = oracle_interface_ip
@property
def customer_interface_ip(self):
"""
Gets the customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
The IP address for the CPE end of the inside tunnel interface.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this IP address
is required and used for the tunnel's BGP session.
If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
address to troubleshoot or monitor the tunnel.
The value must be a /30 or /31.
Example: `10.0.0.5/31`
:return: The customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
:rtype: str
"""
return self._customer_interface_ip
@customer_interface_ip.setter
def customer_interface_ip(self, customer_interface_ip):
"""
Sets the customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
The IP address for the CPE end of the inside tunnel interface.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this IP address
is required and used for the tunnel's BGP session.
If `routing` is instead set to `STATIC`, this IP address is optional. You can set this IP
address to troubleshoot or monitor the tunnel.
The value must be a /30 or /31.
Example: `10.0.0.5/31`
:param customer_interface_ip: The customer_interface_ip of this CreateIPSecTunnelBgpSessionDetails.
:type: str
"""
self._customer_interface_ip = customer_interface_ip
@property
def customer_bgp_asn(self):
"""
Gets the customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this ASN
is required and used for the tunnel's BGP session. This is the ASN of the network on the
CPE end of the BGP session. Can be a 2-byte or 4-byte ASN. Uses \"asplain\" format.
If the tunnel's `routing` attribute is set to `STATIC`, the `customerBgpAsn` must be null.
Example: `12345` (2-byte) or `1587232876` (4-byte)
:return: The customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
:rtype: str
"""
return self._customer_bgp_asn
@customer_bgp_asn.setter
def customer_bgp_asn(self, customer_bgp_asn):
"""
Sets the customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
If the tunnel's `routing` attribute is set to `BGP`
(see :class:`IPSecConnectionTunnel`), this ASN
is required and used for the tunnel's BGP session. This is the ASN of the network on the
CPE end of the BGP session. Can be a 2-byte or 4-byte ASN. Uses \"asplain\" format.
If the tunnel's `routing` attribute is set to `STATIC`, the `customerBgpAsn` must be null.
Example: `12345` (2-byte) or `1587232876` (4-byte)
:param customer_bgp_asn: The customer_bgp_asn of this CreateIPSecTunnelBgpSessionDetails.
:type: str
"""
self._customer_bgp_asn = customer_bgp_asn
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
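A hedged usage sketch of the model above; the addresses and ASN are the example values from the docstrings, not real tunnel configuration.
details = CreateIPSecTunnelBgpSessionDetails(
    oracle_interface_ip='10.0.0.4/31',
    customer_interface_ip='10.0.0.5/31',
    customer_bgp_asn='12345',
)
print(details)  # __repr__ delegates to formatted_flat_dict and prints the populated fields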
| 37.240838
| 245
| 0.679882
| 6,573
| 0.924083
| 0
| 0
| 6,603
| 0.9283
| 0
| 0
| 5,513
| 0.77506
|
c029732da347682368446ad63ee588078d4cc569
| 752
|
py
|
Python
|
server_django/prikmeter/views.py
|
ttencate/smartmetertap
|
c768a5818766f897cb5dcd223286b173b31a3a65
|
[
"BSD-3-Clause"
] | 1
|
2017-10-26T05:28:08.000Z
|
2017-10-26T05:28:08.000Z
|
server_django/prikmeter/views.py
|
ttencate/smartmetertap
|
c768a5818766f897cb5dcd223286b173b31a3a65
|
[
"BSD-3-Clause"
] | 9
|
2017-10-16T07:15:51.000Z
|
2021-09-11T07:39:40.000Z
|
server_django/prikmeter/views.py
|
ttencate/smartmetertap
|
c768a5818766f897cb5dcd223286b173b31a3a65
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import auth, messages
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST, require_safe
@require_safe
def index(request):
context = {}
return render(request, 'prikmeter/index.html', context)
@require_POST
def login(request):
email = request.POST['email']
password = request.POST['password']
user = auth.authenticate(request, email=email, password=password)
if user:
auth.login(request, user)
else:
messages.error(request, 'Invalid username or password.')
return redirect(request.POST['next'] or 'prikmeter:index')
@require_POST
def logout(request):
    auth.logout(request)
return redirect(request.POST['next'] or 'prikmeter:index')
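A hedged sketch of the URL configuration these views expect; app_name = 'prikmeter' follows from the 'prikmeter:index' redirects above, while the path strings themselves are assumptions.
from django.urls import path
from . import views
app_name = 'prikmeter'
urlpatterns = [
    path('', views.index, name='index'),
    path('login/', views.login, name='login'),
    path('logout/', views.logout, name='logout'),
]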
| 26.857143
| 69
| 0.720745
| 0
| 0
| 0
| 0
| 587
| 0.780585
| 0
| 0
| 116
| 0.154255
|
c02a0889807a2eb0056cc9fc59fcd71cd6dcb6b8
| 146
|
py
|
Python
|
scenarios/order_show/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
scenarios/order_show/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
scenarios/order_show/executable.py
|
trenton42/txbalanced
|
9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14
|
[
"MIT"
] | null | null | null |
import balanced
balanced.configure('ak-test-1o9QKwUCrwstHWO5sGxICtIJdQXFTjnrV')
order = balanced.Order.fetch('/orders/OR7qAh5x1cFzX0U9hD628LPa')
| 29.2
| 64
| 0.842466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.527397
|
c02a0ebb0ba3ca2e51b79708e03d7785c34ec44d
| 490
|
py
|
Python
|
python/array/leetcode/move_zero.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | 153
|
2020-09-24T12:46:51.000Z
|
2022-03-31T21:30:44.000Z
|
python/array/leetcode/move_zero.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | null | null | null |
python/array/leetcode/move_zero.py
|
googege/algo-learn
|
054d05e8037005c5810906d837de889108dad107
|
[
"MIT"
] | 35
|
2020-12-22T11:07:06.000Z
|
2022-03-09T03:25:08.000Z
|
from typing import List
# Move Zeroes
class Solution:
    # Approach 1: use an auxiliary array
def moveZeroes1(self, nums: List[int]) -> None:
temp, k = [0] * len(nums), 0
for n in nums:
if n != 0:
temp[k] = n
k += 1
nums[:] = temp[:]
    # Approach 2: two-pointer solution
def moveZeroes2(self, nums: List[int]) -> None:
k = 0
for i, v in enumerate(nums):
if v != 0:
nums[i], nums[k] = nums[k], nums[i]
k += 1
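A quick self-check of the two approaches (a hypothetical driver, not part of the original solution); both move the non-zero elements forward in place while keeping their relative order.
if __name__ == '__main__':
    s = Solution()
    a = [0, 1, 0, 3, 12]
    b = a[:]
    s.moveZeroes1(a)
    s.moveZeroes2(b)
    print(a)  # [1, 3, 12, 0, 0]
    print(b)  # [1, 3, 12, 0, 0]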
| 19.6
| 51
| 0.418367
| 477
| 0.920849
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.092664
|
c02a3ab8ab4ad9227b45abfaa1e75b75d929e0e8
| 6,368
|
py
|
Python
|
make_tfrecords.py
|
ssarfjoo/improvedsegan
|
df74761ed6404189ba26ccef40c38dddec334684
|
[
"MIT"
] | 36
|
2017-10-26T04:15:48.000Z
|
2021-08-10T02:10:18.000Z
|
make_tfrecords.py
|
ssarfjoo/improvedsegan
|
df74761ed6404189ba26ccef40c38dddec334684
|
[
"MIT"
] | 4
|
2020-01-28T21:34:47.000Z
|
2022-02-09T23:26:49.000Z
|
make_tfrecords.py
|
ssarfjoo/improvedsegan
|
df74761ed6404189ba26ccef40c38dddec334684
|
[
"MIT"
] | 5
|
2018-09-03T11:50:41.000Z
|
2021-12-25T08:58:45.000Z
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
""" Return windows of the given signal by sweeping in stride fractions
of window
"""
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
if end_i - beg_i < window_size:
break
slice_ = signal[beg_i:end_i]
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.int32)
def read_and_slice(filename, wav_canvas_size, stride=0.5):
fm, wav_data = wavfile.read(filename)
if fm != 16000:
raise ValueError('Sampling rate is expected to be 16kHz!')
signals = slice_signal(wav_data, wav_canvas_size, stride)
return signals
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
""" Read and slice the wav and noisy files and write to TFRecords.
out_file: TFRecordWriter.
"""
ppath, wav_fullname = os.path.split(wav_filename)
noisy_filename = os.path.join(noisy_path, wav_fullname)
wav_signals = read_and_slice(wav_filename, wav_canvas_size)
noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
if not baseline_dir is None:
baseline_filename = os.path.join(baseline_dir, wav_fullname)
baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
if baseline_dir is None:
for (wav, noisy) in zip(wav_signals, noisy_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw)}))
out_file.write(example.SerializeToString())
else:
for (wav, noisy, base) in zip(wav_signals, noisy_signals, baseline_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
baseline_raw = base.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw),
'baseline_raw': _bytes_feature(baseline_raw)
}))
out_file.write(example.SerializeToString())
def main(opts):
if not os.path.exists(opts.save_path):
# make save path if it does not exist
os.makedirs(opts.save_path)
# set up the output filepath
out_filepath = os.path.join(opts.save_path, opts.out_file)
if os.path.splitext(out_filepath)[1] != '.tfrecords':
# if wrong extension or no extension appended, put .tfrecords
out_filepath += '.tfrecords'
else:
out_filename, ext = os.path.splitext(out_filepath)
out_filepath = out_filename + ext
# check if out_file exists and if force flag is set
if os.path.exists(out_filepath) and not opts.force_gen:
raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
'overwrite. Skipping this speaker.'.format(out_filepath))
elif os.path.exists(out_filepath) and opts.force_gen:
print('Will overwrite previously existing tfrecords')
os.unlink(out_filepath)
with open(opts.cfg) as cfh:
# read the configuration description
cfg_desc = toml.loads(cfh.read())
beg_enc_t = timeit.default_timer()
out_file = tf.python_io.TFRecordWriter(out_filepath)
# process the acoustic and textual data now
for dset_i, (dset, dset_desc) in enumerate(cfg_desc.iteritems()):
print('-' * 50)
wav_dir = dset_desc['clean']
wav_files = [os.path.join(wav_dir, wav) for wav in
os.listdir(wav_dir) if wav.endswith('.wav')]
noisy_dir = dset_desc['noisy']
baseline_dir = None
if 'baseline' in dset_desc.keys():
baseline_dir = dset_desc['baseline']
nfiles = len(wav_files)
for m, wav_file in enumerate(wav_files):
print('Processing wav file {}/{} {}{}'.format(m + 1,
nfiles,
wav_file,
' ' * 10),
end='\r')
sys.stdout.flush()
encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14, baseline_dir)
out_file.close()
end_enc_t = timeit.default_timer() - beg_enc_t
print('')
print('*' * 50)
print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the set of txt and '
'wavs to TFRecords')
parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
help='File containing the description of datasets '
'to extract the info to make the TFRecords.')
parser.add_argument('--save_path', type=str, default='data/',
help='Path to save the dataset')
parser.add_argument('--out_file', type=str, default='segan.tfrecords',
help='Output filename')
parser.add_argument('--force-gen', dest='force_gen', action='store_true',
help='Flag to force overwriting existing dataset.')
parser.set_defaults(force_gen=False)
opts = parser.parse_args()
main(opts)
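The script reads a TOML configuration in which every section describes one dataset with 'clean', 'noisy' and optionally 'baseline' directories of matching 16 kHz wav files. A hedged sketch of writing such a config; the section name and paths are placeholders.
import toml
cfg = {
    'trainset': {
        'clean': 'data/clean_trainset_wav_16k',
        'noisy': 'data/noisy_trainset_wav_16k',
        # 'baseline': 'data/baseline_trainset_wav_16k',  # optional third signal
    }
}
with open('e2e_maker.cfg', 'w') as cfh:
    toml.dump(cfg, cfh)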
| 43.319728
| 89
| 0.617619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,194
| 0.1875
|
c02a8e075173d16d5fa606436bcb12e60ab69d67
| 1,684
|
py
|
Python
|
software.py
|
schamberg97/bullshitMgimoProj
|
1577e7a2b0256b98f259a2cc6667194abd689ec8
|
[
"Unlicense"
] | null | null | null |
software.py
|
schamberg97/bullshitMgimoProj
|
1577e7a2b0256b98f259a2cc6667194abd689ec8
|
[
"Unlicense"
] | null | null | null |
software.py
|
schamberg97/bullshitMgimoProj
|
1577e7a2b0256b98f259a2cc6667194abd689ec8
|
[
"Unlicense"
] | null | null | null |
from myShop import MyShop
from myBot import MYBOT
from sMenu import Menu
class software:
@staticmethod
def runShowTables():
        # import the XML library
        import xml.dom.minidom as minidom
        from sMenu import Menu
        # read the XML from the file
        dom = minidom.parse("myShop.xml")
        dom.normalize()
        # Read a table
        def listTable(what,whatSub):
            pars=dom.getElementsByTagName(what)[0]
            # Read the elements of the table
            nodes=pars.getElementsByTagName(whatSub)
            # Print the table elements to the screen
for node in nodes:
id = node.getElementsByTagName("id")[0]
name = node.getElementsByTagName("name")[0]
print(id.firstChild.data, name.firstChild.data)
menu_items=["Категории", "Цвета", "Адреса", "Материал", "Сезон", "Товар"]
        menu_actions=['categories','colors', 'cities', 'materials', 'seasons', 'products'] # the customer and order tables are intentionally not offered ;)
menu_actions_nodes=['category','color', 'city', 'material', 'season', 'product']
menu_title="Смотреть таблицу"
my_menu=Menu(menu_title, menu_items)
choice=my_menu.get_user_choice()
listTable(menu_actions[choice-1], menu_actions_nodes[choice-1])
@staticmethod
def run():
        # Create the product shop
        myShop=MyShop("myShop.xml")
        #myShop.printProduct()
        # Add sample data
        myShop.addSampleData(30, 30, 500)
        #myShop.printProduct()
        myShop.saveXML("new.xml")
        # Create the bot
        bot=MYBOT(myShop)
        # train the bot
        bot.botTraining(0)
        # get the user's choices
        print('Для выхода - нажмите Ctrl-C')
        sd=bot.getUserChoice()
        # build the recommendation and print the recommended product
        print("Ваш рекомендованный товар: ",bot.getPrecigion(sd))
| 29.034483
| 127
| 0.71734
| 1,970
| 0.963796
| 0
| 0
| 1,947
| 0.952544
| 0
| 0
| 1,008
| 0.493151
|
c02b02d3a9106d7127a9a094f2f01f8ba90e6fb6
| 22,963
|
py
|
Python
|
app/src/main/Python/Translate.py
|
tangcan1600/XuMiJie
|
2e47d519c1c62ec3eabb576d80f783dd62052f44
|
[
"MIT"
] | null | null | null |
app/src/main/Python/Translate.py
|
tangcan1600/XuMiJie
|
2e47d519c1c62ec3eabb576d80f783dd62052f44
|
[
"MIT"
] | null | null | null |
app/src/main/Python/Translate.py
|
tangcan1600/XuMiJie
|
2e47d519c1c62ec3eabb576d80f783dd62052f44
|
[
"MIT"
] | null | null | null |
import time, sys, os, hashlib, json, re
import requests, random, js2py, execjs  # execjs (PyExecJS) is required by Baidu.get_sign_ctx()
import urllib.request
import urllib.parse
# Check whether the string contains any Chinese characters
def language(strs):
    for char in strs:
        if (u'\u4e00' <= char and char <= u'\u9fff'):
            return 'zh', 'en'  # contains Chinese: translate zh -> en
    return 'en', 'zh'  # no Chinese: translate en -> zh
class Baidu():
def __init__(self):
self.url = 'https://fanyi.baidu.com/v2transapi?from=zh&to=en'
self.header = {
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'origin': 'https://fanyi.baidu.com',
'referer': 'https://fanyi.baidu.com/?aldtype=16047',
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
'x-requested-with': 'XMLHttpRequest',
'cookie': 'BIDUPSID=D3290C65C03AEF0E98D97B8641DFFB15; PSTM=1570785944; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; BAIDUID=0CC6F13854E81A68D3C564D36E7C8A03:FG=1; APPGUIDE_8_2_2=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDSFRCVID=wt_OJeC626EDLgju-c_JbHce7gSxbKcTH6aoxbIy4_AgXmAxrp74EG0PJf8g0Ku-dWitogKKBmOTHg-F_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=JJkO_D_atKvjDbTnMITHh-F-5fIX5-RLf5TuLPOF5lOTJh0RbtOkjnQD-UL82bT2fRcQ0tJLb4DaStJbLjbke6cbDa_fJ5Fs-I5O0R4854QqqR5R5bOq-PvHhxoJqbbJX2OZ0l8KtDQpshRTMR_V-p4p-472K6bML5baabOmWIQHDPnPyJuMBU_sWMcChnjjJbn4KKJxWJLWeIJo5Dcf3PF3hUJiBMjLBan7056IXKohJh7FM4tW3J0ZyxomtfQxtNRJ0DnjtpChbRO4-TF-D5jXeMK; delPer=0; PSINO=2; H_PS_PSSID=1435_21104_18560_26350; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1580216234,1580216243,1580458514,1580458537; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1580458539; __yjsv5_shitong=1.0_7_ed303110bee0e644d4985049ba8a5cd1f28d_300_1580458537306_120.10.109.208_66a3b40c; yjs_js_security_passport=630340c0505f771135167fa6df3e5215699dcf0b_1580458538_js; to_lang_often=%5B%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D; from_lang_often=%5B%7B%22value%22%3A%22vie%22%2C%22text%22%3A%22%u8D8A%u5357%u8BED%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D'
}
self.data = None
def get_sign_ctx(self):
ctx = execjs.compile(
r"""
function n(r, o) {
for (var t = 0; t < o.length - 2; t += 3) {
var a = o.charAt(t + 2);
a = a >= "a" ? a.charCodeAt(0) - 87 : Number(a),
a = "+" === o.charAt(t + 1) ? r >>> a : r << a,
r = "+" === o.charAt(t) ? r + a & 4294967295 : r ^ a
}
return r
}
function e(r) {
var o = r.match(/[\uD800-\uDBFF][\uDC00-\uDFFF]/g);
if (null === o) {
var t = r.length;
t > 30 && (r = "" + r.substr(0, 10) + r.substr(Math.floor(t / 2) - 5, 10) + r.substr(-10, 10))
} else {
for (var e = r.split(/[\uD800-\uDBFF][\uDC00-\uDFFF]/), C = 0, h = e.length, f = []; h > C; C++)
"" !== e[C] && f.push.apply(f, a(e[C].split(""))),
C !== h - 1 && f.push(o[C]);
var g = f.length;
g > 30 && (r = f.slice(0, 10).join("") + f.slice(Math.floor(g / 2) - 5, Math.floor(g / 2) + 5).join("") + f.slice(-10).join(""))
}
var u = void 0
, l = "" + String.fromCharCode(103) + String.fromCharCode(116) + String.fromCharCode(107);
u =' """ + str(self.get_gtk()) + r""" ';
for (var d = u.split("."), m = Number(d[0]) || 0, s = Number(d[1]) || 0, S = [], c = 0, v = 0; v < r.length; v++) {
var A = r.charCodeAt(v);
128 > A ? S[c++] = A : (2048 > A ? S[c++] = A >> 6 | 192 : (55296 === (64512 & A) && v + 1 < r.length && 56320 === (64512 & r.charCodeAt(v + 1)) ? (A = 65536 + ((1023 & A) << 10) + (1023 & r.charCodeAt(++v)),
S[c++] = A >> 18 | 240,
S[c++] = A >> 12 & 63 | 128) : S[c++] = A >> 12 | 224,
S[c++] = A >> 6 & 63 | 128),
S[c++] = 63 & A | 128)
}
for (var p = m, F = "" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(97) + ("" + String.fromCharCode(94) + String.fromCharCode(43) + String.fromCharCode(54)), D = "" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(51) + ("" + String.fromCharCode(94) + String.fromCharCode(43) + String.fromCharCode(98)) + ("" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(102)), b = 0; b < S.length; b++)
p += S[b],
p = n(p, F);
return p = n(p, D),
p ^= s,
0 > p && (p = (2147483647 & p) + 2147483648),
p %= 1e6,
p.toString() + "." + (p ^ m)
}
"""
)
return ctx
def get_sign(self, text):
ctx = self.get_sign_ctx()
sign = ctx.call("e", text)
# print(sign)
return sign
def get_token(self):
s = requests.session()
url = 'https://fanyi.baidu.com/'
html = requests.get(url, headers=self.header)
html = html.text
# print(html)
raw_tk_str = str(re.search('token:.*,', html))
token = raw_tk_str.split('\'')[1]
# print(token)
return token
def get_cookie(self):
import urllib.request
import http.cookiejar
cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('https://fanyi.baidu.com/?aldtype=16047#zh/en/aa%E9%80%9F%E5%BA%A6')
# print(response)
for item in cookie:
print('%s = %s' % (item.name, item.value))
def get_gtk(self):
url = 'https://fanyi.baidu.com/'
html = requests.get(url)
html = html.text
raw_gtk_str = str(re.search('window.gtk = .*;', html))
gtk = raw_gtk_str.split('\'')[1]
# print('gtk '+gtk)
return gtk
def get_data(self, text, from_lan, to_lan):
data = {}
data['from'] = from_lan
data['to'] = to_lan
data['query'] = text
data['simple_means_flag'] = 3
data['transtype'] = 'realtime'
data['sign'] = self.get_sign(text)
data['token'] = self.get_token()
return data
def translate(self, text, from_lan, to_lan):
try:
self.data = self.get_data(text, from_lan, to_lan)
response = requests.post(self.url, headers=self.header, data=self.data)
# print('百度翻译结果为:',response.json()['trans_result']['data'][0]['dst'])
return response.json()['trans_result']['data'][0]['dst']
except:
return '程序出现了一点小问题,无法翻译'
class Bing():
def __init__(self):
self.url = "http://api.microsofttranslator.com/v2/ajax.svc/TranslateArray2?"
def translate(self, content, from_lan, to_lan):
try:
data = {}
data['from'] = '"' + from_lan + '"'
data['to'] = '"' + to_lan + '"'
data['texts'] = '["'
data['texts'] += content
data['texts'] += '"]'
data['options'] = "{}"
data['oncomplete'] = 'onComplete_3'
data['onerror'] = 'onError_3'
data['_'] = '1430745999189'
data = urllib.parse.urlencode(data).encode('utf-8')
strUrl = self.url + data.decode() + "&appId=%223DAEE5B978BA031557E739EE1E2A68CB1FAD5909%22"
response = urllib.request.urlopen(strUrl)
str_data = response.read().decode('utf-8')
# print(str_data)
tmp, str_data = str_data.split('"TranslatedText":')
translate_data = str_data[1:str_data.find('"', 1)]
# print('必应翻译结果为:',translate_data)
return translate_data
except:
return '程序出现了一点小问题,无法翻译'
class Ciba():
def __init__(self, word, lan, tolan):
self.word = word
self.url = 'http://fy.iciba.com/ajax.php?a=fy'
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
        # Build the POST request parameters
self.post_data = {
'f': lan,
't': tolan,
'w': self.word
}
    # Send the request
def request_post(self):
res = requests.post(url=self.url, headers=self.headers, data=self.post_data)
# print(res.content.decode())
return res.content.decode()
    # Parse the response data
@staticmethod
def parse_data(data):
dict_data = json.loads(data)
if 'out' in dict_data['content']:
return dict_data['content']['out']
elif 'word_mean' in dict_data['content']:
return dict_data['content']['word_mean']
def translate(self):
data = self.request_post()
try:
# print('词霸翻译结果为:',self.parse_data(data))
return self.parse_data(data)
except:
return '程序出现了一点小问题,无法翻译'
class Youdao():
def translate(self, content, lan, tolan):
try:
            # Work around the anti-scraping mechanism
u = 'fanyideskweb'
d = content
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
            f = str(int(time.time() * 1000) + random.randint(1, 10))  # timestamp
c = 'rY0D^0\'nM0}g5Mm1z%1G4'
            sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()  # md5 hash used as the request signature
head = {}
head[
'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
data = {}
            # data: the Youdao translation form fields
data['i'] = content
data['from'] = lan # 'AUTO'
data['to'] = tolan # 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
            data['salt'] = f  # salt and sign are anti-scraping fields; salt is the timestamp and changes every request
            data['sign'] = sign  # md5 of u + d + f + c
data['ts'] = '1551506287219'
data['bv'] = '97ba7c7fb78632ae9b11dcf6be726aee'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_REALTIME'
data['typoResult'] = 'False'
data = urllib.parse.urlencode(data).encode('utf-8')
request = urllib.request.Request(url=url, data=data, headers=head, method='POST')
response = urllib.request.urlopen(request)
            line = json.load(response)  # parse the response body as JSON
text = ''
for x in line['translateResult']:
text += x[0]['tgt']
# print('有道翻译结果为:',text)
return text
except:
return '程序出现了一点小问题,无法翻译'
class Youdao1():
def get_data(self, e, lan, tolan):
        '''
        Build the request data dict.
        :param e: the text to translate
        :return: the data dict for the POST request
        '''
sjc = time.time()
ts = str(int(sjc * 1000))
salt = ts + str(int(random.random() * 10))
con = "fanyideskweb" + e + salt + "97_3(jkMYg@T[KZQmqjTK"
sign = hashlib.md5(con.encode(encoding='UTF-8')).hexdigest()
# 'from': 'AUTO',
# 'to': 'AUTO',
data = {
'i': e,
'from': lan,
'to': tolan,
'smartresult': 'dict',
'client': 'fanyideskweb',
'salt': salt,
'sign': sign,
'ts': ts,
'bv': '97ba7c7fb78632ae9b11dcf6be726aee',
'doctype': 'json',
'version': '2.1',
'keyfrom': 'fanyi.web',
'action': 'FY_BY_REALTlME',
'typoResult': 'False'
}
return data
def get_para(self, e, lan, tolan):
        '''
        Build the parameters (form data and headers) needed for the request.
        :param e: the input string
        :return:
        '''
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/\
537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Cookie': 'OUTFOX_SEARCH_USER_ID=-1154806696@10.168.8.76; \
OUTFOX_SEARCH_USER_ID_NCOO=1227534676.2988937; \
JSESSIONID=aaa7LDLdy4Wbh9ECJb_Vw; ___rl__test__cookies=1563334957868',
'Referer': 'http://fanyi.youdao.com/'
}
return self.get_data(e, lan, tolan), header
def search(self, res):
        '''
        Extract the translated text from the response.
        :param res:
        :return:
        '''
import re
model = '"tgt":"(.*?)"'
rep = re.findall(model, res, re.S)
rep = rep[0]
return rep
def translate(self, content, lan, tolan):
try:
url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
data = self.get_para(content, lan, tolan)[0]
header = self.get_para(content, lan, tolan)[1]
response = requests.post(url, data=data, headers=header).text
result = self.search(response)
return result
except:
return '程序出现了一点小问题,无法翻译'
class Google():
def translate(self, word, from_lan, to_lan):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
}
url = 'https://translate.google.cn/translate_a/single?client=t&sl=auto&tl={}&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&tk={}&q={}'
if len(word) > 4891:
raise RuntimeError('The length of word should be less than 4891...')
target_language = to_lan
res = requests.get(url.format(target_language, self.getTk(word), word), headers=headers)
# print(res.json()[0][0][0])
return res.json()[0][0][0]
def getTk(self, word):
evaljs = js2py.EvalJs()
js_code = self.gg_js_code
evaljs.execute(js_code)
tk = evaljs.TL(word)
return tk
def isChinese(self, word):
for w in word:
if '\u4e00' <= w <= '\u9fa5':
return True
return False
gg_js_code = '''
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
'''
class Tencent():
def __init__(self):
self.api_url = 'https://fanyi.qq.com/api/translate'
self.headers = {
'Cookie': 'fy_guid=605ead81-f210-47eb-bd80-ac6ae5e7a2d8; '
'qtv=ed286a053ae88763; '
'qtk=wfMmjh3k/7Sr2xVNg/LtITgPRlnvGWBzP9a4FN0dn9PE7L5jDYiYJnW03MJLRUGHEFNCRhTfrp/V+wUj0dun1KkKNUUmS86A/wGVf6ydzhwboelTOs0hfHuF0ndtSoX+N3486tUMlm62VU4i856mqw==; ',
'Host': 'fanyi.qq.com',
'Origin': 'https://fanyi.qq.com',
'Referer': 'https://fanyi.qq.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, '
'like Gecko) Chrome/73.0.3683.86 Safari/537.36', }
self.fromlang = 'auto'
self.text = ''
self.tolang = 'en' # 设置默认为英语
self.sessionUuid = str(int(time.time() * 1000))
self.fy_guid, self.qtv, self.qtk = self.get_qtv_qtk()
self.headers['Cookie'] = self.headers['Cookie'].replace(
'605ead81-f210-47eb-bd80-ac6ae5e7a2d8', self.fy_guid)
self.headers['Cookie'] = self.headers['Cookie'].replace(
'ed286a053ae88763', self.qtv)
self.headers['Cookie'] = self.headers['Cookie'].replace(
'wfMmjh3k/7Sr2xVNg/LtITgPRlnvGWBzP9a4FN0dn9PE7L5jDYiYJnW03MJLRUGHEFNCRhTfrp/V+wUj0dun1KkKNUUmS86A/wGVf6ydzhwboelTOs0hfHuF0ndtSoX+N3486tUMlm62VU4i856mqw==',
self.qtk)
def get_filter(self, text):
if isinstance(text, list):
text = ''.join(text)
text = str(text)
text = text.strip()
filter_list = [
'\r', '\n', '\t', '\u3000', '\xa0', '\u2002',
'<br>', '<br/>', ' ', ' ', ' ', '>>', '"',
'展开全部', ' '
]
for fl in filter_list:
text = text.replace(fl, '')
return text
def get_qtv_qtk(self):
api_url = 'https://fanyi.qq.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, '
'like Gecko) Chrome/73.0.3683.86 Safari/537.36', }
res = requests.get(api_url, headers=headers)
data = res.text
fy_guid = res.cookies.get('fy_guid')
reg = re.compile(r'var qtv = "(.*?)"')
qtv = reg.search(data).group(1)
reg = re.compile(r'var qtk = "(.*?)"')
qtk = reg.search(data).group(1)
return fy_guid, qtv, qtk
def getHtml(self, url, headers, data):
try:
html = requests.post(url=url, data=data, headers=headers)
# print(html.text)
datas = html.json()['translate']['records']
if html != None and datas != None:
                # join the translated segments into plain text
trans_result = ''.join([data['targetText'] for data in datas])
return trans_result
except Exception:
return None
def translate(self, text):
data = {
'source': self.fromlang,
'target': self.tolang,
'sourceText': text,
'qtv': self.qtv,
'qtk': self.qtk,
'sessionUuid': self.sessionUuid, }
try:
result = self.getHtml(self.api_url, self.headers, data)
# print('腾讯翻译结果为:',result)
return result
except:
return '程序出现了一点小问题,无法翻译'
class SanLiuLing():
def translate(self, content, lan, tolan):
try:
eng = "0";
if lan == 'en' and tolan == 'zh':
eng = "0"
elif lan == 'zh' and tolan == 'en':
eng = "1"
else:
return;
url = 'https://fanyi.so.com/index/search'
query_string = {"eng": eng, "validate": "", "ignore_trans": "0", "query": content}
headers = {
"user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"}
response = requests.post(url=url, data=query_string, headers=headers)
response.encoding = 'utf-8'
dict_ret = json.loads(response.text)
print(dict_ret['data']['fanyi'])
return dict_ret['data']['fanyi']
except:
return '程序出现了一点小问题,无法翻译'
languageMapCode = {
'检测语言': 'auto',
'阿尔巴尼亚语': 'sq',
'阿拉伯语': 'ar',
'阿姆哈拉语': 'am',
'阿塞拜疆语': 'az',
'爱尔兰语': 'ga',
'爱沙尼亚语': 'et',
'巴斯克语': 'eu',
'白俄罗斯语': 'be',
'保加利亚语': 'bg',
'冰岛语': 'is',
'波兰语': 'pl',
'波斯尼亚语': 'bs',
'波斯语': 'fa',
'布尔语(南非荷兰语)': 'af',
'丹麦语': 'da',
'德语': 'de',
'俄语': 'ru',
'法语': 'fr',
'菲律宾语': 'tl',
'芬兰语': 'fi',
'弗里西语': 'fy',
'高棉语': 'km',
'格鲁吉亚语': 'ka',
'古吉拉特语': 'gu',
'哈萨克语': 'kk',
'海地克里奥尔语': 'ht',
'韩语': 'ko',
'豪萨语': 'ha',
'荷兰语': 'nl',
'吉尔吉斯语': 'ky',
'加利西亚语': 'gl',
'加泰罗尼亚语': 'ca',
'捷克语': 'cs',
'卡纳达语': 'kn',
'科西嘉语': 'co',
'克罗地亚语': 'hr',
'库尔德语': 'ku',
'拉丁语': 'la',
'拉脱维亚语': 'lv',
'老挝语': 'lo',
'立陶宛语': 'lt',
'卢森堡语': 'lb',
'罗马尼亚语': 'ro',
'马尔加什语': 'mg',
'马耳他语': 'mt',
'马拉地语': 'mr',
'马拉雅拉姆语': 'ml',
'马来语': 'ms',
'马其顿语': 'mk',
'毛利语': 'mi',
'蒙古语': 'mn',
'孟加拉语': 'bn',
'缅甸语': 'my',
'苗语': 'hmn',
'南非科萨语': 'xh',
'南非祖鲁语': 'zu',
'尼泊尔语': 'ne',
'挪威语': 'no',
'旁遮普语': 'pa',
'葡萄牙语': 'pt',
'普什图语': 'ps',
'齐切瓦语': 'ny',
'日语': 'ja',
'瑞典语': 'sv',
'萨摩亚语': 'sm',
'塞尔维亚语': 'sr',
'塞索托语': 'st',
'僧伽罗语': 'si',
'世界语': 'eo',
'斯洛伐克语': 'sk',
'斯洛文尼亚语': 'sl',
'斯瓦希里语': 'sw',
'苏格兰盖尔语': 'gd',
'宿务语': 'ceb',
'索马里语': 'so',
'塔吉克语': 'tg',
'泰卢固语': 'te',
'泰米尔语': 'ta',
'泰语': 'th',
'土耳其语': 'tr',
'威尔士语': 'cy',
'乌尔都语': 'ur',
'乌克兰语': 'uk',
'乌兹别克语': 'uz',
'西班牙语': 'es',
'希伯来语': 'iw',
'希腊语': 'el',
'夏威夷语': 'haw',
'信德语': 'sd',
'匈牙利语': 'hu',
'修纳语': 'sn',
'亚美尼亚语': 'hy',
'伊博语': 'ig',
'意大利语': 'it',
'意第绪语': 'yi',
'印地语': 'hi',
'印尼巽他语': 'su',
'印尼语': 'id',
'印尼爪哇语': 'jw',
'英语': 'en',
'约鲁巴语': 'yo',
'越南语': 'vi',
'中文': 'zh-CN',
'中文(繁体)': 'zh-TW'
}
""" 获取语言代码 """
def get_language_code(language):
if language in languageMapCode:
return languageMapCode[language]
return ''
def translate(api, content):
lan, tolan = language(content)
if not content:
return
results = "kong"
if api == 'baidu':
results = Baidu().translate(content, lan, tolan)
elif api == 'youdao':
results = Youdao1().translate(content, lan, tolan)
elif api == 'google':
results = Google().translate(content, lan, tolan)
elif api == 'Ciba':
ciba = Ciba(content, lan, tolan)
results = ciba.translate()
elif api == 'bing':
results = Bing().translate(content, lan, tolan)
elif api == 'tencent':
results = Tencent().translate(content)
elif api == '360':
results = SanLiuLing().translate(content, lan, tolan)
return results
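A hedged usage sketch of the dispatcher above; language() picks the direction automatically, so English input is translated to Chinese and Chinese input to English. Every backend needs network access and may stop working when the services change their anti-scraping checks.
if __name__ == '__main__':
    print(translate('youdao', 'hello world'))  # en -> zh
    print(translate('bing', '你好,世界'))       # zh -> en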
| 36.276461
| 1,459
| 0.512041
| 20,415
| 0.836372
| 0
| 0
| 268
| 0.01098
| 0
| 0
| 13,104
| 0.536851
|
c02c417a57c64011bc9f7af79d7ad7b2fc564c8d
| 132
|
py
|
Python
|
examples/basic.py
|
EmbarkStudios/Python-xNormal
|
a4f005220d31d1e9085a7cbcc1ef46e70cff2753
|
[
"BSD-2-Clause"
] | 52
|
2015-04-26T19:46:37.000Z
|
2021-12-23T01:45:16.000Z
|
examples/basic.py
|
EmbarkStudios/Python-xNormal
|
a4f005220d31d1e9085a7cbcc1ef46e70cff2753
|
[
"BSD-2-Clause"
] | 3
|
2015-10-22T08:07:38.000Z
|
2019-08-02T18:13:59.000Z
|
examples/basic.py
|
EmbarkStudios/Python-xNormal
|
a4f005220d31d1e9085a7cbcc1ef46e70cff2753
|
[
"BSD-2-Clause"
] | 10
|
2016-08-24T14:02:07.000Z
|
2021-11-10T02:40:47.000Z
|
import xNormal
xNormal.run("piano_high.obj", "piano_low.obj", "piano.png", width=256, height=256, gen_normals = True, gen_ao = True)
| 66
| 117
| 0.742424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.318182
|
c02c5be917c2e7c70614350c8ed104d79b0759b4
| 1,266
|
py
|
Python
|
jina/executors/evaluators/rank/recall.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
jina/executors/evaluators/rank/recall.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
jina/executors/evaluators/rank/recall.py
|
yk/jina
|
ab66e233e74b956390f266881ff5dc4e0110d3ff
|
[
"Apache-2.0"
] | null | null | null |
from typing import Sequence, Any, Optional
from . import BaseRankingEvaluator
class RecallEvaluator(BaseRankingEvaluator):
"""A :class:`RecallEvaluator` evaluates the Precision of the search.
It computes how many of the first given `eval_at` groundtruth are found in the matches
"""
metric = 'Recall@N'
def __init__(self,
eval_at: Optional[int] = None,
*args, **kwargs):
""""
:param eval_at: the point at which evaluation is computed, if None give, will consider all the input to evaluate
"""
super().__init__(*args, **kwargs)
self.eval_at = eval_at
def evaluate(self, actual: Sequence[Any], desired: Sequence[Any], *args, **kwargs) -> float:
""""
:param actual: the matched document identifiers from the request as matched by jina indexers and rankers
:param desired: the expected documents matches ids sorted as they are expected
:return the evaluation metric value for the request document
"""
if self.eval_at == 0:
return 0.0
actual_at_k = actual[:self.eval_at] if self.eval_at else actual
ret = len(set(actual_at_k).intersection(set(desired)))
return ret / len(desired)
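A hedged usage sketch, assuming the base executor can be constructed with default arguments; the document ids are made-up examples.
evaluator = RecallEvaluator(eval_at=3)
actual = ['doc1', 'doc5', 'doc3', 'doc9']  # ids returned by the ranker
desired = ['doc3', 'doc4']                 # groundtruth ids
# Only 'doc3' from the groundtruth appears in the first 3 matches -> 1/2
print(evaluator.evaluate(actual, desired))  # 0.5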
| 38.363636
| 120
| 0.648499
| 1,184
| 0.935229
| 0
| 0
| 0
| 0
| 0
| 0
| 602
| 0.475513
|
c02c896bba97da4b352ffab0bd4675b7575d7153
| 7,995
|
py
|
Python
|
tests/tests_main.py
|
insilications/tqdm-clr
|
b09a24af7ffe5c85ed0e8e64b33059b43b1be020
|
[
"MIT"
] | 22,617
|
2015-06-03T20:26:05.000Z
|
2022-03-31T22:25:42.000Z
|
tests/tests_main.py
|
insilications/tqdm-clr
|
b09a24af7ffe5c85ed0e8e64b33059b43b1be020
|
[
"MIT"
] | 1,230
|
2015-06-03T13:56:41.000Z
|
2022-03-30T06:03:12.000Z
|
tests/tests_main.py
|
insilications/tqdm-clr
|
b09a24af7ffe5c85ed0e8e64b33059b43b1be020
|
[
"MIT"
] | 1,445
|
2015-06-03T14:01:33.000Z
|
2022-03-29T14:41:52.000Z
|
"""Test CLI usage."""
import logging
import subprocess # nosec
import sys
from functools import wraps
from os import linesep
from tqdm.cli import TqdmKeyError, TqdmTypeError, main
from tqdm.utils import IS_WIN
from .tests_tqdm import BytesIO, _range, closing, mark, raises
def restore_sys(func):
"""Decorates `func(capsysbin)` to save & restore `sys.(stdin|argv)`."""
@wraps(func)
def inner(capsysbin):
"""function requiring capsysbin which may alter `sys.(stdin|argv)`"""
_SYS = sys.stdin, sys.argv
try:
res = func(capsysbin)
finally:
sys.stdin, sys.argv = _SYS
return res
return inner
def norm(bytestr):
"""Normalise line endings."""
return bytestr if linesep == "\n" else bytestr.replace(linesep.encode(), b"\n")
@mark.slow
def test_pipes():
"""Test command line pipes"""
ls_out = subprocess.check_output(['ls']) # nosec
ls = subprocess.Popen(['ls'], stdout=subprocess.PIPE) # nosec
res = subprocess.Popen( # nosec
[sys.executable, '-c', 'from tqdm.cli import main; main()'],
stdin=ls.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = res.communicate()
assert ls.poll() == 0
# actual test:
assert norm(ls_out) == norm(out)
assert b"it/s" in err
if sys.version_info[:2] >= (3, 8):
test_pipes = mark.filterwarnings("ignore:unclosed file:ResourceWarning")(
test_pipes)
def test_main_import():
"""Test main CLI import"""
N = 123
_SYS = sys.stdin, sys.argv
# test direct import
sys.stdin = [str(i).encode() for i in _range(N)]
sys.argv = ['', '--desc', 'Test CLI import',
'--ascii', 'True', '--unit_scale', 'True']
try:
import tqdm.__main__ # NOQA, pylint: disable=unused-variable
finally:
sys.stdin, sys.argv = _SYS
@restore_sys
def test_main_bytes(capsysbin):
"""Test CLI --bytes"""
N = 123
# test --delim
IN_DATA = '\0'.join(map(str, _range(N))).encode()
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA)
# sys.stdin.write(b'\xff') # TODO
sys.stdin.seek(0)
main(sys.stderr, ['--desc', 'Test CLI delim', '--ascii', 'True',
'--delim', r'\0', '--buf_size', '64'])
out, err = capsysbin.readouterr()
assert out == IN_DATA
assert str(N) + "it" in err.decode("U8")
# test --bytes
IN_DATA = IN_DATA.replace(b'\0', b'\n')
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA)
sys.stdin.seek(0)
main(sys.stderr, ['--ascii', '--bytes=True', '--unit_scale', 'False'])
out, err = capsysbin.readouterr()
assert out == IN_DATA
assert str(len(IN_DATA)) + "B" in err.decode("U8")
@mark.skipif(sys.version_info[0] == 2, reason="no caplog on py2")
def test_main_log(capsysbin, caplog):
"""Test CLI --log"""
_SYS = sys.stdin, sys.argv
N = 123
sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
try:
with caplog.at_level(logging.INFO):
main(sys.stderr, ['--log', 'INFO'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert not caplog.record_tuples
with caplog.at_level(logging.DEBUG):
main(sys.stderr, ['--log', 'DEBUG'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert caplog.record_tuples
finally:
sys.stdin, sys.argv = _SYS
@restore_sys
def test_main(capsysbin):
"""Test misc CLI options"""
N = 123
sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
# test --tee
main(sys.stderr, ['--mininterval', '0', '--miniters', '1'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
assert N <= len(err.split(b"\r")) < N + 5
len_err = len(err)
main(sys.stderr, ['--tee', '--mininterval', '0', '--miniters', '1'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA and b"123/123" in err
# spaces to clear intermediate lines could increase length
assert len_err + len(norm(out)) <= len(err)
# test --null
main(sys.stderr, ['--null'])
out, err = capsysbin.readouterr()
assert not out and b"123/123" in err
# test integer --update
main(sys.stderr, ['--update'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum formula"
# test integer --update_to
main(sys.stderr, ['--update-to'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str(N - 1) + "it").encode() in err
assert (str(N) + "it").encode() not in err
with closing(BytesIO()) as sys.stdin:
sys.stdin.write(IN_DATA.replace(b'\n', b'D'))
# test integer --update --delim
sys.stdin.seek(0)
main(sys.stderr, ['--update', '--delim', 'D'])
out, err = capsysbin.readouterr()
assert out == IN_DATA.replace(b'\n', b'D')
assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum"
# test integer --update_to --delim
sys.stdin.seek(0)
main(sys.stderr, ['--update-to', '--delim', 'D'])
out, err = capsysbin.readouterr()
assert out == IN_DATA.replace(b'\n', b'D')
assert (str(N - 1) + "it").encode() in err
assert (str(N) + "it").encode() not in err
# test float --update_to
sys.stdin = [(str(i / 2.0) + '\n').encode() for i in _range(N)]
IN_DATA = b''.join(sys.stdin)
main(sys.stderr, ['--update-to'])
out, err = capsysbin.readouterr()
assert norm(out) == IN_DATA
assert (str((N - 1) / 2.0) + "it").encode() in err
assert (str(N / 2.0) + "it").encode() not in err
@mark.slow
@mark.skipif(IS_WIN, reason="no manpages on windows")
def test_manpath(tmp_path):
"""Test CLI --manpath"""
man = tmp_path / "tqdm.1"
assert not man.exists()
with raises(SystemExit):
main(argv=['--manpath', str(tmp_path)])
assert man.is_file()
@mark.slow
@mark.skipif(IS_WIN, reason="no completion on windows")
def test_comppath(tmp_path):
"""Test CLI --comppath"""
man = tmp_path / "tqdm_completion.sh"
assert not man.exists()
with raises(SystemExit):
main(argv=['--comppath', str(tmp_path)])
assert man.is_file()
# check most important options appear
script = man.read_text()
opts = {'--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
'--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
'--manpath', '--comppath'}
assert all(args in script for args in opts)
@restore_sys
def test_exceptions(capsysbin):
"""Test CLI Exceptions"""
N = 123
sys.stdin = [str(i) + '\n' for i in _range(N)]
IN_DATA = ''.join(sys.stdin).encode()
with raises(TqdmKeyError, match="bad_arg_u_ment"):
main(sys.stderr, argv=['-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmTypeError, match="invalid_bool_value"):
main(sys.stderr, argv=['-ascii', '-unit_scale', 'invalid_bool_value'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmTypeError, match="invalid_int_value"):
main(sys.stderr, argv=['-ascii', '--total', 'invalid_int_value'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
with raises(TqdmKeyError, match="Can only have one of --"):
main(sys.stderr, argv=['--update', '--update_to'])
out, _ = capsysbin.readouterr()
assert norm(out) == IN_DATA
# test SystemExits
for i in ('-h', '--help', '-v', '--version'):
with raises(SystemExit):
main(argv=[i])
| 32.632653
| 86
| 0.591245
| 0
| 0
| 0
| 0
| 6,887
| 0.861413
| 0
| 0
| 2,023
| 0.253033
|
c02ddc618f6444651370434e959eed89c5b43ed2
| 2,881
|
py
|
Python
|
plugins/commands.py
|
Kalpesh0/Project01
|
42383a3aa4a3f17ab69dd01357bfbb0740ba965b
|
[
"MIT"
] | null | null | null |
plugins/commands.py
|
Kalpesh0/Project01
|
42383a3aa4a3f17ab69dd01357bfbb0740ba965b
|
[
"MIT"
] | null | null | null |
plugins/commands.py
|
Kalpesh0/Project01
|
42383a3aa4a3f17ab69dd01357bfbb0740ba965b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @REQUEST_M0viz
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from script import script
@Client.on_message(filters.command(["start"]) & filters.private)
async def start(client, message):
try:
await message.reply_text(
text=script.START_MSG.format(message.from_user.mention),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("HELP", callback_data="help_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data"),
],
[
InlineKeyboardButton(
"🖤JOIN SUPPORT GROUP🖤", url="https://t.me/REQUEST_M0viz")
]
]
),
reply_to_message_id=message.message_id
)
except Exception:
pass
@Client.on_message(filters.command(["help"]) & filters.private)
async def help(client, message):
try:
await message.reply_text(
text=script.HELP_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("BACK", callback_data="start_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data"),
],
[
InlineKeyboardButton(
"💕DONATE US 💕", url="https://t.me/Harshsoni_08")
]
]
),
reply_to_message_id=message.message_id
)
except Exception:
pass
@Client.on_message(filters.command(["about"]) & filters.private)
async def about(client, message):
try:
await message.reply_text(
text=script.ABOUT_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("START", callback_data="start_data"),
],
[
InlineKeyboardButton(
"SHARE OUR GROUP 🖤🤙", url="http://t.me/share/url?url=Hey%20There%E2%9D%A4%EF%B8%8F%2C%0A%20%0A%20I%20Found%20A%20Really%20Awesome%20Group%20%20For%20Searching%20Movies%20Hope%20You%20will%20Join%20This%20Group%20Too😁😁👍%E2%9D%A4%EF%B8%8F%E2%9D%A4%EF%B8%8F%E2%9D%A4%EF%B8%8F%0A%20%0A%20Group%20Sharing%20Username%20Link%20%3A-%20%40REQUEST_M0viz")
]
]
),
reply_to_message_id=message.message_id
)
except Exception:
pass
| 36.468354
| 373
| 0.532107
| 0
| 0
| 0
| 0
| 2,702
| 0.929161
| 2,508
| 0.862448
| 646
| 0.222146
|
c02e20b826436ffe36a314fe9b02b8f0b79df9d5
| 3,583
|
py
|
Python
|
gpjax/utils.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 44
|
2020-12-03T14:07:39.000Z
|
2022-03-14T17:45:34.000Z
|
gpjax/utils.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 28
|
2020-12-05T08:54:45.000Z
|
2022-03-01T09:56:50.000Z
|
gpjax/utils.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 7
|
2021-02-05T12:37:57.000Z
|
2022-03-13T13:00:20.000Z
|
from copy import deepcopy
from typing import Tuple
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve
from multipledispatch import dispatch
from .types import Array
def I(n: int) -> Array:
"""
Compute an n x n identity matrix.
:param n: The size of the matrix.
:return: An n x n identity matrix.
"""
return jnp.eye(n)
def concat_dictionaries(a: dict, b: dict) -> dict:
"""
Append one dictionary below another. If duplicate keys exist, then the key-value pair of the second supplied
dictionary will be used.
"""
return {**a, **b}
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
"""
This will return a complete dictionary based on the keys of the first dictionary. If the same key exists in the
second dictionary, then the key-value pair from the first dictionary will be overwritten. The purpose of this is that
the base_dict will be a complete dictionary of values such that an incomplete second dictionary can be used to
update specific key-value pairs.
:param base_dict: Complete dictionary of key-value pairs.
:param in_dict: Subset of key-value pairs such that values from this dictionary will take precedence.
:return: A merged single dictionary.
"""
for k, v in base_dict.items():
if k in in_dict.keys():
base_dict[k] = in_dict[k]
return base_dict
def sort_dictionary(base_dict: dict) -> dict:
"""
Sort a dictionary based on the dictionary's key values.
:param base_dict: The unsorted dictionary.
:return: A dictionary sorted alphabetically on the dictionary's keys.
"""
return dict(sorted(base_dict.items()))
@dispatch(jnp.DeviceArray)
def standardise(x: jnp.DeviceArray) -> Tuple[jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray]:
"""
Standardise a given matrix such that values are distributed according to a unit normal random variable. This is
primarily designed for standardising a training dataset.
:param x: A matrix of unstandardised values
:return: A matrix of standardised values
"""
xmean = jnp.mean(x, axis=0)
xstd = jnp.std(x, axis=0)
return (x - xmean) / xstd, xmean, xstd
@dispatch(jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray)
def standardise(
x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
"""
Standardise a given matrix with respect to a given mean and standard deviation. This is primarily designed for
standardising a test set of data with respect to the training data.
:param x: A matrix of unstandardised values
:param xmean: A precomputed mean vector
:param xstd: A precomputed standard deviation vector
:return: A matrix of standardised values
"""
return (x - xmean) / xstd
def unstandardise(
x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
"""
Unstandardise a given matrix with respect to a previously computed mean and standard deviation. This is designed
for remapping a matrix back onto its original scale.
:param x: A standardised matrix.
:param xmean: A mean vector.
:param xstd: A standard deviation vector.
:return: A matrix of unstandardised values.
"""
return (x * xstd) + xmean
def as_constant(parameter_set: dict, params: list) -> Tuple[dict, dict]:
base_params = deepcopy(parameter_set)
sparams = {}
for param in params:
sparams[param] = base_params[param]
del base_params[param]
return base_params, sparams
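A small round-trip sketch of the standardise/unstandardise pair above, written with plain numpy so it runs without jax (the data values are arbitrary):

import numpy as np

x = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
xmean, xstd = x.mean(axis=0), x.std(axis=0)
x_std = (x - xmean) / xstd        # standardise: zero mean, unit variance per column
x_back = (x_std * xstd) + xmean   # unstandardise: map back onto the original scale
assert np.allclose(x, x_back)
print(x_std.mean(axis=0), x_std.std(axis=0))  # approximately [0, 0] and [1, 1]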
| 33.485981
| 117
| 0.706391
| 0
| 0
| 0
| 0
| 1,112
| 0.310354
| 0
| 0
| 2,109
| 0.588613
|
c02f4130e68bf1161bbd1638142bdd926a75ebe7
| 999
|
py
|
Python
|
tools/replace_version.py
|
jasmcaus/image-deep-learning-keras
|
11ff37867c2cb86a92aceb0ac24accb3607e3635
|
[
"MIT"
] | 681
|
2020-08-13T09:34:41.000Z
|
2022-03-19T15:38:04.000Z
|
tools/replace_version.py
|
jasmcaus/image-deep-learning-keras
|
11ff37867c2cb86a92aceb0ac24accb3607e3635
|
[
"MIT"
] | 30
|
2020-11-03T19:23:14.000Z
|
2021-10-13T17:19:34.000Z
|
tools/replace_version.py
|
jasmcaus/image-deep-learning-keras
|
11ff37867c2cb86a92aceb0ac24accb3607e3635
|
[
"MIT"
] | 127
|
2020-11-03T19:14:30.000Z
|
2022-03-17T12:01:32.000Z
|
import os
def replace_version(old_version, new_version):
if not isinstance(old_version, tuple) or not isinstance(new_version, tuple):
raise ValueError("`old_version` and `new_version` must be version tuples, e.g. (1, 2, 3)")
major, minor, micro = old_version[:3]
old_version = f'{major}.{minor}.{micro}'
major, minor, micro = new_version[:3]
new_version = f'{major}.{minor}.{micro}'
print(f"New version = {new_version}")
for root, _, files in os.walk('../caer'):
for file in files:
if file.endswith(('.py', '.cpp', '.c', '.h', '.hpp')):
with open(os.path.abspath(os.path.join(root, file)), 'r') as f:
new_text = f.read().replace('version ' + old_version, 'version ' + new_version)
with open(os.path.abspath(os.path.join(root, file)), 'w') as f:
print(os.path.abspath(os.path.join(root, file)))
f.write(new_text)
replace_version((1,8,0), (3,9,1))
| 39.96
| 99
| 0.585586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 212
| 0.212212
|
c03051f9b68bca498dcb3a06e10906ed145e2649
| 10,625
|
py
|
Python
|
DashExperiments/make_plot.py
|
magruener/reconstructing-proprietary-video-streaming-algorithms
|
29917b64e25a81561db7629fbd97e4a935146825
|
[
"MIT"
] | 9
|
2020-09-07T17:24:13.000Z
|
2022-03-12T23:41:47.000Z
|
DashExperiments/make_plot.py
|
magruener/reconstructing-proprietary-video-streaming-algorithms
|
29917b64e25a81561db7629fbd97e4a935146825
|
[
"MIT"
] | null | null | null |
DashExperiments/make_plot.py
|
magruener/reconstructing-proprietary-video-streaming-algorithms
|
29917b64e25a81561db7629fbd97e4a935146825
|
[
"MIT"
] | 2
|
2020-11-13T06:49:54.000Z
|
2021-01-28T12:24:20.000Z
|
import argparse
import math
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
import pandas as pd
import seaborn as sns
sns.set()
sns.set_context("talk")
NUM_BINS = 100
path = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
video_mappings = {}
video_mappings['300'] = '320x180x30_vmaf_score'
video_mappings['750'] = '640x360x30_vmaf_score'
video_mappings['1200'] = '768x432x30_vmaf_score'
video_mappings['1850'] = '1024x576x30_vmaf_score'
video_mappings['2850'] = '1280x720x30_vmaf_score'
video_mappings['4300'] = '1280x720x60_vmaf_score'
metric_list = ["reward_vmaf", "reward_br", "rebuf", "br_avg", "vmaf_avg", "switching_vmaf", "switching_br"]
#MINERVA
rebuf_penalty = 25
switching_penalty = 2.5
segment_length = 4.0
def load_csv():
video_info = pd.read_csv(path)
return video_info
pensieve_video_csv = load_csv()
def get_qoe(abr, trace):
logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
logfile = os.path.join(logdir, abr + "_rewards_0.log")
reward_vmaf = 0
reward_bitrate = 0
total_rebuffering = 0.0
vmaf_avg = 0.0
vmaf_switching_avg = 0.0
bitrate_avg = 0.0
bitrate_switching_avg = 0.0
with open(logfile, "r") as fin:
reward_lines = fin.readlines()
if (len(reward_lines) != args.video_chunks):
if len(reward_lines) < args.video_chunks:
to_clean.append(logfile)
print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
print("Skip, please")
return None, None, None, None, None, None, None
for i, r_line in enumerate(reward_lines):
data = r_line.split()
if i == 0:
br = int(data[1])
br_previous = br
vmaf_previous = pensieve_video_csv.loc[i, video_mappings[str(br)]]
else: # skip first
br = int(data[1])
bitrate_avg += br
bitrate_switching_avg += abs(br - br_previous)
reward_bitrate += float(data[-1])
total_rebuffering += float(data[3])
vmaf_current = pensieve_video_csv.loc[i, video_mappings[str(br)]]
vmaf_avg += vmaf_current
vmaf_switching_avg += abs(vmaf_current - vmaf_previous)
reward_vmaf += (float(vmaf_current) -
rebuf_penalty*(float(data[3])) -
switching_penalty*(abs(vmaf_current - vmaf_previous)))
vmaf_previous = vmaf_current
br_previous = br
return reward_vmaf,\
reward_bitrate,\
total_rebuffering,\
bitrate_switching_avg/(segment_length*args.video_chunks),\
vmaf_switching_avg/(segment_length*args.video_chunks),\
vmaf_avg/(segment_length*args.video_chunks),\
bitrate_avg/args.video_chunks
#
#def get_qoe(abr, trace):
# logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
# logfile = os.path.join(logdir, abr + "_rewards_0.log")
#
# reward = 0
#
#
# with open(logfile, "r") as fin:
# reward_lines = fin.readlines()
#
# if (len(reward_lines) != args.video_chunks):
# if len(reward_lines) < args.video_chunks:
# to_clean.append(logfile)
# print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
# print("Skip, please")
# return None
#
# for i, r_line in enumerate(reward_lines):
# if i > 0: # skip first
# reward += float(r_line.split()[-1])
#
# return reward
def get_qoes(abrs_list, traces_list):
global_results = {}
for abr in abrs_list:
global_results[abr] = {}
global_results[abr]['reward_vmaf'] = []
global_results[abr]['reward_br'] = []
global_results[abr]['rebuf'] = []
global_results[abr]['switching_br'] = []
global_results[abr]['switching_vmaf'] = []
global_results[abr]['vmaf_avg'] = []
global_results[abr]['br_avg'] = []
for trace in traces_list:
reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
if reward_vmaf is not None:
global_results[abr]['reward_vmaf'].append(reward_vmaf)
global_results[abr]['reward_br'].append(reward_br)
global_results[abr]['rebuf'].append(rebuf)
global_results[abr]['switching_br'].append(switching_br)
global_results[abr]['switching_vmaf'].append(switching_vmaf)
global_results[abr]['vmaf_avg'].append(vmaf_avg)
global_results[abr]['br_avg'].append(br_avg)
return global_results
def get_qoes_partial(abrs_list, traces_list):
total_experiments_expected = len(args.abrs) * len(args.traces)
experiments_executed_so_far = 0
partial_results = {}
for abr in abrs_list:
partial_results[abr] = {}
partial_results[abr]['reward_vmaf'] = []
partial_results[abr]['reward_br'] = []
partial_results[abr]['rebuf'] = []
partial_results[abr]['switching_br'] = []
partial_results[abr]['switching_vmaf'] = []
partial_results[abr]['vmaf_avg'] = []
partial_results[abr]['br_avg'] = []
for trace in traces_list:
logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
if os.path.exists(logdir):
reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
if reward_vmaf is not None:
partial_results[abr]['reward_vmaf'].append(reward_vmaf)
partial_results[abr]['reward_br'].append(reward_br)
partial_results[abr]['rebuf'].append(rebuf)
partial_results[abr]['switching_br'].append(switching_br)
partial_results[abr]['switching_vmaf'].append(switching_vmaf)
partial_results[abr]['vmaf_avg'].append(vmaf_avg)
partial_results[abr]['br_avg'].append(br_avg)
experiments_executed_so_far += 1
if not partial_results[abr]['reward_vmaf']:  # no results were collected for this ABR
del partial_results[abr]
print("Experiment executed: {}/{}".format(experiments_executed_so_far, total_experiments_expected))
return partial_results
def plot_cdf(results, reward_key):
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
def average_of_the_best():
avg_best = -1000000000000
abr_best = ''
for scheme in results.keys():
avg_tmp = np.mean(results[scheme][reward_key])
if avg_best < avg_tmp:
avg_best = avg_tmp
abr_best = scheme
print("Best provider in average is {} with {}".format(abr_best, avg_best))
return abs(avg_best)
schemes = []
norm = average_of_the_best()
markers = ['.', ',', 'o', 'v', '^', '>', '<', 's', 'x', 'D', 'd', '*', '_', '']
for i, scheme in enumerate(results.keys()):
values = [float(i)/norm for i in results[scheme][reward_key]]
values, base = np.histogram(values, bins=len(values))
cumulative = np.cumsum(values)
cumulative = [float(i) / len(values) * 100 for i in cumulative]
marker_index = i % len(markers)
ax.plot(base[:-1], cumulative, linewidth=3, marker=markers[marker_index], markevery=2, markersize=15)
schemes.append(scheme)
ax.legend(schemes, loc=2)
ax.set_xlim(-1.0, 1.8)
plt.ylabel('CDF')
plt.xlabel('total reward')
fig.savefig(os.path.join(args.store_dir, 'cdf_{}.png'.format(reward_key)))
def plot_bar(results, metric):
results_metric_avg = {}
for scheme in results.keys():
results_metric_avg[scheme] = np.mean(results[scheme][metric])
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
y_pos = np.arange(len(results_metric_avg.keys()))
ax.bar(y_pos, results_metric_avg.values())
ax.set_xticks(y_pos)
ax.set_xticklabels(results_metric_avg.keys())
fig.savefig(os.path.join(args.store_dir, 'bar_{}.png'.format(metric)))
def clean():
timestamps = []
for c in to_clean:
timestamp_creation = os.path.getmtime(c)
timestamps.append(timestamp_creation)
print("File {} was created at {}".format(c, timestamp_creation))
timestamps.sort()
if not args.include_last and len(timestamps) >= 1:
print("Skipping file created at {}: might be still running".format(timestamps[-1]))
del timestamps[-1]
removing = []
for t in timestamps:
for c in to_clean:
if os.path.getmtime(c) == t:
print("Removing {}".format(os.path.dirname(os.path.dirname(c))))
removing.append(os.path.dirname(os.path.dirname(c)))
for r in removing:
shutil.rmtree(r)
def main():
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('result_dir', help='result directory', type=str)
parser.add_argument('store_dir', help='result directory', type=str)
parser.add_argument('video_chunks', help='result directory', type=int)
parser.add_argument("--abrs", nargs="+", help='ABR list')
parser.add_argument("--traces", nargs="+", help='Traces list')
parser.add_argument('--partial', action="store_true", help="get the partial results")
parser.add_argument('--allow_cleaning', action="store_true", help="if enabled, cleans the experiments that failed, apart from the most recent one (which might still be running)")
parser.add_argument('--include_last', action="store_true", help="if enabled, also the last is getting cleaned")
# args need to be global for simplicity
global args
args = parser.parse_args()
global to_clean
to_clean = []
if not os.path.exists(args.store_dir):
os.makedirs(args.store_dir)
if args.partial:
res = get_qoes_partial(args.abrs, args.traces)
else:
res = get_qoes(args.abrs, args.traces)
for metric in metric_list:
if "reward" in metric:
plot_cdf(res, metric)
plot_bar(res, metric)
if args.allow_cleaning:
print("Executing cleaning")
clean()
if __name__ == "__main__":
main()
| 34.836066
| 174
| 0.609318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,216
| 0.208565
|
c0319cff4f20f991c3fca5acb4a16d11e559fce4
| 936
|
py
|
Python
|
aiopoke/objects/utility/common_models/encounter.py
|
beastmatser/aiopokeapi
|
6ffe10bf8db0b6349cabf5b5b01b738214f805d0
|
[
"MIT"
] | 3
|
2021-10-03T13:49:47.000Z
|
2022-03-31T03:12:30.000Z
|
aiopoke/objects/utility/common_models/encounter.py
|
beastmatser/aiopokeapi
|
6ffe10bf8db0b6349cabf5b5b01b738214f805d0
|
[
"MIT"
] | 3
|
2022-01-18T07:31:08.000Z
|
2022-01-18T07:32:09.000Z
|
aiopoke/objects/utility/common_models/encounter.py
|
beastmatser/aiopokeapi
|
6ffe10bf8db0b6349cabf5b5b01b738214f805d0
|
[
"MIT"
] | 1
|
2022-01-19T12:35:09.000Z
|
2022-01-19T12:35:09.000Z
|
from typing import TYPE_CHECKING, Any, Dict, List
from aiopoke.utils.minimal_resources import MinimalResource
from aiopoke.utils.resource import Resource
if TYPE_CHECKING:
from aiopoke.objects.resources import EncounterConditionValue, EncounterMethod
class Encounter(Resource):
min_level: int
max_level: int
condition_values: List[MinimalResource["EncounterConditionValue"]]
chance: int
method: MinimalResource["EncounterMethod"]
def __init__(
self,
*,
min_level: int,
max_level: int,
condition_values: List[Dict[str, Any]],
chance: int,
method: Dict[str, Any],
):
self.min_level = min_level
self.max_level = max_level
self.condition_values = [
MinimalResource(**condition_value) for condition_value in condition_values
]
self.chance = chance
self.method = MinimalResource(**method)
| 28.363636
| 86
| 0.681624
| 676
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.044872
|
c03228b2ca7a1e5710fd3d833f2457053dd2e585
| 834
|
py
|
Python
|
2015/solutions/day1.py
|
rsizem2/aoc_2020
|
aa2dbf72a4c44930755bd9cc132ad7854f742f09
|
[
"MIT"
] | null | null | null |
2015/solutions/day1.py
|
rsizem2/aoc_2020
|
aa2dbf72a4c44930755bd9cc132ad7854f742f09
|
[
"MIT"
] | null | null | null |
2015/solutions/day1.py
|
rsizem2/aoc_2020
|
aa2dbf72a4c44930755bd9cc132ad7854f742f09
|
[
"MIT"
] | null | null | null |
def read_file(test = True):
if test:
filename = '../tests/day1.txt'
else:
filename = '../input/day1.txt'
with open(filename) as file:
temp = list()
for line in file:
temp.append(line.strip())
return temp
def puzzle1():
temp = read_file(False)[0]
floor = 0
for char in temp:
if char == '(':
floor += 1
elif char == ')':
floor -= 1
else:
raise ValueError
print(floor)
def puzzle2():
temp = read_file(False)[0]
floor = 0
for i, char in enumerate(temp, start = 1):
if char == '(':
floor += 1
elif char == ')':
floor -= 1
else:
raise ValueError
if floor == -1:
break
print(i)
puzzle1()
puzzle2()
| 20.85
| 46
| 0.464029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.059952
|
c032d43f5d12902206b5df36fccb87158ca21d3e
| 466
|
py
|
Python
|
setup.py
|
Kamuish/StarSearch
|
63e5f6ee544ab1d48ae5b0d8e9067cedccc40d1e
|
[
"MIT"
] | null | null | null |
setup.py
|
Kamuish/StarSearch
|
63e5f6ee544ab1d48ae5b0d8e9067cedccc40d1e
|
[
"MIT"
] | null | null | null |
setup.py
|
Kamuish/StarSearch
|
63e5f6ee544ab1d48ae5b0d8e9067cedccc40d1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='starsearch',
version='0.3',
description='Package to dig into the ESO archives',
author='João Camacho',
author_email='joao.camacho@astro.up.pt',
license='MIT',
url='https://github.com/jdavidrcamacho/starsearch',
packages=['starsearch'],
install_requires=[
'numpy',
'astroquery',
"astropy",
],
)
| 24.526316
| 57
| 0.592275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.496788
|
c033f2c7fd8a3e95e76135943dd54b89791b98ca
| 4,192
|
py
|
Python
|
test/integration/test_genomes.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
test/integration/test_genomes.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | 6
|
2021-11-11T20:57:49.000Z
|
2021-12-10T15:30:33.000Z
|
test/integration/test_genomes.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
import os
import tempfile
from unittest.mock import patch
from galaxy.exceptions import (
ObjectNotFound,
ReferenceDataError,
)
from galaxy_test.driver import integration_util
BUILDS_DATA = (
"?\tunspecified (?)",
"hg_test\tdescription of hg_test",
"hg_test_nolen\tdescription of hg_test_nolen",
)
LEN_DATA = (
"chr1\t248956422",
"chr2\t242193529",
"chr3\t198295559",
)
def get_key(has_len_file=True):
pos = 1 if has_len_file else 2
return BUILDS_DATA[pos].split("\t")[0]
class GenomesTestCase(integration_util.IntegrationTestCase):
@classmethod
def handle_galaxy_config_kwds(cls, config):
genomes_dir = cls.temp_config_dir("test_genomes")
os.makedirs(genomes_dir)
cls._setup_builds_file(config, genomes_dir)
cls._setup_len_file(config, genomes_dir)
@classmethod
def _setup_builds_file(cls, config, genomes_dir):
"""Create builds file + set config option."""
builds_file_path = os.path.join(genomes_dir, "builds.txt")
config["builds_file_path"] = builds_file_path
with open(builds_file_path, "w") as f:
f.write("\n".join(BUILDS_DATA))
@classmethod
def _setup_len_file(cls, config, genomes_dir):
"""Create len file + set config option."""
config["len_file_path"] = genomes_dir # the config option is a dir
key = get_key()
len_file_path = os.path.join(genomes_dir, f"{key}.len")
with open(len_file_path, "w") as f:
f.write("\n".join(LEN_DATA))
def test_index(self):
response = self._get("genomes")
self._assert_status_code_is(response, 200)
rval = response.json()
expected_data = [item.split("\t")[::-1] for item in BUILDS_DATA]
assert rval == expected_data
def test_show_valid(self):
key = get_key()
response = self._get(f"genomes/{key}")
self._assert_status_code_is(response, 200)
rval = response.json()
assert rval["id"] == key
assert len(rval["chrom_info"]) == len(LEN_DATA)
def test_show_valid_no_refdata(self):
key = get_key(has_len_file=False)
response = self._get(f"genomes/{key}")
self._assert_status_code_is(response, 500)
assert response.json()["err_code"] == ReferenceDataError.err_code.code
def test_show_invalid(self):
response = self._get("genomes/invalid")
self._assert_status_code_is(response, 404)
assert response.json()["err_code"] == ObjectNotFound.err_code.code
def test_sequences(self):
class RefDataMock:
sequence = "test-value"
key = get_key()
with patch.object(self._app.genomes, "has_reference_data", return_value=True), patch.object(
self._app.genomes, "_get_reference_data", return_value=RefDataMock()
):
response = self._get(f"genomes/{key}/sequences")
self._assert_status_code_is(response, 200)
assert response.content == bytes(RefDataMock.sequence, "utf-8")
def test_sequences_no_data(self):
key = get_key()
with patch.object(self._app.genomes, "has_reference_data", return_value=False):
response = self._get(f"genomes/{key}/sequences")
self._assert_status_code_is(response, 500)
assert response.json()["err_code"] == ReferenceDataError.err_code.code
def test_indexes(self):
mock_key, mock_content, index_type, suffix = "mykey", "mydata", "fasta_indexes", ".fai"
# write some data to a tempfile
with tempfile.NamedTemporaryFile(dir=self._tempdir, suffix=suffix, mode="w", delete=False) as tf:
tf.write(mock_content)
# make a mock containing the path to the tempfile
tmpfile_path = tf.name[: -len(suffix)]  # chop off the extension
mock_data = [[mock_key, tmpfile_path]]
with patch.object(self._app.tool_data_tables.data_tables[index_type], "data", new=mock_data):
response = self._get(f"genomes/{mock_key}/indexes?type={index_type}")
self._assert_status_code_is(response, 200)
assert response.content == bytes(mock_content, "utf-8")
| 37.765766
| 105
| 0.659351
| 3,672
| 0.875954
| 0
| 0
| 947
| 0.225906
| 0
| 0
| 796
| 0.189885
|
c034484825d157d2b2d547cd6cfeff947673d5f5
| 2,310
|
py
|
Python
|
examples/exersice2DimRed.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
examples/exersice2DimRed.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
examples/exersice2DimRed.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
def darius1(numberDirectory):
path = ""
if(numberDirectory == 1):
directorys = [
['training_setA/training/', 'p0']
]
if(numberDirectory == 2):
directorys = [
['training_setB/training/', 'p1']
]
if(numberDirectory == 3):
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training/', 'p1']
]
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
dfs.append(df_temp)
df = pd.concat(dfs)
#df_no_nan = df.dropna()
df_nan_zero = df.replace(np.NaN, 0)
df_nan_zero.head(n=50)
df_nan_none = df.replace(np.NaN, None)
df_nan_none.head(n=50)
df_nan_mean = df.fillna(df.mean())
df_nan_mean.head(n=50)
df_nan_none_2 = df.where(pd.notnull(df), None)
df_nan_none_2.head(n=50)
#df.shape
#df.head(n=80)
############################################################
# initializing the pacmap instance
# Setting n_neighbors to "None" leads to a default choice shown below in "parameter" section
embedding = pacmap.PaCMAP(n_dims=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
# fit the data (The index of transformed data corresponds to the index of the original data)
X_transformed = embedding.fit_transform(df_nan_none_2.values, init="pca")
# visualize the embedding
#fig, ax = plt.subplots(1, 1, figsize=(6, 6))
#ax.scatter(X_transformed[:, 0], X_transformed[:, 1], cmap="Spectral", s=0.6)
plt.scatter(X_transformed[:, 0], X_transformed[:, 1], cmap="Spectral")
plt.show()
#############################################################
X_embedded = TSNE(n_components=2, learning_rate='auto',init='random').fit_transform(df.values)
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.scatter(X_embedded[:, 0], X_embedded[:, 1], cmap="Spectral", s=0.6)
#############################################################
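A condensed sketch of the embedding-and-plotting pattern used above, run on random stand-in data; it mirrors the PaCMAP constructor arguments from the script (newer pacmap releases may name the dimension argument differently), and the array shape is arbitrary:

import numpy as np
import pacmap
import matplotlib.pyplot as plt

X = np.random.rand(500, 10)  # stand-in for the imputed patient-data matrix
embedding = pacmap.PaCMAP(n_dims=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
X_2d = embedding.fit_transform(X, init="pca")  # rows of X_2d align with rows of X
plt.scatter(X_2d[:, 0], X_2d[:, 1], s=2)
plt.show()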
| 29.615385
| 98
| 0.587446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 754
| 0.326407
|
c0347d378ceb67aeed162b5a86aeec563c7f0a79
| 5,757
|
py
|
Python
|
release.py
|
jhofmann/yubiauth
|
724feb45b54db196af406edf87f2bfcc2e849842
|
[
"BSD-2-Clause"
] | 17
|
2015-01-06T16:28:55.000Z
|
2021-11-21T15:26:01.000Z
|
release.py
|
DalavanCloud/yubiauth
|
42292de043f8e106384796ff233be0b2dc930f60
|
[
"BSD-2-Clause"
] | 4
|
2015-09-11T14:00:14.000Z
|
2017-05-25T15:00:17.000Z
|
release.py
|
DalavanCloud/yubiauth
|
42292de043f8e106384796ff233be0b2dc930f60
|
[
"BSD-2-Clause"
] | 9
|
2015-03-11T22:37:47.000Z
|
2022-03-01T21:17:35.000Z
|
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from distutils import log
from distutils.core import Command
from distutils.errors import DistutilsSetupError
import os
import re
from datetime import date
class release(Command):
description = "create and release a new version"
user_options = [
('keyid', None, "GPG key to sign with"),
('skip-tests', None, "skip running the tests"),
('pypi', None, "publish to pypi"),
]
boolean_options = ['skip-tests', 'pypi']
def initialize_options(self):
self.keyid = None
self.skip_tests = 0
self.pypi = 0
def finalize_options(self):
self.cwd = os.getcwd()
self.fullname = self.distribution.get_fullname()
self.name = self.distribution.get_name()
self.version = self.distribution.get_version()
def _verify_version(self):
with open('NEWS', 'r') as news_file:
line = news_file.readline()
now = date.today().strftime('%Y-%m-%d')
if not re.search(r'Version %s \(released %s\)' % (self.version, now),
line):
raise DistutilsSetupError("Incorrect date/version in NEWS!")
def _verify_tag(self):
if os.system('git tag | grep -q "^%s\$"' % self.fullname) == 0:
raise DistutilsSetupError(
"Tag '%s' already exists!" % self.fullname)
def _sign(self):
if os.path.isfile('dist/%s.tar.gz.asc' % self.fullname):
# Signature exists from upload, re-use it:
sign_opts = ['--output dist/%s.tar.gz.sig' % self.fullname,
'--dearmor dist/%s.tar.gz.asc' % self.fullname]
else:
# No signature, create it:
sign_opts = ['--detach-sign', 'dist/%s.tar.gz' % self.fullname]
if self.keyid:
sign_opts.insert(1, '--default-key ' + self.keyid)
self.execute(os.system, ('gpg ' + (' '.join(sign_opts)),))
if os.system('gpg --verify dist/%s.tar.gz.sig' % self.fullname) != 0:
raise DistutilsSetupError("Error verifying signature!")
def _tag(self):
tag_opts = ['-s', '-m ' + self.fullname, self.fullname]
if self.keyid:
tag_opts[0] = '-u ' + self.keyid
self.execute(os.system, ('git tag ' + (' '.join(tag_opts)),))
def _do_call_publish(self, cmd):
self._published = os.system(cmd) == 0
def _publish(self):
web_repo = os.getenv('YUBICO_GITHUB_REPO')
if web_repo and os.path.isdir(web_repo):
artifacts = [
'dist/%s.tar.gz' % self.fullname,
'dist/%s.tar.gz.sig' % self.fullname
]
cmd = '%s/publish %s %s %s' % (
web_repo, self.name, self.version, ' '.join(artifacts))
self.execute(self._do_call_publish, (cmd,))
if self._published:
self.announce("Release published! Don't forget to:", log.INFO)
self.announce("")
self.announce(" (cd %s && git push)" % web_repo, log.INFO)
self.announce("")
else:
self.warn("There was a problem publishing the release!")
else:
self.warn("YUBICO_GITHUB_REPO not set or invalid!")
self.warn("This release will not be published!")
def run(self):
if os.getcwd() != self.cwd:
raise DistutilsSetupError("Must be in package root!")
self._verify_version()
self._verify_tag()
self.execute(os.system, ('git2cl > ChangeLog',))
if not self.skip_tests:
self.run_command('check')
# Nosetests calls sys.exit(status)
try:
self.run_command('nosetests')
except SystemExit as e:
if e.code != 0:
raise DistutilsSetupError("There were test failures!")
self.run_command('sdist')
if self.pypi:
cmd_obj = self.distribution.get_command_obj('upload')
cmd_obj.sign = True
if self.keyid:
cmd_obj.identity = self.keyid
self.run_command('upload')
self._sign()
self._tag()
self._publish()
self.announce("Release complete! Don't forget to:", log.INFO)
self.announce("")
self.announce(" git push && git push --tags", log.INFO)
self.announce("")
| 38.125828
| 78
| 0.605176
| 4,226
| 0.734063
| 0
| 0
| 0
| 0
| 0
| 0
| 2,399
| 0.41671
|
c034cf5f5c4b3712b5f752c6874beaede0ef7f49
| 10,949
|
py
|
Python
|
fabfile.py
|
8081594571/bgtools_web
|
f99389788f6e8db0d1b7781f41af819efd7e9dc2
|
[
"MIT"
] | 1
|
2020-10-01T15:56:12.000Z
|
2020-10-01T15:56:12.000Z
|
fabfile.py
|
Arvindvishwakarma/bgtools_web
|
82b03c49e00a6ffcc563289c68bcf2a7a6985633
|
[
"MIT"
] | null | null | null |
fabfile.py
|
Arvindvishwakarma/bgtools_web
|
82b03c49e00a6ffcc563289c68bcf2a7a6985633
|
[
"MIT"
] | 1
|
2020-10-01T06:53:41.000Z
|
2020-10-01T06:53:41.000Z
|
# Credit goes to https://bitbucket.org/spookylukey/django-fabfile-starter/src
import os
import datetime as dt
from io import StringIO
import json
import posixpath
import fabric
import requests
from fabsettings import (USER, HOST, DJANGO_APP_NAME,
DJANGO_APPS_DIR, LOGS_ROOT_DIR,
APP_PORT, GUNICORN_WORKERS, DJANGO_PROJECT_NAME,
STAGING_APP_PORT)
def upload_template(c, filename, destination, context=None, template_dir=None):
"""
Render and upload a template text file to a remote host.
"""
text = None
template_dir = template_dir or os.getcwd()
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir))
context = context if context is not None else {}
text = jenv.get_template(filename).render(**context)
# Force to a byte representation of Unicode, or str()ification
# within Paramiko's SFTP machinery may cause decode issues for
# truly non-ASCII characters.
# text = text.encode('utf-8')
# Upload the file.
return c.put(
StringIO(text),
destination,
)
def venv(c):
"""
Runs a command in a virtualenv (which has been specified using
the virtualenv context manager
"""
return c.prefix("source {}/bin/activate".format(c.config.bgtools.VENV_DIR))
def install_dependencies(c):
ensure_virtualenv(c)
with venv(c), c.cd(c.config.bgtools.SRC_DIR):
c.run("pip install -U -r requirements.txt")
def file_exists(c, path):
print('checking existence of: {}: {}'.format(path, bool(c.run('stat {}'.format(path), hide=True, warn=True))))
return c.run('stat {}'.format(path), hide=True, warn=True).ok
def ensure_virtualenv(c):
args = c.config.bgtools
ensure_dir(c, args.SRC_DIR)
if file_exists(c, args.VENV_DIR):
return
with c.cd(args.DJANGO_APP_ROOT):
c.run("virtualenv --no-site-packages --python={} {}".format(
args.PYTHON_BIN, args.venv_subdir))
c.run("echo {} > {}/lib/{}/site-packages/projectsource.pth".format(
args.SRC_DIR, args.venv_subdir, args.PYTHON_BIN))
def ensure_dir(c, d):
print('checking existence of {} on {}'.format(d, c))
if not file_exists(c, d):
# note that the parent directory needs to already exist, usually by making a custom app
# with the correct name in the webfaction control panel
print('making {}'.format(d))
c.run("mkdir -p {}".format(d))
def copy_settings(c):
args = c.config.bgtools
with c.cd(args.LOCAL_DIR):
fname = 'settings_{}.py'.format(args.mode)
c.local('cp {} bgtools/bgtools/private_settings.py'.format(fname))
c.local('echo STAGING={} >> bgtools/bgtools/private_settings.py'.format('True' if args.staging else 'False'))
def rsync(c, src, dest):
args = c.config.bgtools
c.local('rsync -avz {} {}:{}'.format(src,
args.host,
dest))
def rsync_source(c):
"""
rsync the source over to the server
"""
args = c.config.bgtools
rsync(c, os.path.join(args.LOCAL_DIR, 'bgtools'), args.DJANGO_APP_ROOT)
def collect_static(c):
"""
Collect django static content on server
"""
with venv(c), c.cd(c.config.bgtools.SRC_DIR):
c.run('python manage.py collectstatic --no-input')
def checkout_and_install_libs(c):
args = c.config.bgtools
libs = json.load(open('libs.json'))
ensure_dir(c, args.CHECKOUT_DIR)
with c.cd(args.CHECKOUT_DIR):
for lib, params in libs.items():
print('handling ' + lib)
libdir = params['repo']
if libdir != 'local':
params['branch'] = args.branch
else:
with c.cd(args.LOCAL_DIR):
rsync(c, posixpath.join(params['path'], params['name']),
args.CHECKOUT_DIR)
with c.cd(params['name']), venv(c):
c.run('pip install -U .')
continue
github_url = 'https://github.com/{}/{}'.format(params['owner'], params['repo'])
if not file_exists(c, libdir):
c.run('git clone {}.git'.format(github_url))
with c.cd(libdir):
c.run('git fetch origin')
if args.mode == 'debug' or args.tag == 'head':
c.run('git checkout {}'.format(params['branch']))
c.run('git pull')
version = c.run('git rev-parse {}'.format(params['branch'])).stdout
version_url = '{}/commits/{}'.format(github_url, version)
elif args.mode == 'release':
tag = args.tag
if tag == 'latest':
tag = c.run('git tag -l "v*" --sort=-v:refname').stdout.split()[0]
c.run('git checkout {}'.format(tag))
version = tag
version_url = '{}/releases/tag/{}'.format(github_url, tag)
for src, target in params.get('extras', []):
with c.cd(args.LOCAL_DIR):
rsync(c, posixpath.join(args.LOCAL_DIR, 'extras', lib, src),
posixpath.join(args.CHECKOUT_DIR, libdir, target))
with venv(c):
c.run('pip install -U .')
with c.cd(args.SRC_DIR):
r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(params['owner'],
params['repo']))
changelog = r.json()
changelog = [{'url': ch['html_url'],
'date': dt.datetime.strptime(ch['published_at'][:10], '%Y-%m-%d').date(),
'name': ch['name'],
'tag': ch['tag_name'],
'description': ch['body']}
for ch in changelog]
for tname, context in [('version', {'version': version, 'url': version_url}),
('changelog', {'changelog': changelog})]:
print('uploading {}_{}.html'.format(lib, tname))
upload_template(c, '{}_template.html'.format(tname),
posixpath.join(args.SRC_DIR,
DJANGO_APP_NAME,
'templates',
DJANGO_APP_NAME,
'{}_{}.html'.format(lib, tname)),
context=context,
template_dir=posixpath.join(args.LOCAL_DIR, 'templates'))
@fabric.task
def stop_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
"""
Stop the webserver that is running the Django instance
"""
populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
c.run("kill $(cat {})".format(c.config.bgtools.GUNICORN_PIDFILE))
def _webserver_command(c):
args = c.config.bgtools
return ('{venv_dir}/bin/gunicorn '
'--error-logfile={error_logfile} '
'--access-logfile={access_logfile} '
'--capture-output '
'-b 127.0.0.1:{port} '
'-D -w {workers} --pid {pidfile} '
'{wsgimodule}:application').format(
**{'venv_dir': args.VENV_DIR,
'pidfile': args.GUNICORN_PIDFILE,
'wsgimodule': args.WSGI_MODULE,
'port': APP_PORT if not args.staging else STAGING_APP_PORT,
'workers': GUNICORN_WORKERS,
'error_logfile': args.GUNICORN_ERROR_LOGFILE,
'access_logfile': args.GUNICORN_ACCESS_LOGFILE}
)
@fabric.task
def start_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
"""
Starts the webserver that is running the Django instance
"""
populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
start_webserver_internal(c)
def start_webserver_internal(c):
print('starting new webserver: "{}"'.format(_webserver_command(c)))
with c.cd(c.config.bgtools.SRC_DIR):
c.run(_webserver_command(c), pty=False, echo=True)
@fabric.task(hosts=[HOST])
def restart_webserver(c, mode=None, tag=None, staging=None, branch=None):
"""
Restarts the webserver that is running the Django instance
"""
populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
restart_webserver_internal(c)
def restart_webserver_internal(c):
args = c.config.bgtools
if file_exists(c, args.GUNICORN_PIDFILE):
print('killing existing webserver')
c.run("kill -HUP $(cat {})".format(args.GUNICORN_PIDFILE), echo=True)
else:
start_webserver_internal(c)
def populate_arg(args, existing, argname):
return existing if existing is not None else args[argname]
def populate_args(c, **kwargs):
args = c.config.bgtools
# env.use_ssh_config = True
for k, v in kwargs.items():
print('setting {} to {}'.format(k, populate_arg(args, v, k)))
setattr(args, k, populate_arg(args, v, k))
project = DJANGO_PROJECT_NAME
if args.staging:
project += '_staging'
args.DJANGO_APP_ROOT = posixpath.join(DJANGO_APPS_DIR, project)
# Python version
args.PYTHON_BIN = "python3.5"
# env.PYTHON_PREFIX = "" # e.g. /usr/local Use "" for automatic
# env.PYTHON_FULL_PATH = (posixpath.join(env.PYTHON_PREFIX, 'bin', env.PYTHON_BIN)
# if env.PYTHON_PREFIX else env.PYTHON_BIN)
args.GUNICORN_PIDFILE = posixpath.join(args.DJANGO_APP_ROOT, 'gunicorn.pid')
args.GUNICORN_ERROR_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
'gunicorn_error_{}.log'.format(project))
args.GUNICORN_ACCESS_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
'gunicorn_access_{}.log'.format(project))
args.SRC_DIR = posixpath.join(args.DJANGO_APP_ROOT, DJANGO_PROJECT_NAME)
args.VENV_DIR = posixpath.join(args.DJANGO_APP_ROOT, args.venv_subdir)
args.CHECKOUT_DIR = posixpath.join(args.DJANGO_APP_ROOT, 'checkouts')
args.WSGI_MODULE = '{}.wsgi'.format(DJANGO_PROJECT_NAME)
args.LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
@fabric.task(hosts=[HOST])
def deploy(c, mode=None, staging=True, tag=None, branch=None):
populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
print(c.config.bgtools)
copy_settings(c)
rsync_source(c)
install_dependencies(c)
checkout_and_install_libs(c)
collect_static(c)
restart_webserver_internal(c)
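A minimal local sketch of the render-then-stage step inside upload_template above, using an in-memory jinja2 template (the template text and variables are made up; the actual c.put upload is not shown):

from io import StringIO
from jinja2 import Template

text = Template('version: {{ version }} ({{ url }})').render(
    version='v1.2.3', url='https://example.com/releases/v1.2.3')
buf = StringIO(text)  # this buffer is what upload_template hands to c.put(...)
print(buf.getvalue())  # version: v1.2.3 (https://example.com/releases/v1.2.3)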
| 38.017361
| 115
| 0.574664
| 0
| 0
| 0
| 0
| 1,223
| 0.1117
| 0
| 0
| 2,810
| 0.256644
|
c034fca0ee726969b9b040225228ff287755ee94
| 5,273
|
py
|
Python
|
Deep Thumbnail Face Classification and Verification/models/ShuffleNetV2.py
|
roycechan/portfolio
|
5e6a916031d2a3c60d2757483fc4765941d6f1f0
|
[
"MIT"
] | 1
|
2022-03-14T04:59:54.000Z
|
2022-03-14T04:59:54.000Z
|
Deep Thumbnail Face Classification and Verification/models/ShuffleNetV2.py
|
roycechan/portfolio
|
5e6a916031d2a3c60d2757483fc4765941d6f1f0
|
[
"MIT"
] | null | null | null |
Deep Thumbnail Face Classification and Verification/models/ShuffleNetV2.py
|
roycechan/portfolio
|
5e6a916031d2a3c60d2757483fc4765941d6f1f0
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.autograd import Variable
import config
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def conv(in_channels, out_channels, kernel_size, stride):
conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
return conv
def channel_shuffle(x, num_groups):
N, C, H, W = x.size()
x_reshape = x.reshape(N, num_groups, C // num_groups, H, W)
x_permute = x_reshape.permute(0, 2, 1, 3, 4)
return x_permute.reshape(N, C, H, W)
class BasicUnit(nn.Module):
def __init__(self, in_channels, splits=2, groups=2):
super(BasicUnit, self).__init__()
self.in_channels = in_channels
self.splits = splits
self.groups = groups
in_channels = int(in_channels / self.splits)
self.right = nn.Sequential(*[
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=False, groups=in_channels),
nn.BatchNorm2d(in_channels),
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True)
])
init_weights(self)
def forward(self, x):
split = torch.split(x, int(self.in_channels / self.splits), dim=1)
x_left, x_right = split
x_right = self.right(x_right)
x = torch.cat([x_left, x_right], dim=1)
out = channel_shuffle(x, self.groups)
# print("Basic Unit", out.size())
return out
class DownUnit(nn.Module):
def __init__(self, in_channels, out_channels, groups=2):
super(DownUnit, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.left = nn.Sequential(*[
nn.Conv2d(self.in_channels, self.in_channels, kernel_size=3, stride=2, bias=False, groups=self.in_channels),
nn.BatchNorm2d(self.in_channels),
nn.Conv2d(self.in_channels, self.out_channels // 2, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(self.out_channels // 2),
nn.ReLU(inplace=True)
])
self.right = nn.Sequential(*[
nn.Conv2d(self.in_channels, self.in_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(self.in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.in_channels, self.in_channels, kernel_size=3, stride=2, bias=False, groups=self.in_channels),
nn.BatchNorm2d(self.in_channels),
nn.Conv2d(self.in_channels, self.out_channels // 2, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(self.out_channels // 2),
nn.ReLU(inplace=True)
])
init_weights(self)
def forward(self, x):
x_left = self.left(x)
x_right = self.right(x)
x = torch.cat([x_left, x_right], dim=1)
out = channel_shuffle(x, self.groups)
# print("Down Unit", out.size())
return out
class ShuffleNetV2(nn.Module):
def __init__(self, n_class, net_size):
super(ShuffleNetV2, self).__init__()
out_channels = config.net_size[net_size]
num_blocks = config.net_blocks
self.conv1 = conv(in_channels=3, out_channels=out_channels[0],
kernel_size=config.conv1_kernel_size,
stride=config.conv1_stride)
self.in_channels = out_channels[0]
self.stage2 = self._make_stage(out_channels[1], num_blocks[0])
self.stage3 = self._make_stage(out_channels[2], num_blocks[1])
# self.stage4 = self._make_stage(out_channels[3], num_blocks[2])
self.conv5 = conv(in_channels=out_channels[2],
out_channels=out_channels[3],
kernel_size=config.conv5_kernel_size,
stride=config.conv5_stride)
self.global_pool = nn.AvgPool2d(kernel_size=config.global_pool_kernel_size)
self.fc = nn.Linear(out_channels[3], n_class)
def _make_stage(self, out_channels, num_blocks):
stage = []
stage.append(DownUnit(self.in_channels, out_channels))
for i in range(num_blocks):
stage.append(BasicUnit(out_channels))
self.in_channels = out_channels # update in_channels for next iter
return nn.Sequential(*stage)
def forward(self, x):
out = self.conv1(x)
out = self.stage2(out)
out = self.stage3(out)
# out = self.stage4(out)
out = self.conv5(out)
out = self.global_pool(out)
out = out.view(out.size(0), -1) # flatten
out = self.fc(out)
return out
def test():
net = ShuffleNetV2(2300, 2)
x = Variable(torch.randn(3, 3, 32, 32))
y = net(x)
print("end", y.size())
if __name__ == '__main__':
test()
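A stand-alone check of the channel shuffle trick used in the units above (the tensor size is arbitrary; with 8 channels and 2 groups the channels interleave as 0, 4, 1, 5, ...):

import torch

def shuffle_channels(x, num_groups):
    # Same reshape-permute-reshape pattern as channel_shuffle above.
    N, C, H, W = x.size()
    x = x.reshape(N, num_groups, C // num_groups, H, W)
    x = x.permute(0, 2, 1, 3, 4)
    return x.reshape(N, C, H, W)

x = torch.arange(8).float().reshape(1, 8, 1, 1)  # channel values 0..7
print(shuffle_channels(x, 2).flatten().tolist())  # [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0]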
| 36.365517
| 120
| 0.616537
| 4,323
| 0.819837
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.040015
|
c036324f468e909b938249cc16b70ee9b1588b7d
| 6,264
|
py
|
Python
|
warhorn_api.py
|
jagerkin/warbot
|
d30851a454b9eef45d5d4d095ae63e846229153d
|
[
"Apache-2.0"
] | 1
|
2021-12-23T05:09:01.000Z
|
2021-12-23T05:09:01.000Z
|
warhorn_api.py
|
jagerkin/warbot
|
d30851a454b9eef45d5d4d095ae63e846229153d
|
[
"Apache-2.0"
] | 1
|
2021-12-23T05:00:24.000Z
|
2021-12-23T05:00:24.000Z
|
warhorn_api.py
|
jagerkin/warbot
|
d30851a454b9eef45d5d4d095ae63e846229153d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Michael Olson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Warhorn GraphQL client."""
import collections.abc
import datetime
import logging
from typing import AsyncGenerator, Dict, Optional, Sequence, Tuple, Union
import pytz
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
from gql.transport.aiohttp import log as gql_logger
_QUERY = '''\
{{
eventSessions(
events: ["{slug}"],
startsAfter: "{startsAfter}") {{
nodes {{
status
scenario {{
name
}}
scenarioOffering {{
customName
}}
signupUrl
uuid
slot {{
timezone
startsAt
endsAt
}}
}}
}}
}}'''
_GQLNode = Optional[Union[str, Dict[str, '_GQLNode'], Sequence['_GQLNode']]]
class GraphNode:
"""Wrapper for GraphQL nodes that don't make the type system (or me) cry."""
__slots__ = ('_node', )
def __init__(self, node: _GQLNode) -> None:
"""Init a GraphNode.
Args:
node: GraphQL result node.
"""
self._node: _GQLNode = node
def path(self, *path: str) -> 'GraphNode': # pylint: disable=used-before-assignment
"""Resolve a path under this node.
Args:
path: Sequence of key values to lookup.
Returns:
Node navigated to, or a None node if no such node existed.
"""
node = self._node
for p in path:
if not isinstance(node, dict):
return GraphNode(None)
node = node.get(p)
return GraphNode(node)
@property
def str(self) -> str:
"""Return the node as a string if it is one, else ''."""
if isinstance(self._node, str):
return self._node
return ''
@property
def tuple(self) -> Tuple['GraphNode', ...]:
"""Return the node as a Tuple of GraphNodes if it's a sequence, else an empty tuple."""
if isinstance(self._node, collections.abc.Sequence):
return tuple(GraphNode(e) for e in self._node)
return tuple()
def _strings_exists(*strings: str) -> bool:
"""Check that all of the strings exist and none of them are just the str 'None'."""
for s in strings:
if s in ('', 'None'):
return False
return True
class Game:
"""Game holds the key information about a Warhorn D&D session."""
__slots__ = 'uuid', 'name', 'url', 'status', 'starts', 'ends'
def __init__(self, session: GraphNode) -> None:
"""Init new Game.
Args:
session: Warhorn GraphQL session node to extract game data from.
Throws:
ValueError: in the event of key missing values, like a start time.
"""
self.uuid: str = session.path('uuid').str
"""Warhorn session UUID."""
self.name: str = (
session.path('scenarioOffering', 'customName').str
or session.path('scenario', 'name').str)
"""Game scenario name."""
self.url = session.path('signupUrl').str
"""Warhorn session signup URL."""
self.status: str = session.path('status').str
"""Warhorn session status. (e.g. PUBLISHED, DRAFT, CANCELED)"""
starts = session.path('slot', 'startsAt').str
ends = session.path('slot', 'endsAt').str
tz_str = session.path('slot', 'timezone').str or 'US/Pacific'
if not _strings_exists(self.uuid, self.name, self.status, self.url, starts, ends, tz_str):
raise ValueError(f'Missing key values for game session: {session}')
tz = pytz.timezone(tz_str)
self.starts: datetime.datetime = datetime.datetime.fromisoformat(starts).astimezone(tz)
"""Game start time."""
self.ends: datetime.datetime = datetime.datetime.fromisoformat(ends).astimezone(tz)
"""Game end time."""
@property
def time(self) -> str:
"""String describing game start/end time."""
return f'{self.starts:%-I:%M%p} - {self.ends:%-I:%M%p %Z %b %d, %Y}'
def __repr__(self) -> str:
return f'Game("{self.name}", {self.time}, {self.status}, uuid: {self.uuid})'
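# Illustrative sketch, not part of the original client: build a Game from a made-up
# session payload (every field value below is hypothetical).
def _example_game_usage() -> Game:
    """Illustrative only: construct a Game from a fake Warhorn session node."""
    session = GraphNode({
        'uuid': 'abc-123', 'status': 'PUBLISHED', 'signupUrl': 'https://example.test/s',
        'scenario': {'name': 'DDAL00-01'},
        'slot': {'timezone': 'US/Pacific',
                 'startsAt': '2022-01-01T18:00:00', 'endsAt': '2022-01-01T22:00:00'},
    })
    game = Game(session)
    assert game.name == 'DDAL00-01'  # falls back to scenario name when customName is absent
    return game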
class WarhornAPI: # pylint: disable=too-few-public-methods
"""Warhorn client API."""
def __init__(self, url: str='https://warhorn.net/graphql', token: str='') -> None:
"""Init Warhorn client.
Args:
url: Warhorn GraphQL endpoint.
token: optional bearer token; when non-empty it is sent as an Authorization header.
"""
headers = {}
if token:
headers['Authorization'] = f'Bearer {token}'
self._transport = AIOHTTPTransport(url=url, headers=headers)
self._client = Client(transport=self._transport, fetch_schema_from_transport=False)
gql_logger.setLevel(logging.WARNING) # type: ignore
async def get_games(
self, slug: str, starts_after: Optional[datetime.datetime]=None
) -> AsyncGenerator[Game, None]:
"""Query Warhorn for games.
Args:
slug: identifying string for the warhorn event.
starts_after: Only return Games beginning after this time.
Returns:
Generator of games.
"""
starts_after = starts_after if starts_after else datetime.datetime.now()
q = _QUERY.format(slug=slug, startsAfter=starts_after.isoformat())
query = gql(q)
result = GraphNode(await self._client.execute_async(query)) # type: ignore
for session in result.path('eventSessions', 'nodes').tuple:
status = session.path('status').str
if status not in ('PUBLISHED', 'DRAFT', 'CANCELED'):
logging.warning('Unexpected session status: %s', session)
if status != 'PUBLISHED':
continue
yield Game(session)
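# Illustrative usage sketch (not part of the original module). The event slug and the
# token below are hypothetical placeholders; asyncio is assumed to be available in the
# caller (it is not imported by this module itself):
#
#     import asyncio
#
#     async def list_games():
#         api = WarhornAPI(token='my-warhorn-token')
#         async for game in api.get_games('my-event-slug'):
#             print(game)
#
#     asyncio.run(list_games())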
| 32.968421
| 98
| 0.608397
| 4,729
| 0.754949
| 995
| 0.158844
| 650
| 0.103768
| 995
| 0.158844
| 3,059
| 0.488346
|
c036c5b85abcd0ef620f9e8bbff718b557b0b6ee
| 13,750
|
py
|
Python
|
regnerf/internal/models.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
regnerf/internal/models.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
regnerf/internal/models.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
import functools
from typing import Any, Callable
from flax import linen as nn
import gin
from internal import mip, utils # pylint: disable=g-multiple-import
import jax
from jax import random
import jax.numpy as jnp
@gin.configurable
class MipNerfModel(nn.Module):
"""Nerf NN Model with both coarse and fine MLPs."""
config: Any = None # A Config class, must be set upon construction.
num_samples: int = 128 # The number of samples per level.
num_levels: int = 2 # The number of sampling levels.
stop_level_grad: bool = True # If True, don't backprop across levels.
use_viewdirs: bool = True # If True, use view directions as input.
genspace_fn: Callable[Ellipsis, Any] = None # The genspace() curve function.
ray_shape: str = 'cone' # The shape of cast rays ('cone' or 'cylinder').
disable_integration: bool = False # If True, use PE instead of IPE.
single_jitter: bool = False # If True, jitter whole rays instead of samples.
@nn.compact
def __call__(
self,
rng,
rays,
resample_padding,
compute_extras,
):
"""The mip-NeRF Model.
Args:
rng: random number generator (or None for deterministic output).
rays: util.Rays, a pytree of ray origins, directions, and viewdirs.
resample_padding: float, the histogram padding to use when resampling.
compute_extras: bool, if True, compute extra quantities besides color.
Returns:
ret: list, [*(rgb, distance, acc)]
"""
# Construct the MLP.
mlp = MLP()
renderings = []
for i_level in range(self.num_levels):
if rng is None:
key = None
else:
key, rng = random.split(rng)
if i_level == 0:
# Stratified sampling along rays
t_vals, samples = mip.sample_along_rays(
key,
rays.origins,
rays.directions,
rays.radii,
self.num_samples,
rays.near,
rays.far,
self.genspace_fn,
self.ray_shape,
self.single_jitter,
)
else:
t_vals, samples = mip.resample_along_rays(
key,
rays.origins,
rays.directions,
rays.radii,
t_vals,
weights,
self.ray_shape,
self.stop_level_grad,
resample_padding,
self.single_jitter,
)
if self.disable_integration:
samples = (samples[0], jnp.zeros_like(samples[1]))
# Point attribute predictions
if self.use_viewdirs:
(rgb, density, normals) = mlp(rng, samples, rays.viewdirs)
else:
(rgb, density, normals) = mlp(rng, samples, None)
# Volumetric rendering.
weights, _, _, delta = mip.compute_alpha_weights(
density, t_vals, rays.directions)
rendering = mip.volumetric_rendering(
rgb,
weights,
normals,
t_vals,
self.config.white_background,
self.config.vis_num_rays,
compute_extras,
delta,
)
renderings.append(rendering)
return renderings
def construct_mipnerf(rng, rays, config):
"""Construct a Neural Radiance Field.
Args:
rng: jnp.ndarray. Random number generator.
rays: an example of input Rays.
config: A Config class.
Returns:
model: nn.Model. Nerf model with parameters.
state: flax.Module.state. Nerf model state for stateful parameters.
"""
# Grab just 10 rays, to minimize memory overhead during construction.
ray = jax.tree_map(lambda x: jnp.reshape(x, [-1, x.shape[-1]])[:10], rays)
model = MipNerfModel(config=config)
init_variables = model.init(
rng, rng=None, rays=ray, resample_padding=0., compute_extras=False)
return model, init_variables
def cosine_easing_window(alpha, min_freq_log2=0, max_freq_log2=16):
"""Eases in each frequency one by one with a cosine.
This is equivalent to taking a Tukey window and sliding it to the right
along the frequency spectrum.
Args:
alpha: will ease in each frequency as alpha goes from 0.0 to num_freqs.
min_freq_log2: the lower frequency band.
max_freq_log2: the upper frequency band.
Returns:
A 1-d numpy array with num_sample elements containing the window.
"""
num_bands = max_freq_log2 - min_freq_log2
bands = jnp.linspace(min_freq_log2, max_freq_log2, num_bands)
x = jnp.clip(alpha - bands, 0.0, 1.0)
values = 0.5 * (1 + jnp.cos(jnp.pi * x + jnp.pi))
# always set first 4 freqs to 1
values = values.reshape(-1)
values = jnp.concatenate([jnp.ones_like(values[:4]), values[4:]])
values = jnp.repeat(values.reshape(-1, 1), 3, axis=1).reshape(-1)
return jnp.stack([values, values])
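# Illustrative sketch of the window above (worked out from the defaults, not part of the
# original module): with min_freq_log2=0 and max_freq_log2=16 there are 16 bands, each
# repeated across 3 dimensions, so cosine_easing_window(alpha) has shape (2, 48).
# At alpha=0 only the always-on first four frequency bands are open; as alpha grows
# toward 16 the remaining bands ease in smoothly from 0 to 1 along the half-cosine curve.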
@gin.configurable
class MLP(nn.Module):
"""A simple MLP."""
net_depth: int = 8 # The depth of the first part of MLP.
net_width: int = 256 # The width of the first part of MLP.
net_depth_viewdirs: int = 1 # The depth of the second part of MLP.
net_width_viewdirs: int = 128 # The width of the second part of MLP.
net_activation: Callable[Ellipsis, Any] = nn.relu # The activation function.
# Initializer for the weights of the MLP.
weight_init: Callable[Ellipsis, Any] = jax.nn.initializers.glorot_uniform()
skip_layer: int = 4 # Add a skip connection to the output of every N layers.
num_rgb_channels: int = 3 # The number of RGB channels.
min_deg_point: int = 0 # Min degree of positional encoding for 3D points.
max_deg_point: int = 16 # Max degree of positional encoding for 3D points.
deg_view: int = 4 # Degree of positional encoding for viewdirs.
density_activation: Callable[Ellipsis, Any] = nn.softplus # Density activation.
density_noise: float = 0. # Standard deviation of noise added to raw density.
density_bias: float = -1. # The shift added to raw densities pre-activation.
rgb_activation: Callable[Ellipsis, Any] = nn.sigmoid # The RGB activation.
rgb_padding: float = 0.001 # Padding added to the RGB outputs.
disable_normals: bool = False # If True, don't bother computing normals.
@nn.compact
def __call__(self, rng, samples, viewdirs=None):
"""Evaluate the MLP.
Args:
rng: random number generator (or None for deterministic output).
samples: a tuple containing:
- mean: [..., num_samples, 3], coordinate means, and
- cov: [..., num_samples, 3{, 3}], coordinate covariance matrices.
viewdirs: jnp.ndarray(float32), [batch, 3], if not None, this variable
will be part of the input to the second part of the MLP concatenated
with the output vector of the first part of the MLP. If None, only the
first part of the MLP will be used with input x. In the original paper,
this variable is the view direction.
Returns:
rgb: jnp.ndarray(float32), with a shape of [..., num_rgb_channels].
density: jnp.ndarray(float32), with a shape of [...].
normals: jnp.ndarray(float32), with a shape of [..., 3].
"""
dense_layer = functools.partial(nn.Dense, kernel_init=self.weight_init)
def predict_density(rng, means, covs):
"""Helper function to output density."""
# Encode input positions
inputs = mip.integrated_pos_enc(
(means, covs), self.min_deg_point, self.max_deg_point)
# Evaluate network to output density
x = inputs
for i in range(self.net_depth):
x = dense_layer(self.net_width)(x)
x = self.net_activation(x)
if i % self.skip_layer == 0 and i > 0:
x = jnp.concatenate([x, inputs], axis=-1)
raw_density = dense_layer(1)(x)[Ellipsis, 0] # Hardcoded to a single channel.
# Add noise to regularize the density predictions if needed.
if (rng is not None) and (self.density_noise > 0):
key, rng = random.split(rng)
raw_density += self.density_noise * random.normal(
key, raw_density.shape, dtype=raw_density.dtype)
# Apply bias and activation to raw density
density = self.density_activation(raw_density + self.density_bias)
return density, x
means, covs = samples
if self.disable_normals:
density, x = predict_density(rng, means, covs)
normals = jnp.full_like(means, fill_value=jnp.nan)
else:
# Flatten the input so value_and_grad can be vmap'ed.
means_flat = means.reshape([-1, means.shape[-1]])
covs_flat = covs.reshape([-1] + list(covs.shape[len(means.shape) - 1:]))
# Evaluate the network and its gradient on the flattened input.
predict_density_and_grad_fn = jax.vmap(
jax.value_and_grad(predict_density, argnums=1, has_aux=True),
in_axes=(None, 0, 0))
(density_flat, x_flat), density_grad_flat = (
predict_density_and_grad_fn(rng, means_flat, covs_flat))
# Unflatten the output.
density = density_flat.reshape(means.shape[:-1])
x = x_flat.reshape(list(means.shape[:-1]) + [x_flat.shape[-1]])
density_grad = density_grad_flat.reshape(means.shape)
# Compute surface normals as negative normalized density gradient
eps = jnp.finfo(jnp.float32).eps
normals = -density_grad / jnp.sqrt(
jnp.maximum(jnp.sum(density_grad**2, axis=-1, keepdims=True), eps))
if viewdirs is not None:
viewdirs_enc = mip.pos_enc(
viewdirs, min_deg=0, max_deg=self.deg_view, append_identity=True)
# Output of the first part of MLP.
bottleneck = dense_layer(self.net_width)(x)
viewdirs_enc = jnp.broadcast_to(
viewdirs_enc[Ellipsis, None, :],
list(bottleneck.shape[:-1]) + [viewdirs_enc.shape[-1]])
x = jnp.concatenate([bottleneck, viewdirs_enc], axis=-1)
# Here use 1 extra layer to align with the original nerf model.
for _ in range(self.net_depth_viewdirs):
x = dense_layer(self.net_width_viewdirs)(x)
x = self.net_activation(x)
rgb = self.rgb_activation(dense_layer(self.num_rgb_channels)(x))
rgb = rgb * (1 + 2 * self.rgb_padding) - self.rgb_padding
return (rgb, density, normals)
def render_image(render_fn, rays, rng, config):
"""Render all the pixels of an image (in test mode).
Args:
render_fn: function, jit-ed render function.
rays: a `Rays` pytree, the rays to be rendered.
rng: jnp.ndarray, random number generator (used in training mode only).
config: A Config class.
Returns:
rgb: jnp.ndarray, rendered color image.
disp: jnp.ndarray, rendered disparity image.
acc: jnp.ndarray, rendered accumulated weights per pixel.
"""
height, width = rays.origins.shape[:2]
num_rays = height * width
rays = jax.tree_map(lambda r: r.reshape((num_rays, -1)), rays)
host_id = jax.host_id()
chunks = []
idx0s = range(0, num_rays, config.render_chunk_size)
for i_chunk, idx0 in enumerate(idx0s):
# pylint: disable=cell-var-from-loop
if i_chunk % max(1, len(idx0s) // 10) == 0:
print(f'Rendering chunk {i_chunk}/{len(idx0s)-1}')
chunk_rays = (
jax.tree_map(lambda r: r[idx0:idx0 + config.render_chunk_size], rays))
actual_chunk_size = chunk_rays.origins.shape[0]
rays_remaining = actual_chunk_size % jax.device_count()
if rays_remaining != 0:
padding = jax.device_count() - rays_remaining
chunk_rays = jax.tree_map(
lambda r: jnp.pad(r, ((0, padding), (0, 0)), mode='edge'), chunk_rays)
else:
padding = 0
# After padding the number of chunk_rays is always divisible by host_count.
rays_per_host = chunk_rays.origins.shape[0] // jax.host_count()
start, stop = host_id * rays_per_host, (host_id + 1) * rays_per_host
chunk_rays = jax.tree_map(lambda r: utils.shard(r[start:stop]), chunk_rays)
chunk_renderings = render_fn(rng, chunk_rays)
# Unshard the renderings
chunk_renderings = [{k: utils.unshard(v[0], padding)
for k, v in r.items()}
for r in chunk_renderings]
chunk_rendering = chunk_renderings[-1]
keys = [k for k in chunk_renderings[0] if k.find('ray_') == 0]
for k in keys:
chunk_rendering[k] = [r[k] for r in chunk_renderings]
chunks.append(chunk_rendering)
rendering = {}
for k in chunks[0]:
if isinstance(chunks[0][k], list):
rendering[k] = [r[k] for r in chunks]
ds = range(len(rendering[k][0]))
rendering[k] = [jnp.concatenate([r[d] for r in rendering[k]]) for d in ds]
else:
rendering[k] = jnp.concatenate([r[k] for r in chunks])
rendering[k] = (
rendering[k].reshape((height, width) + chunks[0][k].shape[1:]))
# After all of the ray bundles have been concatenated together, extract a
# new random bundle (deterministically) from the concatenation that is the
# same size as one of the individual bundles.
keys = [k for k in rendering if k.find('ray_') == 0]
if keys:
ray_idx = random.permutation(
random.PRNGKey(0), rendering[keys[0]][0].shape[0])[:config.vis_num_rays]
for k in keys:
rendering[k] = [r[ray_idx] for r in rendering[k]]
return rendering
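# Illustrative sketch of the chunk padding in render_image above (numbers are
# hypothetical, not from the original module): with config.render_chunk_size=4096 and
# 8 devices, a final chunk of 4090 rays leaves 4090 % 8 == 2 remaining, so 6 rays of
# edge padding are appended to make the chunk evenly shardable; utils.unshard(..., padding)
# strips those 6 padded rays again afterwards.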
| 38.300836
| 84
| 0.663273
| 8,196
| 0.596073
| 0
| 0
| 8,232
| 0.598691
| 0
| 0
| 5,349
| 0.389018
|
c03730c3fe56f310fa37ff5662b46d4ef0a1326f
| 13,948
|
py
|
Python
|
Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/Tools/DCC/Maya/constants.py
|
prophetl33t/o3de
|
eaeeb883eee1594b1b93327f6909eebd1a826caf
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/Tools/DCC/Maya/constants.py
|
prophetl33t/o3de
|
eaeeb883eee1594b1b93327f6909eebd1a826caf
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/Tools/DCC/Maya/constants.py
|
prophetl33t/o3de
|
eaeeb883eee1594b1b93327f6909eebd1a826caf
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
Module Documentation:
< DCCsi >:: Tools/DCC/Maya/constants.py
This module contains default values for commonly used constants & strings.
We can make an update here easily that is propagated elsewhere.
"""
# -------------------------------------------------------------------------
# built-ins
import sys
import os
import site
import timeit
import inspect
from os.path import expanduser
from pathlib import Path
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
_START = timeit.default_timer() # start tracking
# global scope
_MODULENAME = 'Tools.DCC.Maya.constants'
_LOGGER = _logging.getLogger(_MODULENAME)
_LOGGER.debug('Initializing: {}.'.format(_MODULENAME))
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Maya is frozen
# module path when frozen
_MODULE_PATH = Path(os.path.abspath(inspect.getfile(inspect.currentframe())))
_LOGGER.debug('_MODULE_PATH: {}'.format(_MODULE_PATH))
_PATH_DCCSI_TOOLS_MAYA = Path(_MODULE_PATH.parent)
_PATH_DCCSI_TOOLS_MAYA = Path(os.getenv('PATH_DCCSI_TOOLS_MAYA',
_PATH_DCCSI_TOOLS_MAYA.as_posix()))
_PATH_DCCSI_TOOLS_DCC = Path(_PATH_DCCSI_TOOLS_MAYA.parent)
_PATH_DCCSI_TOOLS_DCC = Path(os.getenv('PATH_DCCSI_TOOLS_DCC',
_PATH_DCCSI_TOOLS_DCC.as_posix()))
_PATH_DCCSI_TOOLS = Path(_PATH_DCCSI_TOOLS_DCC.parent)
_PATH_DCCSI_TOOLS = Path(os.getenv('PATH_DCCSI_TOOLS',
_PATH_DCCSI_TOOLS.as_posix()))
# we need to set up basic access to the DCCsi
_PATH_DCCSIG = Path(_PATH_DCCSI_TOOLS.parent)
_PATH_DCCSIG = Path(os.getenv('PATH_DCCSIG', _PATH_DCCSIG.as_posix()))
site.addsitedir(_PATH_DCCSIG.as_posix())
_LOGGER.debug('_PATH_DCCSIG: {}'.format(_PATH_DCCSIG.as_posix()))
# this is the shared default requirements.txt file to install for python 3.6.x+
DCCSI_PYTHON_REQUIREMENTS = Path(_PATH_DCCSIG, 'requirements.txt').as_posix()
# if using Maya 2020 or earlier with py2.7, override and use the one here instead:
# "DccScriptingInterface\Tools\DCC\Maya\requirements.txt"
# now we have azpy api access
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
from azpy.constants import ENVAR_DCCSI_LOGLEVEL
from azpy.constants import ENVAR_DCCSI_GDEBUGGER
from azpy.constants import FRMT_LOG_LONG
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
from azpy.constants import * # but here are the specific ones we are gonna use
from azpy.constants import PATH_PROGRAMFILES_X64
from azpy.constants import TAG_PY_MAJOR
from azpy.constants import TAG_PY_MINOR
from azpy.constants import PATH_USER_HOME
from azpy.constants import PATH_USER_O3DE
from azpy.constants import ENVAR_O3DE_DEV
from azpy.constants import PATH_O3DE_DEV
from azpy.constants import ENVAR_PATH_DCCSIG
from azpy.constants import PATH_DCCSIG
from azpy.constants import ENVAR_DCCSI_LOG_PATH
from azpy.constants import PATH_DCCSI_LOG_PATH
from azpy.constants import ENVAR_DCCSI_PY_VERSION_MAJOR
from azpy.constants import ENVAR_DCCSI_PY_VERSION_MINOR
from azpy.constants import ENVAR_PATH_DCCSI_PYTHON_LIB
from azpy.constants import STR_PATH_DCCSI_PYTHON_LIB
from azpy.constants import PATH_DCCSI_PYTHON_LIB
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# dcc: Maya ENVAR constants
ENVAR_DCCSI_PY_VERSION_MAJOR=str("DCCSI_PY_VERSION_MAJOR")
ENVAR_DCCSI_PY_VERSION_MINOR=str("DCCSI_PY_VERSION_MINOR")
ENVAR_DCCSI_PY_VERSION_RELEASE=str("DCCSI_PY_VERSION_RELEASE")
ENVAR_MAYA_NO_CONSOLE_WINDOW = str("MAYA_NO_CONSOLE_WINDOW")
ENVAR_MAYA_SHOW_OUTPUT_WINDOW = str("MAYA_SHOW_OUTPUT_WINDOW")
TAG_O3DE_DCC_MAYA_MEL = 'dccsi_setup.mel'
TAG_MAYA_WORKSPACE = 'workspace.mel'
ENVAR_DCCSI_PY_MAYA = str('DCCSI_PY_MAYA')
ENVAR_MAYA_VERSION = str('MAYA_VERSION')
ENVAR_MAYA_LOCATION = str('MAYA_LOCATION')
ENVAR_PATH_DCCSI_TOOLS_MAYA = str('PATH_DCCSI_TOOLS_MAYA')
ENVAR_MAYA_MODULE_PATH = str('MAYA_MODULE_PATH')
ENVAR_MAYA_BIN_PATH = str('MAYA_BIN_PATH')
ENVAR_DCCSI_MAYA_PLUG_IN_PATH = str('DCCSI_MAYA_PLUG_IN_PATH')
ENVAR_MAYA_PLUG_IN_PATH = str('MAYA_PLUG_IN_PATH')
ENVAR_DCCSI_MAYA_SHELF_PATH = str('DCCSI_MAYA_SHELF_PATH')
ENVAR_MAYA_SHELF_PATH = str('MAYA_SHELF_PATH')
ENVAR_DCCSI_MAYA_XBMLANGPATH = str('DCCSI_MAYA_XBMLANGPATH')
ENVAR_XBMLANGPATH = str('XBMLANGPATH')
ENVAR_DCCSI_MAYA_SCRIPT_MEL_PATH = str('DCCSI_MAYA_SCRIPT_MEL_PATH')
ENVAR_DCCSI_MAYA_SCRIPT_PY_PATH = str('DCCSI_MAYA_SCRIPT_PY_PATH')
ENVAR_DCCSI_MAYA_SCRIPT_PATH = str("DCCSI_MAYA_SCRIPT_PATH")
ENVAR_MAYA_SCRIPT_PATH = str('MAYA_SCRIPT_PATH')
ENVAR_DCCSI_MAYA_SET_CALLBACKS = str('DCCSI_MAYA_SET_CALLBACKS')
ENVAR_MAYA_VP2_DEVICE_OVERRIDE=str("MAYA_VP2_DEVICE_OVERRIDE")
ENVAR_MAYA_OGS_DEVICE_OVERRIDE=str("MAYA_OGS_DEVICE_OVERRIDE")
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Maya consts
#USER_HOME = Path.home()
# mimicking all values from: "DccScriptingInterface\Tools\Dev\Windows\Env_DCC_Maya.bat"
# note: these are just default values, they are only initially CONST
# if/when imported from here (constants.py)
DCCSI_PY_VERSION_MAJOR = 3
DCCSI_PY_VERSION_MINOR = 7
DCCSI_PY_VERSION_RELEASE = 7
# override with maya defaults
PATH_DCCSI_PYTHON_LIB = STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
DCCSI_PY_VERSION_MAJOR,
DCCSI_PY_VERSION_MINOR)
# not actually a maya envar, to do: could rename DCCSI_MAYA_VERSION
MAYA_VERSION=2022
# is a maya envar
MAYA_PROJECT = _PATH_DCCSIG.as_posix()
PATH_DCCSI_TOOLS_MAYA = _PATH_DCCSI_TOOLS_MAYA.as_posix()
# is a maya envar
MAYA_MODULE_PATH = _PATH_DCCSI_TOOLS_MAYA.as_posix()
# is a maya envar
MAYA_LOCATION = Path(PATH_PROGRAMFILES_X64,'Autodesk', 'Maya{}'.format(MAYA_VERSION)).as_posix()
# is a maya envar
MAYA_BIN_PATH = Path(MAYA_LOCATION, 'bin').as_posix()
DCCSI_MAYA_SET_CALLBACKS = True
# is a maya envar
MAYA_NO_CONSOLE_WINDOW = False
MAYA_SHOW_OUTPUT_WINDOW = True
DCCSI_MAYA_EXE = Path(MAYA_BIN_PATH, 'maya.exe')
DCCSI_MAYABATCH_EXE = Path(MAYA_BIN_PATH, 'mayabatch.exe')
DCCSI_PY_MAYA = Path(MAYA_BIN_PATH, 'mayapy.exe')
# this is transient and will always track the exe this script is executing on
O3DE_PY_EXE = Path(sys.executable).as_posix()
DCCSI_PY_IDE = Path(DCCSI_PY_MAYA).as_posix()
DCCSI_MAYA_PLUG_IN_PATH = Path(PATH_DCCSI_TOOLS_MAYA,'plugins').as_posix()
# is a maya envar
MAYA_PLUG_IN_PATH = Path(DCCSI_MAYA_PLUG_IN_PATH).as_posix() # extend %MAYA_PLUG_IN_PATH%
# to do: remove or extend in the next PR; technically there can be more than one plugin path
#while MAYA_PLUG_IN_PATH:
#if ENVAR_MAYA_PLUG_IN_PATH in os.environ:
#maya_plug_pathlist = os.getenv(ENVAR_MAYA_PLUG_IN_PATH).split(os.pathsep)
#maya_plug_new_pathlist = maya_plug_pathlist.copy()
#maya_plug_new_pathlist.insert(0, Path(DCCSI_MAYA_PLUG_IN_PATH).as_posix())
#os.environ[ENVAR_MAYA_PLUG_IN_PATH] = os.pathsep.join(maya_plug_new_pathlist)
#else:
#os.environ[ENVAR_MAYA_PLUG_IN_PATH] = DCCSI_MAYA_PLUG_IN_PATH
#MAYA_PLUG_IN_PATH = os.getenv(ENVAR_MAYA_PLUG_IN_PATH, "< NOT SET >")
#break
DCCSI_MAYA_SHELF_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Prefs', 'Shelves').as_posix()
DCCSI_MAYA_XBMLANGPATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Prefs', 'icons').as_posix()
# is a maya envar
# maya resources, very oddly named
XBMLANGPATH = Path(DCCSI_MAYA_XBMLANGPATH).as_posix() # extend %XBMLANGPATH%
# to do: remove or extend in the next PR; technically there can be more than one resource path specified
#while XBMLANGPATH:
#if ENVAR_XBMLANGPATH in os.environ:
#maya_xbm_pathlist = os.getenv(ENVAR_XBMLANGPATH).split(os.pathsep)
#maya_xbm_new_pathlist = maya_xbm_pathlist.copy()
#maya_xbm_new_pathlist.insert(0, Path(DCCSI_MAYA_XBMLANGPATH).as_posix())
#os.environ[ENVAR_XBMLANGPATH] = os.pathsep.join(maya_xbm_new_pathlist)
#else:
#os.environ[ENVAR_XBMLANGPATH] = DCCSI_MAYA_XBMLANGPATH
#XBMLANGPATH = os.getenv(ENVAR_XBMLANGPATH, "< NOT SET >")
#break
DCCSI_MAYA_SCRIPT_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts').as_posix()
DCCSI_MAYA_SCRIPT_MEL_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts', 'Mel').as_posix()
DCCSI_MAYA_SCRIPT_PY_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts', 'Python').as_posix()
MAYA_SCRIPT_PATH = Path(DCCSI_MAYA_SCRIPT_PATH).as_posix() # extend %MAYA_SCRIPT_PATH%
# to do: remove or extend in the next PR; technically there can be more than one script path specified
#while MAYA_SCRIPT_PATH:
#if ENVAR_MAYA_SCRIPT_PATH in os.environ:
#maya_script_pathlist = os.getenv(ENVAR_MAYA_SCRIPT_PATH).split(os.pathsep)
#maya_script_new_pathlist = maya_script_pathlist.copy()
#maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_MEL_PATH)
#maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_PY_PATH)
#maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_PATH)
#os.environ[ENVAR_MAYA_SCRIPT_PATH] = os.pathsep.join(maya_script_new_pathlist)
#else:
#os.environ[ENVAR_MAYA_SCRIPT_PATH] = os.pathsep.join( (DCCSI_MAYA_SCRIPT_PATH,
#DCCSI_MAYA_SCRIPT_PY_PATH,
#DCCSI_MAYA_SCRIPT_MEL_PATH) )
#MAYA_SCRIPT_PATH = os.getenv(ENVAR_MAYA_SCRIPT_PATH, "< NOT SET >")
#break
# is a maya envar
MAYA_VP2_DEVICE_OVERRIDE="VirtualDeviceDx11"
MAYA_OGS_DEVICE_OVERRIDE="VirtualDeviceDx11"
DCCSI_MAYA_WIKI_URL = 'https://github.com/o3de/o3de/wiki/O3DE-DCCsi-Tools-DCC-Maya'
# reference, here is a list of Maya envars
# https://github.com/mottosso/Maya-Environment-Variables/blob/master/README.md
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
"""Run this file as a standalone script"""
# happy print
_LOGGER.info(STR_CROSSBAR)
_LOGGER.info('~ {}.py ... Running script as __main__'.format(_MODULENAME))
_LOGGER.info(STR_CROSSBAR)
# global debug stuff
_DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, True)
_DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, True)
_DCCSI_LOGLEVEL = int(env_bool(ENVAR_DCCSI_LOGLEVEL, _logging.INFO))
if _DCCSI_GDEBUG:
# override loglevel if running in debug
_DCCSI_LOGLEVEL = _logging.DEBUG
# configure basic logger
# note: not using a common logger to reduce cyclical imports
_logging.basicConfig(level=_DCCSI_LOGLEVEL,
format=FRMT_LOG_LONG,
datefmt='%m-%d %H:%M')
# re-configure basic logger for debug
_LOGGER = _logging.getLogger(_MODULENAME)
# this is just a debug developer convenience print (for testing access)
import pkgutil
_LOGGER.info('Current working dir: {0}'.format(os.getcwd()))
search_path = ['.'] # set to None to see all modules importable from sys.path
all_modules = [x[1] for x in pkgutil.iter_modules(path=search_path)]
_LOGGER.info('All Available Modules in working dir: {0}'.format(all_modules))
# override based on current executable
PATH_DCCSI_PYTHON_LIB = STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
sys.version_info.major,
sys.version_info.minor)
PATH_DCCSI_PYTHON_LIB = Path(PATH_DCCSI_PYTHON_LIB).as_posix()
# test anything procedurally generated
_LOGGER.info('Testing procedural env paths ...')
from pathlib import Path
_stash_dict = {}
_stash_dict['O3DE_DEV'] = Path(PATH_O3DE_DEV)
_stash_dict['PATH_DCCSIG'] = Path(PATH_DCCSIG)
_stash_dict['DCCSI_AZPY_PATH'] = Path(PATH_DCCSI_AZPY_PATH)
_stash_dict['PATH_DCCSI_TOOLS'] = Path(PATH_DCCSI_TOOLS)
_stash_dict['PATH_DCCSI_PYTHON_LIB'] = Path(PATH_DCCSI_PYTHON_LIB)
_stash_dict['PATH_DCCSI_TOOLS_MAYA'] = Path(PATH_DCCSI_TOOLS_MAYA)
_stash_dict['MAYA_LOCATION'] = Path(MAYA_LOCATION)
_stash_dict['DCCSI_MAYA_EXE'] = Path(DCCSI_MAYA_EXE)
_stash_dict['DCCSI_PY_MAYA'] = Path(DCCSI_PY_MAYA)
_stash_dict['MAYA_SCRIPT_PATH'] = Path(MAYA_SCRIPT_PATH)
# ---------------------------------------------------------------------
# py 2 and 3 compatible iter
def get_items(dict_object):
for key in dict_object:
yield key, dict_object[key]
for key, value in get_items(_stash_dict):
# check if path exists
try:
value.exists()
_LOGGER.info('{0}: {1}'.format(key, value))
except Exception as e:
_LOGGER.warning('FAILED PATH: {}'.format(e))
# custom prompt
sys.ps1 = "[{}]>>".format(_MODULENAME)
_LOGGER.debug('{0} took: {1} sec'.format(_MODULENAME, timeit.default_timer() - _START))
# --- END -----------------------------------------------------------------
| 41.144543
| 99
| 0.674649
| 0
| 0
| 99
| 0.007098
| 0
| 0
| 0
| 0
| 6,743
| 0.483438
|
c03733662ac655fa4e1af62db62b069a9399ac49
| 1,958
|
py
|
Python
|
lib/data/finetune_imagenet.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | 1
|
2020-11-29T15:59:07.000Z
|
2020-11-29T15:59:07.000Z
|
lib/data/finetune_imagenet.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | null | null | null |
lib/data/finetune_imagenet.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | null | null | null |
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from lib.data.data_util import ImageNetPolicy, ToBGRTensor
from lib.config import cfg
from lib.data.transformer_v2 import get_transforms
def finetune_imagenet():
transformation = get_transforms(input_size=cfg.dataset.finetune_size, test_size=cfg.dataset.finetune_size,
kind='full', crop=True, need=('train', 'val'), backbone=None)
transform_train = transformation['val_train']
transform_test = transformation['val_test']
train_data = datasets.ImageFolder(
cfg.dataset.train_dir,
transform_train
)
test_data = datasets.ImageFolder(
cfg.dataset.test_dir,
transform_test
)
if cfg.ddp.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
else:
train_sampler = None
test_sampler = None
if cfg.ddp.distributed:
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=cfg.dataset.batch_size,
sampler=train_sampler,
pin_memory=True, num_workers=cfg.dataset.workers)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=cfg.dataset.batch_size,
sampler=test_sampler,
pin_memory=True, num_workers=cfg.dataset.workers)
else:
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=cfg.dataset.batch_size,
shuffle=True,
pin_memory=True, num_workers=cfg.dataset.workers)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=cfg.dataset.batch_size,
shuffle=False,
pin_memory=True, num_workers=cfg.dataset.workers)
return [train_loader, test_loader], [train_sampler, test_sampler], 1000
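# Illustrative usage sketch (not part of the original module; assumes cfg has already
# been populated elsewhere, e.g. from the project config):
#
#     loaders, samplers, num_classes = finetune_imagenet()
#     train_loader, test_loader = loaders
#     train_sampler, test_sampler = samplers  # both are None when cfg.ddp.distributed is False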
| 35.6
| 110
| 0.689479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.019918
|
c03974668d2a1ee4545cf6fd342d588c2d650bb4
| 6,519
|
py
|
Python
|
test/acceptance/test_kamma.py
|
marceljanerfont/kamma
|
a1dfaf06475ebb2feb50ac1e6fd8eb79b2beda68
|
[
"MIT"
] | 1
|
2017-06-05T04:40:01.000Z
|
2017-06-05T04:40:01.000Z
|
test/acceptance/test_kamma.py
|
marceljanerfont/kamma
|
a1dfaf06475ebb2feb50ac1e6fd8eb79b2beda68
|
[
"MIT"
] | 2
|
2017-06-29T14:23:59.000Z
|
2017-06-29T14:24:58.000Z
|
test/acceptance/test_kamma.py
|
marceljanerfont/kamma
|
a1dfaf06475ebb2feb50ac1e6fd8eb79b2beda68
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
from multiprocessing import Manager
from random import randint
import logging
import sys
import os
import copy
import shutil
# add kamma path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import kamma
TEST_PATH = "test_queue"
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)-8s] [%(name)-10s] [%(lineno)-4d] %(message)s'))
logger_kamma = logging.getLogger('kamma.app')
logger_kamma.handlers = [handler]
# logger_kamma.setLevel(logging.DEBUG)
logger_fqueue = logging.getLogger('kamma.queue')
logger_fqueue.handlers = [handler]
# logger_fqueue.setLevel(logging.DEBUG)
logger_task = logging.getLogger('kamma.task')
logger_task.handlers = [handler]
# logger_task.setLevel(logging.DEBUG)
logger = logging.getLogger('test')
logger.handlers = [handler]
logger.setLevel(logging.DEBUG)
def _clear_queue():
try:
shutil.rmtree(TEST_PATH)
except Exception:
pass
# The Manager must live at module scope; otherwise Python tries to pickle the whole
# test class (including its Manager) and the serialization fails.
the_manager = None
class KammaTestsCheckOrder(unittest.TestCase):
def setUp(self):
_clear_queue()
self.callbacks = [self.task0, self.task1, self.task2, self.task3, self.task4, self.task5]
# Manager is necessary because it is modified from different threads
global the_manager  # keep the Manager at module scope (see the note above)
the_manager = Manager()
self.cb_indexs = the_manager.list()
for i in range(0, 100):
self.cb_indexs.append(randint(0, 5))
def tearDown(self):
_clear_queue()
def _taskx(self, task_id, data):
logger.debug("running '{}', remaining {} tasks".format(task_id, len(self.cb_indexs)))
self.assertEqual(task_id, data['id'], "{} data: {}, tasks: {}".format(task_id, data, self.cb_indexs))
self.assertEqual(task_id, self.callbacks[self.cb_indexs[0]].__name__)
self.cb_indexs.pop(0)
def task0(self, data):
self._taskx('task0', data)
def task1(self, data):
self._taskx('task1', data)
def task2(self, data):
self._taskx('task2', data)
def task3(self, data):
self._taskx('task3', data)
def task4(self, data):
self._taskx('task4', data)
def task5(self, data):
self._taskx('task5', data)
def test_usual_case(self):
worker = kamma.Worker(queue_path=TEST_PATH)
worker.add_task_callback(callback=self.task0, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.add_task_callback(callback=self.task1, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.add_task_callback(callback=self.task2, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.add_task_callback(callback=self.task3, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.add_task_callback(callback=self.task4, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.add_task_callback(callback=self.task5, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
cloned_cb_indexs = copy.deepcopy(self.cb_indexs)
worker.run_async()
for i in cloned_cb_indexs:
worker.push_task(callback=self.callbacks[i], data={'id': self.callbacks[i].__name__})
worker.wait_empty_event()
self.assertEqual(0, worker.pending())
worker.stop()
self.assertEqual(0, len(self.cb_indexs))
class KammaTestsExceptionsInKamma(unittest.TestCase):
def setUp(self):
_clear_queue()
def tearDown(self):
_clear_queue()
def task(self):
pass
def test_exception_pushtask_TaskNotRegistered(self):
worker = kamma.Worker(queue_path=TEST_PATH)
self.assertRaises(kamma.TaskNotRegistered, lambda: worker.push_task(callback=self.task))
# worker.wait()
worker.stop()
class KammaTestsExceptionsInTask(unittest.TestCase):
def setUp(self):
_clear_queue()
global the_manager  # keep the Manager at module scope (see the note above)
the_manager = Manager()
self.count = the_manager.list()
self.count.append(0)
self.num_failures = 3
def tearDown(self):
_clear_queue()
def task0(self):
self.count[0] = self.count[0] + 1
if self.count[0] < self.num_failures:
raise Exception('I don\'t want to work, try {}'.format(self.count[0]))
def test_exception_in_task(self):
worker = kamma.Worker(queue_path=TEST_PATH)
worker.add_task_callback(callback=self.task0, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(self.num_failures+1))
worker.push_task(callback=self.task0)
worker.run_async()
worker.wait_empty_event()
worker.stop()
self.assertEqual(self.num_failures, self.count[0])
class KammaTestsOnAbortion(unittest.TestCase):
def setUp(self):
_clear_queue()
self.abortion_called = False
self.failure_called = False
def tearDown(self):
_clear_queue()
def task_abort(self):
raise kamma.AbortTask("I'm indisposed")
def task_failure(self):
raise Exception("Boom")
def on_abortion(self, json_task, reason):
self.abortion_called = True
def on_failure(self, json_task, retry_stopped):
self.failure_called = True
def test_on_abortion(self):
worker = kamma.Worker(queue_path=TEST_PATH)
worker.add_on_abortion(self.on_abortion)
worker.add_task_callback(self.task_abort)
worker.run_async()
worker.push_task(self.task_abort)
worker.wait_empty_event()
worker.stop()
self.assertTrue(self.abortion_called)
def test_on_failure(self):
worker = kamma.Worker(queue_path=TEST_PATH)
worker.add_on_failure(self.on_failure)
worker.add_task_callback(self.task_failure, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
worker.run_async()
worker.push_task(self.task_failure)
worker.wait_empty_event()
worker.stop()
self.assertTrue(self.failure_called)
if __name__ == '__main__':
unittest.main()
| 33.953125
| 144
| 0.666973
| 5,157
| 0.791072
| 0
| 0
| 0
| 0
| 0
| 0
| 689
| 0.105691
|
c03a48434d8a8fb57465d077a992cea579fd3c43
| 855
|
py
|
Python
|
wrappers/python/demo_mp_sync.py
|
Qworg/libfreenect
|
4cca607b37debdd006c3e693954292da11402a7e
|
[
"Apache-2.0"
] | 10
|
2020-03-09T02:31:01.000Z
|
2021-12-14T18:29:27.000Z
|
wrappers/python/demo_mp_sync.py
|
Qworg/libfreenect
|
4cca607b37debdd006c3e693954292da11402a7e
|
[
"Apache-2.0"
] | null | null | null |
wrappers/python/demo_mp_sync.py
|
Qworg/libfreenect
|
4cca607b37debdd006c3e693954292da11402a7e
|
[
"Apache-2.0"
] | 1
|
2018-06-23T04:58:30.000Z
|
2018-06-23T04:58:30.000Z
|
#!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import frame_convert
import signal
keep_running = True
def get_depth():
return frame_convert.pretty_depth(freenect.sync_get_depth()[0])
def get_video():
return freenect.sync_get_video()[0]
def handler(signum, frame):
"""Sets up the kill handler, catches SIGINT"""
global keep_running
keep_running = False
mp.ion()
mp.gray()
mp.figure(1)
image_depth = mp.imshow(get_depth(), interpolation='nearest', animated=True)
mp.figure(2)
image_rgb = mp.imshow(get_video(), interpolation='nearest', animated=True)
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
while keep_running:
mp.figure(1)
image_depth.set_data(get_depth())
mp.figure(2)
image_rgb.set_data(get_video())
mp.draw()
mp.waitforbuttonpress(0.01)
| 21.375
| 76
| 0.730994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.139181
|
c03ae4d1c246454dbef54627c8b2804bc08c11f8
| 1,189
|
py
|
Python
|
codes/models/modules/LPIPS/compute_dists.py
|
DinJerr/BasicSR
|
b992a386e63daed5193b775080b9066ff2421d85
|
[
"Apache-2.0"
] | 5
|
2020-06-07T18:07:45.000Z
|
2020-09-06T02:13:52.000Z
|
codes/models/modules/LPIPS/compute_dists.py
|
DinJerr/BasicSR
|
b992a386e63daed5193b775080b9066ff2421d85
|
[
"Apache-2.0"
] | null | null | null |
codes/models/modules/LPIPS/compute_dists.py
|
DinJerr/BasicSR
|
b992a386e63daed5193b775080b9066ff2421d85
|
[
"Apache-2.0"
] | 1
|
2020-06-28T05:55:41.000Z
|
2020-06-28T05:55:41.000Z
|
#import models
from models.modules.LPIPS import perceptual_loss as models
####################
# metric
####################
model = None
def calculate_lpips(img1_im, img2_im, use_gpu=False, net='squeeze', spatial=False):
'''calculate Perceptual Metric using LPIPS
img1_im, img2_im: BGR image from [0,255]
img1, img2: RGB tensors in [-1,1] (channel order flipped from BGR below)
'''
global model
## Initializing the model
# squeeze is much smaller, needs less RAM to load and execute in CPU during training
if model is None:
model = models.PerceptualLoss(model='net-lin',net=net,use_gpu=use_gpu,spatial=spatial)
# Load images to tensors
img1 = models.im2tensor(img1_im[:,:,::-1]) # RGB image from [-1,1]
img2 = models.im2tensor(img2_im[:,:,::-1]) # RGB image from [-1,1]
if(use_gpu):
img1 = img1.cuda()
img2 = img2.cuda()
# Compute distance
if not spatial:
dist01 = model.forward(img2,img1)
else:
dist01 = model.forward(img2,img1).mean() # .mean() collapses the spatial error map to a scalar when spatial=True
#print('Distance: %.3f'%dist01) #%.8f
return dist01
def cleanup():
global model
model = None
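# Illustrative usage sketch (not part of the original module). The file names are
# hypothetical and cv2 is assumed to be available in the caller; this module does not
# import it itself:
#
#     import cv2
#     img1 = cv2.imread('restored.png')   # BGR uint8 in [0, 255]
#     img2 = cv2.imread('reference.png')
#     dist = calculate_lpips(img1, img2, use_gpu=False, net='squeeze')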
| 27.651163
| 94
| 0.612279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 486
| 0.408747
|
c03b78905f8ecc14f0212e38dfa62f635acd9408
| 59,338
|
py
|
Python
|
msgraph-cli-extensions/v1_0/sites_v1_0/azext_sites_v1_0/vendored_sdks/sites/models/_sites_enums.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/sites_v1_0/azext_sites_v1_0/vendored_sdks/sites/models/_sites_enums.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/sites_v1_0/azext_sites_v1_0/vendored_sdks/sites/models/_sites_enums.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
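# Illustrative note on the metaclass above (not part of the generated file): member
# lookup is case-insensitive, so for the enums below Enum100['name'] and Enum100['NAME']
# both resolve to the Enum100.NAME member.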
class Enum100(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
DESCRIPTION = "description"
GROUP = "group"
HIDDEN = "hidden"
INHERITED_FROM = "inheritedFrom"
NAME = "name"
ORDER = "order"
PARENT_ID = "parentId"
READ_ONLY = "readOnly"
SEALED = "sealed"
COLUMN_LINKS = "columnLinks"
class Enum101(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
COLUMN_LINKS = "columnLinks"
class Enum102(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
NAME = "name"
NAME_DESC = "name desc"
class Enum103(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum104(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum105(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DRIVE_TYPE = "driveType"
OWNER = "owner"
QUOTA = "quota"
SHARE_POINT_IDS = "sharePointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum106(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum107(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
class Enum108(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
CONTENT_TYPE = "contentType"
SHAREPOINT_IDS = "sharepointIds"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum109(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum110(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
CONTENT_TYPE = "contentType"
SHAREPOINT_IDS = "sharepointIds"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum111(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum112(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum113(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum114(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
AUDIO = "audio"
CONTENT = "content"
C_TAG = "cTag"
DELETED = "deleted"
FILE = "file"
FILE_SYSTEM_INFO = "fileSystemInfo"
FOLDER = "folder"
IMAGE = "image"
LOCATION = "location"
PACKAGE = "package"
PENDING_OPERATIONS = "pendingOperations"
PHOTO = "photo"
PUBLICATION = "publication"
REMOTE_ITEM = "remoteItem"
ROOT = "root"
SEARCH_RESULT = "searchResult"
SHARED = "shared"
SHAREPOINT_IDS = "sharepointIds"
SIZE = "size"
SPECIAL_FOLDER = "specialFolder"
VIDEO = "video"
WEB_DAV_URL = "webDavUrl"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
WORKBOOK = "workbook"
ANALYTICS = "analytics"
CHILDREN = "children"
LIST_ITEM = "listItem"
PERMISSIONS = "permissions"
SUBSCRIPTIONS = "subscriptions"
THUMBNAILS = "thumbnails"
VERSIONS = "versions"
class Enum115(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
WORKBOOK = "workbook"
ANALYTICS = "analytics"
CHILDREN = "children"
LIST_ITEM = "listItem"
PERMISSIONS = "permissions"
SUBSCRIPTIONS = "subscriptions"
THUMBNAILS = "thumbnails"
VERSIONS = "versions"
class Enum116(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
PUBLICATION = "publication"
PUBLICATION_DESC = "publication desc"
class Enum117(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
PUBLICATION = "publication"
FIELDS = "fields"
class Enum118(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
FIELDS = "fields"
class Enum119(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
PUBLICATION = "publication"
FIELDS = "fields"
class Enum120(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
FIELDS = "fields"
class Enum121(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
APPLICATION_ID = "applicationId"
APPLICATION_ID_DESC = "applicationId desc"
CHANGE_TYPE = "changeType"
CHANGE_TYPE_DESC = "changeType desc"
CLIENT_STATE = "clientState"
CLIENT_STATE_DESC = "clientState desc"
CREATOR_ID = "creatorId"
CREATOR_ID_DESC = "creatorId desc"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_DESC = "encryptionCertificate desc"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
ENCRYPTION_CERTIFICATE_ID_DESC = "encryptionCertificateId desc"
EXPIRATION_DATE_TIME = "expirationDateTime"
EXPIRATION_DATE_TIME_DESC = "expirationDateTime desc"
INCLUDE_RESOURCE_DATA = "includeResourceData"
INCLUDE_RESOURCE_DATA_DESC = "includeResourceData desc"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LATEST_SUPPORTED_TLS_VERSION_DESC = "latestSupportedTlsVersion desc"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
LIFECYCLE_NOTIFICATION_URL_DESC = "lifecycleNotificationUrl desc"
NOTIFICATION_URL = "notificationUrl"
NOTIFICATION_URL_DESC = "notificationUrl desc"
RESOURCE = "resource"
RESOURCE_DESC = "resource desc"
class Enum122(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
APPLICATION_ID = "applicationId"
CHANGE_TYPE = "changeType"
CLIENT_STATE = "clientState"
CREATOR_ID = "creatorId"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
EXPIRATION_DATE_TIME = "expirationDateTime"
INCLUDE_RESOURCE_DATA = "includeResourceData"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
NOTIFICATION_URL = "notificationUrl"
RESOURCE = "resource"
class Enum123(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
APPLICATION_ID = "applicationId"
CHANGE_TYPE = "changeType"
CLIENT_STATE = "clientState"
CREATOR_ID = "creatorId"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
EXPIRATION_DATE_TIME = "expirationDateTime"
INCLUDE_RESOURCE_DATA = "includeResourceData"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
NOTIFICATION_URL = "notificationUrl"
RESOURCE = "resource"
class Enum127(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum128(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum129(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum130(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum131(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum132(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum133(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum134(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum135(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum65(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum66(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum68(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum69(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum70(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum71(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
BOOLEAN = "boolean"
BOOLEAN_DESC = "boolean desc"
CALCULATED = "calculated"
CALCULATED_DESC = "calculated desc"
CHOICE = "choice"
CHOICE_DESC = "choice desc"
COLUMN_GROUP = "columnGroup"
COLUMN_GROUP_DESC = "columnGroup desc"
CURRENCY = "currency"
CURRENCY_DESC = "currency desc"
DATE_TIME = "dateTime"
DATE_TIME_DESC = "dateTime desc"
DEFAULT_VALUE = "defaultValue"
DEFAULT_VALUE_DESC = "defaultValue desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
ENFORCE_UNIQUE_VALUES_DESC = "enforceUniqueValues desc"
GEOLOCATION = "geolocation"
GEOLOCATION_DESC = "geolocation desc"
HIDDEN = "hidden"
HIDDEN_DESC = "hidden desc"
INDEXED = "indexed"
INDEXED_DESC = "indexed desc"
LOOKUP = "lookup"
LOOKUP_DESC = "lookup desc"
NAME = "name"
NAME_DESC = "name desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
PERSON_OR_GROUP = "personOrGroup"
PERSON_OR_GROUP_DESC = "personOrGroup desc"
READ_ONLY = "readOnly"
READ_ONLY_DESC = "readOnly desc"
REQUIRED = "required"
REQUIRED_DESC = "required desc"
TEXT = "text"
TEXT_DESC = "text desc"
class Enum72(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum73(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum74(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
GROUP = "group"
GROUP_DESC = "group desc"
HIDDEN = "hidden"
HIDDEN_DESC = "hidden desc"
INHERITED_FROM = "inheritedFrom"
INHERITED_FROM_DESC = "inheritedFrom desc"
NAME = "name"
NAME_DESC = "name desc"
ORDER = "order"
ORDER_DESC = "order desc"
PARENT_ID = "parentId"
PARENT_ID_DESC = "parentId desc"
READ_ONLY = "readOnly"
READ_ONLY_DESC = "readOnly desc"
SEALED = "sealed"
SEALED_DESC = "sealed desc"
class Enum75(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
DESCRIPTION = "description"
GROUP = "group"
HIDDEN = "hidden"
INHERITED_FROM = "inheritedFrom"
NAME = "name"
ORDER = "order"
PARENT_ID = "parentId"
READ_ONLY = "readOnly"
SEALED = "sealed"
COLUMN_LINKS = "columnLinks"
class Enum76(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
COLUMN_LINKS = "columnLinks"
class Enum77(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
DESCRIPTION = "description"
GROUP = "group"
HIDDEN = "hidden"
INHERITED_FROM = "inheritedFrom"
NAME = "name"
ORDER = "order"
PARENT_ID = "parentId"
READ_ONLY = "readOnly"
SEALED = "sealed"
COLUMN_LINKS = "columnLinks"
class Enum78(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
COLUMN_LINKS = "columnLinks"
class Enum79(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
NAME = "name"
NAME_DESC = "name desc"
class Enum80(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum81(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum82(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DRIVE_TYPE = "driveType"
OWNER = "owner"
QUOTA = "quota"
SHARE_POINT_IDS = "sharePointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum83(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum84(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DRIVE_TYPE = "driveType"
DRIVE_TYPE_DESC = "driveType desc"
OWNER = "owner"
OWNER_DESC = "owner desc"
QUOTA = "quota"
QUOTA_DESC = "quota desc"
SHARE_POINT_IDS = "sharePointIds"
SHARE_POINT_IDS_DESC = "sharePointIds desc"
SYSTEM = "system"
SYSTEM_DESC = "system desc"
class Enum85(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DRIVE_TYPE = "driveType"
OWNER = "owner"
QUOTA = "quota"
SHARE_POINT_IDS = "sharePointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum86(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum87(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DRIVE_TYPE = "driveType"
OWNER = "owner"
QUOTA = "quota"
SHARE_POINT_IDS = "sharePointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum88(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum89(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
LIST = "list"
LIST_DESC = "list desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SYSTEM = "system"
SYSTEM_DESC = "system desc"
class Enum90(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
LIST = "list"
SHAREPOINT_IDS = "sharepointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
ITEMS = "items"
SUBSCRIPTIONS = "subscriptions"
class Enum91(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
ITEMS = "items"
SUBSCRIPTIONS = "subscriptions"
class Enum92(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
LIST = "list"
SHAREPOINT_IDS = "sharepointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
ITEMS = "items"
SUBSCRIPTIONS = "subscriptions"
class Enum93(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
ITEMS = "items"
SUBSCRIPTIONS = "subscriptions"
class Enum94(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
BOOLEAN = "boolean"
BOOLEAN_DESC = "boolean desc"
CALCULATED = "calculated"
CALCULATED_DESC = "calculated desc"
CHOICE = "choice"
CHOICE_DESC = "choice desc"
COLUMN_GROUP = "columnGroup"
COLUMN_GROUP_DESC = "columnGroup desc"
CURRENCY = "currency"
CURRENCY_DESC = "currency desc"
DATE_TIME = "dateTime"
DATE_TIME_DESC = "dateTime desc"
DEFAULT_VALUE = "defaultValue"
DEFAULT_VALUE_DESC = "defaultValue desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
ENFORCE_UNIQUE_VALUES_DESC = "enforceUniqueValues desc"
GEOLOCATION = "geolocation"
GEOLOCATION_DESC = "geolocation desc"
HIDDEN = "hidden"
HIDDEN_DESC = "hidden desc"
INDEXED = "indexed"
INDEXED_DESC = "indexed desc"
LOOKUP = "lookup"
LOOKUP_DESC = "lookup desc"
NAME = "name"
NAME_DESC = "name desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
PERSON_OR_GROUP = "personOrGroup"
PERSON_OR_GROUP_DESC = "personOrGroup desc"
READ_ONLY = "readOnly"
READ_ONLY_DESC = "readOnly desc"
REQUIRED = "required"
REQUIRED_DESC = "required desc"
TEXT = "text"
TEXT_DESC = "text desc"
class Enum95(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum96(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum97(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
GROUP = "group"
GROUP_DESC = "group desc"
HIDDEN = "hidden"
HIDDEN_DESC = "hidden desc"
INHERITED_FROM = "inheritedFrom"
INHERITED_FROM_DESC = "inheritedFrom desc"
NAME = "name"
NAME_DESC = "name desc"
ORDER = "order"
ORDER_DESC = "order desc"
PARENT_ID = "parentId"
PARENT_ID_DESC = "parentId desc"
READ_ONLY = "readOnly"
READ_ONLY_DESC = "readOnly desc"
SEALED = "sealed"
SEALED_DESC = "sealed desc"
class Enum98(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
DESCRIPTION = "description"
GROUP = "group"
HIDDEN = "hidden"
INHERITED_FROM = "inheritedFrom"
NAME = "name"
ORDER = "order"
PARENT_ID = "parentId"
READ_ONLY = "readOnly"
SEALED = "sealed"
COLUMN_LINKS = "columnLinks"
class Enum99(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
COLUMN_LINKS = "columnLinks"
class Get1ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Get2ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Get3ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Get5ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Get6ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Get7ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Get8ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class MicrosoftGraphActionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
PENDING = "pending"
CANCELED = "canceled"
ACTIVE = "active"
DONE = "done"
FAILED = "failed"
NOT_SUPPORTED = "notSupported"
class MicrosoftGraphAttendeeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
REQUIRED = "required"
OPTIONAL = "optional"
RESOURCE = "resource"
class MicrosoftGraphAutomaticRepliesStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DISABLED = "disabled"
ALWAYS_ENABLED = "alwaysEnabled"
SCHEDULED = "scheduled"
class MicrosoftGraphBodyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
TEXT = "text"
HTML = "html"
class MicrosoftGraphCalendarColor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
LIGHT_BLUE = "lightBlue"
LIGHT_GREEN = "lightGreen"
AUTO = "auto"
LIGHT_ORANGE = "lightOrange"
LIGHT_GRAY = "lightGray"
LIGHT_YELLOW = "lightYellow"
LIGHT_TEAL = "lightTeal"
LIGHT_PINK = "lightPink"
LIGHT_BROWN = "lightBrown"
LIGHT_RED = "lightRed"
MAX_COLOR = "maxColor"
class MicrosoftGraphCalendarRoleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
FREE_BUSY_READ = "freeBusyRead"
LIMITED_READ = "limitedRead"
READ = "read"
WRITE = "write"
DELEGATE_WITHOUT_PRIVATE_EVENT_ACCESS = "delegateWithoutPrivateEventAccess"
DELEGATE_WITH_PRIVATE_EVENT_ACCESS = "delegateWithPrivateEventAccess"
CUSTOM = "custom"
class MicrosoftGraphCategoryColor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
PRESET0 = "preset0"
PRESET1 = "preset1"
NONE = "none"
PRESET2 = "preset2"
PRESET3 = "preset3"
PRESET4 = "preset4"
PRESET5 = "preset5"
PRESET6 = "preset6"
PRESET7 = "preset7"
PRESET8 = "preset8"
PRESET9 = "preset9"
PRESET10 = "preset10"
PRESET11 = "preset11"
PRESET12 = "preset12"
PRESET13 = "preset13"
PRESET14 = "preset14"
PRESET15 = "preset15"
PRESET16 = "preset16"
PRESET17 = "preset17"
PRESET18 = "preset18"
PRESET19 = "preset19"
PRESET20 = "preset20"
PRESET21 = "preset21"
PRESET22 = "preset22"
PRESET23 = "preset23"
PRESET24 = "preset24"
class MicrosoftGraphChannelMembershipType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
STANDARD = "standard"
PRIVATE = "private"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphChatMessageImportance(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NORMAL = "normal"
HIGH = "high"
URGENT = "urgent"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphChatMessagePolicyViolationDlpActionTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
NOTIFY_SENDER = "notifySender"
BLOCK_ACCESS = "blockAccess"
BLOCK_ACCESS_EXTERNAL = "blockAccessExternal"
class MicrosoftGraphChatMessagePolicyViolationUserActionTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
OVERRIDE = "override"
REPORT_FALSE_POSITIVE = "reportFalsePositive"
class MicrosoftGraphChatMessagePolicyViolationVerdictDetailsTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
ALLOW_FALSE_POSITIVE_OVERRIDE = "allowFalsePositiveOverride"
ALLOW_OVERRIDE_WITHOUT_JUSTIFICATION = "allowOverrideWithoutJustification"
ALLOW_OVERRIDE_WITH_JUSTIFICATION = "allowOverrideWithJustification"
class MicrosoftGraphChatMessageType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MESSAGE = "message"
CHAT_EVENT = "chatEvent"
TYPING = "typing"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphComplianceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
COMPLIANT = "compliant"
NONCOMPLIANT = "noncompliant"
CONFLICT = "conflict"
ERROR = "error"
IN_GRACE_PERIOD = "inGracePeriod"
CONFIG_MANAGER = "configManager"
class MicrosoftGraphComplianceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
NOT_APPLICABLE = "notApplicable"
COMPLIANT = "compliant"
REMEDIATED = "remediated"
NON_COMPLIANT = "nonCompliant"
ERROR = "error"
CONFLICT = "conflict"
NOT_ASSIGNED = "notAssigned"
class MicrosoftGraphDayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
SUNDAY = "sunday"
MONDAY = "monday"
TUESDAY = "tuesday"
WEDNESDAY = "wednesday"
THURSDAY = "thursday"
FRIDAY = "friday"
SATURDAY = "saturday"
class MicrosoftGraphDelegateMeetingMessageDeliveryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
SEND_TO_DELEGATE_AND_INFORMATION_TO_PRINCIPAL = "sendToDelegateAndInformationToPrincipal"
SEND_TO_DELEGATE_AND_PRINCIPAL = "sendToDelegateAndPrincipal"
SEND_TO_DELEGATE_ONLY = "sendToDelegateOnly"
class MicrosoftGraphDeviceEnrollmentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
USER_ENROLLMENT = "userEnrollment"
DEVICE_ENROLLMENT_MANAGER = "deviceEnrollmentManager"
APPLE_BULK_WITH_USER = "appleBulkWithUser"
APPLE_BULK_WITHOUT_USER = "appleBulkWithoutUser"
WINDOWS_AZURE_AD_JOIN = "windowsAzureADJoin"
WINDOWS_BULK_USERLESS = "windowsBulkUserless"
WINDOWS_AUTO_ENROLLMENT = "windowsAutoEnrollment"
WINDOWS_BULK_AZURE_DOMAIN_JOIN = "windowsBulkAzureDomainJoin"
WINDOWS_CO_MANAGEMENT = "windowsCoManagement"
class MicrosoftGraphDeviceManagementExchangeAccessState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
UNKNOWN = "unknown"
ALLOWED = "allowed"
BLOCKED = "blocked"
QUARANTINED = "quarantined"
class MicrosoftGraphDeviceManagementExchangeAccessStateReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
UNKNOWN = "unknown"
EXCHANGE_GLOBAL_RULE = "exchangeGlobalRule"
EXCHANGE_INDIVIDUAL_RULE = "exchangeIndividualRule"
EXCHANGE_DEVICE_RULE = "exchangeDeviceRule"
EXCHANGE_UPGRADE = "exchangeUpgrade"
EXCHANGE_MAILBOX_POLICY = "exchangeMailboxPolicy"
OTHER = "other"
COMPLIANT = "compliant"
NOT_COMPLIANT = "notCompliant"
NOT_ENROLLED = "notEnrolled"
UNKNOWN_LOCATION = "unknownLocation"
MFA_REQUIRED = "mfaRequired"
AZURE_AD_BLOCK_DUE_TO_ACCESS_POLICY = "azureADBlockDueToAccessPolicy"
COMPROMISED_PASSWORD = "compromisedPassword"
DEVICE_NOT_KNOWN_WITH_MANAGED_APP = "deviceNotKnownWithManagedApp"
class MicrosoftGraphDeviceRegistrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_REGISTERED = "notRegistered"
REGISTERED = "registered"
REVOKED = "revoked"
KEY_CONFLICT = "keyConflict"
APPROVAL_PENDING = "approvalPending"
CERTIFICATE_RESET = "certificateReset"
NOT_REGISTERED_PENDING_ENROLLMENT = "notRegisteredPendingEnrollment"
UNKNOWN = "unknown"
class MicrosoftGraphEventType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
SINGLE_INSTANCE = "singleInstance"
OCCURRENCE = "occurrence"
EXCEPTION = "exception"
SERIES_MASTER = "seriesMaster"
class MicrosoftGraphExternalAudienceScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
CONTACTS_ONLY = "contactsOnly"
ALL = "all"
class MicrosoftGraphFollowupFlagStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_FLAGGED = "notFlagged"
COMPLETE = "complete"
FLAGGED = "flagged"
class MicrosoftGraphFreeBusyStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
FREE = "free"
TENTATIVE = "tentative"
UNKNOWN = "unknown"
BUSY = "busy"
OOF = "oof"
WORKING_ELSEWHERE = "workingElsewhere"
class MicrosoftGraphGiphyRatingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
STRICT = "strict"
MODERATE = "moderate"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphImportance(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
LOW = "low"
NORMAL = "normal"
HIGH = "high"
class MicrosoftGraphInferenceClassificationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
FOCUSED = "focused"
OTHER = "other"
class MicrosoftGraphLocationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "default"
CONFERENCE_ROOM = "conferenceRoom"
HOME_ADDRESS = "homeAddress"
BUSINESS_ADDRESS = "businessAddress"
GEO_COORDINATES = "geoCoordinates"
STREET_ADDRESS = "streetAddress"
HOTEL = "hotel"
RESTAURANT = "restaurant"
LOCAL_BUSINESS = "localBusiness"
POSTAL_ADDRESS = "postalAddress"
class MicrosoftGraphLocationUniqueIdType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
LOCATION_STORE = "locationStore"
DIRECTORY = "directory"
PRIVATE = "private"
BING = "bing"
class MicrosoftGraphManagedAppFlaggedReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
ROOTED_DEVICE = "rootedDevice"
class MicrosoftGraphManagedDeviceOwnerType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
COMPANY = "company"
PERSONAL = "personal"
class MicrosoftGraphManagedDevicePartnerReportedHealthState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
ACTIVATED = "activated"
DEACTIVATED = "deactivated"
SECURED = "secured"
LOW_SEVERITY = "lowSeverity"
MEDIUM_SEVERITY = "mediumSeverity"
HIGH_SEVERITY = "highSeverity"
UNRESPONSIVE = "unresponsive"
COMPROMISED = "compromised"
MISCONFIGURED = "misconfigured"
class MicrosoftGraphManagementAgentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
EAS = "eas"
MDM = "mdm"
EAS_MDM = "easMdm"
INTUNE_CLIENT = "intuneClient"
EAS_INTUNE_CLIENT = "easIntuneClient"
CONFIGURATION_MANAGER_CLIENT = "configurationManagerClient"
CONFIGURATION_MANAGER_CLIENT_MDM = "configurationManagerClientMdm"
CONFIGURATION_MANAGER_CLIENT_MDM_EAS = "configurationManagerClientMdmEas"
UNKNOWN = "unknown"
JAMF = "jamf"
GOOGLE_CLOUD_DEVICE_POLICY_CONTROLLER = "googleCloudDevicePolicyController"
class MicrosoftGraphMessageActionFlag(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ANY = "any"
CALL = "call"
DO_NOT_FORWARD = "doNotForward"
FOLLOW_UP = "followUp"
FYI = "fyi"
FORWARD = "forward"
NO_RESPONSE_NECESSARY = "noResponseNecessary"
READ = "read"
REPLY = "reply"
REPLY_TO_ALL = "replyToAll"
REVIEW = "review"
class MicrosoftGraphOnenotePatchActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
REPLACE = "Replace"
APPEND = "Append"
DELETE = "Delete"
INSERT = "Insert"
PREPEND = "Prepend"
class MicrosoftGraphOnenotePatchInsertPosition(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AFTER = "After"
BEFORE = "Before"
class MicrosoftGraphOnenoteSourceService(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "Unknown"
ONE_DRIVE = "OneDrive"
ONE_DRIVE_FOR_BUSINESS = "OneDriveForBusiness"
ON_PREM_ONE_DRIVE_FOR_BUSINESS = "OnPremOneDriveForBusiness"
class MicrosoftGraphOnenoteUserRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
OWNER = "Owner"
CONTRIBUTOR = "Contributor"
NONE = "None"
READER = "Reader"
class MicrosoftGraphOnlineMeetingProviderType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
SKYPE_FOR_BUSINESS = "skypeForBusiness"
SKYPE_FOR_CONSUMER = "skypeForConsumer"
TEAMS_FOR_BUSINESS = "teamsForBusiness"
class MicrosoftGraphOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "NotStarted"
RUNNING = "Running"
COMPLETED = "Completed"
FAILED = "Failed"
class MicrosoftGraphPhoneType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
HOME = "home"
BUSINESS = "business"
MOBILE = "mobile"
OTHER = "other"
ASSISTANT = "assistant"
HOME_FAX = "homeFax"
BUSINESS_FAX = "businessFax"
OTHER_FAX = "otherFax"
PAGER = "pager"
RADIO = "radio"
class MicrosoftGraphPlannerPreviewType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AUTOMATIC = "automatic"
NO_PREVIEW = "noPreview"
CHECKLIST = "checklist"
DESCRIPTION = "description"
REFERENCE = "reference"
class MicrosoftGraphPolicyPlatformType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ANDROID = "android"
I_OS = "iOS"
MAC_OS = "macOS"
WINDOWS_PHONE81 = "windowsPhone81"
WINDOWS81_AND_LATER = "windows81AndLater"
WINDOWS10_AND_LATER = "windows10AndLater"
ANDROID_WORK_PROFILE = "androidWorkProfile"
ALL = "all"
class MicrosoftGraphRecurrencePatternType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DAILY = "daily"
WEEKLY = "weekly"
ABSOLUTE_MONTHLY = "absoluteMonthly"
RELATIVE_MONTHLY = "relativeMonthly"
ABSOLUTE_YEARLY = "absoluteYearly"
RELATIVE_YEARLY = "relativeYearly"
class MicrosoftGraphRecurrenceRangeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
END_DATE = "endDate"
NO_END = "noEnd"
NUMBERED = "numbered"
class MicrosoftGraphResponseType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
ORGANIZER = "organizer"
TENTATIVELY_ACCEPTED = "tentativelyAccepted"
ACCEPTED = "accepted"
DECLINED = "declined"
NOT_RESPONDED = "notResponded"
class MicrosoftGraphScheduleChangeRequestActor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
SENDER = "sender"
RECIPIENT = "recipient"
MANAGER = "manager"
SYSTEM = "system"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphScheduleChangeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
PENDING = "pending"
APPROVED = "approved"
DECLINED = "declined"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphScheduleEntityTheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
WHITE = "white"
BLUE = "blue"
GREEN = "green"
PURPLE = "purple"
PINK = "pink"
YELLOW = "yellow"
GRAY = "gray"
DARK_BLUE = "darkBlue"
DARK_GREEN = "darkGreen"
DARK_PURPLE = "darkPurple"
DARK_PINK = "darkPink"
DARK_YELLOW = "darkYellow"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphSelectionLikelihoodInfo(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_SPECIFIED = "notSpecified"
HIGH = "high"
class MicrosoftGraphSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NORMAL = "normal"
PERSONAL = "personal"
PRIVATE = "private"
CONFIDENTIAL = "confidential"
class MicrosoftGraphStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ACTIVE = "active"
UPDATED = "updated"
DELETED = "deleted"
IGNORED = "ignored"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAppDistributionMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
STORE = "store"
ORGANIZATION = "organization"
SIDELOADED = "sideloaded"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAsyncOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
INVALID = "invalid"
NOT_STARTED = "notStarted"
IN_PROGRESS = "inProgress"
SUCCEEDED = "succeeded"
FAILED = "failed"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAsyncOperationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
INVALID = "invalid"
CLONE_TEAM = "cloneTeam"
ARCHIVE_TEAM = "archiveTeam"
UNARCHIVE_TEAM = "unarchiveTeam"
CREATE_TEAM = "createTeam"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamSpecialization(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
EDUCATION_STANDARD = "educationStandard"
EDUCATION_CLASS = "educationClass"
EDUCATION_PROFESSIONAL_LEARNING_COMMUNITY = "educationProfessionalLearningCommunity"
EDUCATION_STAFF = "educationStaff"
HEALTHCARE_STANDARD = "healthcareStandard"
HEALTHCARE_CARE_COORDINATION = "healthcareCareCoordination"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamVisibilityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
PRIVATE = "private"
PUBLIC = "public"
HIDDEN_MEMBERSHIP = "hiddenMembership"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTimeOffReasonIconType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NONE = "none"
CAR = "car"
CALENDAR = "calendar"
RUNNING = "running"
PLANE = "plane"
FIRST_AID = "firstAid"
DOCTOR = "doctor"
NOT_WORKING = "notWorking"
CLOCK = "clock"
JURY_DUTY = "juryDuty"
GLOBE = "globe"
CUP = "cup"
PHONE = "phone"
WEATHER = "weather"
UMBRELLA = "umbrella"
PIGGY_BANK = "piggyBank"
DOG = "dog"
CAKE = "cake"
TRAFFIC_CONE = "trafficCone"
PIN = "pin"
SUNNY = "sunny"
UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphWebsiteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
OTHER = "other"
HOME = "home"
WORK = "work"
BLOG = "blog"
PROFILE = "profile"
class MicrosoftGraphWeekIndex(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
FIRST = "first"
SECOND = "second"
THIRD = "third"
FOURTH = "fourth"
LAST = "last"
class MicrosoftGraphWorkbookOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "notStarted"
RUNNING = "running"
SUCCEEDED = "succeeded"
FAILED = "failed"
| 30.259052
| 119
| 0.701945
| 58,532
| 0.986417
| 0
| 0
| 0
| 0
| 0
| 0
| 19,069
| 0.321362
|
c03be9166bd151ec0d6a3cb24a69aeb0b4160c8e
| 456
|
py
|
Python
|
evntbus/decorators.py
|
jmwri/eventbus
|
fe91ab2486b99bffb0232c23d45d0c5dedce1b42
|
[
"MIT"
] | null | null | null |
evntbus/decorators.py
|
jmwri/eventbus
|
fe91ab2486b99bffb0232c23d45d0c5dedce1b42
|
[
"MIT"
] | null | null | null |
evntbus/decorators.py
|
jmwri/eventbus
|
fe91ab2486b99bffb0232c23d45d0c5dedce1b42
|
[
"MIT"
] | null | null | null |
import typing

if typing.TYPE_CHECKING:
    from evntbus.bus import Bus


def listen_decorator(evntbus: 'Bus'):
    """Build a decorator class bound to the given event bus."""
    class ListenDecorator(object):
        def __init__(self, event: typing.Type, priority: int = 5):
            self.event = event
            self.priority = priority

        def __call__(self, f: typing.Callable) -> typing.Callable:
            # Register the decorated callable as a listener, then return it unchanged.
            evntbus.listen(self.event, f, self.priority)
            return f

    return ListenDecorator
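As a hedged usage sketch, the stand-in bus below only mimics the listen(event, handler, priority) signature that the decorator relies on; the real evntbus.bus.Bus API is not shown in this file and is assumed.

import typing

class _StandInBus(object):
    # Minimal stand-in for illustration; not the real evntbus Bus.
    def __init__(self):
        self.handlers = []

    def listen(self, event: typing.Type, handler: typing.Callable, priority: int) -> None:
        self.handlers.append((priority, event, handler))

    def emit(self, event) -> None:
        for _, event_type, handler in sorted(self.handlers, key=lambda h: h[0]):
            if isinstance(event, event_type):
                handler(event)

class UserCreated(object):
    def __init__(self, name):
        self.name = name

bus = _StandInBus()
listen = listen_decorator(bus)

@listen(UserCreated, priority=1)
def greet(event):
    print('hello,', event.name)

bus.emit(UserCreated('Ada'))  # prints: hello, Ada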
| 25.333333
| 66
| 0.644737
| 311
| 0.682018
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.010965
|
c03c898e35d62712b812e780c7c19ccba395542b
| 1,481
|
py
|
Python
|
src/shortcircuit/model/crestprocessor.py
|
farshield/shortcircu
|
87d45ea85b78e3e7da72b7b44755dc429b4fdf5a
|
[
"MIT"
] | 35
|
2016-06-22T20:07:31.000Z
|
2021-04-07T11:02:08.000Z
|
src/shortcircuit/model/crestprocessor.py
|
farshield/shortcircu
|
87d45ea85b78e3e7da72b7b44755dc429b4fdf5a
|
[
"MIT"
] | 15
|
2016-06-17T09:36:02.000Z
|
2020-10-30T11:39:07.000Z
|
src/shortcircuit/model/crestprocessor.py
|
farshield/shortcircu
|
87d45ea85b78e3e7da72b7b44755dc429b4fdf5a
|
[
"MIT"
] | 16
|
2016-10-02T16:09:18.000Z
|
2021-05-29T02:51:14.000Z
|
# crestprocessor.py
import threading
from PySide import QtCore
from crest.crest import Crest
class CrestProcessor(QtCore.QObject):
"""
CREST Middle-ware
"""
login_response = QtCore.Signal(str)
logout_response = QtCore.Signal()
location_response = QtCore.Signal(str)
destination_response = QtCore.Signal(bool)
def __init__(self, implicit, client_id, client_secret, parent=None):
super(CrestProcessor, self).__init__(parent)
self.crest = Crest(implicit, client_id, client_secret, self._login_callback, self._logout_callback)
def login(self):
return self.crest.start_server()
def logout(self):
self.crest.logout()
def get_location(self):
server_thread = threading.Thread(target=self._get_location)
server_thread.setDaemon(True)
server_thread.start()
def _get_location(self):
location = self.crest.get_char_location()
self.location_response.emit(location)
def set_destination(self, sys_id):
server_thread = threading.Thread(target=self._set_destination, args=(sys_id, ))
server_thread.setDaemon(True)
server_thread.start()
def _set_destination(self, sys_id):
response = self.crest.set_char_destination(sys_id)
self.destination_response.emit(response)
def _login_callback(self, char_name):
self.login_response.emit(char_name)
def _logout_callback(self):
self.logout_response.emit()
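A brief wiring sketch follows; the window class and the placeholder client id are assumptions and not part of this project. It shows the intended pattern: slow CREST calls run on daemon threads, and the GUI reacts to the Qt signals they emit.

from PySide import QtGui

class MainWindow(QtGui.QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        # Placeholder credentials; real values would come from application config.
        self.crestp = CrestProcessor(implicit=True, client_id='<client-id>', client_secret=None, parent=self)
        self.crestp.login_response.connect(self.on_login)
        self.crestp.location_response.connect(self.on_location)

    def on_login(self, char_name):
        self.statusBar().showMessage('Logged in as ' + char_name)
        self.crestp.get_location()  # non-blocking; result arrives via location_response

    def on_location(self, location):
        self.statusBar().showMessage('Current system: ' + location)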
| 29.62
| 107
| 0.704929
| 1,384
| 0.934504
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.035111
|
c03d2bdffd5f75d12bc1d6868d5c20f3a01b1c33
| 4,496
|
py
|
Python
|
src/commands/pipelines.py
|
vicobits/sawi-cli
|
0e3717e0e3d853599b87f8ea147a3f1e9566344b
|
[
"MIT"
] | 1
|
2019-05-02T05:16:07.000Z
|
2019-05-02T05:16:07.000Z
|
src/commands/pipelines.py
|
vicobits/wise-cli
|
0e3717e0e3d853599b87f8ea147a3f1e9566344b
|
[
"MIT"
] | null | null | null |
src/commands/pipelines.py
|
vicobits/wise-cli
|
0e3717e0e3d853599b87f8ea147a3f1e9566344b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
from src.commands.project import Project
from src.commands.server import Server
from src.commands.config import WebServer
from src.common.context import CommandContext
from src.common.decorators import settings, update_config_file
class Pipeline:
@staticmethod
@settings(allow_sudo=True)
def update(context: CommandContext):
context.connection.sudo('apt-get update')
context.connection.sudo('apt-get upgrade -y')
@staticmethod
@settings(allow_sudo=True)
def deps(context: CommandContext):
Server.deps(context)
@staticmethod
@settings(allow_sudo=True)
def setup_server(context: CommandContext):
Server.deps(context)
Server.user(context)
Server.group(context)
Server.layout(context)
Server.create_db(context)
Server.fix_permissions(context)
Server.git(context)
Server.add_remote(context)
Server.web_server(context)
Server.gunicorn(context)
Server.supervisor(context)
Server.fix_permissions(context)
Server.letsencrypt(context)
@staticmethod
@settings(allow_sudo=True)
def clean_server(context: CommandContext):
"""
Uninstall app in selected server(s)
"""
Server.clean(context)
@staticmethod
@settings(allow_sudo=True)
def restart_server(context: CommandContext):
"""
Restart all app services.
"""
Server.restart_services(context)
@staticmethod
@settings()
def deploy(context: CommandContext):
Project.push(context)
Project.environment(context)
Project.install(context)
Project.clean(context)
@staticmethod
@settings(allow_sudo=True)
def fix_permissions(context: CommandContext):
Server.fix_permissions(context)
@staticmethod
@settings()
def add_remote(context: CommandContext):
Server.add_remote(context)
@staticmethod
@settings(allow_sudo=True)
def createsuperuser(context: CommandContext):
"""
Create a project superuser in selected server(s).
"""
Project.create_superuser(context)
@staticmethod
@settings()
def run_command(context: CommandContext, command):
Project.run_command(context, command)
@staticmethod
@settings()
def migrate(context: CommandContext):
Project.migrate(context)
@staticmethod
@settings()
def load_fixtures(context: CommandContext):
Project.load_fixtures(context)
@staticmethod
@settings(only_local=True)
def upload_sshkey(context: CommandContext):
"""
Upload SSH key to server.
"""
Project.upload_key(context)
@staticmethod
@settings(allow_sudo=True)
def setup_ssl(context: CommandContext, artifact=None):
if artifact:
if not context.config.https:
is_agree = input(
                    'We will change the value of [https] in your config file. Do you agree? Y/n: '
) or 'n'
if is_agree.upper() == "Y":
update_config_file(key="https", value=True)
if artifact == 'renew':
Server.renew_ssl(context)
elif artifact == WebServer.NGINX.value:
Server.nginx(context)
else:
                click.echo(click.style('[{0}] is not implemented'.format(artifact), fg='red'))
else:
Server.certbot(context)
Server.letsencrypt(context)
@staticmethod
@settings(allow_sudo=True)
def server_language(context: CommandContext):
if context.connection.run('echo $LANG').ok:
context.connection.sudo('echo "LANG=C.UTF-8" >> /etc/environment')
if context.connection.run('echo $LC_CTYPE').ok:
context.connection.sudo('echo "LC_CTYPE=C.UTF-8" >> /etc/environment')
if context.connection.run('echo $LC_ALL').ok:
context.connection.sudo('echo "LC_ALL=C.UTF-8" >> /etc/environment')
@staticmethod
@settings(allow_sudo=True)
def reset_db(context: CommandContext):
Server.reset_db(context)
# @classmethod
# def make_backup(cls):
# Global.set_user(superuser=True)
# with settings(hide('warnings'), warn_only=True):
# execute(Project.backup, hosts=env.hosts)
# execute(Project.download_backup, hosts=env.hosts)
#
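Purely as an illustration (the CommandContext construction below is an assumption; the project's actual CLI wiring is not shown here), a click command could drive one of these pipeline steps like this:

import click

@click.command()
def deploy():
    context = CommandContext()  # assumed constructor: builds the config and server connection
    Pipeline.deploy(context)

if __name__ == '__main__':
    deploy()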
| 28.636943
| 99
| 0.635899
| 4,222
| 0.939057
| 0
| 0
| 3,837
| 0.853425
| 0
| 0
| 830
| 0.184609
|
c03e4bfd7eee3d8023944a7e3e5535ae1233ba11
| 1,341
|
py
|
Python
|
build.py
|
jmetzz/coffee-chatbot
|
da7e76d9532c8e5e38a47a19ffed1f1e27601766
|
[
"MIT"
] | null | null | null |
build.py
|
jmetzz/coffee-chatbot
|
da7e76d9532c8e5e38a47a19ffed1f1e27601766
|
[
"MIT"
] | null | null | null |
build.py
|
jmetzz/coffee-chatbot
|
da7e76d9532c8e5e38a47a19ffed1f1e27601766
|
[
"MIT"
] | null | null | null |
from pybuilder.core import use_plugin, init
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
name = "ActionServerPybuilder"
default_task = ['install_dependencies', 'analyze', 'publish']
@init
def set_properties(project):
project.build_depends_on('tblib')
project.build_depends_on('mockito')
project.build_depends_on('parameterized')
project.build_depends_on('responses')
@init
def initialize_flake8_plugin(project):
project.build_depends_on("flake8")
project.set_property('unittest_module_glob', 'test_*')
project.set_property("flake8_verbose_output", True)
project.set_property("flake8_break_build", True)
project.set_property("flake8_max_line_length", 120)
project.set_property("flake8_exclude_patterns", None)
project.set_property("flake8_include_test_sources", False)
project.set_property("flake8_include_scripts", False)
@init
def initialize_coverage_plugin(project):
project.set_property('coverage_break_build', False)
project.set_property('coverage_threshold_warn', 80)
    # For now, code coverage does not break the build.
    # Since Python is a dynamically typed, scripted language, aim for 100% coverage!
project.set_property("coverage_exceptions", ['endpoint'])
| 35.289474
| 78
| 0.774049
| 0
| 0
| 0
| 0
| 1,039
| 0.774795
| 0
| 0
| 580
| 0.432513
|
c03eda4a030a4816bf3db4784bc7ac9588f4b176
| 4,278
|
py
|
Python
|
electrumsv/devices/hw_wallet/plugin.py
|
tuoshao/electrumsv
|
5f0132cafa2c90bb36c8a574874e027e44a637e6
|
[
"MIT"
] | 1
|
2021-12-28T10:52:11.000Z
|
2021-12-28T10:52:11.000Z
|
electrumsv/devices/hw_wallet/plugin.py
|
SomberNight/electrumsv
|
28262e3cab7b73e4960466f8aee252975953acf8
|
[
"MIT"
] | null | null | null |
electrumsv/devices/hw_wallet/plugin.py
|
SomberNight/electrumsv
|
28262e3cab7b73e4960466f8aee252975953acf8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.util import versiontuple
from .cmdline import CmdLineHandler
class HW_PluginBase(object):
hid_lock = threading.Lock()
def __init__(self, device_kind):
self.device = self.keystore_class.device
self.name = device_kind
self.logger = logs.get_logger(device_kind)
def create_keystore(self, d):
keystore = self.keystore_class(d)
keystore.plugin = self
# This should be replaced when a window is opened in the gui
keystore.gui_handler = CmdLineHandler()
return keystore
def is_enabled(self):
return True
def get_library_version(self) -> str:
"""Returns the version of the 3rd party python library
for the hw wallet. For example '0.9.0'
Returns 'unknown' if library is found but cannot determine version.
Raises 'ImportError' if library is not found.
Raises 'LibraryFoundButUnusable' if found but there was a problem (includes version num).
"""
raise NotImplementedError()
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown' or
versiontuple(library_version) < self.minimum_library or
hasattr(self, "maximum_library") and
versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
max_version_str = (version_str(self.maximum_library)
if hasattr(self, "maximum_library") else "inf")
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), max_version_str))
self.logger.warning(self.libraries_available_message)
return False
return True
def get_library_not_available_message(self) -> str:
if hasattr(self, 'libraries_available_message'):
message = self.libraries_available_message
else:
message = _("Missing libraries for {}.").format(self.name)
message += '\n' + _("Make sure you install it with python3")
return message
def enumerate_devices(self):
raise NotImplementedError
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
super().__init__()
self.library_version = library_version
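To make the version-check contract concrete, here is an illustrative-only subclass; the fakewallet package, the version bounds, and the omitted keystore_class are all hypothetical and not part of ElectrumSV.

class FakeWalletPlugin(HW_PluginBase):
    # keystore_class would normally be set here as well; omitted since only the
    # library check is being illustrated.
    minimum_library = (0, 9, 0)
    maximum_library = (1, 0, 0)

    def get_library_version(self) -> str:
        import fakewallet  # hypothetical third-party library; ImportError propagates
        return getattr(fakewallet, '__version__', 'unknown')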
| 39.611111
| 97
| 0.679056
| 2,917
| 0.681861
| 0
| 0
| 0
| 0
| 0
| 0
| 1,940
| 0.453483
|
c0418cbebf8e032e1171fe327cac277a1bbb13e1
| 631
|
py
|
Python
|
api_service/tests/test_model_ids.py
|
seattleflu/Seattle-Flu-Incidence-Mapper
|
2b72e53da974874b98e1811cdb77e170c33999f1
|
[
"MIT"
] | 6
|
2019-03-22T18:28:04.000Z
|
2021-02-23T03:53:19.000Z
|
api_service/tests/test_model_ids.py
|
seattleflu/Seattle-Flu-Incidence-Mapper
|
2b72e53da974874b98e1811cdb77e170c33999f1
|
[
"MIT"
] | 103
|
2019-04-03T15:30:06.000Z
|
2021-11-15T17:48:22.000Z
|
api_service/tests/test_model_ids.py
|
seattleflu/incidence-mapper
|
2b72e53da974874b98e1811cdb77e170c33999f1
|
[
"MIT"
] | 6
|
2019-07-01T04:43:44.000Z
|
2021-02-13T21:46:18.000Z
|
import unittest
from seattle_flu_incidence_mapper.utils import get_model_id
class TestGetIdFromQuery(unittest.TestCase):
def test_ids_match_expected(self):
ids = {
"f39442e6883958971ecc1d0213c59f91": {"model_type":"inla","observed":["encountered_week","flu_shot","PUMA5CE","sampling_location"],"pathogen":["vic"]},
"29cc23488ba96c938113852c28b55c13": {"model_type":"inla latent","observed":["encountered_week","pathogen","PUMA5CE"],"pathogen":["vic"]}
}
for id, query_obj in ids.items():
gen_id = get_model_id(query_obj)
self.assertEqual(id, gen_id)
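The test pins each query object to a stable 32-character id. As a hedged sketch only, and not necessarily how get_model_id is really implemented, one common way to derive such an id is to hash a canonical JSON form of the query:

import hashlib
import json

def model_id_sketch(query_obj):
    # Canonicalise key order so logically equal queries hash identically.
    canonical = json.dumps(query_obj, sort_keys=True, separators=(',', ':'))
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()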
| 39.4375
| 163
| 0.681458
| 551
| 0.873217
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.402536
|
c04331c5a5c72cc4fd22977bf1a531a2facdca4e
| 445
|
py
|
Python
|
Cleaning.py
|
TharindraParanagama/MovieClassification
|
2cdee9a2aaf1f55d0a59b20181e69c524c4d5895
|
[
"MIT"
] | null | null | null |
Cleaning.py
|
TharindraParanagama/MovieClassification
|
2cdee9a2aaf1f55d0a59b20181e69c524c4d5895
|
[
"MIT"
] | null | null | null |
Cleaning.py
|
TharindraParanagama/MovieClassification
|
2cdee9a2aaf1f55d0a59b20181e69c524c4d5895
|
[
"MIT"
] | null | null | null |
import csv

# Copy rows from MovieI.csv to MovieO.csv, keeping only rows whose first five
# columns are all non-empty.
infile = open('MovieI.csv', 'r', newline='')
outfile = open('MovieO.csv', 'w', newline='')
writer = csv.writer(outfile)
for row in csv.reader(infile):
    if all(row[i] != '' for i in range(5)):
        writer.writerow(row)
infile.close()
outfile.close()
| 21.190476
| 33
| 0.483146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.094382
|
c0451d8d32195eb2257b24e61657609915f300f2
| 87
|
py
|
Python
|
venues/apps.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 18
|
2018-12-06T01:46:37.000Z
|
2021-10-17T10:37:17.000Z
|
venues/apps.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 194
|
2018-11-04T12:50:49.000Z
|
2022-01-06T22:43:43.000Z
|
venues/apps.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 7
|
2019-03-18T05:36:06.000Z
|
2020-12-25T03:27:29.000Z
|
from django.apps import AppConfig
class VenuesConfig(AppConfig):
name = "venues"
| 14.5
| 33
| 0.747126
| 50
| 0.574713
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.091954
|
c045d1511440dddecfef10dbcd54c672252a332e
| 1,137
|
py
|
Python
|
problems/remove-duplicates-from-sorted-list.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/remove-duplicates-from-sorted-list.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/remove-duplicates-from-sorted-list.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
"""
The key is to use a set to remember whether we have already seen a node's value.
Next, think about how we are going to *remove* a duplicate node.
The answer is to simply link the previous node to the next node.
So we keep a pointer `prev` on the previous node as we iterate the linked list.
So, the solution.
Create a set `seen`. #[1]
Point `prev` at the first node and `curr` at the second.
Now we iterate through the linked list.
* If we have not seen the node's value yet, add it to `seen` and move `prev` and `curr` forward. #[2]
* If we have seen it, *remove* the `curr` node by linking `prev` to `curr.next`, then move `curr` forward. #[3]
Return the `head`
"""
class Solution(object):
def deleteDuplicates(self, head):
if head is None or head.next is None: return head
prev = head
curr = head.next
seen = set() #[1]
seen.add(prev.val)
while curr:
if curr.val not in seen: #[2]
seen.add(curr.val)
curr = curr.next
prev = prev.next
else: #[3]
prev.next = curr.next #remove
curr = curr.next
return head
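A quick self-contained check of the approach above; the ListNode class is the usual singly linked list node assumed by the problem statement.

class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

print(to_list(Solution().deleteDuplicates(build([1, 1, 2, 3, 3]))))  # [1, 2, 3]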
| 34.454545
| 88
| 0.602463
| 504
| 0.443272
| 0
| 0
| 0
| 0
| 0
| 0
| 650
| 0.57168
|
c046ab37f041136a24de450d5779fbb10cbaed54
| 3,344
|
py
|
Python
|
corehq/apps/analytics/signals.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/apps/analytics/signals.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/apps/analytics/signals.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from corehq.apps.analytics.tasks import (
track_user_sign_in_on_hubspot,
HUBSPOT_COOKIE,
update_hubspot_properties,
identify,
update_subscription_properties_by_domain, get_subscription_properties_by_user)
from corehq.apps.analytics.utils import get_meta
from corehq.apps.registration.views import ProcessRegistrationView
from corehq.util.decorators import handle_uncaught_exceptions
from corehq.util.python_compatibility import soft_assert_type_text
from corehq.util.soft_assert import soft_assert
from django.dispatch import receiver
from django.urls import reverse
from corehq.apps.users.models import CouchUser
from corehq.apps.accounting.signals import subscription_upgrade_or_downgrade
from corehq.apps.domain.signals import commcare_domain_post_save
from corehq.apps.users.signals import couch_user_post_save
from corehq.apps.analytics.utils import get_instance_string
_no_cookie_soft_assert = soft_assert(to=['{}@{}'.format('cellowitz', 'dimagi.com'),
'{}@{}'.format('biyeun', 'dimagi.com'),
'{}@{}'.format('jschweers', 'dimagi.com')],
send_to_ops=False)
@receiver(couch_user_post_save)
def user_save_callback(sender, **kwargs):
couch_user = kwargs.get("couch_user", None)
if couch_user and couch_user.is_web_user():
properties = {}
properties.update(get_subscription_properties_by_user(couch_user))
properties.update(get_domain_membership_properties(couch_user))
identify.delay(couch_user.username, properties)
update_hubspot_properties.delay(couch_user, properties)
@receiver(commcare_domain_post_save)
@receiver(subscription_upgrade_or_downgrade)
def domain_save_callback(sender, domain, **kwargs):
if isinstance(domain, six.string_types):
soft_assert_type_text(domain)
domain_name = domain
else:
domain_name = domain.name
update_subscription_properties_by_domain(domain_name)
def get_domain_membership_properties(couch_user):
env = get_instance_string()
return {
"{}number_of_project_spaces".format(env): len(couch_user.domains),
"{}project_spaces_list".format(env): '\n'.join(couch_user.domains),
}
@receiver(user_logged_in)
@handle_uncaught_exceptions(mail_admins=True)
def track_user_login(sender, request, user, **kwargs):
if settings.ANALYTICS_IDS.get('HUBSPOT_API_ID'):
couch_user = CouchUser.from_django_user(user)
if couch_user and couch_user.is_web_user():
if not request or HUBSPOT_COOKIE not in request.COOKIES:
# API calls, form submissions etc.
user_confirming = request.path.startswith(reverse(ProcessRegistrationView.urlname))
if user_confirming:
_no_cookie_soft_assert(False, 'User confirmed account but had no cookie')
else:
return
meta = get_meta(request)
track_user_sign_in_on_hubspot.delay(couch_user, request.COOKIES.get(HUBSPOT_COOKIE),
meta, request.path)
| 39.341176
| 99
| 0.720993
| 0
| 0
| 0
| 0
| 1,695
| 0.506878
| 0
| 0
| 246
| 0.073565
|
c046c72c4e753549e8ec891d9f48179094bc06ed
| 775
|
py
|
Python
|
manage.py
|
BeyondLam/Flask_Blog_Python3
|
274c932e9ea28bb6c83335e408a2cd9f1cf4fcb6
|
[
"Apache-2.0"
] | 2
|
2019-10-25T16:35:41.000Z
|
2019-10-26T10:54:00.000Z
|
manage.py
|
BeyondLam/Flask_Blog_Python3
|
274c932e9ea28bb6c83335e408a2cd9f1cf4fcb6
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
BeyondLam/Flask_Blog_Python3
|
274c932e9ea28bb6c83335e408a2cd9f1cf4fcb6
|
[
"Apache-2.0"
] | null | null | null |
from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = create_app("develop")
manager = Manager(app)
Migrate(app, db)
manager.add_command("db", MigrateCommand)
# Initialize the admin account data; register it as a manager command
@manager.command
def create_admin():
from app.models import Admin
from config_message.constant import ADMIN_USERNAME, ADMIN_PASSWORD, ADMIN_AVATAR_URL, ADMIN_POWER
try:
admin_new = Admin(username=ADMIN_USERNAME, password=ADMIN_PASSWORD, avatar=ADMIN_AVATAR_URL,
power=ADMIN_POWER)
db.session.add(admin_new)
db.session.commit()
print("初始化成功")
except:
print("初始化失败")
db.session.rollback()
if __name__ == '__main__':
manager.run()
| 27.678571
| 101
| 0.707097
| 0
| 0
| 0
| 0
| 496
| 0.602673
| 0
| 0
| 109
| 0.132442
|
c047ab7812a83340a4a3ccb035cf5db37d2b6b67
| 2,954
|
py
|
Python
|
qiling/qiling/cc/intel.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/qiling/cc/intel.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/qiling/cc/intel.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
from unicorn.x86_const import (
UC_X86_REG_AX, UC_X86_REG_EAX, UC_X86_REG_RAX, UC_X86_REG_RCX,
UC_X86_REG_RDI, UC_X86_REG_RDX, UC_X86_REG_RSI, UC_X86_REG_R8,
UC_X86_REG_R9, UC_X86_REG_R10
)
from qiling import Qiling
from . import QlCommonBaseCC
class QlIntelBaseCC(QlCommonBaseCC):
"""Calling convention base class for Intel-based systems.
Supports arguments passing over registers and stack.
"""
def __init__(self, ql: Qiling):
retreg = {
16: UC_X86_REG_AX,
32: UC_X86_REG_EAX,
64: UC_X86_REG_RAX
}[ql.archbit]
super().__init__(ql, retreg)
def unwind(self, nslots: int) -> int:
# no cleanup; just pop out the return address
return self.ql.arch.stack_pop()
class QlIntel64(QlIntelBaseCC):
"""Calling convention base class for Intel-based 64-bit systems.
"""
@staticmethod
def getNumSlots(argbits: int) -> int:
return max(argbits, 64) // 64
class QlIntel32(QlIntelBaseCC):
"""Calling convention base class for Intel-based 32-bit systems.
"""
@staticmethod
def getNumSlots(argbits: int) -> int:
return max(argbits, 32) // 32
def getRawParam(self, slot: int, nbits: int = None) -> int:
__super_getparam = super().getRawParam
if nbits == 64:
lo = __super_getparam(slot)
hi = __super_getparam(slot + 1)
val = (hi << 32) | lo
else:
val = __super_getparam(slot, nbits)
return val
class amd64(QlIntel64):
"""Default calling convention for POSIX (x86-64).
First 6 arguments are passed in regs, the rest are passed on the stack.
"""
_argregs = (UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 10
class ms64(QlIntel64):
"""Default calling convention for Windows and UEFI (x86-64).
First 4 arguments are passed in regs, the rest are passed on the stack.
Each stack frame starts with a shadow space in size of 4 items, corresponding
to the first arguments passed in regs.
"""
_argregs = (UC_X86_REG_RCX, UC_X86_REG_RDX, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 12
_shadow = 4
class macosx64(QlIntel64):
"""Default calling convention for Mac OS (x86-64).
First 6 arguments are passed in regs, the rest are passed on the stack.
"""
_argregs = (UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_RCX, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 10
class cdecl(QlIntel32):
"""Calling convention used by all operating systems (x86).
All arguments are passed on the stack.
    The caller is responsible for unwinding the stack.
"""
_argregs = (None, ) * 16
class stdcall(QlIntel32):
"""Calling convention used by all operating systems (x86).
All arguments are passed on the stack.
    The callee is responsible for unwinding the stack.
"""
_argregs = (None, ) * 16
def unwind(self, nslots: int) -> int:
retaddr = super().unwind(nslots)
self.ql.reg.arch_sp += (nslots * self._asize)
return retaddr
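The 32-bit path in QlIntel32.getRawParam above rebuilds a 64-bit argument out of two consecutive 32-bit slots. The standalone sketch below needs no Qiling at all and uses made-up names; it only demonstrates the same (hi << 32) | lo reconstruction on a simulated stack.

def read_u64_from_slots(slots, index):
    # combine two consecutive 32-bit stack slots into one 64-bit value,
    # low half first, mirroring the getRawParam(nbits=64) branch above
    lo = slots[index]
    hi = slots[index + 1]
    return (hi << 32) | lo

# a 64-bit argument 0x1122334455667788 split across two 32-bit slots
slots = [0x55667788, 0x11223344]
assert read_u64_from_slots(slots, 0) == 0x1122334455667788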
| 26.854545
| 122
| 0.728842
| 2,586
| 0.875423
| 0
| 0
| 168
| 0.056872
| 0
| 0
| 1,215
| 0.411307
|
c048a21dfcef4ce86fe3963107c1c071b1d5b9b1
| 2,639
|
py
|
Python
|
Alexa_Dynamo.py
|
gnomesoup/pyDynamo
|
dea046e96f7973fcb6c28a274a3092b246457551
|
[
"Unlicense",
"MIT"
] | null | null | null |
Alexa_Dynamo.py
|
gnomesoup/pyDynamo
|
dea046e96f7973fcb6c28a274a3092b246457551
|
[
"Unlicense",
"MIT"
] | null | null | null |
Alexa_Dynamo.py
|
gnomesoup/pyDynamo
|
dea046e96f7973fcb6c28a274a3092b246457551
|
[
"Unlicense",
"MIT"
] | null | null | null |
### ----------- Python Code ------------###
import csv
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
import pandas as pd
### ------------- Start Alexa Stuff ---------###
app = Flask(__name__)
ask = Ask(app, "/")
#logging.getLogger("flask_ask").setLevel(logging.DEBUG)
### ----------- Switch Function --------------###
def setSwitch(Switchboard, switch, new_state):
switch_df = pd.read_csv(Switchboard + ".csv")
switch_df = switch_df.set_index('switch')
switch_df.set_value(switch,'state',new_state)
switch_df.to_csv(Switchboard + ".csv")
### ----------- Switch Function --------------###
def ReadInfo(Switchboard):
info_df = pd.read_csv(Switchboard + ".csv")
count = info_df.loc[0, 'Count']
return count
### ----------- Launch Skill --------------###
@ask.launch
def start_skill():
welcome_message = 'Hello, what would you like to ask the architect'
return question(welcome_message)
### -------------- Say Hello --------------- ####
@ask.intent("hello")
def hello():
setSwitch('C:\\sfdug\\Alexa','switch00', '1')
msg = "Hello San Francisco Dynamo user group"
return statement(msg)
### -------------- Create Points --------------- ####
@ask.intent("CreatePoints")
def CreatePoints():
setSwitch('C:\\sfdug\\Alexa','switch01', '1')
msg = "I am creating the points for the Janet Echelman sculptor"
return statement(msg)
### -------------- Create Connection --------------- ####
@ask.intent("CreateConnection")
def CreateConnection():
setSwitch('C:\\sfdug\\Alexa','switch02', '1')
msg = "I am creating a connection between the points"
return statement(msg)
### -------------- Create Framing --------------- ####
@ask.intent("CreateFraming")
def CreateFraming():
setSwitch('C:\\sfdug\\Alexa','switch03', '1')
msg = "I am creating the framing for the Janet Echelman sculptor"
return statement(msg)
### -------------- Reset --------------- ####
@ask.intent("Reset")
def Reset():
setSwitch('C:\\sfdug\\Alexa','switch01', '0')
setSwitch('C:\\sfdug\\Alexa','switch02', '0')
setSwitch('C:\\sfdug\\Alexa','switch03', '0')
msg = "I have reset Revvit"
return statement(msg)
### -------------- Count Framing --------------- ####
@ask.intent("CountFraming")
def CountFraming():
info = ReadInfo('C:\\sfdug\\AlexaRead')
msg = "I have counted: {}".format(info)
return statement(msg)
### --------------- Port for Ngrok -------------##
if __name__ == '__main__':
port = 9000 #the custom port you want
app.run(host='0.0.0.0', port=port)
app.run(debug=True)
| 30.333333
| 71
| 0.575597
| 0
| 0
| 0
| 0
| 1,262
| 0.478211
| 0
| 0
| 1,362
| 0.516105
|
c04935b8a935560d2540de8efce949baca20ee57
| 846
|
py
|
Python
|
HW/hklearn/model.py
|
leguiart/Machine-Learning
|
2fd3c583fbfd8fc3ee12c9106db7b4dfa29bc253
|
[
"MIT"
] | null | null | null |
HW/hklearn/model.py
|
leguiart/Machine-Learning
|
2fd3c583fbfd8fc3ee12c9106db7b4dfa29bc253
|
[
"MIT"
] | null | null | null |
HW/hklearn/model.py
|
leguiart/Machine-Learning
|
2fd3c583fbfd8fc3ee12c9106db7b4dfa29bc253
|
[
"MIT"
] | null | null | null |
import abc
'''
Interface that every model implements.
Every model in the hklearn library implements
the following behaviours:
    -fit : trains the model on a matrix of examples X and their corresponding labels y
    -predict : the trained model produces predictions for an input X
    of examples
'''
class ModelInterface(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'fit') and
callable(subclass.fit) and
hasattr(subclass, 'predict') and
callable(subclass.predict))
@ModelInterface.register
class Model:
"""Entrena modelo"""
def fit(self, X, y):
pass
"""Prediccion con base en el modelo entrenado"""
def predict(self, X):
pass
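Because ModelInterface defines __subclasshook__, any class that exposes callable fit and predict attributes is accepted by issubclass, even without explicit registration. The sketch below is only an illustration (MeanModel and its behaviour are assumptions, not part of the library) and presumes the ModelInterface class above is in scope.

import numpy as np

class MeanModel:
    """Predicts the mean of the training labels for every input."""
    def fit(self, X, y):
        self.mean_ = float(np.mean(y))
        return self

    def predict(self, X):
        return np.full(len(X), self.mean_)

model = MeanModel().fit([[1], [2], [3]], [10, 20, 30])
print(model.predict([[4], [5]]))              # [20. 20.]
print(issubclass(MeanModel, ModelInterface))  # True, via __subclasshook__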
| 31.333333
| 89
| 0.640662
| 463
| 0.547281
| 0
| 0
| 439
| 0.518913
| 0
| 0
| 417
| 0.492908
|
c0499e4593031598062f2a6d6d126c43c5ef1d2d
| 35,951
|
py
|
Python
|
pecos/utils/smat_util.py
|
UniqueUpToPermutation/pecos
|
52dba0b6a1d5d0809838ac9ddb6c02a93da2624e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2021-07-28T21:09:58.000Z
|
2021-09-24T03:37:45.000Z
|
pecos/utils/smat_util.py
|
UniqueUpToPermutation/pecos
|
52dba0b6a1d5d0809838ac9ddb6c02a93da2624e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pecos/utils/smat_util.py
|
UniqueUpToPermutation/pecos
|
52dba0b6a1d5d0809838ac9ddb6c02a93da2624e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-09-24T04:00:47.000Z
|
2021-09-24T04:00:47.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import collections
import numpy as np
import scipy.sparse as smat
def cs_matrix(arg1, mat_type, shape=None, dtype=None, copy=False, check_contents=False):
"""Custom compressed sparse matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct compressed sparse matrix
mat_type (type): the matrix type to construct, one of [scipy.sparse.csr_matrix | scipy.sparse.csc_matrix]
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
check_contents (bool, optional): whether to check array contents to determine dtype, defaults to False
Returns:
compressed sparse matrix in mat_type
"""
(data, indices, indptr) = arg1
indices_dtype = smat.sputils.get_index_dtype(indices, check_contents=check_contents)
indptr_dtype = smat.sputils.get_index_dtype(indptr, check_contents=check_contents)
ret = mat_type(shape, dtype=dtype)
# Read matrix dimensions given, if any
if shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(ret.indptr) - 1
minor_dim = ret.indices.max() + 1
except Exception:
raise ValueError("unable to infer matrix dimensions")
else:
shape = ret._swap((major_dim, minor_dim))
ret.indices = np.array(indices, copy=copy, dtype=indices_dtype)
ret.indptr = np.array(indptr, copy=copy, dtype=indptr_dtype)
ret.data = np.array(data, copy=copy, dtype=dtype)
return ret
def csr_matrix(arg1, shape=None, dtype=None, copy=False):
"""Custom csr_matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct csr_matrix
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
Returns:
csr_matrix
"""
return cs_matrix(arg1, smat.csr_matrix, shape=shape, dtype=dtype, copy=copy)
def csc_matrix(arg1, shape=None, dtype=None, copy=False):
"""Custom csc_matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct csc_matrix
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
Returns:
csc_matrix
"""
return cs_matrix(arg1, smat.csc_matrix, shape=shape, dtype=dtype, copy=copy)
def save_matrix(tgt, mat):
"""Save dense or sparse matrix to file.
Args:
tgt (str): path to save the matrix
mat (numpy.ndarray or scipy.sparse.spmatrix): target matrix to save
"""
assert isinstance(tgt, str), "tgt for save_matrix must be a str, but got {}".format(type(tgt))
with open(tgt, "wb") as tgt_file:
if isinstance(mat, np.ndarray):
np.save(tgt_file, mat, allow_pickle=False)
elif isinstance(mat, smat.spmatrix):
smat.save_npz(tgt_file, mat, compressed=False)
else:
raise NotImplementedError("Save not implemented for matrix type {}".format(type(mat)))
def load_matrix(src, dtype=None):
"""Load dense or sparse matrix from file.
Args:
src (str): path to load the matrix.
dtype (numpy.dtype, optional): if given, convert matrix dtype. otherwise use default type.
Returns:
mat (numpy.ndarray or scipy.sparse.spmatrix): loaded matrix
Notes:
If underlying matrix is {"csc", "csr", "bsr"}, indices will be sorted.
"""
if not isinstance(src, str):
raise ValueError("src for load_matrix must be a str")
mat = np.load(src)
# decide whether it's dense or sparse
if isinstance(mat, np.ndarray):
pass
elif isinstance(mat, np.lib.npyio.NpzFile):
# Ref code: https://github.com/scipy/scipy/blob/v1.4.1/scipy/sparse/_matrix_io.py#L19-L80
matrix_format = mat["format"].item()
if not isinstance(matrix_format, str):
# files saved with SciPy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode("ascii")
try:
cls = getattr(smat, "{}_matrix".format(matrix_format))
except AttributeError:
raise ValueError("Unknown matrix format {}".format(matrix_format))
if matrix_format in ("csc", "csr", "bsr"):
mat = cls((mat["data"], mat["indices"], mat["indptr"]), shape=mat["shape"])
# This is in-place operation
mat.sort_indices()
elif matrix_format == "dia":
mat = cls((mat["data"], mat["offsets"]), shape=mat["shape"])
elif matrix_format == "coo":
mat = cls((mat["data"], (mat["row"], mat["col"])), shape=mat["shape"])
else:
raise NotImplementedError(
"Load is not implemented for sparse matrix of format {}.".format(matrix_format)
)
else:
raise TypeError("load_feature_matrix encountered unknown input format {}".format(type(mat)))
if dtype is None:
return mat
else:
return mat.astype(dtype)
def transpose(mat):
"""Transpose a dense/sparse matrix.
Args:
X (np.ndarray, spmatrix): input matrix to be transposed.
Returns:
transposed X
"""
if not isinstance(mat, smat.spmatrix):
raise ValueError("mat must be a smat.spmatrix type")
if isinstance(mat, smat.csr_matrix):
return csc_matrix((mat.data, mat.indices, mat.indptr), shape=(mat.shape[1], mat.shape[0]))
elif isinstance(mat, smat.csc_matrix):
return csr_matrix((mat.data, mat.indices, mat.indptr), shape=(mat.shape[1], mat.shape[0]))
else:
return mat.T
def sorted_csr_from_coo(shape, row_idx, col_idx, val, only_topk=None):
"""Return a row-sorted CSR matrix from a COO sparse matrix.
    Nonzero elements in each row of the returned CSR matrix are sorted in descending order based on their values. If only_topk is given, only the topk largest elements will be kept.
Args:
shape (tuple): the shape of the input COO matrix
row_idx (ndarray): row indices of the input COO matrix
col_idx (ndarray): col indices of the input COO matrix
val (ndarray): values of the input COO matrix
only_topk (int, optional): keep only topk elements per row. Default None to ignore
Returns:
csr_matrix
"""
csr = smat.csr_matrix((val, (row_idx, col_idx)), shape=shape)
csr.sort_indices()
for i in range(shape[0]):
rng = slice(csr.indptr[i], csr.indptr[i + 1])
sorted_idx = np.argsort(-csr.data[rng], kind="mergesort")
csr.indices[rng] = csr.indices[rng][sorted_idx]
csr.data[rng] = csr.data[rng][sorted_idx]
if only_topk is not None:
assert isinstance(only_topk, int), f"Wrong type: type(only_topk) = {type(only_topk)}"
only_topk = max(min(1, only_topk), only_topk)
nnz_of_insts = csr.indptr[1:] - csr.indptr[:-1]
row_idx = np.repeat(np.arange(shape[0], dtype=csr.indices.dtype), nnz_of_insts)
selected_idx = (np.arange(len(csr.data)) - csr.indptr[row_idx]) < only_topk
row_idx = row_idx[selected_idx]
col_idx = csr.indices[selected_idx]
val = csr.data[selected_idx]
indptr = np.cumsum(np.bincount(row_idx + 1, minlength=(shape[0] + 1)))
csr = csr_matrix((val, col_idx, indptr), shape=shape, dtype=val.dtype)
return csr
def sorted_csc_from_coo(shape, row_idx, col_idx, val, only_topk=None):
"""Return a column-sorted CSC matrix from a COO sparse matrix.
    Nonzero elements in each col of the returned CSC matrix are sorted in descending order based on their values. If only_topk is given, only the topk largest elements will be kept.
Args:
shape (tuple): the shape of the input COO matrix
row_idx (ndarray): row indices of the input COO matrix
col_idx (ndarray): col indices of the input COO matrix
val (ndarray): values of the input COO matrix
only_topk (int, optional): keep only topk elements per col. Default None to ignore
Returns:
csc_matrix
"""
csr = sorted_csr_from_coo(shape[::-1], col_idx, row_idx, val, only_topk=None)
return transpose(csr)
def binarized(X, inplace=False):
"""Binarize a dense/sparse matrix. All nonzero elements become 1.
Args:
X (np.ndarray, spmatrix): input matrix to binarize
inplace (bool, optional): if True do the binarization in-place, else return a copy. Default False
Returns:
binarized X
"""
if not isinstance(X, (np.ndarray, smat.spmatrix)):
raise NotImplementedError(
"this function only support X being np.ndarray or scipy.sparse.spmatrix."
)
if not inplace:
X = X.copy()
if isinstance(X, smat.spmatrix):
X.data[:] = 1
else:
X[:] = 1
return X
def sorted_csr(csr, only_topk=None):
"""Return a copy of input CSR matrix where nonzero elements in each row is sorted in an descending order based on the value.
If `only_topk` is given, only top-k largest elements will be kept.
Args:
csr (csr_matrix): input csr_matrix to sort
only_topk (int, optional): keep only topk elements per row. Default None to ignore
Returns:
csr_matrix
"""
if not isinstance(csr, smat.csr_matrix):
raise ValueError("the input matrix must be a csr_matrix.")
row_idx = np.repeat(np.arange(csr.shape[0], dtype=np.uint32), csr.indptr[1:] - csr.indptr[:-1])
return sorted_csr_from_coo(csr.shape, row_idx, csr.indices, csr.data, only_topk)
def sorted_csc(csc, only_topk=None):
"""Return a copy of input CSC matrix where nonzero elements in each column is sorted in an descending order based on the value.
If `only_topk` is given, only top-k largest elements will be kept.
Args:
csc (csc_matrix): input csc_matrix to sort
only_topk (int, optional): keep only topk elements per col. Default None to ignore
Returns:
csc_matrix
"""
if not isinstance(csc, smat.csc_matrix):
raise ValueError("the input matrix must be a csc_matrix.")
return transpose(sorted_csr(transpose(csc)))
def dense_to_csr(dense, topk=None, batch=None):
"""Memory efficient method to construct a csr_matrix from a dense matrix.
Args:
dense (ndarray): 2-D dense matrix to convert.
topk (int or None, optional): keep topk non-zeros with largest abs value for each row.
Default None to keep everything.
batch (int or None, optional): the batch size for construction.
Default None to use min(dense.shape[0], 10 ** 5).
Returns:
csr_matrix that has topk nnz each row with the same shape as dense.
"""
BATCH_LIMIT = 10 ** 5
if topk is None:
keep_topk = dense.shape[1]
else:
keep_topk = min(dense.shape[1], max(1, int(topk)))
# if batch is given, use input batch size even if input batch > BATCH_LIMIT
if batch is None:
chunk_size = min(dense.shape[0], BATCH_LIMIT)
else:
chunk_size = min(dense.shape[0], max(1, int(batch)))
max_nnz = keep_topk * dense.shape[0]
indptr_dtype = np.int32 if max_nnz < np.iinfo(np.int32).max else np.int64
indices_dtype = np.int32 if dense.shape[1] < np.iinfo(np.int32).max else np.int64
data = np.empty((keep_topk * dense.shape[0],), dtype=dense.dtype)
indices = np.empty((keep_topk * dense.shape[0],), dtype=indices_dtype)
for i in range(0, dense.shape[0], chunk_size):
cur_chunk = dense[i : i + chunk_size, :]
chunk_len = cur_chunk.shape[0]
if keep_topk < dense.shape[1]:
col_indices = np.argpartition(abs(cur_chunk), keep_topk, axis=1)[:, -keep_topk:]
else:
col_indices = np.repeat(np.arange(keep_topk)[np.newaxis, :], chunk_len, axis=0)
row_indices = np.repeat(np.arange(chunk_len)[:, np.newaxis], keep_topk, axis=1)
chunk_data = cur_chunk[row_indices, col_indices]
data[i * keep_topk : i * keep_topk + chunk_data.size] = chunk_data.flatten()
indices[i * keep_topk : i * keep_topk + col_indices.size] = col_indices.flatten()
indptr = np.arange(0, dense.shape[0] * keep_topk + 1, keep_topk, dtype=indptr_dtype)
# Bypass scipy constructor to allow different indices and indptr types
return csr_matrix((data, indices, indptr), shape=dense.shape)
def vstack_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices vertically.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (M1 + M2 + ..., N)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
nr_cols = matrices[0].shape[1]
if any(mat.shape[1] != nr_cols for mat in matrices):
raise ValueError("Second dim not match")
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_rows = sum([int(mat.shape[0]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if nr_cols > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
indptr[0], cur_nnz, cur_row = 0, 0, 0
for mat in matrices:
indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices
data[cur_nnz : cur_nnz + mat.nnz] = mat.data
# can not merge the following two lines because
# mat.indptr[1:] + cur_nnz may overflow!
indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] = mat.indptr[1:]
indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] += cur_nnz
cur_nnz += mat.nnz
cur_row += mat.shape[0]
return csr_matrix((data, indices, indptr), shape=(total_rows, nr_cols))
def hstack_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices horizontally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (M, N1 + N2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
nr_rows = matrices[0].shape[0]
if any(mat.shape[0] != nr_rows for mat in matrices):
raise ValueError("First dim not match")
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_cols = sum([int(mat.shape[1]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if nr_rows > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(nr_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
indptr[0], cur_ptr = 0, 0
for i in range(nr_rows): # for every row
start_col = 0
for mat in matrices:
cur_nnz = mat.indptr[i + 1] - mat.indptr[i]
indices[cur_ptr : cur_ptr + cur_nnz] = (
mat.indices[mat.indptr[i] : mat.indptr[i + 1]] + start_col
)
data[cur_ptr : cur_ptr + cur_nnz] = mat.data[mat.indptr[i] : mat.indptr[i + 1]]
cur_ptr += cur_nnz
start_col += mat.shape[1]
indptr[i + 1] = cur_ptr
return csr_matrix((data, indices, indptr), shape=(nr_rows, total_cols))
def block_diag_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices block diagonally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_rows = sum([int(mat.shape[0]) for mat in matrices])
total_cols = sum([int(mat.shape[1]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if total_rows > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
cur_row, cur_col, cur_nnz = 0, 0, 0
indptr[0] = 0
for mat in matrices:
data[cur_nnz : cur_nnz + mat.nnz] = mat.data
indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices + cur_col
indptr[1 + cur_row : 1 + cur_row + mat.shape[0]] = mat.indptr[1:] + indptr[cur_row]
cur_col += mat.shape[1]
cur_row += mat.shape[0]
cur_nnz += mat.nnz
return csr_matrix((data, indices, indptr), shape=(total_rows, total_cols))
def vstack_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices vertically.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csc_matrix with shape (M1 + M2 + ..., N)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(hstack_csr([transpose(mat) for mat in matrices], dtype=dtype))
def hstack_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices horizontally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csc_matrix with shape (M, N1 + N2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(vstack_csr([transpose(mat) for mat in matrices], dtype=dtype))
def block_diag_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices block diagonally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
        csc_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(block_diag_csr([transpose(mat) for mat in matrices], dtype=dtype))
def get_csc_col_nonzero(matrix):
"""Given a matrix, returns the nonzero row ids of each col
The returned ndarray will retain the indices order.
Args:
matrix: the matrix to operate on, with shape (N x M)
Returns:
        list of ndarray [a_1, a_2, a_3, ...], where a_i is an array indicating the nonzero row ids of col i
"""
if not isinstance(matrix, smat.csc_matrix):
raise ValueError("matrix need to be csc_matrix!")
return [matrix.indices[matrix.indptr[i] : matrix.indptr[i + 1]] for i in range(matrix.shape[1])]
def get_csr_row_nonzero(matrix):
"""Given a matrix, returns the nonzero col ids of each row
The returned ndarray will retain the indices order.
Args:
matrix: the matrix to operate on, with shape (N x M)
Returns:
        list of ndarray [a_1, a_2, a_3, ...], where a_i is an array indicating the nonzero col ids of row i
"""
if not isinstance(matrix, smat.csr_matrix):
raise ValueError("matrix need to be csr_matrix!")
return [matrix.indices[matrix.indptr[i] : matrix.indptr[i + 1]] for i in range(matrix.shape[0])]
def get_row_submatrices(matrices, row_indices):
"""Get the sub-matrices of given matrices by selecting the rows given in row_indices
Args:
matrices (list of csr_matrix or ndarray): the matrices [mat_1, mat_2, ...] to operate on, with shape (M x N1), (M x N2), ...
row_indices (list or ndarray): the row indices to select
Returns:
list of csr_matrix or ndarray
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
n_mat = len(matrices)
if n_mat == 0:
raise ValueError("At least one matrix required as input")
if any(not isinstance(X, (smat.csr_matrix, np.ndarray)) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix or ndarray!")
nr_rows = matrices[0].shape[0]
if any(mat.shape[0] != nr_rows for mat in matrices):
raise ValueError("First dim not match")
if any(idx >= nr_rows or idx < 0 for idx in row_indices):
raise ValueError("row indices should be positive and do not exceed matrix first dimension")
results = []
for mat in matrices:
mat1 = mat[row_indices, :]
if isinstance(mat, smat.csr_matrix):
mat1.sort_indices()
results += [mat1]
return results
def dense_to_coo(dense):
"""Convert a dense matrix to COO format.
Args:
dense (ndarray): input dense matrix
Returns:
coo_matrix
"""
rows = np.arange(dense.shape[0], dtype=np.uint32)
cols = np.arange(dense.shape[1], dtype=np.uint32)
row_idx = np.repeat(rows, np.ones_like(rows) * len(cols)).astype(np.uint32)
col_idx = np.ones((len(rows), 1), dtype=np.uint32).dot(cols.reshape(1, -1)).ravel()
return smat.coo_matrix((dense.ravel(), (row_idx, col_idx)), shape=dense.shape)
def get_relevance_csr(csr, mm=None, dtype=np.float64):
"""Return the csr matrix containing relevance scores based on given prediction csr matrix.
Relevance score is defined as: max_rank - local_rank + 1
Args:
csr (csr_matrix): input CSR matrix, row indices are sorted in descending order
mm (int, optional): max rank, will be inferred from csr if not given
dtype (type, optional): datatype for the returned relevance matrix. Default float64.
Returns:
csr_matrix of relevance scores
"""
if mm is None:
mm = (csr.indptr[1:] - csr.indptr[:-1]).max()
nnz = len(csr.data)
nnz_of_rows = csr.indptr[1:] - csr.indptr[:-1]
row_idx = np.repeat(np.arange(csr.shape[0]), nnz_of_rows)
rel = np.array(
mm - (np.arange(nnz) - csr.indptr[row_idx]), dtype=dtype
    )  # adding 1 to avoid zero entries
return smat.csr_matrix((rel, csr.indices, csr.indptr), csr.shape)
def get_sparsified_coo(coo, selected_rows, selected_columns):
"""
Zero out everything not in selected rows and columns.
Args:
coo (coo_matrix): input coo matrix
selected_rows (list of int or np.array(int)): list of rows to be not zeroed out
selected_columns (list of int or np.array(int)): list of columns to be not zeroed out
Returns:
coo matrix with unwanted rows and columns zeroed out.
"""
valid_rows = np.zeros(coo.shape[0], dtype=bool)
valid_cols = np.zeros(coo.shape[1], dtype=bool)
valid_rows[selected_rows] = True
valid_cols[selected_columns] = True
valid_idx = valid_rows[coo.row] & valid_cols[coo.col]
coo = smat.coo_matrix(
(coo.data[valid_idx], (coo.row[valid_idx], coo.col[valid_idx])), shape=coo.shape
)
return coo
def csr_rowwise_mul(A, v):
"""Row-wise multiplication between sparse csr matrix A and dense array v.
Where each row of A is multiplied by the corresponding element in v.
The number of rows of A is same as the length of v.
Args:
A (csr_matrix): The matrix to be multiplied.
v (ndarray): The multiplying vector.
Returns:
Z (csr_matrix): The product of row-wise multiplication of A and v.
"""
if not isinstance(A, smat.csr_matrix):
raise ValueError(f"A must be scipy.sparse.csr_matrix")
if not isinstance(v, np.ndarray):
raise ValueError(f"v must be a numpy ndarray")
if v.ndim != 1:
raise ValueError(f"v should be an 1-d array")
if v.shape[0] != A.shape[0]:
raise ValueError(f"The dimension of v should be the same as the number of rows of A")
Z = A.copy()
for i in range(v.shape[0]):
Z.data[Z.indptr[i] : Z.indptr[i + 1]] *= v[i]
return Z
def csc_colwise_mul(A, v):
"""Column-wise multiplication between sparse csc matrix A and dense array v, where each column of A is multiplied by the corresponding element in v (The number of columns of A is same as the length of v).
Args:
A (csc_matrix): The matrix to be multiplied.
v (ndarray): The multiplying vector.
Returns:
Z (csc_matrix): The product of column-wise multiplication of A and v.
"""
if not isinstance(A, smat.csc_matrix):
raise ValueError(f"A must be scipy.sparse.csc_matrix")
if not isinstance(v, np.ndarray):
raise ValueError(f"v must be a numpy ndarray")
if v.ndim != 1:
raise ValueError(f"v should be an 1-d array")
if v.shape[0] != A.shape[1]:
raise ValueError(f"The dimension of v should be the same as the number of columns of A")
Z = A.copy()
for i in range(v.shape[0]):
Z.data[Z.indptr[i] : Z.indptr[i + 1]] *= v[i]
return Z
def get_cocluster_spectral_embeddings(A, dim=24):
"""Obtain the co-cluster spectral embeddings for the given bipartite graph described in [1]
* [1] `Dhillon, Inderjit S, 2001. Co-clustering documents and words using
bipartite spectral graph partition`
Args:
A (csr_matrix or csc_matrix): bipartite graph matrix
dim (int, optional): the dimension of the returned embeddings. Default 24
Returns:
(row_embedding, col_embedding): a tuple of embeddings for rows and columns respectively
row_embedding: numpy.ndarray of shape (A.shape[0], dim).
col_embedding: numpy.ndarray of shape (A.shape[1], dim).
"""
assert A.min() >= 0.0, "A must be nonnegative"
from sklearn.utils.extmath import randomized_svd
# Obtain An, the normalized adjacency bipartite matrix described in Eq (10) of [1]
# A_n = D_1^{-1/2} A D_2^{-1/2}
# row_diag = diagonal of D_1^{-1/2}
# col_diag = diagonal of D_2^{-1/2}
row_diag = np.asarray(np.sqrt(A.sum(axis=1))).squeeze()
col_diag = np.asarray(np.sqrt(A.sum(axis=0))).squeeze()
row_diag[row_diag == 0] = 1.0
col_diag[col_diag == 0] = 1.0
row_diag = 1.0 / row_diag
col_diag = 1.0 / col_diag
if smat.issparse(A):
n_rows, n_cols = A.shape
r = smat.dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = smat.dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
An = r * A * c
else:
An = row_diag[:, np.newaxis] * A * col_diag
# run SVD on An
nr_discards = 1 # discarding the first component
U, Sigma, VT = randomized_svd(An, dim + nr_discards, random_state=0)
# Normalized the singular vectors based on Eq (24) of [1]
row_embedding = np.ascontiguousarray(row_diag[:, np.newaxis] * U[:, nr_discards:])
col_embedding = np.ascontiguousarray(col_diag[:, np.newaxis] * VT[nr_discards:].T)
return row_embedding, col_embedding
class CsrEnsembler(object):
"""A class implementing several ensemblers for a list sorted CSR predictions"""
@staticmethod
def check_validlity(*args):
"""Check whether input CSR matrices are valid
Args:
args (iterable over csr_matrix): input CSR matrices
"""
for x in args:
assert isinstance(x, smat.csr_matrix), type(x)
assert all(x.shape == args[0].shape for x in args)
@staticmethod
def average(*args):
"""Ensemble predictions by averaging prediction values
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
ret = sum(args)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def rank_average(*args):
"""Ensemble predictions by averaging prediction ranks
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
mm = max((x.indptr[1:] - x.indptr[:-1]).max() for x in args)
ret = sum(get_relevance_csr(csr, mm) for csr in args)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def round_robin(*args):
"""Ensemble predictions by round robin
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
base = 1.0 / (len(args) + 1.0)
mm = max((x.indptr[1:] - x.indptr[:-1]).max() for x in args)
ret = get_relevance_csr(args[0], mm)
ret.data[:] += len(args) * base
for i, x in enumerate(args[1:], 1):
tmp = get_relevance_csr(x, mm)
tmp.data[:] += (len(args) - i) * base
ret = ret.maximum(tmp)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def print_ens(Ytrue, pred_set, param_set, topk=10):
"""Print matrices before and after ensemble
Args:
Ytrue (csr_matrix): ground truth label matrix
pred_set (iterable over csr_matrix): prediction matrices to ensemble
param_set (iterable): parameters or model names associated with pred_set
"""
for param, pred in zip(param_set, pred_set):
print("param: {}".format(param))
print(Metrics.generate(Ytrue, pred, topk=topk))
for ens in [CsrEnsembler.average, CsrEnsembler.rank_average, CsrEnsembler.round_robin]:
print("ens: {}".format(ens.__name__))
print(Metrics.generate(Ytrue, ens(*pred_set), topk=topk))
class Metrics(collections.namedtuple("Metrics", ["prec", "recall"])):
"""The metrics (precision, recall) for multi-label classification problems."""
__slots__ = ()
def __str__(self):
"""Format printing"""
def fmt(key):
return " ".join("{:4.2f}".format(100 * v) for v in getattr(self, key)[:])
return "\n".join("{:7}= {}".format(key, fmt(key)) for key in self._fields)
@classmethod
def default(cls):
"""Default dummy metric"""
return cls(prec=[], recall=[])
@classmethod
def generate(cls, tY, pY, topk=10):
"""Compute the metrics with given prediction and ground truth.
Args:
tY (csr_matrix): ground truth label matrix
pY (csr_matrix): predicted logits
topk (int, optional): only generate topk prediction. Default 10
Returns:
Metrics
"""
assert isinstance(tY, smat.csr_matrix), type(tY)
assert isinstance(pY, smat.csr_matrix), type(pY)
assert tY.shape == pY.shape, "tY.shape = {}, pY.shape = {}".format(tY.shape, pY.shape)
pY = sorted_csr(pY)
total_matched = np.zeros(topk, dtype=np.uint64)
recall = np.zeros(topk, dtype=np.float64)
for i in range(tY.shape[0]):
truth = tY.indices[tY.indptr[i] : tY.indptr[i + 1]]
matched = np.isin(pY.indices[pY.indptr[i] : pY.indptr[i + 1]][:topk], truth)
cum_matched = np.cumsum(matched, dtype=np.uint64)
total_matched[: len(cum_matched)] += cum_matched
recall[: len(cum_matched)] += cum_matched / max(len(truth), 1)
if len(cum_matched) != 0:
total_matched[len(cum_matched) :] += cum_matched[-1]
recall[len(cum_matched) :] += cum_matched[-1] / max(len(truth), 1)
prec = total_matched / tY.shape[0] / np.arange(1, topk + 1)
recall = recall / tY.shape[0]
return cls(prec=prec, recall=recall)
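A small worked example makes the precision/recall computation in Metrics.generate concrete. The sketch below assumes the definitions above are in scope; the matrices and their values are invented for illustration.

import numpy as np
import scipy.sparse as smat

# two instances, four labels: ground truth and already-scored predictions
tY = smat.csr_matrix(np.array([[1, 0, 1, 0],
                               [0, 1, 0, 0]], dtype=np.float64))
pY = smat.csr_matrix(np.array([[0.9, 0.1, 0.0, 0.3],
                               [0.2, 0.8, 0.0, 0.0]], dtype=np.float64))

metrics = Metrics.generate(tY, pY, topk=2)
print(metrics)
# row 0 hits label 0 at rank 1 but misses label 2; row 1 hits label 1 at rank 1,
# so prec comes out as 100.00 50.00 and recall as 75.00 75.00 for these inputs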
| 38.992408
| 208
| 0.648549
| 4,889
| 0.135991
| 0
| 0
| 4,317
| 0.12008
| 0
| 0
| 16,349
| 0.454758
|
c04a299ef4dc134ab3bfdfd03d7e5fd9d275da7c
| 1,944
|
py
|
Python
|
MSMetaEnhancer/libs/Curator.py
|
xtrojak/pyMSPannotator
|
4d6ec0ee9781294c621271a6c045e0b15102bb9b
|
[
"MIT"
] | 2
|
2021-06-16T07:42:02.000Z
|
2021-06-16T09:26:59.000Z
|
MSMetaEnhancer/libs/Curator.py
|
xtrojak/pyMSPannotator
|
4d6ec0ee9781294c621271a6c045e0b15102bb9b
|
[
"MIT"
] | 34
|
2021-06-15T09:52:51.000Z
|
2021-11-11T13:47:11.000Z
|
MSMetaEnhancer/libs/Curator.py
|
xtrojak/pyMSPannotator
|
4d6ec0ee9781294c621271a6c045e0b15102bb9b
|
[
"MIT"
] | 4
|
2021-06-09T06:42:19.000Z
|
2021-07-21T08:37:06.000Z
|
from matchms import utils
class Curator:
"""
Curator makes sure that all data is curated before the actual annotation can proceed.
Currently, fixing CAS numbers to correct format is supported.
"""
def curate_spectra(self, spectra):
"""
Iterates over given spectrums and curates individual spectra.
:param spectra: given spectrums
:return: curated spectrums
"""
for spectrum in spectra.spectrums:
spectrum.metadata = self.curate_metadata(spectrum.metadata)
return spectra
def curate_metadata(self, metadata):
"""
Curate metadata of particular spectra.
:param metadata: given metadata
:return: curated metadata
"""
if 'casno' in metadata:
metadata['casno'] = self.fix_cas_number(metadata['casno'])
return metadata
@staticmethod
def fix_cas_number(cas_number):
"""
Adds dashes to CAS number.
:param cas_number: given CAS number
:return: CAS number enriched by dashes (if needed)
"""
if "-" not in cas_number:
return f'{cas_number[:-3]}-{cas_number[-3:-1]}-{cas_number[-1]}'
return cas_number
@staticmethod
def filter_invalid_metadata(metadata):
"""
Validates metadata and filters out invalid ones.
:param metadata: metadata content
:return: only valid metadata
"""
filters = {
'smiles': utils.is_valid_smiles,
'inchi': utils.is_valid_inchi,
'inchikey': utils.is_valid_inchikey
}
valid_metadata = {}
for (attribute, value) in metadata.items():
if attribute in filters.keys():
if filters[attribute](value):
valid_metadata[attribute] = value
else:
valid_metadata[attribute] = value
return valid_metadata
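For a quick check of the dash-insertion rule in fix_cas_number, the two calls below are a minimal illustration (water's CAS number 7732-18-5 is used as the test value, and the Curator class above is assumed to be in scope).

print(Curator.fix_cas_number("7732185"))    # -> 7732-18-5
print(Curator.fix_cas_number("7732-18-5"))  # already contains dashes, returned unchanged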
| 29.907692
| 89
| 0.598251
| 1,915
| 0.985082
| 0
| 0
| 1,055
| 0.542695
| 0
| 0
| 877
| 0.451132
|
c04a2a3eb342ba391c15029d393dfe3507aca08e
| 2,498
|
py
|
Python
|
bin/install_megadrivers.py
|
antmicro/kvm-aosp-external-mesa3d
|
9a3a0c1e30421cd1d66b138ef6a3269ceb6de39f
|
[
"MIT"
] | null | null | null |
bin/install_megadrivers.py
|
antmicro/kvm-aosp-external-mesa3d
|
9a3a0c1e30421cd1d66b138ef6a3269ceb6de39f
|
[
"MIT"
] | null | null | null |
bin/install_megadrivers.py
|
antmicro/kvm-aosp-external-mesa3d
|
9a3a0c1e30421cd1d66b138ef6a3269ceb6de39f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding=utf-8
# Copyright © 2017-2018 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Script to install megadriver symlinks for meson."""
from __future__ import print_function
import argparse
import os
import shutil
def main():
parser = argparse.ArgumentParser()
parser.add_argument('megadriver')
parser.add_argument('libdir')
parser.add_argument('drivers', nargs='+')
args = parser.parse_args()
if os.path.isabs(args.libdir):
to = os.path.join(os.environ.get('DESTDIR', '/'), args.libdir[1:])
else:
to = os.path.join(os.environ['MESON_INSTALL_DESTDIR_PREFIX'], args.libdir)
master = os.path.join(to, os.path.basename(args.megadriver))
if not os.path.exists(to):
os.makedirs(to)
shutil.copy(args.megadriver, master)
for each in args.drivers:
driver = os.path.join(to, each)
if os.path.exists(driver):
os.unlink(driver)
print('installing {} to {}'.format(args.megadriver, driver))
os.link(master, driver)
try:
ret = os.getcwd()
os.chdir(to)
name, ext = os.path.splitext(each)
while ext != '.so':
if os.path.exists(name):
os.unlink(name)
os.symlink(each, name)
name, ext = os.path.splitext(name)
finally:
os.chdir(ret)
os.unlink(master)
if __name__ == '__main__':
main()
| 33.756757
| 82
| 0.67534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,279
| 0.511805
|
c04af6a3b44f5f5884d745baba056412e928f38e
| 478
|
py
|
Python
|
python_files/helpers.py
|
nilamo/pytchie
|
2e7a7501f23d393bdb66b64466f62d2ef741b778
|
[
"MIT"
] | 10
|
2019-01-21T14:59:39.000Z
|
2022-01-25T19:45:57.000Z
|
python_files/helpers.py
|
nilamo/pytchie
|
2e7a7501f23d393bdb66b64466f62d2ef741b778
|
[
"MIT"
] | 6
|
2019-09-26T08:09:41.000Z
|
2019-10-22T14:54:19.000Z
|
python_files/helpers.py
|
nilamo/pytchie
|
2e7a7501f23d393bdb66b64466f62d2ef741b778
|
[
"MIT"
] | 3
|
2019-09-27T23:05:39.000Z
|
2019-10-22T01:11:06.000Z
|
#!/usr/bin/env python
import os
import sys
def midi_to_freq(num):
""" Takes a MIDI number and returns a frequency in Hz for corresponding note. """
num_a = num - 69
freq = 440 * 2**(num_a / 12.0)
return freq
def fp(relative):
#if hasattr(sys, "_MEIPASS"):
# return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
if __name__ == '__main__':
print(midi_to_freq(69))
print(midi_to_freq(60))
print(midi_to_freq(105))
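midi_to_freq is the standard equal-temperament conversion f = 440 * 2 ** ((n - 69) / 12). A quick numeric check of the three calls above (values computed independently of the function, rounded to two decimals):

# MIDI 69 (A4):  440 * 2 ** (0 / 12)   = 440.00 Hz
# MIDI 60 (C4):  440 * 2 ** (-9 / 12)  ≈ 261.63 Hz
# MIDI 105 (A7): 440 * 2 ** (36 / 12)  = 3520.00 Hz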
| 23.9
| 85
| 0.656904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.395397
|
c04af8ddce186b3fd697e8b4010edd2847a07c3a
| 2,896
|
py
|
Python
|
test/integrationMyndFskr.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 18
|
2015-03-12T17:42:44.000Z
|
2021-12-27T10:32:22.000Z
|
test/integrationMyndFskr.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 13
|
2016-01-27T10:19:07.000Z
|
2021-12-13T20:24:36.000Z
|
test/integrationMyndFskr.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 6
|
2016-11-28T15:41:29.000Z
|
2022-01-08T11:16:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
import sys
import shutil
import inspect
from ferenda import TextReader, util
from ferenda.testutil import RepoTester, file_parametrize
from ferenda.compat import unittest
# SUT
from ferenda.sources.legal.se import myndfskr
class Parse(RepoTester):
repoclass = myndfskr.MyndFskrBase # in some cases we might need to get a
# specific one like SOSFS, see below
aliases = {} # setUpClass fills this in
@classmethod
def setUpClass(cls):
super(Parse, cls).setUpClass()
# enumerate all classes defined in the module where
# MyndFskrBase is defined, check their static property 'alias'
# and use it to add to cls.aliases
for name, obj in inspect.getmembers(myndfskr):
if inspect.isclass(obj) and hasattr(obj, 'alias'):
cls.aliases[obj.alias] = obj
def parse_filename(self, filename):
# a few of the subclasses have specialized rules. make sure we
# instantiate the correct class
alias = os.path.basename(filename).split("-")[0]
basefile = os.path.splitext(
os.path.basename(filename))[0].replace("-",
"/", 1).replace("-", ":")
repoclass = self.aliases[alias]
repo = repoclass(datadir=self.datadir,
storelocation=self.datadir + "/ferenda.sqlite",
indexlocation=self.datadir + "/whoosh",)
return repo, basefile
def parametric_test(self, filename):
        # these options adjust the constructed URIs. By default, the
# official rpubl URIs are minted.
#
# self.repo.config.localizeuri = True
# self.repo.config.url = "http://example.org/"
# self.repo.config.urlpath = ''
# a few of the subclasses have specialized rules. make sure we
# instantiate the correct class
repo, basefile = self.parse_filename(filename)
doc = repo.make_document(basefile)
text = repo.sanitize_text(util.readfile(filename), basefile)
reader = TextReader(string=text, encoding='utf-8')
props = repo.extract_metadata(reader, basefile)
props = repo.sanitize_metadata(props, basefile)
resource = repo.polish_metadata(props, basefile)
repo.infer_metadata(resource, basefile)
wantfile = filename.replace(".txt", ".n3")
if os.path.exists(wantfile):
self.assertEqualGraphs(wantfile, resource.graph, exact=False)
else:
self.fail("Expected a %s with the following content:\n\n%s" %
(wantfile, doc.meta.serialize(format="n3").decode("utf-8")))
file_parametrize(Parse, "test/files/myndfskr", ".txt")
| 39.135135
| 82
| 0.631906
| 2,445
| 0.844268
| 0
| 0
| 413
| 0.14261
| 0
| 0
| 824
| 0.28453
|
c04b151e636326dee485fc70fa9e09aa52af0717
| 2,319
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/NV/geometry_program4.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/NV/geometry_program4.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/NV/geometry_program4.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_geometry_program4'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_geometry_program4',error_checker=_errors._error_checker)
GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT=_C('GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT',0x8DA7)
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT=_C('GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT',0x8CD4)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT',0x8DA9)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT',0x8DA8)
GL_GEOMETRY_INPUT_TYPE_EXT=_C('GL_GEOMETRY_INPUT_TYPE_EXT',0x8DDB)
GL_GEOMETRY_OUTPUT_TYPE_EXT=_C('GL_GEOMETRY_OUTPUT_TYPE_EXT',0x8DDC)
GL_GEOMETRY_PROGRAM_NV=_C('GL_GEOMETRY_PROGRAM_NV',0x8C26)
GL_GEOMETRY_VERTICES_OUT_EXT=_C('GL_GEOMETRY_VERTICES_OUT_EXT',0x8DDA)
GL_LINES_ADJACENCY_EXT=_C('GL_LINES_ADJACENCY_EXT',0x000A)
GL_LINE_STRIP_ADJACENCY_EXT=_C('GL_LINE_STRIP_ADJACENCY_EXT',0x000B)
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT=_C('GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT',0x8C29)
GL_MAX_PROGRAM_OUTPUT_VERTICES_NV=_C('GL_MAX_PROGRAM_OUTPUT_VERTICES_NV',0x8C27)
GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV=_C('GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV',0x8C28)
GL_PROGRAM_POINT_SIZE_EXT=_C('GL_PROGRAM_POINT_SIZE_EXT',0x8642)
GL_TRIANGLES_ADJACENCY_EXT=_C('GL_TRIANGLES_ADJACENCY_EXT',0x000C)
GL_TRIANGLE_STRIP_ADJACENCY_EXT=_C('GL_TRIANGLE_STRIP_ADJACENCY_EXT',0x000D)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glFramebufferTextureEXT(target,attachment,texture,level):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLenum)
def glFramebufferTextureFaceEXT(target,attachment,texture,level,face):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLint)
def glFramebufferTextureLayerEXT(target,attachment,texture,level,layer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glProgramVertexLimitNV(target,limit):pass
| 55.214286
| 118
| 0.850367
| 0
| 0
| 0
| 0
| 519
| 0.223803
| 0
| 0
| 702
| 0.302717
|
c04b8e57191159c1e20db662b36e4eb42827c687
| 2,652
|
py
|
Python
|
benchbuild/projects/benchbuild/xz.py
|
sturmianseq/benchbuild
|
e3cc1a24e877261e90baf781aa67a9d6f6528dac
|
[
"MIT"
] | 11
|
2017-10-05T08:59:35.000Z
|
2021-05-29T01:43:07.000Z
|
benchbuild/projects/benchbuild/xz.py
|
sturmianseq/benchbuild
|
e3cc1a24e877261e90baf781aa67a9d6f6528dac
|
[
"MIT"
] | 326
|
2016-07-12T08:11:43.000Z
|
2022-03-28T07:10:11.000Z
|
benchbuild/projects/benchbuild/xz.py
|
sturmianseq/benchbuild
|
e3cc1a24e877261e90baf781aa67a9d6f6528dac
|
[
"MIT"
] | 13
|
2016-06-17T12:13:35.000Z
|
2022-01-04T16:09:12.000Z
|
from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils.cmd import make, tar
class XZ(bb.Project):
""" XZ """
VERSION = '5.2.1'
NAME = 'xz'
DOMAIN = 'compression'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={'5.2.1': 'http://tukaani.org/xz/xz-5.2.1.tar.gz'},
local='xz.tar.gz'
),
HTTP(
remote={'1.0': 'http://lairosiel.de/dist/compression.tar.gz'},
local='compression.tar.gz'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
def compile(self):
xz_source = local.path(self.source_of('xz.tar.gz'))
xz_version = self.version_of('xz.tar.gz')
compression_source = local.path(self.source_of('compression.tar.gz'))
tar('xf', xz_source)
tar('xf', compression_source)
unpack_dir = local.path(f'xz-{xz_version}')
clang = bb.compiler.cc(self)
with local.cwd(unpack_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
with local.env(CC=str(clang)):
_configure(
"--enable-threads=no", "--with-gnu-ld=yes",
"--disable-shared", "--disable-dependency-tracking",
"--disable-xzdec", "--disable-lzmadec",
"--disable-lzmainfo", "--disable-lzma-links",
"--disable-scripts", "--disable-doc"
)
_make = bb.watch(make)
_make("CC=" + str(clang), "clean", "all")
def run_tests(self):
xz_version = self.version_of('xz.tar.gz')
unpack_dir = local.path(f'xz-{xz_version}')
xz = bb.wrap(unpack_dir / "src" / "xz" / "xz", self)
_xz = bb.watch(xz)
# Compress
_xz("--compress", "-f", "-k", "-e", "-9", "compression/text.html")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/chicken.jpg")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/control")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/input.source")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/liberty.jpg")
# Decompress
_xz("--decompress", "-f", "-k", "compression/text.html.xz")
_xz("--decompress", "-f", "-k", "compression/chicken.jpg.xz")
_xz("--decompress", "-f", "-k", "compression/control.xz")
_xz("--decompress", "-f", "-k", "compression/input.source.xz")
_xz("--decompress", "-f", "-k", "compression/liberty.jpg.xz")
| 36.833333
| 77
| 0.536199
| 2,450
| 0.923831
| 0
| 0
| 0
| 0
| 0
| 0
| 1,060
| 0.399698
|
c04bfbdd189377e61884680d0c03817aca6a78ee
| 1,101
|
py
|
Python
|
train.py
|
sazzad/CarND-Behavioral-Cloning-P3
|
46599661bf194cf22683f49cae749eb403aaff01
|
[
"MIT"
] | null | null | null |
train.py
|
sazzad/CarND-Behavioral-Cloning-P3
|
46599661bf194cf22683f49cae749eb403aaff01
|
[
"MIT"
] | null | null | null |
train.py
|
sazzad/CarND-Behavioral-Cloning-P3
|
46599661bf194cf22683f49cae749eb403aaff01
|
[
"MIT"
] | null | null | null |
import numpy as np
import csv
import cv2
from keras.models import Sequential
from keras.layers import Dense, Flatten
def load_data():
lines = []
with open('Data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
images = []
measurements = []
for line in lines:
source_path = line[0]
filename = source_path.split('/')[-1]
current_path = 'Data/IMG/'+filename
image = cv2.imread(current_path)
images.append(image)
measurement = float(line[3])
measurements.append(measurement)
X_train = np.array(images)
y_train = np.array(measurements)
return X_train, y_train
def train(X_train, y_train):
model = Sequential()
model.add(Flatten(input_shape=(160, 320, 3)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
    model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=10)
model.save('model.h5')
if __name__ == "__main__":
X_train, y_train = load_data()
train(X_train, y_train)
| 28.973684
| 80
| 0.647593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.060854
|
c04ce8c06bdc166d9b3b9ffe4880ea147a89ea15
| 3,226
|
py
|
Python
|
models/FedXXX/resnet_utils.py
|
TD21forever/QoS-Predcition-Algorithm-library
|
f4503462887d719a39c9ccddd6cc55546e783fd5
|
[
"MIT"
] | 2
|
2022-02-08T08:19:59.000Z
|
2022-02-17T01:42:54.000Z
|
models/FedXXX/resnet_utils.py
|
TD21forever/QoS-Predcition-Algorithm-library
|
f4503462887d719a39c9ccddd6cc55546e783fd5
|
[
"MIT"
] | null | null | null |
models/FedXXX/resnet_utils.py
|
TD21forever/QoS-Predcition-Algorithm-library
|
f4503462887d719a39c9ccddd6cc55546e783fd5
|
[
"MIT"
] | null | null | null |
from abc import get_cache_token
from collections import OrderedDict
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, in_size, out_size):
super().__init__()
self.in_size, self.out_size = in_size, out_size
self.blocks = nn.Identity()
self.shortcut = nn.Identity()
def forward(self, x):
residual = x
if self.should_apply_shortcut:
residual = self.shortcut(x)
x = self.blocks(x)
x += residual
return x
@property
def should_apply_shortcut(self):
return self.in_size != self.out_size
# Handles the shortcut (skip) connection
class ResNetResidualBlock(ResidualBlock):
def __init__(self, in_size, out_size):
super().__init__(in_size, out_size)
self.shortcut = nn.Sequential(OrderedDict(
{
'dense': nn.Linear(self.in_size, self.out_size),
# 'bn': nn.BatchNorm1d(self.out_size)
})) if self.should_apply_shortcut else None
@property
def should_apply_shortcut(self):
return self.in_size != self.out_size
# Defines a single block
class ResNetBasicBlock(ResNetResidualBlock):
def __init__(self, in_size, out_size, activation=nn.ReLU):
super().__init__(in_size, out_size)
self.blocks = nn.Sequential(
nn.Linear(self.in_size, self.out_size),
activation(),
nn.Linear(self.out_size, self.out_size),
)
# Defines a ResNet layer, which contains multiple blocks
class ResNetLayer(nn.Module):
def __init__(self, in_size, out_size, block=ResNetBasicBlock, n=1, activation=nn.ReLU):
super().__init__()
self.blocks = nn.Sequential(
block(in_size, out_size, activation),
*[block(out_size,
out_size, activation) for _ in range(n-1)]
)
def forward(self, x):
x = self.blocks(x)
return x
# The encoder is composed of multiple ResNet layers
class ResNetEncoder(nn.Module):
"""
ResNet encoder composed by decreasing different layers with increasing features.
"""
def __init__(self, in_size=128, blocks_sizes=[64, 32, 16], deepths=[2, 2, 2],
activation=nn.ReLU, block=ResNetBasicBlock):
super().__init__()
self.blocks_sizes = blocks_sizes
self.gate = nn.Sequential(
nn.Linear(in_size, self.blocks_sizes[0]),
# nn.BatchNorm1d(self.blocks_sizes[0]),
activation(),
)
self.in_out_block_sizes = list(zip(blocks_sizes, blocks_sizes[1:]))
self.blocks = nn.ModuleList([
*[ResNetLayer(in_size, out_size, n=n, activation=activation, block=block)
for (in_size, out_size), n in zip(self.in_out_block_sizes, deepths)]
])
def forward(self, x):
x = self.gate(x)
for block in self.blocks:
x = block(x)
return x
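# Shape walk-through with the defaults above (illustrative, not part of the
# original file): the gate maps 128 -> 64, then two ResNetLayers map
# 64 -> 32 and 32 -> 16 (zip() pairs blocks_sizes with deepths, so only
# len(blocks_sizes)-1 layers are built). A minimal usage sketch:
#   import torch
#   enc = ResNetEncoder()
#   out = enc(torch.randn(8, 128))   # out.shape == torch.Size([8, 16])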
if __name__ == "__main__":
m = ResNetEncoder()
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel()
for p in net.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
print(get_parameter_number(m))
| 28.548673
| 91
| 0.614073
| 2,676
| 0.816351
| 0
| 0
| 182
| 0.055522
| 0
| 0
| 337
| 0.102807
|
c04d90069f191974d0ed369a9c73406bd54fa0cc
| 2,114
|
py
|
Python
|
xblock/test/test_json_conversion.py
|
edly-io/XBlock
|
60d01a32e5bfe1b543f598cbc56ba3f4d736129d
|
[
"Apache-2.0"
] | null | null | null |
xblock/test/test_json_conversion.py
|
edly-io/XBlock
|
60d01a32e5bfe1b543f598cbc56ba3f4d736129d
|
[
"Apache-2.0"
] | null | null | null |
xblock/test/test_json_conversion.py
|
edly-io/XBlock
|
60d01a32e5bfe1b543f598cbc56ba3f4d736129d
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests asserting that ModelTypes convert to and from json when working
with ModelDatas
"""
# Allow inspection of private class members
# pylint: disable=protected-access
from mock import Mock
from xblock.core import XBlock
from xblock.fields import Field, Scope, ScopeIds
from xblock.field_data import DictFieldData
from xblock.test.tools import TestRuntime
class TestJSONConversionField(Field):
"""Field for testing json conversion"""
__test__ = False
def from_json(self, value):
assert value['$type'] == 'set'
return set(value['$vals'])
def to_json(self, value):
return {
'$type': 'set',
'$vals': sorted(value)
}
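# Round-trip example (illustrative): to_json({3, 1, 2}) returns
# {'$type': 'set', '$vals': [1, 2, 3]}, and from_json on that dict rebuilds
# the original set; the assertions in the tests below rely on exactly this.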
class TestBlock(XBlock):
"""XBlock for testing json conversion"""
__test__ = False
field_a = TestJSONConversionField(scope=Scope.content)
field_b = TestJSONConversionField(scope=Scope.content)
class TestModel(DictFieldData):
"""ModelData for testing json conversion"""
__test__ = False
def default(self, block, name):
return {'$type': 'set', '$vals': [0, 1]}
class TestJsonConversion:
"""
Verify that all ModelType operations correctly convert
the json that comes out of the ModelData to python objects
"""
def setup_method(self):
"""
Setup for each test method in this class.
"""
field_data = TestModel({
'field_a': {'$type': 'set', '$vals': [1, 2, 3]}
})
runtime = TestRuntime(services={'field-data': field_data})
self.block = TestBlock(runtime, scope_ids=Mock(spec=ScopeIds)) # pylint: disable=attribute-defined-outside-init
def test_get(self):
# Test field with a value
assert isinstance(self.block.field_a, set)
# Test ModelData default
assert isinstance(self.block.field_b, set)
def test_set(self):
self.block.field_b = set([5, 6, 5])
self.block.save()
assert isinstance(self.block.field_b, set)
assert {'$type': 'set', '$vals': [5, 6]} == \
self.block._field_data.get(self.block, 'field_b')
| 28.186667
| 120
| 0.64333
| 1,739
| 0.822611
| 0
| 0
| 0
| 0
| 0
| 0
| 712
| 0.336802
|
c04dc0e5e93dcddb8cf11931aefe2f5bf4588f05
| 10,592
|
py
|
Python
|
uq_benchmark_2019/experiment_utils.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
uq_benchmark_2019/experiment_utils.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
uq_benchmark_2019/experiment_utils.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to help set up and run experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
from absl import logging
import numpy as np
import scipy.special
from six.moves import range
from six.moves import zip
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
gfile = tf.io.gfile
class _SimpleJsonEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
def json_dumps(x):
return json.dumps(x, indent=2, cls=_SimpleJsonEncoder)
def record_config(config, path):
out = json_dumps(config)
logging.info('Recording config to %s\n %s', path, out)
gfile.makedirs(os.path.dirname(path))
with gfile.GFile(path, 'w') as fh:
fh.write(out)
def load_config(path):
logging.info('Loading config from %s', path)
with gfile.GFile(path) as fh:
return json.loads(fh.read())
def save_model(model, output_dir):
"""Save Keras model weights and architecture as HDF5 file."""
save_path = '%s/model.hdf5' % output_dir
logging.info('Saving model to %s', save_path)
model.save(save_path, include_optimizer=False)
return save_path
def load_model(path):
logging.info('Loading model from %s', path)
return tf.keras.models.load_model(path)
def metrics_from_stats(stats):
"""Compute metrics to report to hyperparameter tuner."""
labels, probs = stats['labels'], stats['probs']
# Reshape binary predictions to 2-class.
if len(probs.shape) == 1:
probs = np.stack([1-probs, probs], axis=-1)
assert len(probs.shape) == 2
predictions = np.argmax(probs, axis=-1)
accuracy = np.equal(labels, predictions)
label_probs = probs[np.arange(len(labels)), labels]
log_probs = np.maximum(-1e10, np.log(label_probs))
brier_scores = np.square(probs).sum(-1) - 2 * label_probs
return {'accuracy': accuracy.mean(0),
'brier_score': brier_scores.mean(0),
'log_prob': log_probs.mean(0)}
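# Note on the brier_score computation above (a sketch of the algebra, not
# original text): for a one-hot label y, sum_k (p_k - y_k)^2
# = sum_k p_k^2 - 2*p_y + 1, so the value computed here is the standard
# Brier score shifted by the constant -1; model comparisons are unaffected.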
def make_predictions(
model, batched_dataset, predictions_per_example=1, writers=None,
predictions_are_logits=True, record_image_samples=True, max_batches=1e6):
"""Build a dictionary of predictions for examples from a dataset.
Args:
model: Trained Keras model.
batched_dataset: tf.data.Dataset that yields batches of image, label pairs.
predictions_per_example: Number of predictions to generate per example.
writers: `dict` with keys 'small' and 'full', containing
array_utils.StatsWriter instances for full prediction results and small
prediction results (omitting logits).
predictions_are_logits: Indicates whether model outputs are logits or
probabilities.
record_image_samples: `bool` Record one batch of input examples.
max_batches: `int`, maximum number of batches.
Returns:
Dictionary containing:
labels: Labels copied from the dataset (shape=[N]).
logits_samples: Samples of model predict outputs for each example
(shape=[N, M, K]).
probs: Probabilities after averaging over samples (shape=[N, K]).
image_samples: One batch of input images (for sanity checking).
"""
if predictions_are_logits:
samples_key = 'logits_samples'
avg_probs_fn = lambda x: scipy.special.softmax(x, axis=-1).mean(-2)
else:
samples_key = 'probs_samples'
avg_probs_fn = lambda x: x.mean(-2)
labels, outputs = [], []
predict_fn = model.predict if hasattr(model, 'predict') else model
for i, (inputs_i, labels_i) in enumerate(tfds.as_numpy(batched_dataset)):
logging.info('iteration: %d', i)
outputs_i = np.stack(
[predict_fn(inputs_i) for _ in range(predictions_per_example)], axis=1)
if writers is None:
labels.extend(labels_i)
outputs.append(outputs_i)
else:
avg_probs_i = avg_probs_fn(outputs_i)
prediction_batch = dict(labels=labels_i, probs=avg_probs_i)
if i == 0 and record_image_samples:
prediction_batch['image_samples'] = inputs_i
writers['small'].write_batch(prediction_batch)
prediction_batch[samples_key] = outputs_i
writers['full'].write_batch(prediction_batch)
# Don't predict whole ImageNet training set
if i > max_batches:
break
if writers is None:
image_samples = inputs_i # pylint: disable=undefined-loop-variable
labels = np.stack(labels, axis=0)
outputs = np.concatenate(outputs, axis=0)
    stats = {'labels': labels,
             samples_key: outputs, 'probs': avg_probs_fn(outputs)}
if record_image_samples:
stats['image_samples'] = image_samples
return stats
def download_dataset(dataset, batch_size_for_dl=1024):
logging.info('Starting dataset download...')
tup = list(zip(*tfds.as_numpy(dataset.batch(batch_size_for_dl))))
logging.info('dataset download complete.')
return tuple(np.concatenate(x, axis=0) for x in tup)
def get_distribution_strategy(distribution_strategy='default',
num_gpus=0,
num_workers=1,
all_reduce_alg=None,
num_packs=1):
"""Return a DistributionStrategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are 'off', 'default', 'one_device', 'mirrored',
'parameter_server', 'multi_worker_mirrored', case insensitive. 'off' means
not to use Distribution Strategy; 'default' means to choose from
`MirroredStrategy`, `MultiWorkerMirroredStrategy`, or `OneDeviceStrategy`
according to the number of GPUs and number of workers.
num_gpus: Number of GPUs to run this model.
num_workers: Number of workers to run this model.
all_reduce_alg: Optional. Specifies which algorithm to use when performing
all-reduce. For `MirroredStrategy`, valid values are 'nccl' and
'hierarchical_copy'. For `MultiWorkerMirroredStrategy`, valid values are
'ring' and 'nccl'. If None, DistributionStrategy will choose based on
device topology.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.
Returns:
    tf.distribute.DistributionStrategy object.
Raises:
ValueError: if `distribution_strategy` is 'off' or 'one_device' and
`num_gpus` is larger than 1; or `num_gpus` is negative.
"""
if num_gpus < 0:
raise ValueError('`num_gpus` can not be negative.')
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == 'off':
if num_gpus > 1:
raise ValueError(
'When {} GPUs and {} workers are specified, distribution_strategy '
'flag cannot be set to "off".'.format(num_gpus, num_workers))
return None
if distribution_strategy == 'multi_worker_mirrored':
return tf.distribute.experimental.MultiWorkerMirroredStrategy(
communication=_collective_communication(all_reduce_alg))
if (distribution_strategy == 'one_device' or
(distribution_strategy == 'default' and num_gpus <= 1)):
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy('device:CPU:0')
else:
if num_gpus > 1:
raise ValueError('`OneDeviceStrategy` can not be used for more than '
'one device.')
return tf.distribute.OneDeviceStrategy('device:GPU:0')
if distribution_strategy in ('mirrored', 'default'):
if num_gpus == 0:
assert distribution_strategy == 'mirrored'
devices = ['device:CPU:0']
else:
devices = ['device:GPU:%d' % i for i in range(num_gpus)]
return tf.distribute.MirroredStrategy(
devices=devices,
cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))
if distribution_strategy == 'parameter_server':
return tf.compat.v1.distribute.experimental.ParameterServerStrategy()
raise ValueError(
'Unrecognized Distribution Strategy: %r' % distribution_strategy)
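# Usage sketch (hypothetical values, not from the original file):
#   strategy = get_distribution_strategy('mirrored', num_gpus=2)
#   # -> tf.distribute.MirroredStrategy over ['device:GPU:0', 'device:GPU:1']
#   with strategy.scope():
#       model = build_model()   # build_model is a placeholder for user code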
def _collective_communication(all_reduce_alg):
"""Return a CollectiveCommunication based on all_reduce_alg.
Args:
all_reduce_alg: a string specifying which collective communication to pick,
or None.
Returns:
tf.distribute.experimental.CollectiveCommunication object
Raises:
ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl']
"""
collective_communication_options = {
None: tf.distribute.experimental.CollectiveCommunication.AUTO,
'ring': tf.distribute.experimental.CollectiveCommunication.RING,
'nccl': tf.distribute.experimental.CollectiveCommunication.NCCL
}
if all_reduce_alg not in collective_communication_options:
raise ValueError(
'When used with `multi_worker_mirrored`, valid values for '
'all_reduce_alg are ["ring", "nccl"]. Supplied value: {}'.format(
all_reduce_alg))
return collective_communication_options[all_reduce_alg]
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
"""Return a CrossDeviceOps based on all_reduce_alg and num_packs.
Args:
all_reduce_alg: a string specifying which cross device op to pick, or None.
num_packs: an integer specifying number of packs for the cross device op.
Returns:
tf.distribute.CrossDeviceOps object or None.
Raises:
ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy'].
"""
if all_reduce_alg is None:
return None
mirrored_all_reduce_options = {
'nccl': tf.distribute.NcclAllReduce,
'hierarchical_copy': tf.distribute.HierarchicalCopyAllReduce
}
if all_reduce_alg not in mirrored_all_reduce_options:
raise ValueError(
'When used with `mirrored`, valid values for all_reduce_alg are '
'["nccl", "hierarchical_copy"]. Supplied value: {}'.format(
all_reduce_alg))
cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
return cross_device_ops_class(num_packs=num_packs)
| 36.273973
| 80
| 0.715823
| 90
| 0.008497
| 0
| 0
| 0
| 0
| 0
| 0
| 4,921
| 0.464596
|
c04f13b9a712c28cf890f8bd241f887d6602c688
| 42,844
|
py
|
Python
|
modisco/coordproducers.py
|
Bluedragon137/tfmodisco
|
d7c56b21e1bb58b07695ef3035f173b7d1a039e6
|
[
"MIT"
] | null | null | null |
modisco/coordproducers.py
|
Bluedragon137/tfmodisco
|
d7c56b21e1bb58b07695ef3035f173b7d1a039e6
|
[
"MIT"
] | null | null | null |
modisco/coordproducers.py
|
Bluedragon137/tfmodisco
|
d7c56b21e1bb58b07695ef3035f173b7d1a039e6
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter, OrderedDict
import itertools
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer, PrecisionValTransformer)
import scipy
from sklearn.isotonic import IsotonicRegression
SUBSAMPLE_CAP = 1000000
#The only parts of TransformAndThresholdResults that are used in
# TfModiscoWorkflow are the transformed_pos/neg_thresholds and the
# val_transformer (used in metaclustering with multiple tasks)
#TransformAndThresholdResults are also used to be
# able to replicate the same procedure used for identifying coordinates as
# when TfMoDisco was first run; the information needed in that case would
# be specific to the type of Coordproducer used
class AbstractTransformAndThresholdResults(object):
def __init__(self, transformed_neg_threshold, transformed_pos_threshold,
val_transformer):
self.transformed_neg_threshold = transformed_neg_threshold
self.transformed_pos_threshold = transformed_pos_threshold
self.val_transformer = val_transformer
@classmethod
def from_hdf5(cls, grp):
if "class" not in grp.attrs:
the_class = FWACTransformAndThresholdResults
else:
the_class = eval(grp.attrs["class"])
if (the_class.__name__ != cls.__name__):
return the_class.from_hdf5(grp)
class BasicTransformAndThresholdResults(AbstractTransformAndThresholdResults):
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
self.val_transformer.save_hdf5(grp.create_group("val_transformer"))
@classmethod
def load_basic_attrs_from_hdf5(cls, grp):
transformed_neg_threshold = grp.attrs['transformed_neg_threshold']
transformed_pos_threshold = grp.attrs['transformed_pos_threshold']
val_transformer = AbstractValTransformer.from_hdf5(
grp["val_transformer"])
return (transformed_neg_threshold, transformed_pos_threshold,
val_transformer)
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
(transformed_neg_threshold,
transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
return cls(transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
#FWAC = FixedWindowAroundChunks; this TransformAndThresholdResults object
# is specific to the type of info needed in that case.
class FWACTransformAndThresholdResults(
BasicTransformAndThresholdResults):
def __init__(self, neg_threshold,
transformed_neg_threshold,
pos_threshold,
transformed_pos_threshold,
val_transformer):
#both 'transformed_neg_threshold' and 'transformed_pos_threshold'
# should be positive, i.e. they should be relative to the
# transformed distribution used to set the threshold, e.g. a
# cdf value
self.neg_threshold = neg_threshold
self.pos_threshold = pos_threshold
super(FWACTransformAndThresholdResults, self).__init__(
transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
def save_hdf5(self, grp):
super(FWACTransformAndThresholdResults, self).save_hdf5(grp)
grp.attrs["neg_threshold"] = self.neg_threshold
grp.attrs["pos_threshold"] = self.pos_threshold
@classmethod
def from_hdf5(cls, grp):
(transformed_neg_threshold, transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
neg_threshold = grp.attrs['neg_threshold']
pos_threshold = grp.attrs['pos_threshold']
return cls(neg_threshold=neg_threshold,
transformed_neg_threshold=transformed_neg_threshold,
pos_threshold=pos_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
class AbstractCoordProducer(object):
def __call__(self):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
"""
Coordinates for the FixedWindowAroundChunks CoordProducer
"""
def __init__(self, example_idx, start, end, score, other_info={}):
self.score = score
self.other_info = other_info
super(SeqletCoordsFWAP, self).__init__(
example_idx=example_idx,
start=start, end=end,
is_revcomp=False)
class CoordProducerResults(object):
def __init__(self, coords, tnt_results):
self.coords = coords
self.tnt_results = tnt_results
@classmethod
def from_hdf5(cls, grp):
coord_strings = util.load_string_list(dset_name="coords",
grp=grp)
coords = [SeqletCoordinates.from_string(x) for x in coord_strings]
tnt_results = AbstractTransformAndThresholdResults.from_hdf5(
grp["tnt_results"])
return CoordProducerResults(coords=coords,
tnt_results=tnt_results)
def save_hdf5(self, grp):
util.save_string_list(
string_list=[str(x) for x in self.coords],
dset_name="coords",
grp=grp)
self.tnt_results.save_hdf5(
grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
def window_sum_function(arrs):
to_return = []
for arr in arrs:
cumsum = np.cumsum(arr)
cumsum = np.array([0]+list(cumsum))
to_return.append(cumsum[window_size:]-cumsum[:-window_size])
return to_return
return window_sum_function
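# Worked example (hypothetical input): with window_size=2 and arrs=[[1, 2, 3]],
# cumsum becomes [0, 1, 3, 6] and the returned window sums are [3, 5]
# (1+2 and 2+3); each output has length len(arr) - window_size + 1.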
class GenerateNullDist(object):
def __call__(self, score_track):
raise NotImplementedError()
class TakeSign(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.sign(x) for x in score_track]
return null_tracks
class TakeAbs(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.abs(x) for x in score_track]
return null_tracks
class LaplaceNullDist(GenerateNullDist):
def __init__(self, num_to_samp, verbose=True,
percentiles_to_use=[5*(x+1) for x in range(19)],
random_seed=1234):
self.num_to_samp = num_to_samp
self.verbose = verbose
self.percentiles_to_use = np.array(percentiles_to_use)
self.random_seed = random_seed
self.rng = np.random.RandomState()
@classmethod
def from_hdf5(cls, grp):
num_to_samp = grp.attrs["num_to_samp"]
verbose = grp.attrs["verbose"]
percentiles_to_use = np.array(grp["percentiles_to_use"][:])
        return cls(num_to_samp=num_to_samp, verbose=verbose,
                   percentiles_to_use=percentiles_to_use)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["num_to_samp"] = self.num_to_samp
grp.attrs["verbose"] = self.verbose
grp.create_dataset('percentiles_to_use',
data=self.percentiles_to_use)
def __call__(self, score_track, window_size, original_summed_score_track):
#original_summed_score_track is supplied to avoid recomputing it
if (original_summed_score_track is None):
window_sum_function = get_simple_window_sum_function(window_size)
original_summed_score_track = window_sum_function(arrs=score_track)
values = np.concatenate(original_summed_score_track, axis=0)
# first estimate mu, using two level histogram to get to 1e-6
hist1, bin_edges1 = np.histogram(values, bins=1000)
peak1 = np.argmax(hist1)
l_edge = bin_edges1[peak1]
r_edge = bin_edges1[peak1+1]
top_values = values[ (l_edge < values) & (values < r_edge) ]
hist2, bin_edges2 = np.histogram(top_values, bins=1000)
peak2 = np.argmax(hist2)
l_edge = bin_edges2[peak2]
r_edge = bin_edges2[peak2+1]
mu = (l_edge + r_edge) / 2
if (self.verbose):
print("peak(mu)=", mu)
pos_values = [x for x in values if x >= mu]
neg_values = [x for x in values if x <= mu]
#for an exponential distribution:
# cdf = 1 - exp(-lambda*x)
# exp(-lambda*x) = 1-cdf
# -lambda*x = log(1-cdf)
# lambda = -log(1-cdf)/x
# x = -log(1-cdf)/lambda
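        # Worked example (hypothetical numbers): if the 90th percentile of
        # pos_values sits 2.0 above mu, the implied rate is
        # lambda = -log(1 - 0.9) / 2.0 ~= 1.15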
#Take the most aggressive lambda over all percentiles
pos_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.percentile(a=pos_values, q=self.percentiles_to_use)-mu))
neg_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.abs(np.percentile(a=neg_values,
q=100-self.percentiles_to_use)-mu)))
self.rng.seed(self.random_seed)
prob_pos = float(len(pos_values))/(len(pos_values)+len(neg_values))
sampled_vals = []
for i in range(self.num_to_samp):
sign = 1 if (self.rng.uniform() < prob_pos) else -1
if (sign == 1):
sampled_cdf = self.rng.uniform()
val = -np.log(1-sampled_cdf)/pos_laplace_lambda + mu
else:
sampled_cdf = self.rng.uniform()
val = mu + np.log(1-sampled_cdf)/neg_laplace_lambda
sampled_vals.append(val)
return np.array(sampled_vals)
class FlipSignNullDist(GenerateNullDist):
def __init__(self, num_seq_to_samp, shuffle_pos=False,
seed=1234, num_breaks=100,
lower_null_percentile=20,
upper_null_percentile=80):
self.num_seq_to_samp = num_seq_to_samp
self.shuffle_pos = shuffle_pos
self.seed = seed
self.rng = np.random.RandomState()
self.num_breaks = num_breaks
self.lower_null_percentile = lower_null_percentile
self.upper_null_percentile = upper_null_percentile
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track, windowsize, original_summed_score_track):
#summed_score_track is supplied to avoid recomputing it
window_sum_function = get_simple_window_sum_function(windowsize)
        if (original_summed_score_track is None):
original_summed_score_track = window_sum_function(arrs=score_track)
all_orig_summed_scores = np.concatenate(
original_summed_score_track, axis=0)
pos_threshold = np.percentile(a=all_orig_summed_scores,
q=self.upper_null_percentile)
neg_threshold = np.percentile(a=all_orig_summed_scores,
q=self.lower_null_percentile)
#retain only the portions of the tracks that are under the
# thresholds
retained_track_portions = []
num_pos_vals = 0
num_neg_vals = 0
for (single_score_track, single_summed_score_track)\
in zip(score_track, original_summed_score_track):
window_passing_track = [
(1.0 if (x > neg_threshold and x < pos_threshold) else 0)
for x in single_summed_score_track]
padded_window_passing_track = [0.0]*int(windowsize-1)
padded_window_passing_track.extend(window_passing_track)
padded_window_passing_track.extend([0.0]*int(windowsize-1))
pos_in_passing_window = window_sum_function(
[padded_window_passing_track])[0]
assert len(single_score_track)==len(pos_in_passing_window)
single_retained_track = []
for (val, pos_passing) in zip(single_score_track,
pos_in_passing_window):
if (pos_passing > 0):
single_retained_track.append(val)
num_pos_vals += (1 if val > 0 else 0)
num_neg_vals += (1 if val < 0 else 0)
retained_track_portions.append(single_retained_track)
print("Fraction of positions retained:",
sum(len(x) for x in retained_track_portions)/
sum(len(x) for x in score_track))
prob_pos = num_pos_vals/float(num_pos_vals + num_neg_vals)
self.rng.seed(self.seed)
null_tracks = []
for i in range(self.num_seq_to_samp):
random_track = retained_track_portions[
int(self.rng.randint(0,len(retained_track_portions)))]
track_with_sign_flips = np.array([
abs(x)*(1 if self.rng.uniform() < prob_pos else -1)
for x in random_track])
if (self.shuffle_pos):
self.rng.shuffle(track_with_sign_flips)
null_tracks.append(track_with_sign_flips)
return np.concatenate(window_sum_function(null_tracks), axis=0)
def get_null_vals(null_track, score_track, window_size,
original_summed_score_track):
if (hasattr(null_track, '__call__')):
null_vals = null_track(
score_track=score_track,
window_size=window_size,
original_summed_score_track=original_summed_score_track)
else:
window_sum_function = get_simple_window_sum_function(window_size)
null_summed_score_track = window_sum_function(arrs=null_track)
null_vals = list(np.concatenate(null_summed_score_track, axis=0))
return null_vals
def subsample_if_large(arr):
if (len(arr) > SUBSAMPLE_CAP):
print("Subsampling!")
sys.stdout.flush()
arr = np.random.RandomState(1234).choice(a=arr, size=SUBSAMPLE_CAP,
replace=False)
return arr
def irval_to_probpos(irval, frac_neg):
#n(x):= pdf of null dist (negatives)
#p(x):= pdf of positive distribution
#f_p:= fraction of positives
#f_n:= fraction of negatives = 1-f_p
#o(x):= pdf of observed distribution = n(x)f_n + p(x)f_p
#The isotonic regression produces a(x) = o(x)/[o(x) + n(x)]
    # o(x)/[o(x) + n(x)] = [n(x)f_n + p(x)f_p]/[n(x)(1+f_n) + p(x)f_p]
# a(x)[n(x)(1+f_n) + p(x)f_p] = n(x)f_n + p(x)f_p
# a(x)n(x)(1+f_n) - n(x)f_n = p(x)f_p - a(x)p(x)f_p
# n(x)[a(x)(1+f_n) - f_n] = p(x)f_p[1 - a(x)]
# [a(x)/f_n + (a(x)-1)]/[1-a(x)] = (p(x)f_p)/(n(x)f_n) = r(x)
#p_pos = 1 / (1 + 1/r(x))
# = [a(x)/f_n + (a(x)-1)]/[a(x)/f_n + (a(x)-1) + (1-a(x))]
# = [a(x)/f_n + a(x)-1]/[a(x)/f_n]
# = [a(x) + f_n(a(x)-1)]/a(x)
# = 1 + f_n(a(x)-1)/a(x)
# = 1 + f_n(1 - 1/a(x))
#If solving for p_pos=0, we have -1/(1 - 1/a(x)) = f_n
#As f_n --> 100%, p_pos --> 2 - 1/a(x); this assumes max(a(x)) = 0.5
return np.minimum(np.maximum(1 + frac_neg*(
1 - (1/np.maximum(irval,1e-7))), 0.0), 1.0)
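# Worked example (hypothetical numbers): with frac_neg=0.95 and an isotonic
# regression output a(x)=0.8, this returns 1 + 0.95*(1 - 1/0.8) = 0.7625,
# which the clipping above keeps inside [0, 1].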
class SavableIsotonicRegression(object):
def __init__(self, origvals, nullvals, increasing, min_frac_neg=0.95):
self.origvals = origvals
self.nullvals = nullvals
self.increasing = increasing
self.min_frac_neg = min_frac_neg
self.ir = IsotonicRegression(out_of_bounds='clip',
increasing=increasing).fit(
X=np.concatenate([self.origvals, self.nullvals], axis=0),
y=([1.0 for x in self.origvals] + [0.0 for x in self.nullvals]),
sample_weight=([1.0 for x in self.origvals]
+[float(len(self.origvals))/len(self.nullvals)
for x in self.nullvals]))
#Infer frac_pos based on the minimum value of the ir probs
#See derivation in irval_to_probpos function
min_prec_x = self.ir.X_min_ if self.increasing else self.ir.X_max_
min_precision = self.ir.transform([min_prec_x])[0]
implied_frac_neg = -1/(1-(1/max(min_precision,1e-7)))
print("For increasing =",increasing,", the minimum IR precision was",
min_precision,"occurring at",min_prec_x,
"implying a frac_neg",
"of",implied_frac_neg)
if (implied_frac_neg > 1.0 or implied_frac_neg < self.min_frac_neg):
implied_frac_neg = max(min(1.0,implied_frac_neg),
self.min_frac_neg)
print("To be conservative, adjusted frac neg is",implied_frac_neg)
self.implied_frac_neg = implied_frac_neg
def transform(self, vals):
return irval_to_probpos(self.ir.transform(vals),
frac_neg=self.implied_frac_neg)
def save_hdf5(self, grp):
grp.attrs['increasing'] = self.increasing
grp.attrs['min_frac_neg'] = self.min_frac_neg
grp.create_dataset('origvals', data=self.origvals)
grp.create_dataset('nullvals', data=self.nullvals)
@classmethod
def from_hdf5(cls, grp):
increasing = grp.attrs['increasing']
min_frac_neg = grp.attrs['min_frac_neg']
origvals = np.array(grp['origvals'])
nullvals = np.array(grp['nullvals'])
return cls(origvals=origvals, nullvals=nullvals,
increasing=increasing, min_frac_neg=min_frac_neg)
def get_isotonic_regression_classifier(orig_vals, null_vals):
orig_vals = subsample_if_large(orig_vals)
null_vals = subsample_if_large(null_vals)
pos_orig_vals = (
np.array(sorted([x for x in orig_vals if x >= 0])))
neg_orig_vals = (
np.array(sorted([x for x in orig_vals if x < 0],
key=lambda x: abs(x))))
pos_null_vals = [x for x in null_vals if x >= 0]
neg_null_vals = [x for x in null_vals if x < 0]
pos_ir = SavableIsotonicRegression(origvals=pos_orig_vals,
nullvals=pos_null_vals, increasing=True)
if (len(neg_orig_vals) > 0):
neg_ir = SavableIsotonicRegression(origvals=neg_orig_vals,
nullvals=neg_null_vals, increasing=False)
else:
neg_ir = None
return pos_ir, neg_ir, orig_vals, null_vals
#sliding in this case would be a list of values
class VariableWindowAroundChunks(AbstractCoordProducer):
count = 0
def __init__(self, sliding, flank, suppress, target_fdr,
min_passing_windows_frac, max_passing_windows_frac,
separate_pos_neg_thresholds,
max_seqlets_total,
progress_update=5000,
verbose=True, plot_save_dir="figures"):
self.sliding = sliding
self.flank = flank
self.suppress = suppress
self.target_fdr = target_fdr
assert max_passing_windows_frac >= min_passing_windows_frac
self.min_passing_windows_frac = min_passing_windows_frac
self.max_passing_windows_frac = max_passing_windows_frac
self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        self.max_seqlets_total = max_seqlets_total
self.progress_update = progress_update
self.verbose = verbose
self.plot_save_dir = plot_save_dir
@classmethod
def from_hdf5(cls, grp):
sliding = np.array(grp["sliding"]).astype("int")
flank = grp.attrs["flank"]
suppress = grp.attrs["suppress"]
target_fdr = grp.attrs["target_fdr"]
min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
if ("max_seqlets_total" in grp.attrs):
max_seqlets_total = grp.attrs["max_seqlets_total"]
else:
max_seqlets_total = None
progress_update = grp.attrs["progress_update"]
verbose = grp.attrs["verbose"]
return cls(sliding=sliding, flank=flank, suppress=suppress,
target_fdr=target_fdr,
min_passing_windows_frac=min_passing_windows_frac,
max_passing_windows_frac=max_passing_windows_frac,
separate_pos_neg_thresholds=separate_pos_neg_thresholds,
max_seqlets_total=max_seqlets_total,
progress_update=progress_update, verbose=verbose)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.create_dataset("sliding", data=np.array(self.sliding))
grp.attrs["flank"] = self.flank
grp.attrs["suppress"] = self.suppress
grp.attrs["target_fdr"] = self.target_fdr
grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
grp.attrs["separate_pos_neg_thresholds"] =\
self.separate_pos_neg_thresholds
if (self.max_seqlets_total is not None):
grp.attrs["max_seqlets_total"] = self.max_seqlets_total
grp.attrs["progress_update"] = self.progress_update
grp.attrs["verbose"] = self.verbose
def fit_pos_and_neg_irs(self, score_track, null_track):
pos_irs = []
neg_irs = []
for sliding_window_size in self.sliding:
window_sum_function = get_simple_window_sum_function(
sliding_window_size)
print("Fitting - on window size",sliding_window_size)
if (hasattr(null_track, '__call__')):
null_vals = null_track(
score_track=score_track,
window_size=sliding_window_size,
original_summed_score_track=None)
else:
null_summed_score_track = window_sum_function(arrs=null_track)
null_vals = np.concatenate(null_summed_score_track,
axis=0)
print("Computing window sums")
sys.stdout.flush()
window_sums_rows = window_sum_function(arrs=score_track)
print("Done computing window sums")
sys.stdout.flush()
orig_vals = np.concatenate(window_sums_rows, axis=0)
pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
get_isotonic_regression_classifier(
orig_vals=np.concatenate(window_sums_rows, axis=0),
null_vals=null_vals)
make_nulldist_figure(orig_vals=subsampled_orig_vals,
null_vals=subsampled_null_vals,
pos_ir=pos_ir, neg_ir=neg_ir,
pos_threshold=None,
neg_threshold=None)
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="scoredist_window"
+str(sliding_window_size)+"_"
+str(VariableWindowAroundChunks.count)+".png")
pos_irs.append(pos_ir)
neg_irs.append(neg_ir)
return pos_irs, neg_irs
def __call__(self, score_track, null_track, tnt_results=None):
if (tnt_results is None):
pos_irs, neg_irs = self.fit_pos_and_neg_irs(
score_track=score_track,
null_track=null_track)
precision_transformer = PrecisionValTransformer(
sliding_window_sizes=self.sliding,
pos_irs=pos_irs,
neg_irs=neg_irs)
(precisiontransformed_score_track,
precisiontransformed_bestwindowsizeidxs) =\
precision_transformer.transform_score_track(
score_track=score_track)
subsampled_prec_vals = subsample_if_large(
np.concatenate(precisiontransformed_score_track, axis=0))
from matplotlib import pyplot as plt
plt.plot(sorted(subsampled_prec_vals),
(np.arange(len(subsampled_prec_vals))/
len(subsampled_prec_vals)))
plt.xlabel("Tranformed IR precision value")
plt.ylabel("CDF")
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="final_prec_vals_cdf_dist"
+str(VariableWindowAroundChunks.count)+".png")
            #Pick a threshold according to the precisiontransformed score track
pos_threshold = (1-self.target_fdr)
neg_threshold = -(1-self.target_fdr)
pos_threshold, neg_threshold =\
refine_thresholds_based_on_frac_passing(
vals=subsampled_prec_vals,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold,
min_passing_windows_frac=self.min_passing_windows_frac,
max_passing_windows_frac=self.max_passing_windows_frac,
separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
verbose=self.verbose)
tnt_results = BasicTransformAndThresholdResults(
transformed_neg_threshold=neg_threshold,
transformed_pos_threshold=pos_threshold,
val_transformer=precision_transformer)
else:
precision_transformer = tnt_results.val_transformer
(precisiontransformed_score_track,
precisiontransformed_bestwindowsizeidxs) =\
precision_transformer.transform_score_track(
score_track=score_track)
#Need to remove padding because identify_coords is assumed to
# operate on a scoretrack that has already been processed with
# a sliding window of window_size (and assumes that partial windows
# were not included)
left_padding_to_remove = int((max(self.sliding)-1)/2)
right_padding_to_remove = (max(self.sliding)-1)-left_padding_to_remove
coords = identify_coords(
score_track=[x[left_padding_to_remove:-right_padding_to_remove]
for x in precisiontransformed_score_track],
pos_threshold=tnt_results.transformed_pos_threshold,
neg_threshold=tnt_results.transformed_neg_threshold,
window_size=max(self.sliding),
flank=self.flank,
suppress=self.suppress,
max_seqlets_total=self.max_seqlets_total,
verbose=self.verbose,
other_info_tracks={'best_window_idx':
[x[left_padding_to_remove:-right_padding_to_remove] for x in
precisiontransformed_bestwindowsizeidxs]})
VariableWindowAroundChunks.count += 1
return CoordProducerResults(
coords=coords,
tnt_results=tnt_results)
#identify_coords is expecting something that has already been processed
# with sliding windows of size window_size
def identify_coords(score_track, pos_threshold, neg_threshold,
window_size, flank, suppress,
max_seqlets_total, verbose, other_info_tracks={}):
for other_info_track in other_info_tracks.values():
assert all([x.shape==y.shape for x,y
in zip(other_info_track,score_track)])
#cp_score_track = 'copy' of the score track, which can be modified as
# coordinates are identified
cp_score_track = [np.array(x) for x in score_track]
#if a position is less than the threshold, set it to -np.inf
#Note that the threshold comparisons need to be >= and not just > for
# cases where there are lots of ties at the high end (e.g. with an IR
    # transformation that gives a lot of values that have a precision of 1.0)
cp_score_track = [
np.array([np.abs(y) if (y >= pos_threshold
or y <= neg_threshold)
else -np.inf for y in x])
for x in cp_score_track]
coords = []
for example_idx,single_score_track in enumerate(cp_score_track):
#set the stuff near the flanks to -np.inf so that we
# don't pick it up during argmax
single_score_track[0:flank] = -np.inf
single_score_track[len(single_score_track)-(flank):
len(single_score_track)] = -np.inf
while True:
argmax = np.argmax(single_score_track,axis=0)
max_val = single_score_track[argmax]
#bail if exhausted everything that passed the threshold
#and was not suppressed
if (max_val == -np.inf):
break
#need to be able to expand without going off the edge
if ((argmax >= flank) and
(argmax < (len(single_score_track)-flank))):
coord = SeqletCoordsFWAP(
example_idx=example_idx,
start=argmax-flank,
end=argmax+window_size+flank,
score=score_track[example_idx][argmax],
other_info = dict([
(track_name, track[example_idx][argmax])
for (track_name, track) in other_info_tracks.items()]))
assert (coord.score >= pos_threshold
or coord.score <= neg_threshold)
coords.append(coord)
else:
assert False,\
("This shouldn't happen because I set stuff near the"
"border to -np.inf early on")
#suppress the chunks within +- suppress
left_supp_idx = int(max(np.floor(argmax+0.5-suppress),0))
right_supp_idx = int(min(np.ceil(argmax+0.5+suppress),
len(single_score_track)))
single_score_track[left_supp_idx:right_supp_idx] = -np.inf
if (verbose):
print("Got "+str(len(coords))+" coords")
sys.stdout.flush()
if ((max_seqlets_total is not None) and
len(coords) > max_seqlets_total):
if (verbose):
print("Limiting to top "+str(max_seqlets_total))
sys.stdout.flush()
coords = sorted(coords, key=lambda x: -np.abs(x.score))\
[:max_seqlets_total]
return coords
def refine_thresholds_based_on_frac_passing(
vals, pos_threshold, neg_threshold,
min_passing_windows_frac, max_passing_windows_frac,
separate_pos_neg_thresholds, verbose):
frac_passing_windows =(
sum(vals >= pos_threshold)
+ sum(vals <= neg_threshold))/float(len(vals))
if (verbose):
print("Thresholds from null dist were",
neg_threshold," and ",pos_threshold,
"with frac passing", frac_passing_windows)
pos_vals = [x for x in vals if x >= 0]
neg_vals = [x for x in vals if x < 0]
    #deal with edge case of length 0
pos_vals = [0] if len(pos_vals)==0 else pos_vals
neg_vals = [0] if len(neg_vals)==0 else neg_vals
    #adjust the thresholds if they fall outside the min/max
# windows frac
if (frac_passing_windows < min_passing_windows_frac):
if (verbose):
print("Passing windows frac was",
frac_passing_windows,", which is below ",
min_passing_windows_frac,"; adjusting")
if (separate_pos_neg_thresholds):
pos_threshold = np.percentile(
a=pos_vals,
q=100*(1-min_passing_windows_frac))
neg_threshold = np.percentile(
a=neg_vals,
q=100*(min_passing_windows_frac))
else:
pos_threshold = np.percentile(
a=np.abs(vals),
q=100*(1-min_passing_windows_frac))
neg_threshold = -pos_threshold
if (frac_passing_windows > max_passing_windows_frac):
if (verbose):
print("Passing windows frac was",
frac_passing_windows,", which is above ",
max_passing_windows_frac,"; adjusting")
if (separate_pos_neg_thresholds):
pos_threshold = np.percentile(
a=pos_vals,
q=100*(1-max_passing_windows_frac))
neg_threshold = np.percentile(
a=neg_vals,
q=100*(max_passing_windows_frac))
else:
pos_threshold = np.percentile(
a=np.abs(vals),
q=100*(1-max_passing_windows_frac))
neg_threshold = -pos_threshold
if (verbose):
print("New thresholds are",pos_threshold,"and",neg_threshold)
return pos_threshold, neg_threshold
def make_nulldist_figure(orig_vals, null_vals, pos_ir, neg_ir,
pos_threshold, neg_threshold):
from matplotlib import pyplot as plt
fig,ax1 = plt.subplots()
orig_vals = np.array(sorted(orig_vals))
ax1.hist(orig_vals, bins=100, density=True, alpha=0.5)
ax1.hist(null_vals, bins=100, density=True, alpha=0.5)
ax1.set_ylabel("Probability density\n(blue=foreground, orange=null)")
ax1.set_xlabel("Total importance in window")
precisions = pos_ir.transform(orig_vals)
if (neg_ir is not None):
precisions = np.maximum(precisions, neg_ir.transform(orig_vals))
ax2 = ax1.twinx()
ax2.plot(orig_vals, precisions)
if (pos_threshold is not None):
ax2.plot([pos_threshold, pos_threshold], [0.0, 1.0], color="red")
if (neg_threshold is not None):
ax2.plot([neg_threshold, neg_threshold], [0.0, 1.0], color="red")
ax2.set_ylabel("Estimated foreground precision")
ax2.set_ylim(0.0, 1.02)
class FixedWindowAroundChunks(AbstractCoordProducer):
count = 0
def __init__(self, sliding,
flank,
suppress, #flanks to suppress
target_fdr,
min_passing_windows_frac,
max_passing_windows_frac,
separate_pos_neg_thresholds=False,
max_seqlets_total=None,
progress_update=5000,
verbose=True,
plot_save_dir="figures"):
self.sliding = sliding
self.flank = flank
self.suppress = suppress
self.target_fdr = target_fdr
assert max_passing_windows_frac >= min_passing_windows_frac
self.min_passing_windows_frac = min_passing_windows_frac
self.max_passing_windows_frac = max_passing_windows_frac
self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        self.max_seqlets_total = max_seqlets_total
self.progress_update = progress_update
self.verbose = verbose
self.plot_save_dir = plot_save_dir
@classmethod
def from_hdf5(cls, grp):
sliding = grp.attrs["sliding"]
flank = grp.attrs["flank"]
suppress = grp.attrs["suppress"]
target_fdr = grp.attrs["target_fdr"]
min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
if ("max_seqlets_total" in grp.attrs):
max_seqlets_total = grp.attrs["max_seqlets_total"]
else:
max_seqlets_total = None
progress_update = grp.attrs["progress_update"]
verbose = grp.attrs["verbose"]
return cls(sliding=sliding, flank=flank, suppress=suppress,
target_fdr=target_fdr,
min_passing_windows_frac=min_passing_windows_frac,
max_passing_windows_frac=max_passing_windows_frac,
separate_pos_neg_thresholds=separate_pos_neg_thresholds,
max_seqlets_total=max_seqlets_total,
progress_update=progress_update, verbose=verbose)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["sliding"] = self.sliding
grp.attrs["flank"] = self.flank
grp.attrs["suppress"] = self.suppress
grp.attrs["target_fdr"] = self.target_fdr
grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
grp.attrs["separate_pos_neg_thresholds"] =\
self.separate_pos_neg_thresholds
if (self.max_seqlets_total is not None):
grp.attrs["max_seqlets_total"] = self.max_seqlets_total
grp.attrs["progress_update"] = self.progress_update
grp.attrs["verbose"] = self.verbose
def __call__(self, score_track, null_track, tnt_results=None):
        # score_track can now be a list of arrays
assert all([len(x.shape)==1 for x in score_track])
window_sum_function = get_simple_window_sum_function(self.sliding)
if (self.verbose):
print("Computing windowed sums on original")
sys.stdout.flush()
original_summed_score_track = window_sum_function(arrs=score_track)
#Determine the window thresholds
if (tnt_results is None):
if (self.verbose):
print("Generating null dist")
sys.stdout.flush()
null_vals = get_null_vals(
null_track=null_track,
score_track=score_track,
window_size=self.sliding,
original_summed_score_track=original_summed_score_track)
if (self.verbose):
print("Computing threshold")
sys.stdout.flush()
orig_vals = list(
np.concatenate(original_summed_score_track, axis=0))
#Note that orig_vals may have been subsampled at this point
pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
get_isotonic_regression_classifier(
orig_vals=orig_vals,
null_vals=null_vals)
subsampled_pos_orig_vals = (
np.array(sorted([x for x in subsampled_orig_vals if x >= 0])))
subsampled_neg_orig_vals = (
np.array(sorted([x for x in subsampled_orig_vals if x < 0],
key=lambda x: abs(x))))
subsampled_pos_val_precisions =\
pos_ir.transform(subsampled_pos_orig_vals)
if (len(subsampled_neg_orig_vals) > 0):
subsampled_neg_val_precisions =\
neg_ir.transform(subsampled_neg_orig_vals)
pos_threshold = ([x[1] for x in
zip(subsampled_pos_val_precisions,
subsampled_pos_orig_vals) if x[0]
>= (1-self.target_fdr)]+[subsampled_pos_orig_vals[-1]])[0]
if (len(subsampled_neg_orig_vals) > 0):
neg_threshold = ([x[1] for x in
zip(subsampled_neg_val_precisions,
subsampled_neg_orig_vals) if x[0]
>= (1-self.target_fdr)]+[subsampled_neg_orig_vals[-1]])[0]
else:
neg_threshold = -np.inf
pos_threshold, neg_threshold =\
refine_thresholds_based_on_frac_passing(
vals=subsampled_orig_vals,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold,
min_passing_windows_frac=self.min_passing_windows_frac,
max_passing_windows_frac=self.max_passing_windows_frac,
separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
verbose=self.verbose)
if (self.separate_pos_neg_thresholds):
val_transformer = SignedPercentileValTransformer(
distribution=orig_vals)
else:
val_transformer = AbsPercentileValTransformer(
distribution=orig_vals)
if (self.verbose):
print("Final raw thresholds are",
neg_threshold," and ",pos_threshold)
print("Final transformed thresholds are",
val_transformer(neg_threshold)," and ",
val_transformer(pos_threshold))
make_nulldist_figure(orig_vals=subsampled_orig_vals,
null_vals=subsampled_null_vals,
pos_ir=pos_ir, neg_ir=neg_ir,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold)
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="scoredist_"
+str(FixedWindowAroundChunks.count)+".png")
FixedWindowAroundChunks.count += 1
tnt_results = FWACTransformAndThresholdResults(
neg_threshold=neg_threshold,
transformed_neg_threshold=val_transformer(neg_threshold),
pos_threshold=pos_threshold,
transformed_pos_threshold=val_transformer(pos_threshold),
val_transformer=val_transformer)
coords = identify_coords(
score_track=original_summed_score_track,
pos_threshold=tnt_results.pos_threshold,
neg_threshold=tnt_results.neg_threshold,
window_size=self.sliding,
flank=self.flank,
suppress=self.suppress,
max_seqlets_total=self.max_seqlets_total,
verbose=self.verbose)
return CoordProducerResults(
coords=coords,
tnt_results=tnt_results)
| 42.294176
| 80
| 0.618336
| 31,771
| 0.741551
| 0
| 0
| 5,430
| 0.126739
| 0
| 0
| 5,769
| 0.134651
|
c04f8c1ca8657a2985f474bb739ac4de154e1a01
| 425
|
py
|
Python
|
Google Jam/2016/lastword.py
|
djphan/Prog-Problems
|
db79d76f8a40e844c8cc61b3df2c0d52737ee9e4
|
[
"MIT"
] | null | null | null |
Google Jam/2016/lastword.py
|
djphan/Prog-Problems
|
db79d76f8a40e844c8cc61b3df2c0d52737ee9e4
|
[
"MIT"
] | null | null | null |
Google Jam/2016/lastword.py
|
djphan/Prog-Problems
|
db79d76f8a40e844c8cc61b3df2c0d52737ee9e4
|
[
"MIT"
] | null | null | null |
import sys
def lastWord(inputString):
    outputString = inputString[0]
    for i in range(1, len(inputString)):
        # Prepending the new character wins exactly when char + current sorts
        # after current + char; sorted(...)[1] picks the larger of the two.
        intermediateString = sorted([outputString + inputString[i], inputString[i] + outputString])
        outputString = intermediateString[1]
    return outputString
numTests = input()
for i in range (0, int(numTests)):
print ("Case #" + str(i+1) +": " + str(lastWord(input())))
| 25
| 100
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.028235
|
c04ff3ada5e9e3495ef3e426dee60d1388e47451
| 62,817
|
py
|
Python
|
aiotdlib/api/types/update.py
|
pylakey/pytdlib
|
a390a298a24a7123f3f3aec9f995dee6d51a478e
|
[
"MIT"
] | 37
|
2021-05-04T10:41:41.000Z
|
2022-03-30T13:48:05.000Z
|
aiotdlib/api/types/update.py
|
pylakey/pytdlib
|
a390a298a24a7123f3f3aec9f995dee6d51a478e
|
[
"MIT"
] | 13
|
2021-07-17T19:54:51.000Z
|
2022-02-26T06:50:00.000Z
|
aiotdlib/api/types/update.py
|
pylakey/pytdlib
|
a390a298a24a7123f3f3aec9f995dee6d51a478e
|
[
"MIT"
] | 7
|
2021-09-22T21:27:11.000Z
|
2022-02-20T02:33:19.000Z
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from .address import Address
from .authorization_state import AuthorizationState
from .background import Background
from .basic_group import BasicGroup
from .basic_group_full_info import BasicGroupFullInfo
from .call import Call
from .callback_query_payload import CallbackQueryPayload
from .chat import Chat
from .chat_action import ChatAction
from .chat_action_bar import ChatActionBar
from .chat_filter_info import ChatFilterInfo
from .chat_invite_link import ChatInviteLink
from .chat_join_request import ChatJoinRequest
from .chat_join_requests_info import ChatJoinRequestsInfo
from .chat_list import ChatList
from .chat_member import ChatMember
from .chat_nearby import ChatNearby
from .chat_notification_settings import ChatNotificationSettings
from .chat_permissions import ChatPermissions
from .chat_photo_info import ChatPhotoInfo
from .chat_position import ChatPosition
from .chat_theme import ChatTheme
from .chat_type import ChatType
from .connection_state import ConnectionState
from .draft_message import DraftMessage
from .file import File
from .group_call import GroupCall
from .group_call_participant import GroupCallParticipant
from .language_pack_string import LanguagePackString
from .location import Location
from .message import Message
from .message_content import MessageContent
from .message_interaction_info import MessageInteractionInfo
from .message_sender import MessageSender
from .notification import Notification
from .notification_group import NotificationGroup
from .notification_group_type import NotificationGroupType
from .notification_settings_scope import NotificationSettingsScope
from .option_value import OptionValue
from .order_info import OrderInfo
from .poll import Poll
from .reply_markup import ReplyMarkup
from .scope_notification_settings import ScopeNotificationSettings
from .secret_chat import SecretChat
from .sticker import Sticker
from .sticker_set import StickerSet
from .sticker_sets import StickerSets
from .suggested_action import SuggestedAction
from .supergroup import Supergroup
from .supergroup_full_info import SupergroupFullInfo
from .terms_of_service import TermsOfService
from .user import User
from .user_full_info import UserFullInfo
from .user_privacy_setting import UserPrivacySetting
from .user_privacy_setting_rules import UserPrivacySettingRules
from .user_status import UserStatus
from .video_chat import VideoChat
from ..base_object import BaseObject
class Update(BaseObject):
"""
Contains notifications about data changes
"""
ID: str = Field("update", alias="@type")
class UpdateActiveNotifications(Update):
"""
Contains active notifications that was shown on previous application launches. This update is sent only if the message database is used. In that case it comes once before any updateNotification and updateNotificationGroup update
:param groups: Lists of active notification groups
:type groups: :class:`list[NotificationGroup]`
"""
ID: str = Field("updateActiveNotifications", alias="@type")
groups: list[NotificationGroup]
@staticmethod
def read(q: dict) -> UpdateActiveNotifications:
return UpdateActiveNotifications.construct(**q)
class UpdateAnimatedEmojiMessageClicked(Update):
"""
Some animated emoji message was clicked and a big animated sticker must be played if the message is visible on the screen. chatActionWatchingAnimations with the text of the message needs to be sent if the sticker is played
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
:param sticker: The animated sticker to be played
:type sticker: :class:`Sticker`
"""
ID: str = Field("updateAnimatedEmojiMessageClicked", alias="@type")
chat_id: int
message_id: int
sticker: Sticker
@staticmethod
def read(q: dict) -> UpdateAnimatedEmojiMessageClicked:
return UpdateAnimatedEmojiMessageClicked.construct(**q)
class UpdateAnimationSearchParameters(Update):
"""
The parameters of animation search through GetOption("animation_search_bot_username") bot has changed
:param provider: Name of the animation search provider
:type provider: :class:`str`
:param emojis: The new list of emojis suggested for searching
:type emojis: :class:`list[str]`
"""
ID: str = Field("updateAnimationSearchParameters", alias="@type")
provider: str
emojis: list[str]
@staticmethod
def read(q: dict) -> UpdateAnimationSearchParameters:
return UpdateAnimationSearchParameters.construct(**q)
class UpdateAuthorizationState(Update):
"""
The user authorization state has changed
:param authorization_state: New authorization state
:type authorization_state: :class:`AuthorizationState`
"""
ID: str = Field("updateAuthorizationState", alias="@type")
authorization_state: AuthorizationState
@staticmethod
def read(q: dict) -> UpdateAuthorizationState:
return UpdateAuthorizationState.construct(**q)
class UpdateBasicGroup(Update):
"""
Some data of a basic group has changed. This update is guaranteed to come before the basic group identifier is returned to the application
:param basic_group: New data about the group
:type basic_group: :class:`BasicGroup`
"""
ID: str = Field("updateBasicGroup", alias="@type")
basic_group: BasicGroup
@staticmethod
def read(q: dict) -> UpdateBasicGroup:
return UpdateBasicGroup.construct(**q)
class UpdateBasicGroupFullInfo(Update):
"""
Some data in basicGroupFullInfo has been changed
:param basic_group_id: Identifier of a basic group
:type basic_group_id: :class:`int`
:param basic_group_full_info: New full information about the group
:type basic_group_full_info: :class:`BasicGroupFullInfo`
"""
ID: str = Field("updateBasicGroupFullInfo", alias="@type")
basic_group_id: int
basic_group_full_info: BasicGroupFullInfo
@staticmethod
def read(q: dict) -> UpdateBasicGroupFullInfo:
return UpdateBasicGroupFullInfo.construct(**q)
class UpdateCall(Update):
"""
A new call was created or information about a call was updated
:param call: New data about a call
:type call: :class:`Call`
"""
ID: str = Field("updateCall", alias="@type")
call: Call
@staticmethod
def read(q: dict) -> UpdateCall:
return UpdateCall.construct(**q)
class UpdateChatAction(Update):
"""
A message sender activity in the chat has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_thread_id: If not 0, a message thread identifier in which the action was performed
:type message_thread_id: :class:`int`
:param sender_id: Identifier of a message sender performing the action
:type sender_id: :class:`MessageSender`
:param action: The action
:type action: :class:`ChatAction`
"""
ID: str = Field("updateChatAction", alias="@type")
chat_id: int
message_thread_id: int
sender_id: MessageSender
action: ChatAction
@staticmethod
def read(q: dict) -> UpdateChatAction:
return UpdateChatAction.construct(**q)
class UpdateChatActionBar(Update):
"""
The chat action bar was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param action_bar: The new value of the action bar; may be null, defaults to None
:type action_bar: :class:`ChatActionBar`, optional
"""
ID: str = Field("updateChatActionBar", alias="@type")
chat_id: int
action_bar: typing.Optional[ChatActionBar] = None
@staticmethod
def read(q: dict) -> UpdateChatActionBar:
return UpdateChatActionBar.construct(**q)
class UpdateChatDefaultDisableNotification(Update):
"""
The value of the default disable_notification parameter, used when a message is sent to the chat, was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param default_disable_notification: The new default_disable_notification value
:type default_disable_notification: :class:`bool`
"""
ID: str = Field("updateChatDefaultDisableNotification", alias="@type")
chat_id: int
default_disable_notification: bool
@staticmethod
def read(q: dict) -> UpdateChatDefaultDisableNotification:
return UpdateChatDefaultDisableNotification.construct(**q)
class UpdateChatDraftMessage(Update):
"""
A chat draft has changed. Be aware that the update may come in the currently opened chat but with old content of the draft. If the user has changed the content of the draft, this update mustn't be applied
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param draft_message: The new draft message; may be null, defaults to None
:type draft_message: :class:`DraftMessage`, optional
:param positions: The new chat positions in the chat lists
:type positions: :class:`list[ChatPosition]`
"""
ID: str = Field("updateChatDraftMessage", alias="@type")
chat_id: int
draft_message: typing.Optional[DraftMessage] = None
positions: list[ChatPosition]
@staticmethod
def read(q: dict) -> UpdateChatDraftMessage:
return UpdateChatDraftMessage.construct(**q)
class UpdateChatFilters(Update):
"""
The list of chat filters or a chat filter has changed
:param chat_filters: The new list of chat filters
:type chat_filters: :class:`list[ChatFilterInfo]`
"""
ID: str = Field("updateChatFilters", alias="@type")
chat_filters: list[ChatFilterInfo]
@staticmethod
def read(q: dict) -> UpdateChatFilters:
return UpdateChatFilters.construct(**q)
class UpdateChatHasProtectedContent(Update):
"""
A chat's content was allowed or restricted for saving
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param has_protected_content: New value of has_protected_content
:type has_protected_content: :class:`bool`
"""
ID: str = Field("updateChatHasProtectedContent", alias="@type")
chat_id: int
has_protected_content: bool
@staticmethod
def read(q: dict) -> UpdateChatHasProtectedContent:
return UpdateChatHasProtectedContent.construct(**q)
class UpdateChatHasScheduledMessages(Update):
"""
A chat's has_scheduled_messages field has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param has_scheduled_messages: New value of has_scheduled_messages
:type has_scheduled_messages: :class:`bool`
"""
ID: str = Field("updateChatHasScheduledMessages", alias="@type")
chat_id: int
has_scheduled_messages: bool
@staticmethod
def read(q: dict) -> UpdateChatHasScheduledMessages:
return UpdateChatHasScheduledMessages.construct(**q)
class UpdateChatIsBlocked(Update):
"""
A chat was blocked or unblocked
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param is_blocked: New value of is_blocked
:type is_blocked: :class:`bool`
"""
ID: str = Field("updateChatIsBlocked", alias="@type")
chat_id: int
is_blocked: bool
@staticmethod
def read(q: dict) -> UpdateChatIsBlocked:
return UpdateChatIsBlocked.construct(**q)
class UpdateChatIsMarkedAsUnread(Update):
"""
A chat was marked as unread or was read
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param is_marked_as_unread: New value of is_marked_as_unread
:type is_marked_as_unread: :class:`bool`
"""
ID: str = Field("updateChatIsMarkedAsUnread", alias="@type")
chat_id: int
is_marked_as_unread: bool
@staticmethod
def read(q: dict) -> UpdateChatIsMarkedAsUnread:
return UpdateChatIsMarkedAsUnread.construct(**q)
class UpdateChatLastMessage(Update):
"""
The last message of a chat was changed. If last_message is null, then the last message in the chat became unknown. Some new unknown messages might be added to the chat in this case
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param last_message: The new last message in the chat; may be null, defaults to None
:type last_message: :class:`Message`, optional
:param positions: The new chat positions in the chat lists
:type positions: :class:`list[ChatPosition]`
"""
ID: str = Field("updateChatLastMessage", alias="@type")
chat_id: int
last_message: typing.Optional[Message] = None
positions: list[ChatPosition]
@staticmethod
def read(q: dict) -> UpdateChatLastMessage:
return UpdateChatLastMessage.construct(**q)
class UpdateChatMember(Update):
"""
User rights changed in a chat; for bots only
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param actor_user_id: Identifier of the user changing the rights
:type actor_user_id: :class:`int`
:param date: Point in time (Unix timestamp) when the user rights were changed
:type date: :class:`int`
:param invite_link: If user has joined the chat using an invite link, the invite link; may be null, defaults to None
:type invite_link: :class:`ChatInviteLink`, optional
:param old_chat_member: Previous chat member
:type old_chat_member: :class:`ChatMember`
:param new_chat_member: New chat member
:type new_chat_member: :class:`ChatMember`
"""
ID: str = Field("updateChatMember", alias="@type")
chat_id: int
actor_user_id: int
date: int
invite_link: typing.Optional[ChatInviteLink] = None
old_chat_member: ChatMember
new_chat_member: ChatMember
@staticmethod
def read(q: dict) -> UpdateChatMember:
return UpdateChatMember.construct(**q)
class UpdateChatMessageSender(Update):
"""
The message sender that is selected to send messages in a chat has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_sender_id: New value of message_sender_id; may be null if the user can't change message sender, defaults to None
:type message_sender_id: :class:`MessageSender`, optional
"""
ID: str = Field("updateChatMessageSender", alias="@type")
chat_id: int
message_sender_id: typing.Optional[MessageSender] = None
@staticmethod
def read(q: dict) -> UpdateChatMessageSender:
return UpdateChatMessageSender.construct(**q)
class UpdateChatMessageTtl(Update):
"""
The message Time To Live setting for a chat was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_ttl: New value of message_ttl
:type message_ttl: :class:`int`
"""
ID: str = Field("updateChatMessageTtl", alias="@type")
chat_id: int
message_ttl: int
@staticmethod
def read(q: dict) -> UpdateChatMessageTtl:
return UpdateChatMessageTtl.construct(**q)
class UpdateChatNotificationSettings(Update):
"""
Notification settings for a chat were changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param notification_settings: The new notification settings
:type notification_settings: :class:`ChatNotificationSettings`
"""
ID: str = Field("updateChatNotificationSettings", alias="@type")
chat_id: int
notification_settings: ChatNotificationSettings
@staticmethod
def read(q: dict) -> UpdateChatNotificationSettings:
return UpdateChatNotificationSettings.construct(**q)
class UpdateChatOnlineMemberCount(Update):
"""
The number of online group members has changed. This update with non-zero count is sent only for currently opened chats. There is no guarantee that it will be sent just after the count has changed
:param chat_id: Identifier of the chat
:type chat_id: :class:`int`
:param online_member_count: New number of online members in the chat, or 0 if unknown
:type online_member_count: :class:`int`
"""
ID: str = Field("updateChatOnlineMemberCount", alias="@type")
chat_id: int
online_member_count: int
@staticmethod
def read(q: dict) -> UpdateChatOnlineMemberCount:
return UpdateChatOnlineMemberCount.construct(**q)
class UpdateChatPendingJoinRequests(Update):
"""
The chat pending join requests were changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param pending_join_requests: The new data about pending join requests; may be null, defaults to None
:type pending_join_requests: :class:`ChatJoinRequestsInfo`, optional
"""
ID: str = Field("updateChatPendingJoinRequests", alias="@type")
chat_id: int
pending_join_requests: typing.Optional[ChatJoinRequestsInfo] = None
@staticmethod
def read(q: dict) -> UpdateChatPendingJoinRequests:
return UpdateChatPendingJoinRequests.construct(**q)
class UpdateChatPermissions(Update):
"""
Chat permissions were changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param permissions: The new chat permissions
:type permissions: :class:`ChatPermissions`
"""
ID: str = Field("updateChatPermissions", alias="@type")
chat_id: int
permissions: ChatPermissions
@staticmethod
def read(q: dict) -> UpdateChatPermissions:
return UpdateChatPermissions.construct(**q)
class UpdateChatPhoto(Update):
"""
A chat photo was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param photo: The new chat photo; may be null, defaults to None
:type photo: :class:`ChatPhotoInfo`, optional
"""
ID: str = Field("updateChatPhoto", alias="@type")
chat_id: int
photo: typing.Optional[ChatPhotoInfo] = None
@staticmethod
def read(q: dict) -> UpdateChatPhoto:
return UpdateChatPhoto.construct(**q)
class UpdateChatPosition(Update):
"""
The position of a chat in a chat list has changed. Instead of this update updateChatLastMessage or updateChatDraftMessage might be sent
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param position: New chat position. If new order is 0, then the chat needs to be removed from the list
:type position: :class:`ChatPosition`
"""
ID: str = Field("updateChatPosition", alias="@type")
chat_id: int
position: ChatPosition
@staticmethod
def read(q: dict) -> UpdateChatPosition:
return UpdateChatPosition.construct(**q)
class UpdateChatReadInbox(Update):
"""
Incoming messages were read or the number of unread messages has been changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param last_read_inbox_message_id: Identifier of the last read incoming message
:type last_read_inbox_message_id: :class:`int`
:param unread_count: The number of unread messages left in the chat
:type unread_count: :class:`int`
"""
ID: str = Field("updateChatReadInbox", alias="@type")
chat_id: int
last_read_inbox_message_id: int
unread_count: int
@staticmethod
def read(q: dict) -> UpdateChatReadInbox:
return UpdateChatReadInbox.construct(**q)
class UpdateChatReadOutbox(Update):
"""
Outgoing messages were read
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param last_read_outbox_message_id: Identifier of last read outgoing message
:type last_read_outbox_message_id: :class:`int`
"""
ID: str = Field("updateChatReadOutbox", alias="@type")
chat_id: int
last_read_outbox_message_id: int
@staticmethod
def read(q: dict) -> UpdateChatReadOutbox:
return UpdateChatReadOutbox.construct(**q)
class UpdateChatReplyMarkup(Update):
"""
The default chat reply markup was changed. Can occur because new messages with reply markup were received or because an old reply markup was hidden by the user
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param reply_markup_message_id: Identifier of the message from which reply markup needs to be used; 0 if there is no default custom reply markup in the chat
:type reply_markup_message_id: :class:`int`
"""
ID: str = Field("updateChatReplyMarkup", alias="@type")
chat_id: int
reply_markup_message_id: int
@staticmethod
def read(q: dict) -> UpdateChatReplyMarkup:
return UpdateChatReplyMarkup.construct(**q)
class UpdateChatTheme(Update):
"""
The chat theme was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param theme_name: The new name of the chat theme; may be empty if theme was reset to default
:type theme_name: :class:`str`
"""
ID: str = Field("updateChatTheme", alias="@type")
chat_id: int
theme_name: str
@staticmethod
def read(q: dict) -> UpdateChatTheme:
return UpdateChatTheme.construct(**q)
class UpdateChatThemes(Update):
"""
The list of available chat themes has changed
:param chat_themes: The new list of chat themes
:type chat_themes: :class:`list[ChatTheme]`
"""
ID: str = Field("updateChatThemes", alias="@type")
chat_themes: list[ChatTheme]
@staticmethod
def read(q: dict) -> UpdateChatThemes:
return UpdateChatThemes.construct(**q)
class UpdateChatTitle(Update):
"""
The title of a chat was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param title: The new chat title
:type title: :class:`str`
"""
ID: str = Field("updateChatTitle", alias="@type")
chat_id: int
title: str
@staticmethod
def read(q: dict) -> UpdateChatTitle:
return UpdateChatTitle.construct(**q)
class UpdateChatUnreadMentionCount(Update):
"""
The chat unread_mention_count has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param unread_mention_count: The number of unread mention messages left in the chat
:type unread_mention_count: :class:`int`
"""
ID: str = Field("updateChatUnreadMentionCount", alias="@type")
chat_id: int
unread_mention_count: int
@staticmethod
def read(q: dict) -> UpdateChatUnreadMentionCount:
return UpdateChatUnreadMentionCount.construct(**q)
class UpdateChatVideoChat(Update):
"""
A chat video chat state has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param video_chat: New value of video_chat
:type video_chat: :class:`VideoChat`
"""
ID: str = Field("updateChatVideoChat", alias="@type")
chat_id: int
video_chat: VideoChat
@staticmethod
def read(q: dict) -> UpdateChatVideoChat:
return UpdateChatVideoChat.construct(**q)
class UpdateConnectionState(Update):
"""
The connection state has changed. This update must be used only to show a human-readable description of the connection state
:param state: The new connection state
:type state: :class:`ConnectionState`
"""
ID: str = Field("updateConnectionState", alias="@type")
state: ConnectionState
@staticmethod
def read(q: dict) -> UpdateConnectionState:
return UpdateConnectionState.construct(**q)
class UpdateDeleteMessages(Update):
"""
Some messages were deleted
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_ids: Identifiers of the deleted messages
:type message_ids: :class:`list[int]`
:param is_permanent: True, if the messages are permanently deleted by a user (as opposed to just becoming inaccessible)
:type is_permanent: :class:`bool`
:param from_cache: True, if the messages are deleted only from the cache and can possibly be retrieved again in the future
:type from_cache: :class:`bool`
"""
ID: str = Field("updateDeleteMessages", alias="@type")
chat_id: int
message_ids: list[int]
is_permanent: bool
from_cache: bool
@staticmethod
def read(q: dict) -> UpdateDeleteMessages:
return UpdateDeleteMessages.construct(**q)
class UpdateDiceEmojis(Update):
"""
The list of supported dice emojis has changed
:param emojis: The new list of supported dice emojis
:type emojis: :class:`list[str]`
"""
ID: str = Field("updateDiceEmojis", alias="@type")
emojis: list[str]
@staticmethod
def read(q: dict) -> UpdateDiceEmojis:
return UpdateDiceEmojis.construct(**q)
class UpdateFavoriteStickers(Update):
"""
The list of favorite stickers was updated
:param sticker_ids: The new list of file identifiers of favorite stickers
:type sticker_ids: :class:`list[int]`
"""
ID: str = Field("updateFavoriteStickers", alias="@type")
sticker_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateFavoriteStickers:
return UpdateFavoriteStickers.construct(**q)
class UpdateFile(Update):
"""
Information about a file was updated
:param file: New data about the file
:type file: :class:`File`
"""
ID: str = Field("updateFile", alias="@type")
file: File
@staticmethod
def read(q: dict) -> UpdateFile:
return UpdateFile.construct(**q)
class UpdateFileGenerationStart(Update):
"""
The file generation process needs to be started by the application
:param generation_id: Unique identifier for the generation process
:type generation_id: :class:`int`
:param original_path: The path to a file from which a new file is generated; may be empty
:type original_path: :class:`str`
:param destination_path: The path to a file that must be created and where the new file is generated
:type destination_path: :class:`str`
:param conversion: String specifying the conversion applied to the original file. If conversion is "#url#" then original_path contains an HTTP/HTTPS URL of a file, which must be downloaded by the application
:type conversion: :class:`str`
"""
ID: str = Field("updateFileGenerationStart", alias="@type")
generation_id: int
original_path: str
destination_path: str
conversion: str
@staticmethod
def read(q: dict) -> UpdateFileGenerationStart:
return UpdateFileGenerationStart.construct(**q)
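# Purely illustrative handling of the "#url#" convention documented above; the
# `download_to` helper is a placeholder name, not part of this package:
#
#     if update.conversion == "#url#":
#         download_to(update.original_path, update.destination_path)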
class UpdateFileGenerationStop(Update):
"""
File generation is no longer needed
:param generation_id: Unique identifier for the generation process
:type generation_id: :class:`int`
"""
ID: str = Field("updateFileGenerationStop", alias="@type")
generation_id: int
@staticmethod
def read(q: dict) -> UpdateFileGenerationStop:
return UpdateFileGenerationStop.construct(**q)
class UpdateGroupCall(Update):
"""
Information about a group call was updated
:param group_call: New data about a group call
:type group_call: :class:`GroupCall`
"""
ID: str = Field("updateGroupCall", alias="@type")
group_call: GroupCall
@staticmethod
def read(q: dict) -> UpdateGroupCall:
return UpdateGroupCall.construct(**q)
class UpdateGroupCallParticipant(Update):
"""
Information about a group call participant was changed. The updates are sent only after the group call is received through getGroupCall and only if the call is joined or being joined
:param group_call_id: Identifier of group call
:type group_call_id: :class:`int`
:param participant: New data about a participant
:type participant: :class:`GroupCallParticipant`
"""
ID: str = Field("updateGroupCallParticipant", alias="@type")
group_call_id: int
participant: GroupCallParticipant
@staticmethod
def read(q: dict) -> UpdateGroupCallParticipant:
return UpdateGroupCallParticipant.construct(**q)
class UpdateHavePendingNotifications(Update):
"""
Describes whether there are some pending notification updates. Can be used to prevent the application from being killed while there are pending notifications
:param have_delayed_notifications: True, if there are some delayed notification updates, which will be sent soon
:type have_delayed_notifications: :class:`bool`
:param have_unreceived_notifications: True, if there can be some yet unreceived notifications, which are being fetched from the server
:type have_unreceived_notifications: :class:`bool`
"""
ID: str = Field("updateHavePendingNotifications", alias="@type")
have_delayed_notifications: bool
have_unreceived_notifications: bool
@staticmethod
def read(q: dict) -> UpdateHavePendingNotifications:
return UpdateHavePendingNotifications.construct(**q)
class UpdateInstalledStickerSets(Update):
"""
The list of installed sticker sets was updated
:param is_masks: True, if the list of installed mask sticker sets was updated
:type is_masks: :class:`bool`
:param sticker_set_ids: The new list of installed ordinary sticker sets
:type sticker_set_ids: :class:`list[int]`
"""
ID: str = Field("updateInstalledStickerSets", alias="@type")
is_masks: bool
sticker_set_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateInstalledStickerSets:
return UpdateInstalledStickerSets.construct(**q)
class UpdateLanguagePackStrings(Update):
"""
Some language pack strings have been updated
:param localization_target: Localization target to which the language pack belongs
:type localization_target: :class:`str`
:param language_pack_id: Identifier of the updated language pack
:type language_pack_id: :class:`str`
:param strings: List of changed language pack strings
:type strings: :class:`list[LanguagePackString]`
"""
ID: str = Field("updateLanguagePackStrings", alias="@type")
localization_target: str
language_pack_id: str
strings: list[LanguagePackString]
@staticmethod
def read(q: dict) -> UpdateLanguagePackStrings:
return UpdateLanguagePackStrings.construct(**q)
class UpdateMessageContent(Update):
"""
The message content has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
:param new_content: New message content
:type new_content: :class:`MessageContent`
"""
ID: str = Field("updateMessageContent", alias="@type")
chat_id: int
message_id: int
new_content: MessageContent
@staticmethod
def read(q: dict) -> UpdateMessageContent:
return UpdateMessageContent.construct(**q)
class UpdateMessageContentOpened(Update):
"""
The message content was opened. Updates voice note messages to "listened", video note messages to "viewed" and starts the TTL timer for self-destructing messages
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
"""
ID: str = Field("updateMessageContentOpened", alias="@type")
chat_id: int
message_id: int
@staticmethod
def read(q: dict) -> UpdateMessageContentOpened:
return UpdateMessageContentOpened.construct(**q)
class UpdateMessageEdited(Update):
"""
A message was edited. Changes in the message content will come in a separate updateMessageContent
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
:param edit_date: Point in time (Unix timestamp) when the message was edited
:type edit_date: :class:`int`
:param reply_markup: New message reply markup; may be null, defaults to None
:type reply_markup: :class:`ReplyMarkup`, optional
"""
ID: str = Field("updateMessageEdited", alias="@type")
chat_id: int
message_id: int
edit_date: int
reply_markup: typing.Optional[ReplyMarkup] = None
@staticmethod
def read(q: dict) -> UpdateMessageEdited:
return UpdateMessageEdited.construct(**q)
class UpdateMessageInteractionInfo(Update):
"""
The information about interactions with a message has changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
:param interaction_info: New information about interactions with the message; may be null, defaults to None
:type interaction_info: :class:`MessageInteractionInfo`, optional
"""
ID: str = Field("updateMessageInteractionInfo", alias="@type")
chat_id: int
message_id: int
interaction_info: typing.Optional[MessageInteractionInfo] = None
@staticmethod
def read(q: dict) -> UpdateMessageInteractionInfo:
return UpdateMessageInteractionInfo.construct(**q)
class UpdateMessageIsPinned(Update):
"""
The message pinned state was changed
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: The message identifier
:type message_id: :class:`int`
:param is_pinned: True, if the message is pinned
:type is_pinned: :class:`bool`
"""
ID: str = Field("updateMessageIsPinned", alias="@type")
chat_id: int
message_id: int
is_pinned: bool
@staticmethod
def read(q: dict) -> UpdateMessageIsPinned:
return UpdateMessageIsPinned.construct(**q)
class UpdateMessageLiveLocationViewed(Update):
"""
A message with a live location was viewed. When the update is received, the application is supposed to update the live location
:param chat_id: Identifier of the chat with the live location message
:type chat_id: :class:`int`
:param message_id: Identifier of the message with live location
:type message_id: :class:`int`
"""
ID: str = Field("updateMessageLiveLocationViewed", alias="@type")
chat_id: int
message_id: int
@staticmethod
def read(q: dict) -> UpdateMessageLiveLocationViewed:
return UpdateMessageLiveLocationViewed.construct(**q)
class UpdateMessageMentionRead(Update):
"""
A message with an unread mention was read
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param message_id: Message identifier
:type message_id: :class:`int`
:param unread_mention_count: The new number of unread mention messages left in the chat
:type unread_mention_count: :class:`int`
"""
ID: str = Field("updateMessageMentionRead", alias="@type")
chat_id: int
message_id: int
unread_mention_count: int
@staticmethod
def read(q: dict) -> UpdateMessageMentionRead:
return UpdateMessageMentionRead.construct(**q)
class UpdateMessageSendAcknowledged(Update):
"""
A request to send a message has reached the Telegram server. This doesn't mean that the message will be sent successfully or even that the send message request will be processed. This update will be sent only if the option "use_quick_ack" is set to true. This update may be sent multiple times for the same message
:param chat_id: The chat identifier of the sent message
:type chat_id: :class:`int`
:param message_id: A temporary message identifier
:type message_id: :class:`int`
"""
ID: str = Field("updateMessageSendAcknowledged", alias="@type")
chat_id: int
message_id: int
@staticmethod
def read(q: dict) -> UpdateMessageSendAcknowledged:
return UpdateMessageSendAcknowledged.construct(**q)
class UpdateMessageSendFailed(Update):
"""
A message failed to send. Be aware that some messages being sent can be irrecoverably deleted, in which case updateDeleteMessages will be received instead of this update
:param message: The message that failed to send
:type message: :class:`Message`
:param old_message_id: The previous temporary message identifier
:type old_message_id: :class:`int`
:param error_code: An error code
:type error_code: :class:`int`
:param error_message: Error message
:type error_message: :class:`str`
"""
ID: str = Field("updateMessageSendFailed", alias="@type")
message: Message
old_message_id: int
error_code: int
error_message: str
@staticmethod
def read(q: dict) -> UpdateMessageSendFailed:
return UpdateMessageSendFailed.construct(**q)
class UpdateMessageSendSucceeded(Update):
"""
A message has been successfully sent
:param message: The sent message. Usually only the message identifier, date, and content are changed, but almost all other fields can also change
:type message: :class:`Message`
:param old_message_id: The previous temporary message identifier
:type old_message_id: :class:`int`
"""
ID: str = Field("updateMessageSendSucceeded", alias="@type")
message: Message
old_message_id: int
@staticmethod
def read(q: dict) -> UpdateMessageSendSucceeded:
return UpdateMessageSendSucceeded.construct(**q)
class UpdateNewCallSignalingData(Update):
"""
New call signaling data arrived
:param call_id: The call identifier
:type call_id: :class:`int`
:param data: The data
:type data: :class:`str`
"""
ID: str = Field("updateNewCallSignalingData", alias="@type")
call_id: int
data: str
@staticmethod
def read(q: dict) -> UpdateNewCallSignalingData:
return UpdateNewCallSignalingData.construct(**q)
class UpdateNewCallbackQuery(Update):
"""
A new incoming callback query; for bots only
:param id: Unique query identifier
:type id: :class:`int`
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param chat_id: Identifier of the chat where the query was sent
:type chat_id: :class:`int`
:param message_id: Identifier of the message, from which the query originated
:type message_id: :class:`int`
:param chat_instance: Identifier that uniquely corresponds to the chat to which the message was sent
:type chat_instance: :class:`int`
:param payload: Query payload
:type payload: :class:`CallbackQueryPayload`
"""
ID: str = Field("updateNewCallbackQuery", alias="@type")
id: int
sender_user_id: int
chat_id: int
message_id: int
chat_instance: int
payload: CallbackQueryPayload
@staticmethod
def read(q: dict) -> UpdateNewCallbackQuery:
return UpdateNewCallbackQuery.construct(**q)
class UpdateNewChat(Update):
"""
A new chat has been loaded/created. This update is guaranteed to come before the chat identifier is returned to the application. The chat field changes will be reported through separate updates
:param chat: The chat
:type chat: :class:`Chat`
"""
ID: str = Field("updateNewChat", alias="@type")
chat: Chat
@staticmethod
def read(q: dict) -> UpdateNewChat:
return UpdateNewChat.construct(**q)
class UpdateNewChatJoinRequest(Update):
"""
A user sent a join request to a chat; for bots only
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param request: Join request
:type request: :class:`ChatJoinRequest`
:param invite_link: The invite link, which was used to send join request; may be null, defaults to None
:type invite_link: :class:`ChatInviteLink`, optional
"""
ID: str = Field("updateNewChatJoinRequest", alias="@type")
chat_id: int
request: ChatJoinRequest
invite_link: typing.Optional[ChatInviteLink] = None
@staticmethod
def read(q: dict) -> UpdateNewChatJoinRequest:
return UpdateNewChatJoinRequest.construct(**q)
class UpdateNewChosenInlineResult(Update):
"""
The user has chosen a result of an inline query; for bots only
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param user_location: User location; may be null, defaults to None
:type user_location: :class:`Location`, optional
:param query: Text of the query
:type query: :class:`str`
:param result_id: Identifier of the chosen result
:type result_id: :class:`str`
:param inline_message_id: Identifier of the sent inline message, if known
:type inline_message_id: :class:`str`
"""
ID: str = Field("updateNewChosenInlineResult", alias="@type")
sender_user_id: int
user_location: typing.Optional[Location] = None
query: str
result_id: str
inline_message_id: str
@staticmethod
def read(q: dict) -> UpdateNewChosenInlineResult:
return UpdateNewChosenInlineResult.construct(**q)
class UpdateNewCustomEvent(Update):
"""
A new incoming event; for bots only
:param event: A JSON-serialized event
:type event: :class:`str`
"""
ID: str = Field("updateNewCustomEvent", alias="@type")
event: str
@staticmethod
def read(q: dict) -> UpdateNewCustomEvent:
return UpdateNewCustomEvent.construct(**q)
class UpdateNewCustomQuery(Update):
"""
A new incoming query; for bots only
:param id: The query identifier
:type id: :class:`int`
:param data: JSON-serialized query data
:type data: :class:`str`
:param timeout: Query timeout
:type timeout: :class:`int`
"""
ID: str = Field("updateNewCustomQuery", alias="@type")
id: int
data: str
timeout: int
@staticmethod
def read(q: dict) -> UpdateNewCustomQuery:
return UpdateNewCustomQuery.construct(**q)
class UpdateNewInlineCallbackQuery(Update):
"""
A new incoming callback query from a message sent via a bot; for bots only
:param id: Unique query identifier
:type id: :class:`int`
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param inline_message_id: Identifier of the inline message, from which the query originated
:type inline_message_id: :class:`str`
:param chat_instance: An identifier uniquely corresponding to the chat a message was sent to
:type chat_instance: :class:`int`
:param payload: Query payload
:type payload: :class:`CallbackQueryPayload`
"""
ID: str = Field("updateNewInlineCallbackQuery", alias="@type")
id: int
sender_user_id: int
inline_message_id: str
chat_instance: int
payload: CallbackQueryPayload
@staticmethod
def read(q: dict) -> UpdateNewInlineCallbackQuery:
return UpdateNewInlineCallbackQuery.construct(**q)
class UpdateNewInlineQuery(Update):
"""
A new incoming inline query; for bots only
:param id: Unique query identifier
:type id: :class:`int`
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param user_location: User location; may be null, defaults to None
:type user_location: :class:`Location`, optional
:param chat_type: The type of the chat, from which the query originated; may be null if unknown, defaults to None
:type chat_type: :class:`ChatType`, optional
:param query: Text of the query
:type query: :class:`str`
:param offset: Offset of the first entry to return
:type offset: :class:`str`
"""
ID: str = Field("updateNewInlineQuery", alias="@type")
id: int
sender_user_id: int
user_location: typing.Optional[Location] = None
chat_type: typing.Optional[ChatType] = None
query: str
offset: str
@staticmethod
def read(q: dict) -> UpdateNewInlineQuery:
return UpdateNewInlineQuery.construct(**q)
class UpdateNewMessage(Update):
"""
A new message was received; can also be an outgoing message
:param message: The new message
:type message: :class:`Message`
"""
ID: str = Field("updateNewMessage", alias="@type")
message: Message
@staticmethod
def read(q: dict) -> UpdateNewMessage:
return UpdateNewMessage.construct(**q)
class UpdateNewPreCheckoutQuery(Update):
"""
A new incoming pre-checkout query; for bots only. Contains full information about a checkout
:param id: Unique query identifier
:type id: :class:`int`
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param currency: Currency for the product price
:type currency: :class:`str`
:param total_amount: Total price for the product, in the smallest units of the currency
:type total_amount: :class:`int`
:param invoice_payload: Invoice payload
:type invoice_payload: :class:`str`
:param shipping_option_id: Identifier of a shipping option chosen by the user; may be empty if not applicable
:type shipping_option_id: :class:`str`
:param order_info: Information about the order; may be null, defaults to None
:type order_info: :class:`OrderInfo`, optional
"""
ID: str = Field("updateNewPreCheckoutQuery", alias="@type")
id: int
sender_user_id: int
currency: str
total_amount: int
invoice_payload: str
shipping_option_id: str
order_info: typing.Optional[OrderInfo] = None
@staticmethod
def read(q: dict) -> UpdateNewPreCheckoutQuery:
return UpdateNewPreCheckoutQuery.construct(**q)
class UpdateNewShippingQuery(Update):
"""
A new incoming shipping query; for bots only. Only for invoices with flexible price
:param id: Unique query identifier
:type id: :class:`int`
:param sender_user_id: Identifier of the user who sent the query
:type sender_user_id: :class:`int`
:param invoice_payload: Invoice payload
:type invoice_payload: :class:`str`
:param shipping_address: User shipping address
:type shipping_address: :class:`Address`
"""
ID: str = Field("updateNewShippingQuery", alias="@type")
id: int
sender_user_id: int
invoice_payload: str
shipping_address: Address
@staticmethod
def read(q: dict) -> UpdateNewShippingQuery:
return UpdateNewShippingQuery.construct(**q)
class UpdateNotification(Update):
"""
A notification was changed
:param notification_group_id: Unique notification group identifier
:type notification_group_id: :class:`int`
:param notification: Changed notification
:type notification: :class:`Notification`
"""
ID: str = Field("updateNotification", alias="@type")
notification_group_id: int
notification: Notification
@staticmethod
def read(q: dict) -> UpdateNotification:
return UpdateNotification.construct(**q)
class UpdateNotificationGroup(Update):
"""
A list of active notifications in a notification group has changed
:param notification_group_id: Unique notification group identifier
:type notification_group_id: :class:`int`
:param type_: New type of the notification group
:type type_: :class:`NotificationGroupType`
:param chat_id: Identifier of a chat to which all notifications in the group belong
:type chat_id: :class:`int`
:param notification_settings_chat_id: Identifier of the chat whose notification settings must be applied to the added notifications
:type notification_settings_chat_id: :class:`int`
:param is_silent: True, if the notifications must be shown without sound
:type is_silent: :class:`bool`
:param total_count: Total number of unread notifications in the group, can be bigger than number of active notifications
:type total_count: :class:`int`
:param added_notifications: List of added group notifications, sorted by notification ID
:type added_notifications: :class:`list[Notification]`
:param removed_notification_ids: Identifiers of removed group notifications, sorted by notification ID
:type removed_notification_ids: :class:`list[int]`
"""
ID: str = Field("updateNotificationGroup", alias="@type")
notification_group_id: int
type_: NotificationGroupType = Field(..., alias='type')
chat_id: int
notification_settings_chat_id: int
is_silent: bool
total_count: int
added_notifications: list[Notification]
removed_notification_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateNotificationGroup:
return UpdateNotificationGroup.construct(**q)
class UpdateOption(Update):
"""
An option changed its value
:param name: The option name
:type name: :class:`str`
:param value: The new option value
:type value: :class:`OptionValue`
"""
ID: str = Field("updateOption", alias="@type")
name: str
value: OptionValue
@staticmethod
def read(q: dict) -> UpdateOption:
return UpdateOption.construct(**q)
class UpdatePoll(Update):
"""
A poll was updated; for bots only
:param poll: New data about the poll
:type poll: :class:`Poll`
"""
ID: str = Field("updatePoll", alias="@type")
poll: Poll
@staticmethod
def read(q: dict) -> UpdatePoll:
return UpdatePoll.construct(**q)
class UpdatePollAnswer(Update):
"""
A user changed the answer to a poll; for bots only
:param poll_id: Unique poll identifier
:type poll_id: :class:`int`
:param user_id: The user, who changed the answer to the poll
:type user_id: :class:`int`
:param option_ids: 0-based identifiers of answer options, chosen by the user
:type option_ids: :class:`list[int]`
"""
ID: str = Field("updatePollAnswer", alias="@type")
poll_id: int
user_id: int
option_ids: list[int]
@staticmethod
def read(q: dict) -> UpdatePollAnswer:
return UpdatePollAnswer.construct(**q)
class UpdateRecentStickers(Update):
"""
The list of recently used stickers was updated
:param is_attached: True, if the list of stickers attached to photo or video files was updated, otherwise the list of sent stickers is updated
:type is_attached: :class:`bool`
:param sticker_ids: The new list of file identifiers of recently used stickers
:type sticker_ids: :class:`list[int]`
"""
ID: str = Field("updateRecentStickers", alias="@type")
is_attached: bool
sticker_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateRecentStickers:
return UpdateRecentStickers.construct(**q)
class UpdateSavedAnimations(Update):
"""
The list of saved animations was updated
:param animation_ids: The new list of file identifiers of saved animations
:type animation_ids: :class:`list[int]`
"""
ID: str = Field("updateSavedAnimations", alias="@type")
animation_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateSavedAnimations:
return UpdateSavedAnimations.construct(**q)
class UpdateScopeNotificationSettings(Update):
"""
Notification settings for some type of chats were updated
:param scope: Types of chats for which notification settings were updated
:type scope: :class:`NotificationSettingsScope`
:param notification_settings: The new notification settings
:type notification_settings: :class:`ScopeNotificationSettings`
"""
ID: str = Field("updateScopeNotificationSettings", alias="@type")
scope: NotificationSettingsScope
notification_settings: ScopeNotificationSettings
@staticmethod
def read(q: dict) -> UpdateScopeNotificationSettings:
return UpdateScopeNotificationSettings.construct(**q)
class UpdateSecretChat(Update):
"""
Some data of a secret chat has changed. This update is guaranteed to come before the secret chat identifier is returned to the application
:param secret_chat: New data about the secret chat
:type secret_chat: :class:`SecretChat`
"""
ID: str = Field("updateSecretChat", alias="@type")
secret_chat: SecretChat
@staticmethod
def read(q: dict) -> UpdateSecretChat:
return UpdateSecretChat.construct(**q)
class UpdateSelectedBackground(Update):
"""
The selected background has changed
:param for_dark_theme: True, if background for dark theme has changed
:type for_dark_theme: :class:`bool`
:param background: The new selected background; may be null, defaults to None
:type background: :class:`Background`, optional
"""
ID: str = Field("updateSelectedBackground", alias="@type")
for_dark_theme: bool
background: typing.Optional[Background] = None
@staticmethod
def read(q: dict) -> UpdateSelectedBackground:
return UpdateSelectedBackground.construct(**q)
class UpdateServiceNotification(Update):
"""
A service notification from the server was received. Upon receiving this the application must show a popup with the content of the notification
:param type_: Notification type. If type begins with "AUTH_KEY_DROP_", then two buttons "Cancel" and "Log out" must be shown under notification; if user presses the second, all local data must be destroyed using Destroy method
:type type_: :class:`str`
:param content: Notification content
:type content: :class:`MessageContent`
"""
ID: str = Field("updateServiceNotification", alias="@type")
type_: str = Field(..., alias='type')
content: MessageContent
@staticmethod
def read(q: dict) -> UpdateServiceNotification:
return UpdateServiceNotification.construct(**q)
class UpdateStickerSet(Update):
"""
A sticker set has changed
:param sticker_set: The sticker set
:type sticker_set: :class:`StickerSet`
"""
ID: str = Field("updateStickerSet", alias="@type")
sticker_set: StickerSet
@staticmethod
def read(q: dict) -> UpdateStickerSet:
return UpdateStickerSet.construct(**q)
class UpdateSuggestedActions(Update):
"""
The list of actions suggested to the user has changed
:param added_actions: Added suggested actions
:type added_actions: :class:`list[SuggestedAction]`
:param removed_actions: Removed suggested actions
:type removed_actions: :class:`list[SuggestedAction]`
"""
ID: str = Field("updateSuggestedActions", alias="@type")
added_actions: list[SuggestedAction]
removed_actions: list[SuggestedAction]
@staticmethod
def read(q: dict) -> UpdateSuggestedActions:
return UpdateSuggestedActions.construct(**q)
class UpdateSupergroup(Update):
"""
Some data of a supergroup or a channel has changed. This update is guaranteed to come before the supergroup identifier is returned to the application
:param supergroup: New data about the supergroup
:type supergroup: :class:`Supergroup`
"""
ID: str = Field("updateSupergroup", alias="@type")
supergroup: Supergroup
@staticmethod
def read(q: dict) -> UpdateSupergroup:
return UpdateSupergroup.construct(**q)
class UpdateSupergroupFullInfo(Update):
"""
Some data in supergroupFullInfo has been changed
:param supergroup_id: Identifier of the supergroup or channel
:type supergroup_id: :class:`int`
:param supergroup_full_info: New full information about the supergroup
:type supergroup_full_info: :class:`SupergroupFullInfo`
"""
ID: str = Field("updateSupergroupFullInfo", alias="@type")
supergroup_id: int
supergroup_full_info: SupergroupFullInfo
@staticmethod
def read(q: dict) -> UpdateSupergroupFullInfo:
return UpdateSupergroupFullInfo.construct(**q)
class UpdateTermsOfService(Update):
"""
New terms of service must be accepted by the user. If the terms of service are declined, then the deleteAccount method must be called with the reason "Decline ToS update"
:param terms_of_service_id: Identifier of the terms of service
:type terms_of_service_id: :class:`str`
:param terms_of_service: The new terms of service
:type terms_of_service: :class:`TermsOfService`
"""
ID: str = Field("updateTermsOfService", alias="@type")
terms_of_service_id: str
terms_of_service: TermsOfService
@staticmethod
def read(q: dict) -> UpdateTermsOfService:
return UpdateTermsOfService.construct(**q)
class UpdateTrendingStickerSets(Update):
"""
The list of trending sticker sets was updated or some of them were viewed
:param sticker_sets: The prefix of the list of trending sticker sets with the newest trending sticker sets
:type sticker_sets: :class:`StickerSets`
"""
ID: str = Field("updateTrendingStickerSets", alias="@type")
sticker_sets: StickerSets
@staticmethod
def read(q: dict) -> UpdateTrendingStickerSets:
return UpdateTrendingStickerSets.construct(**q)
class UpdateUnreadChatCount(Update):
"""
Number of unread chats, i.e. with unread messages or marked as unread, has changed. This update is sent only if the message database is used
:param chat_list: The chat list with changed number of unread messages
:type chat_list: :class:`ChatList`
:param total_count: Approximate total number of chats in the chat list
:type total_count: :class:`int`
:param unread_count: Total number of unread chats
:type unread_count: :class:`int`
:param unread_unmuted_count: Total number of unread unmuted chats
:type unread_unmuted_count: :class:`int`
:param marked_as_unread_count: Total number of chats marked as unread
:type marked_as_unread_count: :class:`int`
:param marked_as_unread_unmuted_count: Total number of unmuted chats marked as unread
:type marked_as_unread_unmuted_count: :class:`int`
"""
ID: str = Field("updateUnreadChatCount", alias="@type")
chat_list: ChatList
total_count: int
unread_count: int
unread_unmuted_count: int
marked_as_unread_count: int
marked_as_unread_unmuted_count: int
@staticmethod
def read(q: dict) -> UpdateUnreadChatCount:
return UpdateUnreadChatCount.construct(**q)
class UpdateUnreadMessageCount(Update):
"""
Number of unread messages in a chat list has changed. This update is sent only if the message database is used
:param chat_list: The chat list with changed number of unread messages
:type chat_list: :class:`ChatList`
:param unread_count: Total number of unread messages
:type unread_count: :class:`int`
:param unread_unmuted_count: Total number of unread messages in unmuted chats
:type unread_unmuted_count: :class:`int`
"""
ID: str = Field("updateUnreadMessageCount", alias="@type")
chat_list: ChatList
unread_count: int
unread_unmuted_count: int
@staticmethod
def read(q: dict) -> UpdateUnreadMessageCount:
return UpdateUnreadMessageCount.construct(**q)
class UpdateUser(Update):
"""
Some data of a user has changed. This update is guaranteed to come before the user identifier is returned to the application
:param user: New data about the user
:type user: :class:`User`
"""
ID: str = Field("updateUser", alias="@type")
user: User
@staticmethod
def read(q: dict) -> UpdateUser:
return UpdateUser.construct(**q)
class UpdateUserFullInfo(Update):
"""
Some data in userFullInfo has been changed
:param user_id: User identifier
:type user_id: :class:`int`
:param user_full_info: New full information about the user
:type user_full_info: :class:`UserFullInfo`
"""
ID: str = Field("updateUserFullInfo", alias="@type")
user_id: int
user_full_info: UserFullInfo
@staticmethod
def read(q: dict) -> UpdateUserFullInfo:
return UpdateUserFullInfo.construct(**q)
class UpdateUserPrivacySettingRules(Update):
"""
Some privacy setting rules have been changed
:param setting: The privacy setting
:type setting: :class:`UserPrivacySetting`
:param rules: New privacy rules
:type rules: :class:`UserPrivacySettingRules`
"""
ID: str = Field("updateUserPrivacySettingRules", alias="@type")
setting: UserPrivacySetting
rules: UserPrivacySettingRules
@staticmethod
def read(q: dict) -> UpdateUserPrivacySettingRules:
return UpdateUserPrivacySettingRules.construct(**q)
class UpdateUserStatus(Update):
"""
The user went online or offline
:param user_id: User identifier
:type user_id: :class:`int`
:param status: New status of the user
:type status: :class:`UserStatus`
"""
ID: str = Field("updateUserStatus", alias="@type")
user_id: int
status: UserStatus
@staticmethod
def read(q: dict) -> UpdateUserStatus:
return UpdateUserStatus.construct(**q)
class UpdateUsersNearby(Update):
"""
The list of users nearby has changed. The update is guaranteed to be sent only 60 seconds after a successful searchChatsNearby request
:param users_nearby: The new list of users nearby
:type users_nearby: :class:`list[ChatNearby]`
"""
ID: str = Field("updateUsersNearby", alias="@type")
users_nearby: list[ChatNearby]
@staticmethod
def read(q: dict) -> UpdateUsersNearby:
return UpdateUsersNearby.construct(**q)
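# Hypothetical dispatch helper, shown only as a sketch (the surrounding package
# may already provide its own routing; the introspection assumes pydantic v1's
# `__fields__` API):
#
#     UPDATE_BY_TYPE = {
#         cls.__fields__["ID"].default: cls for cls in Update.__subclasses__()
#     }
#
#     def parse_update(raw: dict) -> Update:
#         return UPDATE_BY_TYPE[raw["@type"]].read(raw)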
| 29.203626
| 318
| 0.692711
| 59,611
| 0.948963
| 0
| 0
| 10,520
| 0.167471
| 0
| 0
| 36,401
| 0.579477
|
c0507144735d0e0532afa021b9f51f1bb1e7c543
| 3,908
|
py
|
Python
|
lib/tests/test_integration.py
|
OneIdentity/safeguard-sessions-plugin-cyberark-vault
|
34f8c7a826b6b89c3c9a649b5395798263b4077f
|
[
"MIT"
] | null | null | null |
lib/tests/test_integration.py
|
OneIdentity/safeguard-sessions-plugin-cyberark-vault
|
34f8c7a826b6b89c3c9a649b5395798263b4077f
|
[
"MIT"
] | 3
|
2020-08-07T10:41:44.000Z
|
2021-01-27T08:56:57.000Z
|
lib/tests/test_integration.py
|
OneIdentity/safeguard-sessions-plugin-cyberark-vault
|
34f8c7a826b6b89c3c9a649b5395798263b4077f
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2019 One Identity
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import pytest
from textwrap import dedent
from ..plugin import Plugin
from safeguard.sessions.plugin_impl.test_utils.plugin import assert_plugin_hook_result
def test_cyberark_integration_getting_password(cy_config, cy_account, cy_asset, cy_account_password, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [cy_account_password]})
def test_cyberark_integration_getting_password_for_wrong_user(cy_config, cy_wrong_account, cy_asset, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_wrong_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": []})
def test_cyberark_integration_getting_private_key(cy_config, cy_account_with_key, cy_asset, cy_account_private_key, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_private_key_list(
**connection_parameters(server_uname=cy_account_with_key, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"private_keys": [("ssh-rsa", cy_account_private_key)]})
def test_cyberark_integration_getting_private_key_for_wrong_account(cy_config, cy_wrong_account, cy_asset, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_private_key_list(
**connection_parameters(server_uname=cy_wrong_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"private_keys": []})
def test_v10_user_logon(cy_config, cy_account, cy_asset, cy_account_password, connection_parameters):
config = cy_config + "\nauthentication_method=cyberark"
plugin = Plugin(config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [cy_account_password]})
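# Note: the config override above simply appends one more "key=value" line to the
# fixture-provided INI text (cy_config), so authentication_method=cyberark ends up
# in the same [cyberark] section illustrated by the dedent() block in the next test.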
@pytest.mark.skip(reason="I don't know how this was tested before, cannot see settings on our CArk")
def test_v10_ldap_logon(
cy_address,
cy_ldap_username,
cy_ldap_password,
cy_account,
cy_asset,
cy_account_password,
connection_parameters
):
config = dedent(
"""
[cyberark]
address={}
use_credential=explicit
username={}
password={}
authentication_method=ldap
""".format(
cy_address, cy_ldap_username, cy_ldap_password
)
)
plugin = Plugin(config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [cy_account_password]})
| 36.185185
| 139
| 0.753327
| 0
| 0
| 0
| 0
| 794
| 0.203173
| 0
| 0
| 1,422
| 0.363869
|
c050754add3acb4ba8ba228383257d1e46d1352d
| 2,997
|
py
|
Python
|
forum_modules/akismet/startup.py
|
Stackato-Apps/osqa
|
728bb43ae913e33769c52f40cadb26721faaf2b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2017-07-14T09:58:07.000Z
|
2017-07-14T09:58:07.000Z
|
forum_modules/akismet/startup.py
|
Stackato-Apps/osqa
|
728bb43ae913e33769c52f40cadb26721faaf2b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
forum_modules/akismet/startup.py
|
Stackato-Apps/osqa
|
728bb43ae913e33769c52f40cadb26721faaf2b2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
import json
from django.utils.translation import ugettext as _
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.utils.encoding import smart_str
from django.shortcuts import render_to_response
from forum.modules import decorate
from forum import views
from lib.akismet import Akismet
from forum.settings import APP_URL, OSQA_VERSION
from settings import WORDPRESS_API_KEY, REP_FOR_NO_SPAM_CHECK
from forum.models.user import User
from forum.forms.general import SimpleCaptchaForm
import settings
def can_bypass_spam_check(user):
return user.is_authenticated() and (user.is_superuser or user.is_staff or cmp(int(user.reputation), REP_FOR_NO_SPAM_CHECK) > 0)
def check_spam(param, comment_type):
def wrapper(origin, request, *args, **kwargs):
if request.POST and request.POST.get(param, None) and WORDPRESS_API_KEY and (not can_bypass_spam_check(request.user)):
comment = smart_str(request.POST[param])
data = {
"user_ip":request.META["REMOTE_ADDR"],
"user_agent":request.environ['HTTP_USER_AGENT'],
"comment_type": comment_type,
"comment":comment
}
if request.user.is_authenticated():
data.update({
"comment_author":smart_str(request.user.username),
"comment_author_email":request.user.email,
"comment_author_url":request.user.website,
})
api = Akismet(settings.WORDPRESS_API_KEY, APP_URL, "OSQA/%s" % OSQA_VERSION)
if api.comment_check(comment, data):
post_data = request.POST
captcha_form = SimpleCaptchaForm(request.POST)
if request.is_ajax():
response = {
'success': False,
'error_message': _("Sorry, but akismet thinks your %s is spam.") % comment_type
}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
captcha_checked = False
try:
if captcha_form.is_valid() and 'recaptcha' in captcha_form.fields.keys():
captcha_checked = True
except Exception:
pass
if not captcha_checked:
return render_to_response('modules/akismet/foundspam.html', {
'action_name': comment_type,
'post_data' : post_data,
'captcha_form' : captcha_form,
}, RequestContext(request))
return origin(request, *args, **kwargs)
return wrapper
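# Wire the spam check into OSQA's write views: each decorate() call below wraps the
# original view so the named POST field ('text' or 'comment') is screened by Akismet
# before the view runs.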
decorate(views.writers.ask)(check_spam('text', _('question')))
decorate(views.writers.answer)(check_spam('text', _('answer')))
decorate(views.commands.comment)(check_spam('comment', _('comment')))
| 38.423077
| 129
| 0.610944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.118785
|
c051e45b01f5963f9aee4c9d0f6e2146b9de7aad
| 7,077
|
py
|
Python
|
prototype/python/element_translator.py
|
doanminhdang/YAML_MATH
|
4a95ae26ccd36de9a2c148f4ac1246f3cf0372f8
|
[
"MIT"
] | 1
|
2019-06-29T16:54:59.000Z
|
2019-06-29T16:54:59.000Z
|
prototype/python/element_translator.py
|
doanminhdang/YAML_MATH
|
4a95ae26ccd36de9a2c148f4ac1246f3cf0372f8
|
[
"MIT"
] | null | null | null |
prototype/python/element_translator.py
|
doanminhdang/YAML_MATH
|
4a95ae26ccd36de9a2c148f4ac1246f3cf0372f8
|
[
"MIT"
] | null | null | null |
"""
Translate an element, which is described by the YAML method file
and a descriptor file, into a target function.
Procedure:
1. When analyzing a YAML file, parse the call to the method-element, to get:
- list of inputs,
- list of outputs
2. Parse the YAML of that element to learn the names of its inputs and outputs,
then create inputs and outputs with those names; their values are the
translated names (strings assigned by the name allocator before the methods
are translated) and will be accessed in the descriptor of that element.
3. Process the descriptor:
- If preprocess part is available: execute the preprocess part as Python 3 code.
- Treat the code part as text (a string) and parse it: wherever the pattern
<var_name> appears, replace it with the current value of that variable in
Python memory (within the scope of processing that specific descriptor).
The text produced from the code part is named code.
- If the postprocess part is available: execute it as Python 3 code. By
requirement, at the end of the postprocess part there must be a variable
named `code`; its value is written into the output string.
"""
import re
from . import descriptor_parser
from . import utils
from .shared_parameters import *
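# Illustrative note (the concrete YAML/descriptor layout is assumed here, not
# taken from the repository's data files): a descriptor provides `preprocess`,
# `code` and `postprocess` parts, and its `code` template references variables
# as <var_name>. For a template such as "<output> := <a> + <b>", with a and b
# bound to the translated names 'A_1' and 'A_2' and the output bound to
# 'Alpha', the intended rendering described in the module docstring is
#     Alpha := A_1 + A_2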
# def descriptor_file_parse(descriptor_file, method_file):
# descriptor = descriptor_file_read(descriptor_file)
# yaml_method = yaml_method_file_read(method_file)
# preprocess_parse(descriptor_file)
def yaml_single_method_file_read(yaml_method_file):
"""
Read a method file which contains only one block
"""
yaml_block = utils.yaml_file_read(yaml_method_file)
# Analyze its commands
return
def translate_command_element(odict_command, element_file, descriptor_file):
descriptor = descriptor_parser.descriptor_file_read(descriptor_file)
preprocess_string = descriptor['preprocess']
code_string = descriptor['code']
postprocess_string = descriptor['postprocess']
yaml_element = utils.yaml_file_read(element_file)
list_command_keys = [key for key in odict_command.keys()]
first_key = list_command_keys[0]
input_names = odict_command[first_key]
output_name = utils.get_var_name_from_bank(1)
list_element_keys = [key for key in yaml_element.keys()]
element_name = list_element_keys[0]
element_inputs = yaml_element[element_name]['inputs']
element_output = yaml_element[element_name]['outputs']
if not element_name.startswith(first_key): # overloading: add__float for add
raise ValueError('Element does not match command.')
else:
real_inputs = analyze_inputs(input_names, element_inputs)
real_output = analyze_outputs(output_name, element_output)
translated_code = translate_single_code(real_inputs, real_output,\
preprocess_string, code_string, postprocess_string)
return translated_code
def analyze_inputs(input_names, element_inputs):
"""
Get decoded names from the input_names (list) and the template
element_inputs (odict).
The output is a dict, with keys from element_inputs and values are picked
with corresponding order from input_names.
    If element_inputs contains both 'name' and 'array_name' items, the
    'array_name' item must come last. When 'array_name' is found in
    element_inputs, this function automatically assigns the remaining
    input names to an array.
"""
real_inputs = {}
index_input_names = 0
for item in element_inputs:
# item == OrderedDict([('array_name', 'input_'), ('length', ''), ('type', 'float')])
if 'name' in item:
real_inputs.update({item['name']: input_names[index_input_names]})
index_input_names += 1
elif 'array_name' in item:
names_left = input_names[index_input_names:]
array_length = len(names_left)
real_inputs.update({item['array_name']: names_left})
# for k in range(array_length):
# real_inputs.update({item['array_name'] + '[' + str(k) + ']': names_left[k]})
return real_inputs
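# Example traced from the sample inputs quoted in the comments of this module
# (the names themselves are illustrative):
#   analyze_inputs(['A_1', 'A_2', 'A_3'],
#                  [OrderedDict([('array_name', 'input_'), ('length', ''), ('type', 'float')])])
#   == {'input_': ['A_1', 'A_2', 'A_3']}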
def analyze_outputs(output_name, element_output):
output_var = element_output[0]['name']
output_dict = {output_var: output_name[0]}
return output_dict
def parse_code(code_string):
"""
    Parse the multi-line string which contains the code, picking out the
    variables enclosed in <>.
    Output: a list of segments, each a dict with key `text` or `var`,
    whose value is the literal text or the variable name, respectively.
"""
code = []
var_pattern = r'\<[\w\[\]]+\>'
rolling_code = code_string
while re.search(var_pattern, rolling_code):
start_index = re.search(var_pattern, rolling_code).start()
var_group = re.search(var_pattern, rolling_code).group()
var_name = var_group.strip('<>')
if start_index > 0:
text_before = rolling_code[0:start_index]
code.append({'text': text_before})
code.append({'var': var_name})
rolling_code = rolling_code[start_index+len(var_group):]
return code
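# Example (hypothetical template string), following the logic above:
#   parse_code('<output> := <a> + <b>')
#   == [{'var': 'output'}, {'text': ' := '}, {'var': 'a'}, {'text': ' + '}, {'var': 'b'}]
# Note that any literal text after the last <placeholder> is dropped, and a
# string containing no placeholders yields an empty list.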
def translate_single_code(input_dict, output_dict, preprocess_string,\
code_string, postprocess_string):
"""
input_dict == {'input_': ['A_1', 'A_2', 'A_3']}
output_dict == {'output': 'Alpha'}
parsed_code == [{'var': 'output'}, {'text': ' := '}, {'var': 'command_text'}]
"""
_code_series = parse_code(code_string)
print('preprocess:')
print(preprocess_string)
print('code:')
print(code_string)
print('postprocess:')
print(postprocess_string)
for _key in input_dict:
if isinstance(input_dict[_key], list):
# it is an array
_assign_code = _key + '=' + '['
for _item in input_dict[_key]:
_assign_code += '\'' + _item + '\','
_assign_code = _assign_code[:-1]+']' # remove the last comma
else:
_assign_code = _key + '=' + '\'' + input_dict[_key] + '\''
exec(_assign_code)
for _key in output_dict:
_assign_code = _key + '=' + '\'' + output_dict[_key] + '\''
exec(_assign_code)
exec(preprocess_string)
# 1st round: substitute variable names in code string
_1st_processed_code = ''
for _chunk in _code_series:
if 'text' in _chunk:
_1st_processed_code += _chunk['text']
if 'var' in _chunk:
_1st_processed_code += eval(_chunk['var'])
#2nd round: replace variable names left, which might come from preprocess,
# like: input_[0]
_parsed_2nd_code = parse_code(_1st_processed_code)
code = ''
for _chunk in _parsed_2nd_code:
if 'text' in _chunk:
code += _chunk['text']
if 'var' in _chunk:
code += eval(_chunk['var'])
# Preset output code, in case postprocess part is empty
exec(output_code_descriptor + ' = code')
# BUG: if output_code_descriptor is 'code', there is a Python bug that
# variable code is not updated after the next exec
exec(postprocess_string)
final_processed_code = eval(output_code_descriptor)
return final_processed_code
| 39.758427
| 94
| 0.687014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,164
| 0.447082
|
c0549485e176a6b48bb54cda44e0d335364d8ccb
| 16,351
|
py
|
Python
|
build_feature_vectors_32.py
|
weberdc/find_hccs
|
43fcb151901f48765ea8e4ccf0b82dbb726762a3
|
[
"Apache-2.0"
] | 7
|
2020-10-23T20:41:30.000Z
|
2021-11-20T14:00:25.000Z
|
build_feature_vectors_32.py
|
weberdc/find_hccs
|
43fcb151901f48765ea8e4ccf0b82dbb726762a3
|
[
"Apache-2.0"
] | 5
|
2020-11-25T00:29:43.000Z
|
2021-11-01T02:15:29.000Z
|
build_feature_vectors_32.py
|
weberdc/find_hccs
|
43fcb151901f48765ea8e4ccf0b82dbb726762a3
|
[
"Apache-2.0"
] | 2
|
2021-05-31T06:51:08.000Z
|
2022-02-09T13:55:18.000Z
|
#!/usr/bin/env python3
import csv
import gzip
import json
import networkx as nx
import sys
import time
import utils
from argparse import ArgumentParser
from calculate_activity_network import embedded_extended_tweet_url, root_of_conversation  # NB: both names are shadowed by the local definitions further down
from collections import defaultdict
from datetime import datetime
from utils import eprint, expanded_urls_from, extract_text, flatten, lowered_hashtags_from, mentioned_ids_from  # , timestamp_2_epoch_seconds
# Builds feature vectors for HCC members and their groupings as input to the
# classifiers for validation
#
# This version extracts 32 features
#
# Renamed from extract_feature_vectors_for_hcc_classifier.py
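#
# Typical invocation (file names are illustrative, not taken from the repo):
#   ./build_feature_vectors_32.py -t tweets.json.gz -i hcc_member_ids.csv -l hcc > hcc_features.csv
# The feature vectors are written to stdout as CSV, one row per listed user,
# with the supplied label in the first column.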
class Options():
def __init__(self):
self._init_parser()
def _init_parser(self):
usage = 'extract_feature_vectors_for_hcc_classifier.py -t <tweets.json> -i <ids.csv> -l <label>'
self.parser = ArgumentParser(usage=usage,conflict_handler='resolve')
self.parser.add_argument(
'-t', '--tweets',
required=True,
dest='tweets_file',
help='File containing all the tweets'
)
self.parser.add_argument(
'-i', '--ids-file',
required=True,
dest='ids_file',
help='The list of IDs to build feature vectors for.'
)
self.parser.add_argument(
'-l', '--label',
required=True,
dest='label',
help='The label to apply to each entry in the data generated (first column).'
)
self.parser.add_argument(
'-v', '--verbose',
action='store_true',
default=False,
dest='verbose',
help='Turn on verbose logging (default: False)'
)
def parse(self, args=None):
return self.parser.parse_args(args)
TWITTER_TS_FORMAT = '%a %b %d %H:%M:%S +0000 %Y' # Tue Apr 26 08:57:55 +0000 2011
def parse_ts(ts_str):
time_struct = time.strptime(ts_str, TWITTER_TS_FORMAT)
return datetime.fromtimestamp(time.mktime(time_struct))
def count(fltr): return len(list(fltr))
def root_of_conversation(tweet_in_conversation, tweet_map):
"""Finds the root of the conversation that the provided tweet is in"""
root_id = tweet_in_conversation
# go until we reply outside of the corpus, or the current tweet isn't a reply
while root_id in tweet_map and 'in_reply_to_status_id_str' in tweet_map[root_id] and tweet_map[root_id]['in_reply_to_status_id_str']:
root_id = tweet_map[root_id]['in_reply_to_status_id_str']
return root_id
def embedded_extended_tweet_url(tweet_id, url):
# extended tweets, because their text field is not long enough for the
# content, they include an embedded url pointing to the full tweet
# Of course, this isn't the sort of URL we're interested in, so we can
# test for it so we can strip it out. This method identifies it.
return url == 'https://twitter.com/i/web/status/%s' % tweet_id
USER_FEATURES = [
'U_tweet_count',
'U_retweet_count',
'U_reply_count',
'U_tweet_rate',
'U_mentioned_ids', # unique IDs
'U_mention_count', # every mention
'U_unique_hts', # unique hashtags
'U_ht_count', # every hashtag
    'U_unique_urls', # unique URLs
    'U_url_count', # every URL
'U_default_img',
'U_desc_len',
'U_url'
]
DEFAULT_PROF_IMG_URL = 'http://abs.twimg.com/sticky/default_profile_images/default_profile_normal.png'
def build_user_feature_vector(u_id, activity, collection_period_mins):
profile = activity[0]['user']
return {
'U_tweet_count' : len(activity),
'U_retweet_count' : count(filter(lambda t: 'retweeted_status' in t and t['retweeted_status'], activity)),
'U_reply_count' : count(filter(lambda t: t['in_reply_to_status_id_str'], activity)),
'U_tweet_rate' : len(activity) / collection_period_mins,
'U_mentioned_ids' : len(set(flatten(map(mentioned_ids_from, activity)))), # unique IDs
'U_mention_count' : len(list(flatten(map(mentioned_ids_from, activity)))), # every mention
'U_unique_hts' : len(set(flatten(map(lowered_hashtags_from, activity)))), # unique hashtags
'U_ht_count' : len(list(flatten(map(lowered_hashtags_from, activity)))), # every hashtag
        'U_unique_urls' : len(set(flatten(map(expanded_urls_from, activity)))), # unique URLs
        'U_url_count' : len(list(flatten(map(expanded_urls_from, activity)))), # every URL
'U_default_img' : 1 if profile['profile_image_url'] == DEFAULT_PROF_IMG_URL else 0,
'U_desc_len' : len(profile['description'] if profile['description'] else ''),
'U_url' : len(profile['url'] if profile['url'] else ''),
}
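# Note: U_tweet_rate above is in tweets per minute; collection_period_mins
# (computed in __main__ from the earliest and latest tweet timestamps seen in
# the corpus) is the observation window in minutes.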
COMMUNITY_FEATURES = [
'C_tweet_count',
'C_node_count',
'C_edge_count',
'C_user_count',
'C_author_count',
'C_hashtag_count',
'C_url_count',
'C_repost_count',
'C_quote_count',
'C_mention_count',
'C_reply_count',
'C_use_ht_count',
'C_use_url_count',
'C_in_conv_count',
'C_in/ext_repost',
'C_in/ext_mention',
'C_in/ext_reply',
]
def build_community_feature_vector(community, g):
def count_nodes_if(cond):
return len([n for n, d in g.nodes(data=True) if cond(n, d)])
def count_edges_if(cond):
return len([k for u, v, k, d in g.edges(data=True,keys=True) if cond(u, v, k, d)]) # d['interaction'] == t]
# return len(['x' for u, v, d in g.edges(data=True) if cond(u, v, d)])
int_users = [n for n, d in g.nodes(data=True) if d['is_author']]
ext_users = [n for n, d in g.nodes(data=True) if d['n_type'] == 'USER' and not d['is_author']]
repost_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPOST')
reply_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPLY')
mention_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'MENTION')
return {
'C_tweet_count' : g.graph['post_count'],
'C_node_count' : len(g),
'C_edge_count' : len(g.edges()),
'C_user_count' : count_nodes_if(lambda n, d: d['n_type'] == 'USER'),
'C_author_count' : count_nodes_if(lambda n, d: d['n_type'] == 'USER' and d['is_author']),
'C_hashtag_count' : count_nodes_if(lambda n, d: d['n_type'] == 'HASHTAG'),
'C_url_count' : count_nodes_if(lambda n, d: d['n_type'] == 'URL'),
'C_repost_count' : repost_count,
'C_quote_count' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'QUOTE'),
'C_mention_count' : mention_count,
'C_reply_count' : reply_count,
'C_use_ht_count' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'HASHTAG'),
'C_use_url_count' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'URL'),
'C_in_conv_count' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'IN_CONVERSATION'),
'C_in/ext_repost' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPOST' and v in int_users) / repost_count if repost_count else 0,
'C_in/ext_mention' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'MENTION' and v in int_users) / mention_count if mention_count else 0,
'C_in/ext_reply' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPLY' and v in int_users) / reply_count if reply_count else 0
}
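# The three 'C_in/ext_*' ratios above give the fraction of reposts, mentions
# and replies whose target is another account inside the community (a node
# flagged is_author); they default to 0 when the community has no edges of
# that interaction type.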
def mk_feature_str(keys, feature_map):
return ','.join([str(feature_map[k]) for k in keys])
def build_activity_graph(tweets, t_0): # tweets is a tweet map { tweet_id : tweet }
first_tweet_ts_str = utils.ts_to_str(t_0, fmt=utils.TWITTER_TS_FORMAT) # epoch_seconds_2_timestamp_str(t_0)
first_tweet_ts = utils.epoch_seconds_2_ts(t_0) #first_tweet_ts_str) # parse_twitter_ts(first_tweet_ts_str)
g = nx.MultiDiGraph(post_count=len(tweets))
def add_node(g, n_id, n_type='USER', is_author=False):
if n_id not in g:
g.add_node(n_id, n_type=n_type, label=n_id, is_author=is_author)
elif is_author:
# g.nodes[n_id]['n_type'] = n_type
g.nodes[n_id]['is_author'] = is_author
def node_type_for(interaction):
if interaction == 'HASHTAG' or interaction == 'URL':
return interaction
else:
return 'USER'
def add_edge(g, from_id, to_id, tweet_id, ts_str, int_type, **kwargs):
add_node(g, from_id, 'USER', True)
# g.nodes[from_id]['is_author'] = True
add_node(g, to_id, n_type=node_type_for(int_type))
t = utils.extract_ts_s(ts_str) - t_0 # timestamp_2_epoch_seconds(utils.extract_ts_s(ts_str)) - t_0
attrs = {
'time_t' : t,
'tweet_id' : tweet_id,
'interaction' : int_type
}
key = '%s %s %s in %s' % (from_id, int_type, to_id, tweet_id)
g.add_edge(from_id, to_id, key=key, **{**attrs, **kwargs})
# Build networks
    # edge types: REPOST, MENTION, REPLY, QUOTE, URL, HASHTAG, IN_CONVERSATION
observed_user_ids = set()
for tweet_id in tweets:
tweet = tweets[tweet_id]
hashtags = lowered_hashtags_from(tweet)
urls = expanded_urls_from(tweet)
mentions = mentioned_ids_from(tweet)
tweet_text = extract_text(tweet)
tweet_ts = tweet['created_at']
tweet_id = tweet['id_str']
tweeter_id = tweet['user']['id_str']
observed_user_ids.add(tweeter_id)
for ht in hashtags:
add_edge(g, tweeter_id, ht, tweet_id, tweet_ts, 'HASHTAG')
for url in urls:
if not embedded_extended_tweet_url(tweet_id, url): # extended tweets include a URL to their extended form
add_edge(g, tweeter_id, url, tweet_id, tweet_ts, 'URL')
for mentioned_id in mentions:
observed_user_ids.add(mentioned_id)
add_edge(g, tweeter_id, mentioned_id, tweet_id, tweet_ts, 'MENTION')
if 'retweeted_status' in tweet:
retweeter = tweeter_id
retweetee = tweet['retweeted_status']['user']['id_str']
observed_user_ids.add(retweetee)
add_edge(
g, retweeter, retweetee, tweet_id, tweet_ts, 'REPOST',
original_tweet_id=tweet['retweeted_status']['id_str'],
original_tweet_ts=tweet['retweeted_status']['created_at'],
posting_delay_sec=(
utils.extract_ts_s(tweet['retweeted_status']['created_at']) -
utils.extract_ts_s(tweet_ts)
)#.total_seconds()
)
elif 'quoted_status' in tweet and 'retweeted_status' not in tweet:
quoter = tweeter_id
quotee = tweet['quoted_status']['user']['id_str']
observed_user_ids.add(quotee)
add_edge(
g, quoter, quotee, tweet_id, tweet_ts, 'QUOTE',
original_tweet_id=tweet['quoted_status']['id_str'],
original_tweet_ts=tweet['quoted_status']['created_at'],
posting_delay_sec=(
utils.extract_ts_s(tweet['quoted_status']['created_at']) -
utils.extract_ts_s(tweet_ts)
)#.total_seconds()
)
elif 'in_reply_to_status_id_str' in tweet and tweet['in_reply_to_status_id_str'] in tweets:
# only consider replies that appear in the corpus
# basic reply info
replier = tweeter_id
replied_to = tweet['in_reply_to_user_id_str']
observed_user_ids.add(replied_to)
replied_to_status = tweets[tweet['in_reply_to_status_id_str']]
replied_to_status_ts = replied_to_status['created_at']
posting_delay_sec = (utils.extract_ts_s(replied_to_status_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
add_edge(
g, replier, replied_to, tweet_id, tweet_ts, 'REPLY',
original_tweet_id=tweet['in_reply_to_status_id_str'],
original_tweet_ts=replied_to_status_ts,
posting_delay_sec=posting_delay_sec
)
# in conversation
if tweet['in_reply_to_status_id_str'] in tweets:
# follow the reply chain as far as we can
conversation_root = root_of_conversation(tweet['in_reply_to_status_id_str'], tweets)
# conversation_root MAY NOT be in the corpus - it's still a link though
conv_root_ts = first_tweet_ts_str
posting_delay_sec = (utils.ts_2_epoch_seconds(first_tweet_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
if conversation_root in tweets:
observed_user_ids.add(tweets[conversation_root]['user']['id_str'])
conv_root_ts = tweets[conversation_root]['created_at']
posting_delay_sec = (utils.extract_ts_s(conv_root_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
add_edge(
g, replier, conversation_root, tweet_id, tweet_ts, 'IN_CONVERSATION',
original_tweet_id=conversation_root,
original_tweet_ts=conv_root_ts,
posting_delay_sec=posting_delay_sec
)
return g
DEBUG=False
def log(msg):
if DEBUG: eprint(msg)
if __name__ == '__main__':
options = Options()
opts = options.parse(sys.argv[1:])
DEBUG=opts.verbose
users = {}
communities = defaultdict(lambda: [], {})
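    # The IDs file is read as CSV with a header row; the code below relies on
    # at least the columns node_id, community_id and proportional_degree
    # (column names inferred from the lookups further down in this block).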
with open(opts.ids_file, 'r', encoding='utf-8') as f:
csv_reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in csv_reader:
r = {}
for key in row: # range(len(row)):
r[key] = row[key]
users[r['node_id']] = r
communities[r['community_id']].append(r['node_id'])
# users[r[0]] = r
tweets = dict([(uid, []) for uid in users.keys()])
earliest_ts = sys.maxsize
latest_ts = 0
# with open(opts.tweets_file, 'r', encoding='utf-8') as f:
f = gzip.open(opts.tweets_file, 'rt') if opts.tweets_file[-1] in 'zZ' else open(opts.tweets_file, 'r', encoding='utf-8')
for l in f:
tweet = json.loads(l.strip())
tweet['ts'] = utils.extract_ts_s(tweet['created_at']) # timestamp_2_epoch_seconds(parse_ts(tweet['created_at']))
if tweet['ts'] < earliest_ts: earliest_ts = tweet['ts']
if tweet['ts'] > latest_ts: latest_ts = tweet['ts']
user_id = tweet['user']['id_str']
if user_id in users.keys():
# tweet['ts'] = timestamp_2_epoch_seconds(parse_ts(tweet['created_at']))
tweets[user_id].append(tweet)
f.close()
collection_period_mins = (latest_ts - earliest_ts) / 60
user_feature_vectors = {}
for user_id in tweets:
tweets[user_id].sort(key=lambda t: t['ts'])
user_feature_vectors[user_id] = build_user_feature_vector(user_id, tweets[user_id], collection_period_mins)
community_feature_vectors = {}
for community_id in communities:
community_tweets = {}
community = communities[community_id]
for user_id in community:
for t in tweets[user_id]:
community_tweets[t['id_str']] = t
# community_tweets += tweets[user_id]
# community_tweets.sort(key=lambda t: t['ts'])
# build activity graph from tweets
g = build_activity_graph(community_tweets, earliest_ts)
# build feature vector from activity graph
community_feature_vectors[community_id] = build_community_feature_vector(community, g)
header = ','.join(map(str, ['Label'] + USER_FEATURES + ['U_prop_hcc_degree', 'community_id'] + COMMUNITY_FEATURES))
print(header)
for user_id in tweets:
user_vector = user_feature_vectors[user_id]
hcc_prop_degree = users[user_id]['proportional_degree']
community_id = users[user_id]['community_id']
community_vector = community_feature_vectors[community_id]
print(','.join([
opts.label,
mk_feature_str(USER_FEATURES, user_vector),
hcc_prop_degree,
community_id,
mk_feature_str(COMMUNITY_FEATURES, community_vector)
]))
# print('%s: %s %s' % (user_id, str(user_feature_vectors[user_id]), str()))
| 43.836461
| 153
| 0.633906
| 1,176
| 0.071922
| 0
| 0
| 0
| 0
| 0
| 0
| 5,003
| 0.305975
|