| column | type · range |
|---|---|
| blob_id | string · length 40 |
| directory_id | string · length 40 |
| path | string · length 3–616 |
| content_id | string · length 40 |
| detected_licenses | list · length 0–112 |
| license_type | string · 2 classes |
| repo_name | string · length 5–115 |
| snapshot_id | string · length 40 |
| revision_id | string · length 40 |
| branch_name | string · 777 classes |
| visit_date | timestamp[us] · 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] · 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] · 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 · 4.92k – 681M · nullable |
| star_events_count | int64 · 0 – 209k |
| fork_events_count | int64 · 0 – 110k |
| gha_license_id | string · 22 classes |
| gha_event_created_at | timestamp[us] · 2012-06-04 01:52:49 – 2023-09-14 21:59:50 · nullable |
| gha_created_at | timestamp[us] · 2008-05-22 07:58:19 – 2023-08-21 12:35:19 · nullable |
| gha_language | string · 149 classes |
| src_encoding | string · 26 classes |
| language | string · 1 class |
| is_vendor | bool · 2 classes |
| is_generated | bool · 2 classes |
| length_bytes | int64 · 3 – 10.2M |
| extension | string · 188 classes |
| content | string · length 3 – 10.2M |
| authors | list · length 1 |
| author_id | string · length 1–132 |
9e2d27aa8e45e1d0028a1e9ed43e9f10c484a899
|
8d1d1e7677e1a18c00fb295971211d4e29d10896
|
/vocal_synthesis/experiments/16mar_minimalist2.py
|
8a0371d7ae056b62551b05945627504c8a9a062d
|
[] |
no_license
|
christopher-beckham/ift6266h16
|
8296d1529f6ce3e209af371283f816a4c6d63ea9
|
f141fb0a320c20c2c7b43b46f06b1c68cde183f0
|
refs/heads/master
| 2021-01-10T13:38:40.733180
| 2016-04-17T02:22:52
| 2016-04-17T02:22:52
| 49,399,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import numpy as np
from scipy.io import wavfile
import os
import sys
#os.chdir("..")
sys.path.append( os.pardir )
#sys.stderr.write("current working directory: %s\n" % os.getcwd())
import cPickle as pickle
from lasagne.updates import *
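# the star import supplies the Lasagne update rules (e.g. rmsprop) referenced below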
import rnn_experiment as experiment
if __name__ == "__main__":
# e.g. 1000_60sec.pkl
in_pkl = sys.argv[1]
out_pkl = sys.argv[2]
with open(in_pkl) as f:
dat = pickle.load(f)
X_train, X_valid, X_test = dat[0]
sys.stderr.write("X_train shape = %s\n" % str(X_train.shape))
sys.stderr.write("X_valid shape = %s\n" % str(X_valid.shape))
sys.stderr.write("X_test shape = %s\n" % str(X_test.shape))
args = dict()
args["seed"] = 0
args["batch_size"] = 16
args["learning_rate"] = 0.01
args["momentum"] = 0.9
args["num_epochs"] = 2000
args["X_train"] = X_train
args["X_valid"] = X_valid
args["X_test"] = X_test
args["update_method"] = rmsprop
args["out_pkl"] = out_pkl
args["config"] = "../configurations/19feb_testing_d_minimalist2.py"
experiment.train(args)
|
[
"chrispy645@gmail.com"
] |
chrispy645@gmail.com
|
a606c217a63ac78b1e6b4834db6bae76d44eb286
|
6d24a0820a2e1227e8caff083a8fef4f6f207c6f
|
/django_test4/manage.py
|
919acccc182da8e574663362c470487af704fde5
|
[] |
no_license
|
pyh3887/Django
|
45d4b3be955634edba924cc18bbc8d3454c7355b
|
a44e1067494391ff4a7473aeaeb63bbeba43b3d8
|
refs/heads/master
| 2022-11-08T08:36:04.750050
| 2020-06-28T14:00:53
| 2020-06-28T14:00:53
| 275,596,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_test4.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"pyh3887@naver.com"
] |
pyh3887@naver.com
|
3ae0f1c95796e57b71290c1ea4593981b8eaa8cc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_105/ch14_2019_08_28_18_56_07_920001.py
|
274be80f2e6a4a400ac2bbcb7a5f2f8276dbfe22
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# function to compute the volume of a sphere
import math
def calcula_volume_da_espera(raio):
volume = (4 / 3) * math.pi * (raio)**3
return volume
r = 5
x = calcula_volume_da_espera(r)
print(x)
|
[
"you@example.com"
] |
you@example.com
|
23025f36549d15efc64fc8f5fc60643bf064f03f
|
a08225934c425be313a12975c9563a72ded58be6
|
/round668/ansbfsbro.py
|
d92b9b43907213e629b22639d82c6f148de5a3cc
|
[] |
no_license
|
marcus-aurelianus/codeforce
|
27c966554dee9986f23fb2925bd53e6cceb8b9e9
|
4764df151ade7806e32b6c88283a2de946f99e16
|
refs/heads/master
| 2023-03-18T09:30:55.042594
| 2021-03-12T18:14:08
| 2021-03-12T18:14:08
| 231,387,022
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
from collections import deque
import sys
input = sys.stdin.readline
def get_diameter(tree):
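    # double sweep: the farthest node u from any vertex is a diameter endpoint,
    # and the farthest node v from u then gives the full diameter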
u, _, _ = _dfs(0, tree)
v, diam, dist = _dfs(u, tree)
path = [v]
while v != u:
for nxt_v in tree[v]:
if 1 + dist[nxt_v] == dist[v]:
path.append(nxt_v)
v = nxt_v
break
return diam, path
def _dfs(start, tree):
n = len(tree)
dist = [-1] * n
dist[start] = 0
stack = [start]
while stack:
v = stack.pop()
for nxt_v in tree[v]:
if dist[nxt_v] != -1:
continue
dist[nxt_v] = dist[v] + 1
stack.append(nxt_v)
max_d = max(dist)
return dist.index(max_d), max_d, dist
def ab(a, b):
INF = 10 ** 6
visited = [INF] * n
visited[a] = 0
q = deque([a])
while q:
v = q.popleft()
for nxt_v in tree[v]:
if visited[v] + 1 < visited[nxt_v]:
visited[nxt_v] = visited[v] + 1
q.append(nxt_v)
return visited[b]
t = int(input())
for _ in range(t):
n, a, b, da, db = map(int, input().split())
edges = [list(map(int, input().split())) for i in range(n - 1)]
a -= 1
b -= 1
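    # if Bob's jump is at most twice Alice's reach, he can never leap past her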
if da * 2 >= db:
print("Alice")
continue
tree = [[] for i in range(n)]
for u, v in edges:
u -= 1
v -= 1
tree[u].append(v)
tree[v].append(u)
distance = ab(a, b)
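    # Alice tags Bob on the first move if he starts within her reach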
if distance <= da:
print("Alice")
continue
d, _ = get_diameter(tree)
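    # Bob survives only if the tree has a path long enough to jump across Alice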
if d >= da*2+1:
print("Bob")
else:
print("Alice")
|
[
"37787424+marcus-aurelianus@users.noreply.github.com"
] |
37787424+marcus-aurelianus@users.noreply.github.com
|
9eea1f089e5df0837a64cfa3733cfb136dab2f1b
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/syslog/rttoremotesysloggroup.py
|
681035ba11c5042c7e1934d5b99fb4c077af129c
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,647
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToRemoteSyslogGroup(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.syslog.RtToRemoteSyslogGroup", "cobra.model.fv.RemotePolHolder")
meta.moClassName = "syslogRtToRemoteSyslogGroup"
meta.rnFormat = "rtfvToRemoteSyslogGroup-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x800040000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.syslog.Group")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtfvToRemoteSyslogGroup-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 21982, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2085
prop.defaultValueStr = "fvRemotePolHolder"
prop._addConstant("fvRemotePolHolder", None, 2085)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 21981, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
82fe2175ec04d5bd0ac591bc2dca2851cd2e62d4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_088/ch50_2020_09_30_20_44_23_062380.py
|
2fc8c9a8fab5b14828d3b554d8252e61a4429570
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
def junta_nome_sobrenome(nome,sobrenome):
juncao=[]
i=0
j=0
while(i<len(nome) and j<len(sobrenome)):
        juncao.append(nome[i] + " " + sobrenome[j])  # list.append takes one argument; assuming a "first last" string is intended
i+=1
j+=1
return juncao
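# e.g. junta_nome_sobrenome(["Ada", "Alan"], ["Lovelace", "Turing"]) -> ["Ada Lovelace", "Alan Turing"]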
|
[
"you@example.com"
] |
you@example.com
|
94fce4febd1a1ec48c9220916fc4081efd807f64
|
3955c3f367a3a60f8602dcb4609faec9898438bb
|
/test/test_retention_strategy_description.py
|
cac1211511a84328ecf682bb3d91e58adf7feb4c
|
[
"Apache-2.0"
] |
permissive
|
MinhKMA/graylog.py
|
e89c34defa5422d59d0a501355058f5eb2dfe68c
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
refs/heads/master
| 2021-05-06T21:03:06.946509
| 2016-09-23T04:31:13
| 2016-09-23T04:31:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
# coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1.1+01d50e5
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import graylog
from graylog.rest import ApiException
from graylog.models.retention_strategy_description import RetentionStrategyDescription
class TestRetentionStrategyDescription(unittest.TestCase):
""" RetentionStrategyDescription unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testRetentionStrategyDescription(self):
"""
Test RetentionStrategyDescription
"""
model = graylog.models.retention_strategy_description.RetentionStrategyDescription()
if __name__ == '__main__':
unittest.main()
|
[
"on99@users.noreply.github.com"
] |
on99@users.noreply.github.com
|
e349b153a25f1095319e963ee6aeeb8b7ac6f557
|
8371e55cd5dc529611fd705c62476728e7b7e529
|
/test/test_metrics.py
|
ed265becede1a85c12bbe8dad61fe8ce4522789f
|
[
"Apache-2.0"
] |
permissive
|
timgates42/kornia
|
3436515991f8799027766beef767c05ba3796cb0
|
03ab49feb075149c0df65d47cdb91d563b8980e2
|
refs/heads/master
| 2023-03-20T21:22:22.836296
| 2022-07-03T08:56:33
| 2022-07-03T08:56:33
| 236,164,451
| 0
| 0
| null | 2020-01-25T12:01:24
| 2020-01-25T12:01:23
| null |
UTF-8
|
Python
| false
| false
| 10,430
|
py
|
import pytest
import torch
import kornia
from kornia.testing import assert_close
class TestMeanIoU:
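    # kornia.metrics.mean_iou yields a (batch_size, num_classes) tensor of per-class IoU scores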
def test_two_classes_perfect(self, device, dtype):
batch_size = 1
num_classes = 2
actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long)
predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long)
mean_iou = kornia.metrics.mean_iou(predicted, actual, num_classes)
mean_iou_real = torch.tensor([[1.0, 1.0]], device=device, dtype=torch.float32)
assert mean_iou.shape == (batch_size, num_classes)
assert_close(mean_iou, mean_iou_real)
def test_two_classes_perfect_batch2(self, device, dtype):
batch_size = 2
num_classes = 2
actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long).repeat(batch_size, 1)
predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long).repeat(batch_size, 1)
mean_iou = kornia.metrics.mean_iou(predicted, actual, num_classes)
mean_iou_real = torch.tensor([[1.0, 1.0], [1.0, 1.0]], device=device, dtype=torch.float32)
assert mean_iou.shape == (batch_size, num_classes)
assert_close(mean_iou, mean_iou_real)
def test_two_classes(self, device, dtype):
batch_size = 1
num_classes = 2
actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long)
predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 1]], device=device, dtype=torch.long)
        mean_iou = kornia.metrics.mean_iou(predicted, actual, num_classes)
mean_iou_real = torch.tensor([[0.75, 0.80]], device=device, dtype=torch.float32)
assert mean_iou.shape == (batch_size, num_classes)
assert_close(mean_iou, mean_iou_real)
def test_four_classes_2d_perfect(self, device, dtype):
batch_size = 1
num_classes = 4
actual = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
mean_iou = kornia.metrics.mean_iou(predicted, actual, num_classes)
mean_iou_real = torch.tensor([[1.0, 1.0, 1.0, 1.0]], device=device, dtype=torch.float32)
assert mean_iou.shape == (batch_size, num_classes)
assert_close(mean_iou, mean_iou_real)
def test_four_classes_one_missing(self, device, dtype):
batch_size = 1
num_classes = 4
actual = torch.tensor(
[[[0, 0, 0, 0], [0, 0, 0, 0], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[3, 3, 2, 2], [3, 3, 2, 2], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
mean_iou = kornia.metrics.mean_iou(predicted, actual, num_classes)
mean_iou_real = torch.tensor([[0.0, 1.0, 0.5, 0.5]], device=device, dtype=torch.float32)
assert mean_iou.shape == (batch_size, num_classes)
assert_close(mean_iou, mean_iou_real)
class TestConfusionMatrix:
def test_two_classes(self, device, dtype):
num_classes = 2
actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long)
predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 1]], device=device, dtype=torch.long)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor([[[3, 1], [0, 4]]], device=device, dtype=torch.float32)
assert_close(conf_mat, conf_mat_real)
def test_two_classes_batch2(self, device, dtype):
batch_size = 2
num_classes = 2
actual = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]], device=device, dtype=torch.long).repeat(batch_size, 1)
predicted = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 1]], device=device, dtype=torch.long).repeat(batch_size, 1)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor([[[3, 1], [0, 4]], [[3, 1], [0, 4]]], device=device, dtype=torch.float32)
assert_close(conf_mat, conf_mat_real)
def test_three_classes(self, device, dtype):
num_classes = 3
actual = torch.tensor([[2, 2, 0, 0, 1, 0, 0, 2, 1, 1, 0, 0, 1, 2, 1, 0]], device=device, dtype=torch.long)
predicted = torch.tensor([[2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 2, 1, 0, 0, 2, 2]], device=device, dtype=torch.long)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor([[[4, 1, 2], [3, 0, 2], [1, 2, 1]]], device=device, dtype=torch.float32)
assert_close(conf_mat, conf_mat_real)
def test_four_classes_one_missing(self, device, dtype):
num_classes = 4
actual = torch.tensor([[3, 3, 1, 1, 2, 1, 1, 3, 2, 2, 1, 1, 2, 3, 2, 1]], device=device, dtype=torch.long)
predicted = torch.tensor([[3, 2, 1, 1, 1, 1, 1, 2, 1, 3, 3, 2, 1, 1, 3, 3]], device=device, dtype=torch.long)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor(
[[[0, 0, 0, 0], [0, 4, 1, 2], [0, 3, 0, 2], [0, 1, 2, 1]]], device=device, dtype=torch.float32
)
assert_close(conf_mat, conf_mat_real)
def test_three_classes_normalized(self, device, dtype):
num_classes = 3
normalized = True
actual = torch.tensor([[2, 2, 0, 0, 1, 0, 0, 2, 1, 1, 0, 0, 1, 2, 1, 0]], device=device, dtype=torch.long)
predicted = torch.tensor([[2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 2, 1, 0, 0, 2, 2]], device=device, dtype=torch.long)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes, normalized)
conf_mat_real = torch.tensor(
[[[0.5000, 0.3333, 0.4000], [0.3750, 0.0000, 0.4000], [0.1250, 0.6667, 0.2000]]],
device=device,
dtype=torch.float32,
)
assert_close(conf_mat, conf_mat_real)
def test_four_classes_2d_perfect(self, device, dtype):
num_classes = 4
actual = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor(
[[[4, 0, 0, 0], [0, 4, 0, 0], [0, 0, 4, 0], [0, 0, 0, 4]]], device=device, dtype=torch.float32
)
assert_close(conf_mat, conf_mat_real)
def test_four_classes_2d_one_class_nonperfect(self, device, dtype):
num_classes = 4
actual = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[0, 0, 1, 1], [0, 3, 0, 1], [2, 2, 1, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor(
[[[3, 0, 0, 1], [1, 3, 0, 0], [0, 0, 4, 0], [0, 1, 0, 3]]], device=device, dtype=torch.float32
)
assert_close(conf_mat, conf_mat_real)
def test_four_classes_2d_one_class_missing(self, device, dtype):
num_classes = 4
actual = torch.tensor(
[[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[3, 3, 1, 1], [3, 3, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor(
[[[0, 0, 0, 4], [0, 4, 0, 0], [0, 0, 4, 0], [0, 0, 0, 4]]], device=device, dtype=torch.float32
)
assert_close(conf_mat, conf_mat_real)
def test_four_classes_2d_one_class_no_predicted(self, device, dtype):
num_classes = 4
actual = torch.tensor(
[[[0, 0, 0, 0], [0, 0, 0, 0], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
predicted = torch.tensor(
[[[3, 3, 2, 2], [3, 3, 2, 2], [2, 2, 3, 3], [2, 2, 3, 3]]], device=device, dtype=torch.long
)
conf_mat = kornia.metrics.confusion_matrix(predicted, actual, num_classes)
conf_mat_real = torch.tensor(
[[[0, 0, 4, 4], [0, 0, 0, 0], [0, 0, 4, 0], [0, 0, 0, 4]]], device=device, dtype=torch.float32
)
assert_close(conf_mat, conf_mat_real)
class TestPsnr:
def test_metric(self, device, dtype):
sample = torch.ones(1, device=device, dtype=dtype)
expected = torch.tensor(20.0, device=device, dtype=dtype)
actual = kornia.metrics.psnr(sample, 1.2 * sample, 2.0)
assert_close(actual, expected)
class TestMeanAveragePrecision:
def test_smoke(self, device, dtype):
boxes = torch.tensor([[100, 50, 150, 100.]], device=device, dtype=dtype)
labels = torch.tensor([1], device=device, dtype=torch.long)
scores = torch.tensor([.7], device=device, dtype=dtype)
gt_boxes = torch.tensor([[100, 50, 150, 100.]], device=device, dtype=dtype)
gt_labels = torch.tensor([1], device=device, dtype=torch.long)
mean_ap = kornia.metrics.mean_average_precision(
[boxes], [labels], [scores], [gt_boxes], [gt_labels], 2)
assert_close(mean_ap[0], torch.tensor(1., device=device, dtype=dtype))
assert_close(mean_ap[1][1], 1.0)
def test_raise(self, device, dtype):
boxes = torch.tensor([[100, 50, 150, 100.]], device=device, dtype=dtype)
labels = torch.tensor([1], device=device, dtype=torch.long)
scores = torch.tensor([.7], device=device, dtype=dtype)
gt_boxes = torch.tensor([[100, 50, 150, 100.]], device=device, dtype=dtype)
gt_labels = torch.tensor([1], device=device, dtype=torch.long)
with pytest.raises(AssertionError):
_ = kornia.metrics.mean_average_precision(
boxes[0], [labels], [scores], [gt_boxes], [gt_labels], 2)
|
[
"noreply@github.com"
] |
timgates42.noreply@github.com
|
7c33a275e78c048129d92635ec3b86a215c3990b
|
d3fb61f506eb44deb55a5b4a99e1d2ae2d16fe0a
|
/code/models_old/just_graph_multi_video_multi_F_joint_train.py
|
59479e24849bf7f87bef09a8c4d649282353e05d
|
[] |
no_license
|
menorashid/nn_net
|
3a719aaa3f608a65dfde3c57db20a3066d7ea453
|
11df1365b7f0375a251b00f807188f4be6b8f025
|
refs/heads/master
| 2021-07-06T08:08:25.879757
| 2020-07-28T22:01:34
| 2020-07-28T22:01:34
| 143,941,173
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,285
|
py
|
from torchvision import models
import torch.nn as nn
import torch
import torch.nn.functional as F
from graph_layer_flexible_temp import Graph_Layer
from graph_layer_flexible_temp import Graph_Layer_Wrapper
from normalize import Normalize
import numpy as np
class Graph_Multi_Video(nn.Module):
def __init__(self,
n_classes,
deno,
in_out = None,
feat_dim = None,
graph_size = None,
method = 'cos',
sparsify = False,
non_lin = 'HT',
normalize = [True,True]
):
super(Graph_Multi_Video, self).__init__()
self.num_classes = n_classes
self.deno = deno
self.graph_size = graph_size
self.sparsify = sparsify
if in_out is None:
in_out = [2048,64]
if feat_dim is None:
feat_dim = [2048,64]
num_layers = len(in_out)-1
print 'NUM LAYERS', num_layers, in_out
self.linear_layers = nn.ModuleList()
self.linear_layers_after = nn.ModuleList()
for idx_layer_num,layer_num in enumerate(range(num_layers)):
if non_lin =='HT':
non_lin_curr = nn.Hardtanh()
elif non_lin =='RL':
non_lin_curr = nn.ReLU()
else:
                error_message = 'Non lin %s not valid' % non_lin  # str() takes one argument and would not format here
raise ValueError(error_message)
idx_curr = idx_layer_num*2
self.linear_layers.append(nn.Linear(feat_dim[idx_curr], feat_dim[idx_curr+1], bias = False))
last_linear = []
last_linear.append(non_lin_curr)
if normalize[0]:
last_linear.append(Normalize())
last_linear.append(nn.Dropout(0.5))
last_linear.append(nn.Linear(feat_dim[idx_curr+1],n_classes))
last_linear = nn.Sequential(*last_linear)
self.linear_layers_after.append(last_linear)
self.graph_layers = nn.ModuleList()
for num_layer in range(num_layers):
self.graph_layers.append(Graph_Layer_Wrapper(in_out[num_layer],n_out = in_out[num_layer+1], non_lin = non_lin, method = method))
# last_graph = []
# if non_lin =='HT':
# last_graph.append(nn.Hardtanh())
# elif non_lin =='RL':
# last_graph.append(nn.ReLU())
# else:
# error_message = str('Non lin %s not valid', non_lin)
# raise ValueError(error_message)
# if normalize[1]:
# last_graph.append(Normalize())
# last_graph.append(nn.Dropout(0.5))
# last_graph.append(nn.Linear(in_out[-1],n_classes))
# last_graph = nn.Sequential(*last_graph)
# self.last_graph = last_graph
self.num_branches = num_layers+1
# if type(num_switch)==type(1):
# num_switch = [num_switch]*self.num_branches
# self.num_switch = num_switch
# self.epoch_counters = [0]* self.num_branches
# self.focus = focus
# self.epoch_last = 0
print 'self.num_branches', self.num_branches
# print 'self.num_switch', self.num_switch
# print 'self.epoch_counters', self.epoch_counters
# print 'self.focus', self.focus
# print 'self.epoch_last', self.epoch_last
def get_to_keep(self,input_sizes):
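        # one k per video (~1/deno of its frames), averaged into a single batch-level k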
k_all = [max(1,size_curr//self.deno) for idx_size_curr,size_curr in enumerate(input_sizes)]
k_all = int(np.mean(k_all))
return k_all
def forward(self, input, epoch_num = None, ret_bg =False, branch_to_test = -1):
strip = False
if type(input)!=type([]):
input = [input]
strip = True
if self.graph_size is None:
graph_size = len(input)
elif self.graph_size=='rand':
import random
graph_size = random.randint(1,len(input))
else:
graph_size = min(self.graph_size, len(input))
input_chunks = [input[i:i + graph_size] for i in xrange(0, len(input), graph_size)]
is_cuda = next(self.parameters()).is_cuda
# print 'Graph branch'
pmf_all = [[] for i in range(self.num_branches)]
x_all_all = [[] for i in range(self.num_branches)]
for input in input_chunks:
input_sizes = [input_curr.size(0) for input_curr in input]
input = torch.cat(input,0)
if self.sparsify:
to_keep = (self.get_to_keep(input_sizes),input_sizes)
else:
to_keep = None
if is_cuda:
input = input.cuda()
assert len(self.graph_layers)==(self.num_branches-1)
input_graph = input
for col_num in range(len(self.graph_layers)):
graph_layer = self.graph_layers[col_num]
linear_layer = self.linear_layers[col_num]
linear_layer_after = self.linear_layers_after[col_num]
feature_out = self.linear_layers[col_num](input_graph)
input_graph = self.graph_layers[col_num](input_graph, feature_out, to_keep = to_keep)
out_col = self.linear_layers_after[col_num](feature_out)
x_all_all[col_num].append(out_col)
x_all_all[len(self.graph_layers)].append(input_graph)
for branch_num in range(len(x_all_all)):
x = x_all_all[branch_num][-1]
for idx_sample in range(len(input_sizes)):
if idx_sample==0:
start = 0
else:
start = sum(input_sizes[:idx_sample])
end = start+input_sizes[idx_sample]
x_curr = x[start:end,:]
pmf_all[branch_num] += [self.make_pmf(x_curr).unsqueeze(0)]
if strip:
for idx_pmf, pmf in enumerate(pmf_all):
assert len(pmf)==1
pmf_all[idx_pmf] = pmf[0].squeeze()
for idx_x, x in enumerate(x_all_all):
x_all_all[idx_x] = torch.cat(x,dim=0)
if branch_to_test>-1:
x_all_all = x_all_all[branch_to_test]
pmf_all = pmf_all[branch_to_test]
if ret_bg:
return x_all_all, pmf_all, None
else:
return x_all_all, pmf_all
def make_pmf(self,x):
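        # top-k mean pooling: average the k = max(1, n_frames // deno) largest scores per class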
k = max(1,x.size(0)//self.deno)
pmf,_ = torch.sort(x, dim=0, descending=True)
pmf = pmf[:k,:]
pmf = torch.sum(pmf[:k,:], dim = 0)/k
return pmf
# def get_similarity(self,input,idx_graph_layer = 0):
# is_cuda = next(self.parameters()).is_cuda
# input_sizes = [input_curr.size(0) for input_curr in input]
# input = torch.cat(input,0)
# if self.sparsify:
# to_keep = (self.get_to_keep(input_sizes),input_sizes)
# else:
# to_keep = None
# print to_keep
# if is_cuda:
# input = input.cuda()
# feature_out = self.linear_layers[idx_graph_layer][0](input)
# sim_mat = self.graph_layers[idx_graph_layer].get_affinity(feature_out,to_keep = to_keep)
# return sim_mat
def printGraphGrad(self):
grad_rel = self.graph_layers[0].graph_layer.weight.grad
print torch.min(grad_rel).data.cpu().numpy(), torch.max(grad_rel).data.cpu().numpy()
class Network:
def __init__(self,
n_classes,
deno,
in_out = None,
feat_dim = None,
graph_size = None,
method = 'cos',
sparsify = False,
non_lin = 'HT',
normalize = [True,True]
):
self.model = Graph_Multi_Video(n_classes, deno, in_out,feat_dim, graph_size, method, sparsify, non_lin, normalize)
def get_lr_list(self, lr):
lr_list = []
lr_list+= [{'params': [p for p in self.model.linear_layers.parameters() if p.requires_grad], 'lr': lr[0]}]
lr_list+= [{'params': [p for p in self.model.linear_layers_after.parameters() if p.requires_grad], 'lr': lr[0]}]
lr_list+= [{'params': [p for p in self.model.graph_layers.parameters() if p.requires_grad], 'lr': lr[1]}]
# lr_list+= [{'params': [p for p in self.model.last_linear.parameters() if p.requires_grad], 'lr': lr[2]}]
# lr_list+= [{'params': [p for p in self.model.last_graph.parameters() if p.requires_grad], 'lr': lr[1]}]
return lr_list
def main():
import numpy as np
import torch
from torch.autograd import Variable
net = Network(n_classes= 20, deno = 8)
print net.model
net.model = net.model.cuda()
input = np.zeros((16,2048))
input = torch.Tensor(input).cuda()
input = Variable(input)
output,pmf = net.model(input)
# print output.shape
print output.data.shape
if __name__=='__main__':
main()
|
[
"mhnrashid@gmail.com"
] |
mhnrashid@gmail.com
|
2c0bf2128c76702e7d8830f8120c27fde6389526
|
2d064dfb4244f5b915c3b02de9594f8e15beb6d0
|
/Other Projects/RamanShiftNiller.py
|
e61d0127378fbfb4a711b9c2b17186a3aaa9a182
|
[] |
no_license
|
ArunShishodia/Smaller-Programs
|
24937ff571a27b5e0c634bf235fb175794986086
|
318ee830188c526921083a5f9aabd41504e055a2
|
refs/heads/master
| 2023-08-24T11:28:54.234711
| 2021-10-22T17:29:03
| 2021-10-22T17:29:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
def print_to_file():
    in1, in2 = 0, 0
    while True:
        raw = input("Enter the right hand side mode (or 'stop'): ")
        if raw == "stop":  # read as text first so typing 'stop' cannot crash float()
            break
        in1 = float(raw)
        in2 = float(input("Enter the left hand side mode: "))
        print("The result is ", (in1 - in2)/2)
in_str = input("Do you want to print to file? y/n ")
if in_str == "y":
with open("Raman_correction.txt", "w+") as f:
angle = input("Please input the angle: ")
f.write("\n" + angle + ": " + str((in1 - in2)/2) + "\n")
if __name__ == "__main__":
print_to_file()
|
[
"martenscheuck@gmail.com"
] |
martenscheuck@gmail.com
|
5a84b74dd69aa586f5d914c72439c64a06582326
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scikit-learn/py2/sklearn/externals/joblib/pool.py
|
e0682c1822314d799c3e14e7fbb7d6735c7ebd9e
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 25,033
|
py
|
"""Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = None
from pickle import whichmodule
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
        # neither a nor its descendants have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not."""
return _get_backing_memmap(a) is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memory mapped file."""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return np.memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = np.memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays.
a is expected to be an instance of np.ndarray (or np.memmap)
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len))
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file."""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
class ArrayMemmapReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created in
a folder.
temp_folder: str
Path of a folder where files for backing memmaped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
prewarm: bool, optional, False by default.
        Force a read on a newly memmaped array to make sure that the OS
        pre-caches it in memory. This can be useful to avoid concurrent disk
        access when the
same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
context_id=None, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder = temp_folder
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
self._prewarm = prewarm
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None:
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject
and self._max_nbytes is not None
and a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Find a unique, concurrent safe filename for writing the
# content of this array only once.
basename = "%d-%d-%s.pkl" % (
os.getpid(), id(threading.current_thread()), hash(a))
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
# XXX: implement an explicit reference counting scheme to make it
# possible to delete temporary files as soon as the workers are
# done processing this data.
if not os.path.exists(filename):
if self.verbose > 0:
print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data to avoid concurrent disk access in
# multiple children processes
load(filename, mmap_mode=self._mmap_mode).max()
elif self.verbose > 1:
print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
a.shape, a.dtype, filename))
# The worker process will use joblib.load to memmap the data
return (load, (filename, self._mmap_mode))
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
if self.verbose > 1:
print("Pickling array (shape=%r, dtype=%s)." % (
a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
"""Pickler that accepts custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication
hence no backward compatibility is required.
`reducers` is expected to be a dictionary with key/values
    being `(type, callable)` pairs where `callable` is a function that,
    given an instance of `type`, will return a tuple `(constructor,
tuple_of_objects)` to rebuild an instance out of the pickled
`tuple_of_objects` as would return a `__reduce__` method. See the
standard library documentation on pickling for more details.
"""
    # We override the pure Python pickler as it's the only way to be able to
# customize the dispatch table without side effects in Python 2.6
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
Pickler.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if hasattr(Pickler, 'dispatch'):
# Make the dispatch registry an instance level attribute instead of
# a reference to the class dictionary under Python 2
self.dispatch = Pickler.dispatch.copy()
else:
# Under Python 3 initialize the dispatch table with a copy of the
# default registry
self.dispatch_table = copyreg.dispatch_table.copy()
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
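        # e.g. (hypothetical) CustomizablePickler(BytesIO(), {np.memmap: reduce_memmap})
        # would serialize memmaps by file reference instead of copying their bytes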
def register(self, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table."""
if hasattr(Pickler, 'dispatch'):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
memory mapped datastructures.
`reducers` is expected to be a dict with key / values being
`(type, callable)` pairs where `callable` is a function that, given an
instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
to rebuild an instance out of the pickled `tuple_of_objects` as would
return a `__reduce__` method.
See the standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock,
self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock,
self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (
self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
class PicklingPool(Pool):
"""Pool implementation with customizable pickling reducers.
This is useful to control how data is shipped between processes
and makes it possible to use shared memory without useless
    copies induced by the default pickling methods of the original
objects passed as arguments to dispatch.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
`callable` is a function that, given an instance of `type`, will return a
tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
pickled `tuple_of_objects` as would return a `__reduce__` method.
See the standard library documentation about pickling for more details.
"""
def __init__(self, processes=None, forward_reducers=None,
backward_reducers=None, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
self._forward_reducers = forward_reducers
self._backward_reducers = backward_reducers
poolargs = dict(processes=processes)
poolargs.update(kwargs)
super(PicklingPool, self).__init__(**poolargs)
def _setup_queues(self):
context = getattr(self, '_ctx', mp)
self._inqueue = CustomizablePicklingQueue(context,
self._forward_reducers)
self._outqueue = CustomizablePicklingQueue(context,
self._backward_reducers)
self._quick_put = self._inqueue._send
self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
"""Utility function to cleanup a temporary folder if still existing."""
try:
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
except WindowsError:
warnings.warn("Failed to clean temporary folder: %s" % folder_path)
class MemmapingPool(PicklingPool):
"""Process pool that shares large arrays to avoid memory copy.
This drop-in replacement for `multiprocessing.pool.Pool` makes
it possible to work efficiently with shared memory in a numpy
context.
Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
original mode except for the 'w+' mode that is automatically
transformed as 'r+' to avoid zeroing the original data upon
instantiation.
Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem so that child
    processes can access their content via memmaping (file system
backed shared memory).
Note: it is important to call the terminate method to collect
the temporary folder used by the pool.
Parameters
----------
processes: int, optional
Number of worker processes running concurrently in the pool.
initializer: callable, optional
Callable executed on worker process creation.
initargs: tuple, optional
Arguments passed to the initializer callable.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
    max_nbytes: int or None, optional, 1e6 by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder.
Use None to disable memmaping of large arrays.
mmap_mode: {'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
forward_reducers: dictionary, optional
Reducers used to pickle objects passed from master to worker
processes: see below.
backward_reducers: dictionary, optional
Reducers used to pickle return values from workers back to the
master process.
verbose: int, optional
Make it possible to monitor how the communication of numpy arrays
with the subprocess is handled (pickling or memmaping)
prewarm: bool or str, optional, "auto" by default.
        If True, force a read on a newly memmaped array to make sure that the
        OS pre-caches it in memory. This can be useful to avoid concurrent disk access
when the same data array is passed to different worker processes.
If "auto" (by default), prewarm is set to True, unless the Linux shared
memory partition /dev/shm is available and used as temp_folder.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return
a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
of the pickled `tuple_of_objects` as would return a `__reduce__`
method. See the standard library documentation on pickling for more
details.
"""
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
mmap_mode='r', forward_reducers=None, backward_reducers=None,
verbose=0, context_id=None, prewarm=False, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
# Prepare a sub-folder name for the serialization of this particular
# pool instance (do not create in advance to spare FS write access if
# no array is to be dumped):
use_shared_mem = False
pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
os.getpid(), id(self))
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
                    # Missing rights in the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
self._temp_folder = pool_folder
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# self to ensure that this callback won't prevent garbage collection of
# the pool instance and related file handler resources such as POSIX
# semaphores and pipes
pool_module_name = whichmodule(delete_folder, 'delete_folder')
def _cleanup():
# In some cases the Python runtime seems to set delete_folder to
# None just before exiting when accessing the delete_folder
# function from the closure namespace. So instead we reimport
# the delete_folder function explicitly.
# https://github.com/joblib/joblib/issues/328
# We cannot just use from 'joblib.pool import delete_folder'
# because joblib should only use relative imports to allow
# easy vendoring.
delete_folder = __import__(
pool_module_name, fromlist=['delete_folder']).delete_folder
delete_folder(pool_folder)
atexit.register(_cleanup)
if np is not None:
# Register smart numpy.ndarray reducers that detects memmap backed
            # arrays and that is also able to dump to memmap large in-memory
# arrays over the max_nbytes threshold
if prewarm == "auto":
prewarm = not use_shared_mem
forward_reduce_ndarray = ArrayMemmapReducer(
max_nbytes, pool_folder, mmap_mode, verbose,
prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = reduce_memmap
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and make it tricky to collect the
# temporary folder
backward_reduce_ndarray = ArrayMemmapReducer(
None, pool_folder, mmap_mode, verbose)
backward_reducers[np.ndarray] = backward_reduce_ndarray
backward_reducers[np.memmap] = reduce_memmap
poolargs = dict(
processes=processes,
forward_reducers=forward_reducers,
backward_reducers=backward_reducers)
poolargs.update(kwargs)
super(MemmapingPool, self).__init__(**poolargs)
def terminate(self):
n_retries = 10
for i in range(n_retries):
try:
super(MemmapingPool, self).terminate()
break
except WindowsError as e:
# Workaround occasional "[Error 5] Access is denied" issue
# when trying to terminate a process under windows.
sleep(0.1)
if i + 1 == n_retries:
warnings.warn("Failed to terminate worker processes in "
" multiprocessing pool: %r" % e)
delete_folder(self._temp_folder)
|
[
"evgueni.s.petrov@gmail.com"
] |
evgueni.s.petrov@gmail.com
|
5f24281f18439e98042de1cbc4bfec9f0b491c3d
|
d7aea8c5589c4d752e075244aab92d245ad98b4e
|
/densenetocr/core.py
|
78633d70b458b00a6e5517337dfd7df40377835c
|
[] |
no_license
|
fendaq/text-detection-1
|
9da5bc131c89f465f4dc64924bff4765253b6bdd
|
fedbfa1ad02da6b98cb154eeaba32692d40ec672
|
refs/heads/master
| 2020-04-05T17:47:20.221900
| 2018-11-11T04:54:26
| 2018-11-11T04:54:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,750
|
py
|
import json
import keras.backend as K
import numpy as np
from PIL import Image
from keras import Input, Model
from keras.layers import Conv2D, BatchNormalization, Activation, Dropout, AveragePooling2D, ZeroPadding2D, Permute, \
TimeDistributed, Flatten, Dense, Lambda
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils.multi_gpu_utils import multi_gpu_model
from densenetocr.data_loader import DataLoader
def _dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=0.2, weight_decay=1e-4):
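    # DenseNet block: concatenate each conv's output onto the running feature map,
    # growing the channel count by growth_rate per layer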
for i in range(nb_layers):
cb = _conv_block(x, growth_rate, dropout_rate, weight_decay)
x = concatenate([x, cb])
nb_filter += growth_rate
return x, nb_filter
def _conv_block(input, growth_rate, dropout_rate=None, weight_decay=1e-4):
x = BatchNormalization(epsilon=1.1e-5)(input)
x = Activation('relu')(x)
x = Conv2D(growth_rate, (3, 3), kernel_initializer='he_normal', padding='same')(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def _transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
x = BatchNormalization(epsilon=1.1e-5)(input)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
if pooltype == 2:
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
elif pooltype == 1:
x = ZeroPadding2D(padding=(0, 1))(x)
x = AveragePooling2D((2, 2), strides=(2, 1))(x)
elif pooltype == 3:
x = AveragePooling2D((2, 2), strides=(2, 1))(x)
return x, nb_filter
def _ctc_loss(args):
labels, y_pred, input_length, label_length = args
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
class DenseNetOCR:
def __init__(self,
num_classes,
lr=0.0005,
image_height=32,
image_channels=1,
maxlen=50,
dropout_rate=0.2,
weight_decay=1e-4,
filters=64,
weight_path=None,
num_gpu=1):
self.image_shape = (image_height, None, image_channels)
self.lr = lr
self.image_height, self.image_channels = image_height, image_channels
self.maxlen = maxlen
self.dropout_rate = dropout_rate
self.weight_decay = weight_decay
self.filters = filters
self.num_classes = num_classes
self.num_gpu = num_gpu
self.base_model, self.model, self.parallel_model = self.__build_model()
if weight_path is not None:
self.base_model.load_weights(weight_path)
def config(self):
return {
"lr": self.lr,
"num_classes": self.num_classes,
"image_height": self.image_height,
"image_channels": self.image_channels,
"maxlen": self.maxlen,
"dropout_rate": self.dropout_rate,
"weight_decay": self.weight_decay,
"filters": self.filters
}
def __build_model(self):
input = Input(shape=self.image_shape, name="the_input")
nb_filter = self.filters
x = Conv2D(nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
use_bias=False, kernel_regularizer=l2(self.weight_decay))(input)
# 64 + 8 * 8 = 128
x, nb_filter = _dense_block(x, 8, nb_filter, 8, None, self.weight_decay)
# 128
x, nb_filter = _transition_block(x, 128, self.dropout_rate, 2, self.weight_decay)
# 128 + 8 * 8 = 192
x, nb_filter = _dense_block(x, 8, nb_filter, 8, None, self.weight_decay)
# 192->128
x, nb_filter = _transition_block(x, 128, self.dropout_rate, 2, self.weight_decay)
# 128 + 8 * 8 = 192
x, nb_filter = _dense_block(x, 8, nb_filter, 8, None, self.weight_decay)
x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
x = Activation('relu')(x)
x = Permute((2, 1, 3), name='permute')(x)
x = TimeDistributed(Flatten(), name='flatten')(x)
y_pred = Dense(self.num_classes, name='out', activation='softmax')(x)
base_model = Model(inputs=input, outputs=y_pred)
labels = Input(shape=(self.maxlen,), dtype='float32', name="the_labels")
input_length = Input(shape=(1,), name="input_length", dtype='int64')
label_length = Input(shape=(1,), name="label_length", dtype='int64')
loss_out = Lambda(_ctc_loss, output_shape=(1,), name='ctc')([labels, y_pred, input_length, label_length])
model = Model(inputs=[input, labels, input_length, label_length], outputs=loss_out)
parallel_model = model
if self.num_gpu > 1:
parallel_model = multi_gpu_model(model, gpus=self.num_gpu)
adam = Adam(self.lr)
parallel_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam, metrics=['accuracy'])
return base_model, model, parallel_model
def train(self, epochs, train_data_loader: DataLoader, valid_data_loader: DataLoader, **kwargs):
self.parallel_model.fit_generator(generator=train_data_loader.load_data(), epochs=epochs,
steps_per_epoch=train_data_loader.steps_per_epoch,
validation_data=valid_data_loader.load_data(),
validation_steps=valid_data_loader.steps_per_epoch,
**kwargs)
def predict(self, image, id_to_char):
if type(image) == str:
img = Image.open(image)
else:
img = image
im = img.convert('L')
scale = im.size[1] * 1.0 / 32
w = im.size[0] / scale
w = int(w)
im = im.resize((w, 32), Image.ANTIALIAS)
img = np.array(im).astype(np.float32) / 255.0 - 0.5
X = img.reshape((32, w, 1))
X = np.array([X])
y_pred = self.base_model.predict(X)
argmax = np.argmax(y_pred, axis=2)[0]
y_pred = y_pred[:, :, :]
out = K.get_value(K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0]) * y_pred.shape[1], )[0][0])[:, :]
out = u''.join([id_to_char[x] for x in out[0]])
return out, im
@staticmethod
def save_config(obj, config_path: str):
with open(config_path, 'w+') as outfile:
json.dump(obj.config(), outfile)
@staticmethod
def load_config(config_path: str):
with open(config_path, 'r') as infile:
return dict(json.load(infile))
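A quick sanity check on the filter bookkeeping in __build_model: the inline comments (64 + 8 * 8 = 128, etc.) can be replayed with plain arithmetic, no Keras required. This is an illustrative sketch added here, not part of the original module.

def dense_block_filters(nb_filter, nb_layers=8, growth_rate=8):
    # _dense_block concatenates one growth_rate-wide conv per layer
    return nb_filter + nb_layers * growth_rate

nb = 64                        # filters of the stem Conv2D
nb = dense_block_filters(nb)   # 64 + 8 * 8 = 128
nb = 128                       # first _transition_block squeezes to 128
nb = dense_block_filters(nb)   # 128 + 8 * 8 = 192
nb = 128                       # second _transition_block
nb = dense_block_filters(nb)   # 128 + 8 * 8 = 192
assert nb == 192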
|
[
"1490215053@qq.com"
] |
1490215053@qq.com
|
10e11028f40f1ce8e532bb26f81c50593d8258d4
|
9da052515ce465cf296278f96c492d9ead1ccb13
|
/chap09/mypack/mysub/lib.py
|
a3f12c3e5acdee4ab64b6699079d6a7edab03f9b
|
[] |
no_license
|
usako1124/teach-yourself-python
|
ea5cf1491d8609be7436bab56798040314a3ccf8
|
e53b1a27117d03804d136dbb5ace37efc9bf6a10
|
refs/heads/main
| 2023-06-15T22:47:22.873291
| 2021-06-30T13:34:08
| 2021-06-30T13:34:08
| 354,186,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
from .. import hoge
def main():
hoge.func()
|
[
"usako1124.aibo@gmail.com"
] |
usako1124.aibo@gmail.com
|
1111f324e3844b239c55a24138a23a8c48c92e47
|
564fe9c8409d9ff4ba5f88dd36c0743d417767fa
|
/test/test_alert_team_meta.py
|
3ae2e7a913c19ecd20b06e143be8e283b5169389
|
[
"Apache-2.0"
] |
permissive
|
criteo-forks/opsgenie-python-sdk
|
28cf4b2e5eb5f10df582cfd6393a0e952dee5102
|
2a3924a0bd779eab47937925eb5d42ffbbd751d4
|
refs/heads/master
| 2020-04-05T23:09:41.002143
| 2019-04-12T13:37:22
| 2019-04-12T13:37:22
| 65,009,459
| 0
| 2
| null | 2016-08-05T10:08:55
| 2016-08-05T10:08:55
| null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opsgenie_swagger
from opsgenie_swagger.models.alert_team_meta import AlertTeamMeta # noqa: E501
from opsgenie_swagger.rest import ApiException
class TestAlertTeamMeta(unittest.TestCase):
"""AlertTeamMeta unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAlertTeamMeta(self):
"""Test AlertTeamMeta"""
# FIXME: construct object with mandatory attributes with example values
# model = opsgenie_swagger.models.alert_team_meta.AlertTeamMeta() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"c.chary@criteo.com"
] |
c.chary@criteo.com
|
2575781d0b1a0577879a12cb6c9d39c26bb61fa6
|
00689951be97b3e9e3a036aca64efaa1ee59134a
|
/aula020 - FUNÇÕES/aula020.py
|
6fb54eff457ee36900903b61a3a97b0b0b5a7171
|
[
"MIT"
] |
permissive
|
miradouro/CursoEmVideo-Python
|
4826cf387cc9424e675f2b115842a643f2d67c8d
|
cc7b05a9a4aad8e6ef3b29453d83370094d75e41
|
refs/heads/main
| 2023-03-24T08:51:34.183169
| 2021-03-20T22:15:02
| 2021-03-20T22:15:02
| 349,843,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
def mensagens(msg):
print('-'*30)
print(f'{msg:^30}')
print('-'*30)
def soma(a, b):
print(a + b)
def contador(*num):
somatoria = 0
for valor in num:
somatoria += valor
print(somatoria)
mensagens('Olá, Mundo!')
soma(4, 5)
contador(2, 3, 4, 1)
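For comparison, contador is equivalent to the built-in sum over the packed *num tuple; a short cross-check (an illustrative addition, not part of the lesson file):

def contador_sum(*num):
    # *num packs the positional arguments into a tuple
    print(sum(num))

contador_sum(2, 3, 4, 1)  # prints 10, same as contador(2, 3, 4, 1)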
|
[
"rafaelmiradouro@gmail.com"
] |
rafaelmiradouro@gmail.com
|
61fb1be17f956ac91e76b9c3f90e22510d726b03
|
f63c4eb29ce57319441f5469d1d049b63bc220de
|
/swu_cycle_variance/run408.py
|
f7ee14169edb5f464ff68b4b2091bfc6fa2c2f51
|
[] |
no_license
|
a-co/diversion_models
|
0237642153668b16035699e9e734ff0538568582
|
69eed2687b1cd2b48f5717d15919eccd24a0eabc
|
refs/heads/main
| 2023-05-02T19:04:26.333677
| 2020-06-18T20:50:18
| 2020-06-18T20:50:18
| 216,904,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,241
|
py
|
SIMULATION = {'simulation': {'agent': [{'name': 'deployer_civilian', 'prototype': 'civilian_deployer'}, {'name': 'deployer_shared', 'prototype': 'shared_deployer'}], 'archetypes': {'spec': [{'lib': 'cycamore', 'name': 'DeployInst'}, {'lib': 'cycamore', 'name': 'Source'}, {'lib': 'cycamore', 'name': 'Sink'}, {'lib': 'cycamore', 'name': 'Storage'}, {'lib': 'cycamore', 'name': 'Reactor'}, {'lib': 'cycamore', 'name': 'Separations'}, {'lib': 'cycamore', 'name': 'Enrichment'}]}, 'control': {'duration': '144', 'explicit_inventory': 'true', 'startmonth': '1', 'startyear': '2020'}, 'prototype': [{'config': {'Source': {'inventory_size': '1e30', 'outcommod': 'u_ore', 'outrecipe': 'r_u_ore', 'throughput': '1e10'}}, 'name': 'mine'}, {'config': {'Separations': {'feed_commod_prefs': {'val': ['1.0', '10.0', '100.0']}, 'feed_commods': {'val': ['u_ore', 'u_ore1', 'u_ore2']}, 'feedbuf_size': '2e8', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'u_nat', 'info': {'buf_size': '150000', 'efficiencies': {'item': [{'comp': 'U', 'eff': '.99'}, {'comp': 'O', 'eff': '.99'}]}}}}, 'throughput': '2e8'}}, 'name': 'milling'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'u_nat'}, 'feedbuf_size': '200000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'uf6', 'info': {'buf_size': '200000', 'efficiencies': {'item': {'comp': 'U', 'eff': '.99'}}}}}, 'throughput': '200000'}}, 'name': 'conversion'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': ['1', '20']}, 'feed_commods': {'val': ['uf6', 'mil_uf6']}, 'feed_recipe': 'r_natl_u', 'max_feed_inventory': '100000', 'product_commod': 'civ_leu', 'swu_capacity': '305503.9246233005', 'tails_assay': '0.003', 'tails_commod': 'u_dep'}}, 'name': 'civ_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'u_dep'}, 'out_commods': {'val': 'u_dep_str'}, 'residence_time': '0'}}, 'name': 'civ_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1000'}, 'in_commods': {'val': 'civ_leu'}, 'in_recipe': 'r_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'uox'}, 'residence_time': '1'}}, 'name': 'civ_fabrication'}, {'config': {'Reactor': {'assem_size': '29565', 'cycle_time': '-9', 'fuel_incommods': {'val': 'uox'}, 'fuel_inrecipes': {'val': 'r_uox'}, 'fuel_outcommods': {'val': 'uox_spent'}, 'fuel_outrecipes': {'val': 'r_uox_spent'}, 'n_assem_batch': '1', 'n_assem_core': '3', 'power_cap': '900', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'civ_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'uox_spent'}, 'out_commods': {'val': 'uox_spent_str'}, 'residence_time': '60'}}, 'name': 'civ_str_uox_spent'}, {'config': {'DeployInst': {'build_times': {'val': ['121', '121', '121', '145', '157', '169']}, 'n_build': {'val': ['1', '1', '1', '1', '1', '1']}, 'prototypes': {'val': ['civ_enrichment', 'civ_str_u_dep', 'civ_fabrication', 'civ_lwr', 'civ_str_uox_spent', 'civ_lwr']}}}, 'name': 'civilian_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['1', '1', '1']}, 'n_build': {'val': ['1', '1', '1']}, 'prototypes': {'val': ['mine', 'milling', 'conversion']}}}, 'name': 'shared_deployer'}], 'recipe': [{'basis': 'mass', 'name': 'r_u_ore', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}, {'comp': '999', 'id': '120240000'}]}, {'basis': 'mass', 'name': 'r_natl_u', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox', 'nuclide': [{'comp': '0.05', 'id': '922350000'}, {'comp': '0.95', 'id': 
'922380000'}]}, {'basis': 'mass', 'name': 'r_uox_spent', 'nuclide': [{'comp': '0.01', 'id': '922350000'}, {'comp': '0.94', 'id': '922380000'}, {'comp': '0.01', 'id': '942390000'}, {'comp': '0.001', 'id': '952410000'}, {'comp': '0.03', 'id': '551350000'}]}, {'basis': 'mass', 'name': 'r_mil_uox', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_mil_uox_spent', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9919', 'id': '922380000'}, {'comp': '0.001', 'id': '942390000'}]}, {'basis': 'mass', 'name': 'r_mil_heu', 'nuclide': [{'comp': '0.90', 'id': '922350000'}, {'comp': '0.10', 'id': '922380000'}]}]}}
|
[
"acaldwel@wellesley.edu"
] |
acaldwel@wellesley.edu
|
7239fe20e9a84b702b3c129e529ae7d77af73d46
|
0cab1425f18ea11e498278815ed2d95e26d15ace
|
/helper/helper_file.py
|
9ac77270fb042a05dcf7cf00ff88842a2d62d815
|
[] |
no_license
|
philgookang/pcr
|
1574a26160b83987234f3103e5b82eaf1bc5ac72
|
4fa28fecc3ca22b8b3315991e6505c7519de4fde
|
refs/heads/master
| 2023-08-08T00:47:03.508899
| 2019-12-19T05:11:02
| 2019-12-19T05:11:02
| 198,176,017
| 4
| 0
| null | 2023-07-22T11:30:00
| 2019-07-22T08:02:05
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
import torch
import pickle
import os
from PIL import Image, PSDraw, ImageDraw, ImageFont
from config import *
def save_dataset(filename, target):
with open(os.path.join(RESULT_DATASET_PATH, filename), "wb") as f:
pickle.dump(target, f)
def load_dataset(filename):
with open(os.path.join(RESULT_DATASET_PATH, filename), "rb") as f:
return pickle.load(f)
def save_model(model, filename):
torch.save(model.state_dict(), os.path.join(RESULT_MODEL_PATH, filename))
def create_image_caption(original, target, lst):
font = os.path.join(BASE_PATH, "rss", "RobotoRegular.ttf")
img = Image.open(original, 'r')
w, h = img.size
img = img.crop((0,0,w + 900,h))
draw = ImageDraw.Draw(img)
font = ImageFont.truetype(font, 20)
for no,txt in enumerate(lst):
draw.text((w + 10, 2 + (37*no)), txt, (255,255,255), font=font)
img.save(target)
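The caption layout above relies on Image.crop padding the area beyond the right edge with black, leaving an empty strip to draw text into. A self-contained sketch of the same trick using an in-memory image and PIL's default font (so it does not assume the RobotoRegular.ttf file referenced above):

from PIL import Image, ImageDraw

img = Image.new('RGB', (100, 60), (30, 30, 30))
w, h = img.size
# Cropping past the right edge pads the new region with black pixels.
img = img.crop((0, 0, w + 120, h))
draw = ImageDraw.Draw(img)
draw.text((w + 10, 2), "caption", (255, 255, 255))  # default bitmap font
assert img.size == (220, 60)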
|
[
"philgookang@gmail.com"
] |
philgookang@gmail.com
|
3952e84d0d040f47a58aa80ddbefce5462fcdebb
|
d67fd48fdfd237c27faa23bd0430a279f63fa25c
|
/pageobjects/frames_page.py
|
07c4302cb4a98152761153da2128c1c1ca0faff1
|
[] |
no_license
|
shreedora/pytestframework
|
25ae843876ddeb77d43de2bbd4a6672d6c4c3541
|
ceafe6c4d000e1c98174c3009b14c2ab115f2e4a
|
refs/heads/master
| 2023-07-21T01:28:53.380077
| 2019-08-15T15:34:53
| 2019-08-15T15:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
from generic.base import Base
class Frames:
pass
|
[
"khandepc@gmail.com"
] |
khandepc@gmail.com
|
ac1f040cb3738fe1771300341001c1a391bb690d
|
3ba8bca6c557936450a4fa08de9dea68f97681ec
|
/tests/test_trade.py
|
1f874ffc64c9718937d976db1a709d4b88db8198
|
[
"MIT"
] |
permissive
|
liuyang1/xalpha
|
08a801d360cf77a0057414e84f846fdf68388a2d
|
8672a7a11f2b2ded652cd37df0fc0db6dd6aac8a
|
refs/heads/master
| 2020-05-06T13:36:28.806387
| 2020-01-09T04:57:00
| 2020-01-09T04:58:50
| 180,146,115
| 0
| 1
| null | 2019-04-08T12:39:21
| 2019-04-08T12:39:21
| null |
UTF-8
|
Python
| false
| false
| 3,594
|
py
|
import sys
sys.path.insert(0, "../")
import xalpha as xa
import pytest
import pandas as pd
path = 'demo.csv'
cm = xa.fundinfo('164818')
statb = xa.record(path).status
cm_t = xa.trade(cm, statb)
def test_trade():
assert cm_t.cftable.loc[2, 'share'] == -129.14
assert round(cm_t.xirrrate('2018-03-03'), 3) == -0.24
assert cm_t.dailyreport('2018-07-29').iloc[0]['单位成本'] == 1.346
cm_t.v_tradecost('2018-08-01')
cm_t.v_totvalue('2018-07-31')
cm_t.v_tradevolume(freq='M')
def test_mul():
with pytest.raises(Exception) as excinfo:
cm_m = xa.mulfix(cm_t, totmoney=200)
assert str(excinfo.value) == 'You cannot sell first when you never buy'
with pytest.raises(Exception) as excinfo:
cm_m = xa.mulfix(cm_t, totmoney=300)
assert str(excinfo.value) == 'the initial total cash is too low'
cm_m = xa.mulfix(cm_t, totmoney=500)
cm_m.bcmkset(xa.indexinfo('1399971'), start='2016-09-28')
assert round(cm_m.xirrrate('2018-07-29'), 3) == -0.129
assert round(cm_m.sharpe('2018-07-30'), 3) == -1.734
cm_m.v_netvalue(benchmark=False)
assert round(cm_m.total_return('2018-07-01'), 3) == -0.209
assert round(cm_m.benchmark_volatility('2018-07-22'), 3) == 0.192
assert round(cm_m.max_drawdown('2018-08-01')[2], 2) == -0.24
cm_m.v_tradevolume()
def test_mulfix():
tot = xa.mulfix(status=statb, totmoney=5000)
assert tot.v_positions().options['legend'][0]['data'][1] == '富国中证红利指数增强'
assert tot.v_positions_history('2017-01-01').options['legend'][0]['data'][-1] == '货币基金'
assert round(tot.combsummary('2018-08-04').iloc[0]['投资收益率'], 1) == 1.0
eva = tot.evaluation()
assert round(eva.correlation_table(end='2018-07-30').iloc[2, 4], 3) == 0.095
def test_policy_buyandhold():
allin = xa.policy.buyandhold(cm, '2015-06-01')
cm_t2 = xa.trade(cm, allin.status)
cm_m2 = xa.mulfix(cm_t2)
cm_m2.bcmkset(xa.indexinfo('1399971'))
assert round(cm_m2.correlation_coefficient('2018-07-01'), 3) == 0.980
assert round(cm_m2.information_ratio('2016-07-01'), 3) == -0.385
allin.sellout('2018-06-01')
cm_t2 = xa.trade(cm, allin.status)
assert round(cm_t2.xirrrate('2019-08-12', guess=-0.9), 2) == -0.33
def test_policy_scheduled():
auto = xa.policy.scheduled(cm, 1000, pd.date_range('2015-07-01', '2018-07-01', freq='W-THU'))
cm_t3 = xa.trade(cm, auto.status)
cm_t3.v_tradevolume(freq='W')
assert round(cm_t3.dailyreport('2018-08-03').iloc[0]['投资收益率'], 2) == -42.07
auto2 = xa.policy.scheduled_tune(cm, 1000, pd.date_range('2015-07-01', '2018-07-01', freq='M'),
[(0.9, 2), (1.2, 1)])
def test_policy_grid():
gr = xa.policy.grid(cm, [0, 2, 2], [3, 3, 3], '2018-06-23', '2018-08-03')
tr = xa.trade(cm, gr.status)
assert round(tr.xirrrate('2018-07-13'), 2) == 11.78
def test_policy_indicator_cross():
cm.bbi()
techst = xa.policy.indicator_cross(cm, col=['netvalue', 'BBI'], start='2018-01-01', end='2018-07-07')
cm_tt = xa.trade(cm, techst.status)
assert round(cm_tt.dailyreport('2018-07-09').iloc[0].loc['换手率'], 1) == 14.1
def test_policy_indicator_points():
zz500 = xa.indexinfo('0000905')
zz500.psy()
st = xa.policy.indicator_points(zz500, col='PSYMA12', start='2017-01-01', buy=[(0.6, 1), (0.7, 1)],
sell=[(0.4, 1), (0.3, 1)], buylow=False)
zz500_t = xa.trade(zz500, st.status)
assert zz500_t.dailyreport('2018-05-01').iloc[0].loc['基金收益总额'] == -6302.26
|
[
"kcanamgal@foxmail.com"
] |
kcanamgal@foxmail.com
|
0e80cef9ff865563cc3fdfaa2defb466e14a28db
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/scikit-learn_test_results/files/107_lda_conflict.py
|
511968d49f04cad53c837abded54eb811fa06d03
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003
| 2018-06-08T00:40:53
| 2018-06-08T00:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,691
|
py
|
"""
LDA: Linear Discriminant Analysis
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg, ndimage
from .base import BaseEstimator, ClassifierMixin
class LDA(BaseEstimator, ClassifierMixin):
"""
Linear Discriminant Analysis (LDA)
Parameters
----------
n_components: int
Number of components (< n_classes - 1)
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from scikits.learn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(priors=None)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
QDA
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
def fit(self, X, y, store_covariance=False, tol=1.0e-4, **params):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in self.covariance_ attribute.
"""
self._set_params(**params)
X = np.asanyarray(X)
y = np.asanyarray(y)
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
n_samples = X.shape[0]
n_features = X.shape[1]
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
# 1) within (univariate) scaling by with classes std-dev
scaling = 1. / Xc.std(0)
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc * scaling)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (scaling * V.T[:, :rank].T).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
<<<<<<< HEAD
X = np.dot(((np.sqrt((n_samples * self.priors_)*fac)) *
(self.means_ - xbar).T).T, scaling)
=======
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
>>>>>>> remote
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.intercept_ = -0.5 * np.sum(self.coef_ ** 2, axis=1) + \
np.log(self.priors_)
self.classes = classes
return self
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
X = np.asanyarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def transform(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = np.asanyarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:, :n_comp].T) + self.intercept_
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""
This function return posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.min(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function return posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
loglikelihood = (values - values.min(axis=1)[:, np.newaxis])
normalization = np.logaddexp.reduce(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
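Note that predict_proba is a per-row softmax in disguise: subtracting the row minimum before exponentiating rescales numerator and denominator by the same factor, so it cancels. A minimal numpy check with made-up decision values (illustrative, not from a fitted model):

import numpy as np

values = np.array([[2.0, 0.5, -1.0],
                   [0.0, 3.0, 1.0]])
lik = np.exp(values - values.min(axis=1)[:, np.newaxis])
post = lik / lik.sum(axis=1)[:, np.newaxis]
ref = np.exp(values) / np.exp(values).sum(axis=1)[:, np.newaxis]
assert np.allclose(post, ref)             # the shift cancels
assert np.allclose(post.sum(axis=1), 1.0)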
|
[
"srhee4@cs.washington.edu"
] |
srhee4@cs.washington.edu
|
0e1c34ce545f90c85d2b94ad44a3e08011787ce8
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/lldptlvsd_54480213540819ecec7096f8f29c5c2f.py
|
88f8bc13bf5f03db36a81cf2871e05693a7dd3e2
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 4,952
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LldpTlvSd(Base):
"""LLDP System Description TLV.
The LldpTlvSd class encapsulates a required lldpTlvSd resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'lldpTlvSd'
def __init__(self, parent):
super(LldpTlvSd, self).__init__(parent)
@property
def Description(self):
"""Advertised Name/Description.
Returns:
str
"""
return self._get_attribute('description')
@Description.setter
def Description(self, value):
self._set_attribute('description', value)
@property
def ObjectId(self):
"""Unique identifier for this object
Returns:
str
"""
return self._get_attribute('objectId')
def update(self, Description=None):
"""Updates a child instance of lldpTlvSd on the server.
Args:
Description (str): Advertised Name/Description.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2:list, Arg3:enum)
Args:
args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to disable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to enable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
bd39d18338fdb9e9edfef7a78bef7d4241a5f37c
|
6a9f6e6527afb38611a5c695e5845e492d7676ff
|
/70.climbing-stairs.py
|
e67b2fdcd439792b906fbdf509241afcf5d78dae
|
[] |
no_license
|
Junyangz/leetcode
|
39913d48778d369f9f0a96070352d63b207e7df6
|
5de48fbdcb61ce8a437255a119a4a3ae242ede52
|
refs/heads/master
| 2020-06-09T08:57:26.361138
| 2019-11-14T08:38:07
| 2019-11-14T08:38:07
| 193,413,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
#
# @lc app=leetcode id=70 lang=python3
#
# [70] Climbing Stairs
#
from functools import lru_cache
class Solution:
@lru_cache(None)
def climbStairs(self, n: int) -> int:
if n <= 3: return n
return self.climbStairs(n-1) + self.climbStairs(n-2)
#dp
# if n <= 3: return n
# dp = [0] * (n + 2)
# dp[1] = 1
# dp[2] = 2
# for i in range(3, n+1):
# dp[i] = dp[i-1] + dp[i-2]
# return dp[n]
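The memoized recursion and the commented-out DP produce the same Fibonacci-style sequence; a quick cross-check appended for illustration (assumes the Solution class above):

def climb_dp(n):
    if n <= 3: return n
    a, b = 1, 2              # dp[1], dp[2]
    for _ in range(3, n + 1):
        a, b = b, a + b      # dp[i] = dp[i-1] + dp[i-2]
    return b

s = Solution()
assert all(s.climbStairs(n) == climb_dp(n) for n in range(1, 20))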
|
[
"junyangz.iie@gmail.com"
] |
junyangz.iie@gmail.com
|
1e226fa654fee1fa9f4158ccb50e5d5da67a9749
|
92a506dbb59475e4378c0ed9685d52e67dd9dacd
|
/test/project_tests/test_poisson1d.py
|
f1a3d689740bdacaaf9a4d85e84964ab67f6d17d
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
Parallel-in-Time/pyMG-2016
|
c2aca9e5778f40c2e89ee492d01277f7e021825e
|
abf3e47ba374c39fddd576fdaccf5187c1632f8a
|
refs/heads/master
| 2021-01-21T04:31:05.543828
| 2016-06-23T09:18:29
| 2016-06-23T09:18:29
| 53,484,378
| 2
| 11
|
BSD-2-Clause
| 2020-10-02T05:44:46
| 2016-03-09T09:16:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 943
|
py
|
import numpy as np
from project.poisson1d import Poisson1D
def test_has_spatial_order_of_accuracy():
expected_order = 2
k = 4
ntests = 6
ndofs = []
err_list = []
for i in range(ntests):
ndofs.append(2 ** (i + 4) - 1)
prob = Poisson1D(ndofs[-1])
xvalues = np.array([(i + 1) * prob.dx for i in range(prob.ndofs)])
uinit = np.sin(np.pi * k * xvalues)
uexact = (np.pi * k) ** 2 * uinit
ucomp = prob.A.dot(uinit)
err_list.append(np.linalg.norm(uexact - ucomp, np.inf) / np.linalg.norm(uexact, np.inf))
order = []
for i in range(1, len(err_list)):
order.append(np.log(err_list[i - 1] / err_list[i]) / np.log(ndofs[i] / ndofs[i - 1]))
order = np.array(order)
assert (order > expected_order * 0.9).all() and (order < expected_order * 1.1).all(), \
'Order of accuracy of the spatial discretization is not ' + str(expected_order)
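The order estimate in the loop is the usual log-ratio formula: for errors e1, e2 at resolutions n1 < n2, the observed order is log(e1/e2) / log(n2/n1). A two-point sketch with manufactured second-order errors (illustrative numbers only):

import numpy as np

n1, n2 = 15, 31
e1 = 1e-3
e2 = e1 * (n1 / n2) ** 2       # errors shrinking like h^2
order = np.log(e1 / e2) / np.log(n2 / n1)
assert abs(order - 2.0) < 1e-12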
|
[
"r.speck@fz-juelich.de"
] |
r.speck@fz-juelich.de
|
e2b9ab8b70a89a2eedddc4ecbf3c75754d27cbef
|
05afca01eccc6d35d20fe4925c5d4bd2c8161379
|
/chat_28374/wsgi.py
|
83f029c803529a9171dffe807aed0c501f86a27c
|
[] |
no_license
|
crowdbotics-apps/chat-28374
|
45261db0fc1d7da5da69536fd3ac776f7b3a0c12
|
74a2159530882d79c8937cb37d0ba285be0a5e7e
|
refs/heads/master
| 2023-06-05T13:41:15.892156
| 2021-07-02T01:01:10
| 2021-07-02T01:01:10
| 382,193,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for chat_28374 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chat_28374.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
06bbb14932b3dc339d410086b48bde70742732a1
|
9a081f87749245666f40402b389167f03375aa27
|
/CNN.py
|
2375fbef13b31125db4dfb4aee2799b62c2f2d95
|
[] |
no_license
|
reddytocode/tensorFlw
|
704e981dc39dfde0e0e9ddb892b484f5d95a111d
|
4c20d23cea87ab7fe086a3d619ea3ff779f478f7
|
refs/heads/master
| 2021-10-23T08:41:49.688089
| 2019-03-15T23:35:41
| 2019-03-15T23:35:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
import tensorflow as tf
import keras
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images.reshape(60000, 28, 28, 1)
training_images = training_images/255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images/255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(training_images, training_labels, epochs = 5)
test_loss, test_acc = model.evaluate(test_images, test_labels)  # evaluate returns [loss, accuracy] when metrics are set
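With the model trained above, single-image inference is one predict call; a short sketch assuming the variables already in scope:

import numpy as np

probs = model.predict(test_images[:1])   # one softmax row per input
print(np.argmax(probs, axis=1))          # most likely Fashion-MNIST class id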
|
[
"aaabeeelooon@gmail.com"
] |
aaabeeelooon@gmail.com
|
0930f0b6a519782da3bf9118c41a62cf0286bc68
|
7e8668e54d0ea55dc1e0185eff920a85af974fa6
|
/vendor-local/lib/python/celery/tests/tasks/test_states.py
|
4acf8aafeee7d8e35b1d92deeec6dab9aa94cdfa
|
[
"BSD-3-Clause"
] |
permissive
|
mozilla/firefox-flicks
|
3003cee1af0941976ef960a97a0806e19079cd79
|
ad19ed59aac682744badae6d19a149327037f293
|
refs/heads/master
| 2023-07-03T17:33:13.589884
| 2019-03-30T04:45:50
| 2019-03-30T04:45:50
| 3,148,994
| 3
| 6
|
BSD-3-Clause
| 2019-03-30T04:45:52
| 2012-01-10T21:52:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
from __future__ import absolute_import
from celery.states import state
from celery import states
from celery.tests.utils import Case
class test_state_precedence(Case):
def test_gt(self):
self.assertGreater(state(states.SUCCESS),
state(states.PENDING))
self.assertGreater(state(states.FAILURE),
state(states.RECEIVED))
self.assertGreater(state(states.REVOKED),
state(states.STARTED))
self.assertGreater(state(states.SUCCESS),
state('CRASHED'))
self.assertGreater(state(states.FAILURE),
state('CRASHED'))
self.assertFalse(state(states.REVOKED) > state('CRASHED'))
def test_lt(self):
self.assertLess(state(states.PENDING), state(states.SUCCESS))
self.assertLess(state(states.RECEIVED), state(states.FAILURE))
self.assertLess(state(states.STARTED), state(states.REVOKED))
self.assertLess(state('CRASHED'), state(states.SUCCESS))
self.assertLess(state('CRASHED'), state(states.FAILURE))
self.assertTrue(state(states.REVOKED) < state('CRASHED'))
self.assertTrue(state(states.REVOKED) <= state('CRASHED'))
self.assertTrue(state('CRASHED') >= state(states.REVOKED))
|
[
"mkelly@mozilla.com"
] |
mkelly@mozilla.com
|
6cb5ca73a3c4b544f93c530b04ebbfe82214ff41
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/46/usersdata/61/18544/submittedfiles/funcoes1.py
|
a3ae4f4e9c4225d28057fe39cceda6c1ea832ff0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def crescente(a):
cont=0
for i in range (0,len(a)-1,1):
if a[i]>a[i+1]:
cont=cont+1
if cont==0:
return True
else:
return False
def decresc(a):
    cont=0
    for i in range (0,len(a)-1,1):
        if a[i]<a[i+1]:
            cont=cont+1
    if cont==0:
        return True
    else:
        return False
a=[]
b=[]
c=[]
n=int(input("Digite um valor: "))
for i in range(0,n,1):
    a.append(int(input("Digite um número: ")))
if crescente(a):
    print("S")
else:
    print("N")
for i in range(0,n,1):
    b.append(int(input("Digite um número: ")))
if decresc(b):
    print("S")
else:
    print("N")
# write the remaining functions
# write the main program
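A quick self-test of the two predicates (an illustrative addition, separate from the graded program above):

assert crescente([1, 2, 3]) is True
assert crescente([3, 1, 2]) is False
assert decresc([3, 2, 1]) is True
assert decresc([1, 3, 2]) is False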
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
6bb5e25a7b62fe82c79b3eb82972f7a6e16d8953
|
6ae42d809532e463b2cef2a9648ececde501faa1
|
/MobileRevelator/python/android_facebook_batch.py
|
bb122e07248eac572f9dddd399ebe51edd8e1a60
|
[
"MIT"
] |
permissive
|
bkerler/MR
|
c52fa76a1ae46ebb637cac20925f8543235d85f3
|
5ba553fd0eb4c1d80842074a553119486f005822
|
refs/heads/master
| 2022-04-28T23:48:14.788560
| 2020-06-01T08:25:17
| 2020-06-01T08:25:17
| 168,991,623
| 140
| 35
|
MIT
| 2020-02-10T10:45:50
| 2019-02-03T20:46:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,399
|
py
|
#Pluginname="Facebook app_analytics (Android)"
#Type=App
import os
import struct
import json
import tempfile
def convertdata(filenames):
zfields=[]
for fsname in filenames:
print("Running Facebook conversion: " + fsname[fsname.rfind("/") + 1:])
filename=tempfile.gettempdir()+"/"+fsname[fsname.rfind("/")+1:]
if ctx.fs_file_extract(fsname,filename):
with open(filename, "rb") as ff:
try:
                    data = ff.read().decode("utf-8") + "]}"
jdata = json.loads(data)
except:
continue
row = 0
timestamp=""
uid=""
desc=""
if "time" in jdata:
timestamp = str(jdata["time"])
if "uid" in jdata:
uid=str(jdata["uid"])
if "data" in jdata:
fbdata=jdata["data"]
for subdata in fbdata:
if "extra" in subdata:
extra=subdata["extra"]
#if ("network_type" in extra) or ("battery" in extra) or ("connection" in extra) or ("text" in extra):
zfield = {}
zfield["ID"] = row
zfield["Filename"]=fsname
zfield["Type"] = "Generic"
if uid!="":
zfield["Contact"] = uid
else:
zfield["Contact"] = ""
zfield["Timestamp"] = timestamp
description = ""
if "suggestions_at_end_of_session" in extra:
zfield["Type"] = "Suggestions"
dt=extra["suggestions_at_end_of_session"]
for d in dt:
if "text" in d:
description += "suggestion: \"" + d["text"] + "\";"
if "dest_module_uri" in extra:
zfield["Type"] = "Uri"
if "dest_module_uri" in extra:
description+="dest_module_uri: "+extra["dest_module_uri"]+";"
if "click_point" in extra:
description+="click_point: "+extra["click_point"]+";"
if "source_module" in extra:
description+="source_module: "+extra["source_module"]+";"
if "video_id" in extra:
zfield["Type"] = "Video"
if "video_id" in extra:
description+="video_id: "+extra["video_id"]+";"
if "video_last_start_time_position" in extra:
description+="video_last_start_time_position: "+str(extra["video_last_start_time_position"])+";"
if "video_play_reason" in extra:
description+="video_play_reason: "+extra["video_play_reason"]+";"
if "video_time_position" in extra:
description+="video_time_position: "+str(extra["video_time_position"])+";"
if "network_type" in extra:
description+="network_type: "+extra["network_type"]+";"
if "network_subtype" in extra:
description+="network_subtype: "+extra["network_subtype"]+";"
if "connqual" in extra:
description+="connqual: "+extra["connqual"]+";"
if "was_backgrounded" in extra:
description+="was_backgrounded: "+str(extra["was_backgrounded"])+";"
if "airplane_mode_on" in extra:
description+="airplane_mode_on: "+str(extra["airplane_mode_on"])+";"
if "battery" in extra:
zfield["Type"] = "Battery"
if "battery" in extra:
description+="battery: "+str(extra["battery"])+";"
if "charge_state" in extra:
description+="charge_state: "+extra["charge_state"]+";"
if "battery_health" in extra:
description+="battery_health: "+extra["battery_health"]+";"
#description = json.dumps(extra, separators=(',',':'))
if (len(description)>1):
zfield["Other content"] = description
zfields.append(zfield)
row += 1
os.remove(filename)
rows = len(zfields)
# print(zfields)
    oldpos = 0
    for i in range(0, rows):
        zfield = zfields[i]
        newpos = int(i / rows * 100)
        if (oldpos < newpos):
            # only push a progress update when the percentage advances
            oldpos = newpos
            ctx.gui_setMainProgressBar(oldpos)
ctx.gui_set_data(i, 0, zfield["ID"])
ctx.gui_set_data(i, 1, zfield["Type"])
ctx.gui_set_data(i, 2, zfield["Contact"])
ctx.gui_set_data(i, 3, zfield["Timestamp"])
ctx.gui_set_data(i, 4, zfield["Other content"])
ctx.gui_set_data(i, 5, zfield["Filename"])
def main():
ctx.gui_setMainLabel("Facebook App Analytics: Parsing ...");
ctx.gui_setMainProgressBar(0)
headers = ["rowid (int)", "Type (QString)", "Contact (QString)", "Timestamp (int)", "Other_Content (QString)","Filename (QString)"]
ctx.gui_set_headers(headers)
filenames=ctx.pluginfilenames()
convertdata(filenames)
ctx.gui_update()
ctx.gui_setMainLabel("Status: Idle.")
ctx.gui_setMainProgressBar(0)
return "Finished running plugin."
|
[
"info@revskills.de"
] |
info@revskills.de
|
d83cd414c71770b0af62ab4617bc4f101a35b432
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/iMRN9YGK4mcYja9rY_23.py
|
75d7bb82d79759426baf19e08f08c025e9422e86
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def accumulating_product(lst):
if lst:
r=[lst[0]]
for i in range(1,len(lst)):
r.append(lst[i]*r[-1])
return r
else:
return []
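Equivalently, the standard library's itertools.accumulate with operator.mul gives a running product; a cross-check added for illustration:

from itertools import accumulate
from operator import mul

lst = [2, 3, 4]
assert accumulating_product(lst) == list(accumulate(lst, mul)) == [2, 6, 24]
assert accumulating_product([]) == []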
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2272e4858674584c2763baa58a85ba9c90c95123
|
795fcb8fca8d4d0b940e5b82e33803bb5f4cfe36
|
/soapbox/templatetags/soapbox.py
|
9431e47bd2d12c81c72efeb846ddb974b8c2a941
|
[
"BSD-3-Clause"
] |
permissive
|
callahad/django-soapbox
|
ef4fc36a4e539cb3008918e8adb08a67ef1d1b1d
|
f9189e1ddf47175f2392b92c7a0a902817ee1e93
|
refs/heads/master
| 2021-01-04T13:21:22.022457
| 2017-06-05T09:26:33
| 2017-06-05T09:26:33
| 240,570,323
| 0
| 0
|
BSD-3-Clause
| 2020-02-14T18:08:01
| 2020-02-14T18:08:00
| null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django import template
from ..models import Message
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_messages_for_page(context, url):
if url == context.template.engine.string_if_invalid:
return []
return Message.objects.match(url)
|
[
"james@b-list.org"
] |
james@b-list.org
|
34bb52a1936b775ded476143836f87df0c213fde
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/s1_3_getHtml_20210209165302.py
|
06251e4a9d3dadbfde199980f5e23866fdb8bc24
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818
| 2021-02-20T06:54:58
| 2021-02-20T06:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,481
|
py
|
# This module is called from 3R Automation Component.
import os
import sys
# pdftotree is available as part of the virtual environment for 3R Python processing
import pdftotree
import json
from pprint import pprint
import pdfminer
import matplotlib.pyplot as plt
import ocr_extract as imgpdf
from utils.ocr.handle_image import *
# pdf_doc = json.loads(sys.argv[1])['doc_name']
pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/images/PAN_Card_Scan_AKC.png'
# html_path = json.loads(sys.argv[1])['html_path']
html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'+os.path.basename(pdf_doc).split('.')[0] + '.html'
print(f'HTML Path is set to {html_path}')
path_if_not_pdf_doc = ''
pdf_doc_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf'
# Use the following for testing
# pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/Sri_khyati_CV.pdf'
# html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/Sri_khyati_CV.html'
def create_hocr(pdf_doc='', html_path='', model_path='./model/model.pkl'):
return pdftotree.parse(pdf_doc, html_path=html_path, model_type=None, model_path=model_path, visualize=False)
create_hocr_output = None
try:
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except pdfminer.pdfparser.PDFSyntaxError as pdfException:
create_hocr_output = pdfException
path_if_not_pdf_doc = pdf_doc
try:
# pdf_doc = extract_pdf_from_image(pdf_doc, pdf_path=pdf_doc_path, action=1, psm=11)
image, line_items_coordinates = mark_region(path_if_not_pdf_doc)
# load the original image
image = cv2.imread(path_if_not_pdf_doc)
# get co-ordinates to crop the image
c = line_items_coordinates[1]
# cropping image img = image[y0:y1, x0:x1]
img = image[c[0][1]:c[1][1], c[0][0]:c[1][0]]
plt.figure(figsize=(10,10))
plt.imshow(img)
# convert the image to black and white for better OCR
ret,thresh1 = cv2.threshold(img,120,255,cv2.THRESH_BINARY)
# pytesseract image to string to get results
text = str(pytesseract.image_to_string(thresh1, config='--psm 6'))
print(text)
convert_text_to_pdf(text, pdf_doc_path, os.path.basename(pdf_doc).split('.')[0])
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except Exception as exc:
    create_hocr_output = exc
    print(exc)
# extract_pdf_from_image(pdf_doc, pdf_path=pdf_doc_path, action=2, psm=6)
# Use the following for testing non PDF files
# print(f'{os.path.basename(pdf_doc).split(".")[0]+".pdf"}')
# print(f'{os.path.abspath(pdf_doc).split(".")[0]+".pdf"}')
# try:
# # imgpdf.convert_image_to_pdf(pdf_doc, os.path(pdf_doc)+os.path.basename(pdf_doc).split('.')[0]+'.pdf')
# imgpdf.convert_image_to_pdf(pdf_doc, os.path.dirname(pdf_doc), os.path.abspath(pdf_doc).split(".")[0]+".pdf")
# except Exception as exc:
# print(exc)
# Output of "print" statement is passed to the calling program
proc_status = "OK" if create_hocr_output == None else "Not a PDF document or unable to process image at path "+path_if_not_pdf_doc
json_out = {"pdf_doc": pdf_doc, "process_status": proc_status}
json_out = {"message": "We are testing/making some changes to this API, please try after in about 30 mins. Sorry for the inconvenience."}
print(json_out)
|
[
"{abhi@third-ray.com}"
] |
{abhi@third-ray.com}
|
e72c98ab5cea32846ce3c45803b0b82ff6c328ab
|
1c69aaf4ff5c9bbabd4e4e3486e3f442808f96ea
|
/models/r4/meta.py
|
ebcab05264342b9983abf18e75edef597276aa0e
|
[] |
no_license
|
glow-mdsol/devday-boston-clinical-research
|
72565289b27e9d6105640ec14749e07d7bc14014
|
560a8141bc3bd1ae5a31b110e82863e25b4ce9f8
|
refs/heads/master
| 2020-03-20T00:48:32.862642
| 2018-06-20T15:33:29
| 2018-06-20T15:33:29
| 137,056,522
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.3.0 (http://hl7.org/fhir/StructureDefinition/Meta) on 2018-05-12.
# 2018, SMART Health IT.
from . import element
class Meta(element.Element):
""" Metadata about a resource.
The metadata about a resource. This is content in the resource that is
maintained by the infrastructure. Changes to the content might not always
be associated with version changes to the resource.
"""
resource_type = "Meta"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.lastUpdated = None
""" When the resource version last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.profile = None
""" Profiles this resource claims to conform to.
List of `str` items. """
self.security = None
""" Security Labels applied to this resource.
List of `Coding` items (represented as `dict` in JSON). """
self.source = None
""" Identifies where the resource comes from.
Type `str`. """
self.tag = None
""" Tags applied to this resource.
List of `Coding` items (represented as `dict` in JSON). """
self.versionId = None
""" Version specific identifier.
Type `str`. """
super(Meta, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Meta, self).elementProperties()
js.extend([
("lastUpdated", "lastUpdated", fhirdate.FHIRDate, False, None, False),
("profile", "profile", str, True, None, False),
("security", "security", coding.Coding, True, None, False),
("source", "source", str, False, None, False),
("tag", "tag", coding.Coding, True, None, False),
("versionId", "versionId", str, False, None, False),
])
return js
from . import coding
from . import fhirdate
|
[
"glow@mdsol.com"
] |
glow@mdsol.com
|
3e6e0e70c38de7ddea9dc2f9b668c0b8f3e0bbdd
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/object_attr_get_none-110.py
|
2998259b825a549bf3b348da7f8b538e5dcc83a0
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
a = B()
print(a.a)
print(b.a)
print($Parameters)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
4de951b94deba051a37c1db4f7ece94e983a7c83
|
2b25aae9266437b657e748f3d6fea4db9e9d7f15
|
/graphics/3d/4/lawrence_lim/matrix.py
|
a041e2c30a8361efb4f61e085905e8ef8449ba7d
|
[] |
no_license
|
Zilby/Stuy-Stuff
|
b1c3bc23abf40092a8a7a80e406e7c412bd22ae0
|
5c5e375304952f62667d3b34b36f0056c1a8e753
|
refs/heads/master
| 2020-05-18T03:03:48.210196
| 2018-11-15T04:50:03
| 2018-11-15T04:50:03
| 24,191,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
import math
def make_bezier():
return [ [-1, 3,-3, 1],
[ 3,-6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0] ]
def make_hermite():
return [ [ 2,-3, 0, 1],
[-2, 3, 0, 0],
[ 1,-2, 1, 0],
[ 1,-1, 0, 0] ]
def generate_curve_coefs( p1, p2, p3, p4, t ):
pmat = [ [p1,p2,p3,p4] ]
return matrix_mult(t,pmat)
def make_translate( x, y, z ):
rmat = new_matrix(4,4)
rmat = ident(rmat)
rmat[3][0] = x
rmat[3][1] = y
rmat[3][2] = z
return rmat
def make_scale( x, y, z ):
rmat = new_matrix(4,4)
rmat[0][0] = x
rmat[1][1] = y
rmat[2][2] = z
rmat[3][3] = 1
return rmat
def make_rotX( theta ):
rmat = new_matrix(4,4)
rad = math.radians(theta)
rmat[1][1] = math.cos(rad)
rmat[2][2] = math.cos(rad)
rmat[1][2] = math.sin(rad)
rmat[2][1] = -math.sin(rad)
rmat[0][0] = 1
rmat[3][3] = 1
return rmat
def make_rotY( theta ):
rmat = new_matrix(4,4)
rad = math.radians(theta)
rmat[0][0] = math.cos(rad)
rmat[2][2] = math.cos(rad)
rmat[0][2] = math.sin(rad)
rmat[2][0] = -math.sin(rad)
rmat[1][1] = 1
rmat[3][3] = 1
return rmat
def make_rotZ( theta ):
rmat = new_matrix(4,4)
rad = math.radians(theta)
rmat[0][0] = math.cos(rad)
rmat[1][1] = math.cos(rad)
rmat[0][1] = math.sin(rad)
rmat[1][0] = -math.sin(rad)
rmat[2][2] = 1
rmat[3][3] = 1
return rmat
def new_matrix(rows = 4, cols = 4):
m = []
for c in range( cols ):
m.append( [] )
for r in range( rows ):
m[c].append( 0 )
return m
def print_matrix( matrix ):
s = ''
for r in range( len( matrix[0] ) ):
for c in range( len(matrix) ):
s+= str(matrix[c][r]) + ' '
s+= '\n'
print s
def print_matrix_vert( matrix ):
for c in matrix:
print c
def ident( matrix ):
idmat = new_matrix( len(matrix), len(matrix) )
for i in range( len( idmat ) ):
idmat[i][i] = 1
return idmat
def matrix_copy( src, dst ):
    for c in range( len(src) ):
        for r in range( len(src[0]) ):
            dst[c][r] = src[c][r]
def scalar_mult( matrix, x ):
for c in range( len(matrix) ):
for r in range( len( matrix[0] ) ):
matrix[c][r] *= x
#m1 * m2 -> m2
def matrix_mult( m1, m2 ):
rmat = new_matrix(len(m1[0]),len(m2))
for c in range( len(m2) ):
for r in range( len(m1[0]) ):
cell = 0
for i in range( len(m1) ):
cell += m1[i][r] * m2[c][i]
rmat[c][r] = cell
return rmat
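A small check of the column-major convention used throughout: matrices are lists of columns (indexed m[col][row]), and matrix_mult(m1, m2) applies m1 to points stored as columns of m2. Rotating the x unit vector 90 degrees about Z (a version-neutral sketch appended for illustration; the module itself still uses Python 2 print statements):

rot = make_rotZ(90)
point = [[1, 0, 0, 1]]            # one column: homogeneous point (1, 0, 0)
result = matrix_mult(rot, point)
x, y = result[0][0], result[0][1]
assert abs(x) < 1e-9 and abs(y - 1) < 1e-9   # maps to (0, 1, 0)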
|
[
"azilby@gmail.com"
] |
azilby@gmail.com
|
8f8adb948cbbabadfc351ff919f05caab0ebb239
|
3d83e5d6c5c3b264dbca94f2fedcd1abaf522278
|
/tests/test_wps_cmip5_regridder.py
|
aadc72a608fab27bdaf0c0cda2f46a48b42b16ef
|
[
"Apache-2.0"
] |
permissive
|
cp4cds/c4cds-wps
|
4767d779a2338d46d52f0c23bb89f0072928c482
|
5abd9281195548bbd1e7653fe2ab1fee26745200
|
refs/heads/master
| 2020-04-02T06:43:19.383112
| 2020-01-14T16:05:36
| 2020-01-14T16:05:36
| 154,164,988
| 0
| 0
|
NOASSERTION
| 2020-01-14T16:05:37
| 2018-10-22T15:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 578
|
py
|
from pywps import Service
from pywps.tests import assert_response_success
from . common import client_for, resource_file
from c4cds.processes.wps_cmip5_regridder import CMIP5Regridder
cfgfiles = [resource_file('test.cfg'), ]
def test_wps_cmip5_regridder():
client = client_for(Service(processes=[CMIP5Regridder()], cfgfiles=cfgfiles))
datainputs = "model=HadGEM2-ES"
resp = client.get(
service='WPS', request='Execute', version='1.0.0', identifier='cmip5_regridder',
datainputs=datainputs)
print(resp.data)
assert_response_success(resp)
|
[
"ehbrecht@dkrz.de"
] |
ehbrecht@dkrz.de
|
c6b598899b53d519f2fdb672ab2f909910a5485a
|
969fed6b9f4c0daa728bda52fea73d94bda6faad
|
/fakeSPS/spss5.py
|
750bdd7ae87c3d3ed6a837b471912130b45e52e3
|
[] |
no_license
|
ess-dmsc/essiip-fakesinqhw
|
7d4c0cb3e412a510db02f011fb9c20edfbd8a84f
|
ad65844c99e64692f07e7ea04d624154a92d57cd
|
refs/heads/master
| 2021-01-18T22:50:50.182268
| 2020-10-01T08:39:30
| 2020-10-01T08:39:30
| 87,077,121
| 0
| 0
| null | 2018-12-07T08:43:00
| 2017-04-03T13:28:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
#!/usr/bin/python
#
# fake SINQ SPS S5. This is a Siemens SPS S5 with a custom RS-232 interface and
# protocol as used at SINQ. The protocol is very simple. What is instrument
# specific is what happens when you set one of the digital inputs. Currently,
# only the AMOR case is implemented.
#
#
# Mark Koennecke, August 2016
#----------------------------------------------------------------------
from twisted.internet import reactor, protocol
from twisted.protocols.basic import LineReceiver
import time
import sys
class SPSS5(LineReceiver):
def __init__(self):
self.b1 = 1
self.b2 = 0
self.b3 = 0
self.b4 = 0
self.b5 = 5
self.b6 = 0
self.b7 = 7
self.b8 = 0
self.b9 = 0
self.b10 = 0
self.b11 = 0
self.b12 = 0
self.b13 = 0
self.b14 = 0
self.b15 = 0
self.b16 = 0
self.a1 = 1
self.a2 = 2
self.a3 = 3
self.a4 = 4
self.a5 = 5
self.a6 = 6
self.a7 = 7
self.a8 = 8
def write(self, data):
print "transmitted:", data
if self.transport is not None:
self.transport.write(data+'\n')
def lineReceived(self, data):
print "lineReceived:", data
data = data.lower().strip()
if data.startswith('r'):
self.write('R %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d %3.3d\r'
% (self.b1, self.b2,self.b3,self.b4,self.b5,self.b6,self.b7,self.b8,self.b9,self.b10,self.b11,
self.b12,self.b13,self.b14,self.b15,self.b16))
return
if data.startswith('a'):
self.write('A %5.5d %5.5d %5.5d %5.5d %5.5d %5.5d %5.5d %5.5d\r'
% (self.a1, self.a2,self.a3,self.a4,self.a5,self.a6,self.a7,self.a8))
return
if data.startswith('s'):
if len(data) < 5:
self.write('?PAR\r')
return
byte = int(data[1:4])
bit = int(data[4])
self.doPush(byte,bit)
self.write(data + '\r')
return
def doPush(self,byte,bit):
# shutter
if byte == 0 and bit == 0:
if self.b5 == 5:
self.b5 = 0
else:
self.b5 = 5
return
# laser light
if byte == 0 and bit == 1:
if self.b16 == 0:
self.b16 = 129
else:
self.b16 = 0
return
# RF flipper
if byte == 0 and bit == 7:
if self.b13 == 0:
self.b13 = 128
else:
self.b13 = 0
return
def main(argv):
if len(argv) > 1:
port = int(argv[1])
else:
port = 63000
factory = protocol.ServerFactory()
factory.protocol = SPSS5
reactor.listenTCP(port, factory)
reactor.run()
if __name__ == "__main__":
main(sys.argv)
|
[
"mark.koennecke@psi.ch"
] |
mark.koennecke@psi.ch
|
387830023b70ccdb90dd7ac0b468f571c24753f0
|
8ded32c55d5223654030d176e9df6acf0d9f8855
|
/mpikat/meerkat/fbfuse/fbfuse_feng_subscription_manager.py
|
d5e55f66b74d8673f94689c739bf3adcefb4a347
|
[
"MIT"
] |
permissive
|
TeepChairin/mpikat
|
d5afb738df69e6e4264aac8829a9d48b9aacbd93
|
464d76113c92e0e8a3106ccc05ef551a1427e582
|
refs/heads/master
| 2020-09-23T20:31:27.677733
| 2019-07-02T15:08:50
| 2019-07-02T15:08:50
| 225,580,332
| 0
| 0
|
MIT
| 2019-12-03T09:27:15
| 2019-12-03T09:27:14
| null |
UTF-8
|
Python
| false
| false
| 7,004
|
py
|
"""
Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import struct
import numpy as np
log = logging.getLogger("mpikat.fbfuse_feng_subscription_manager")
NSPINES = 16
NLEAVES = 4
MAX_SUBS_PER_LEAF = 4
HOST_TO_LEAF_MAP = {
"fbfpn00.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn01.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn02.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn03.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn04.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn05.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn06.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn07.mpifr-be.mkat.karoo.kat.ac.za": 0,
"fbfpn08.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn09.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn10.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn11.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn12.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn13.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn14.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn15.mpifr-be.mkat.karoo.kat.ac.za": 1,
"fbfpn16.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn17.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn18.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn19.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn20.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn21.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn22.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn23.mpifr-be.mkat.karoo.kat.ac.za": 2,
"fbfpn24.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn25.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn26.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn27.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn28.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn29.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn30.mpifr-be.mkat.karoo.kat.ac.za": 3,
"fbfpn31.mpifr-be.mkat.karoo.kat.ac.za": 3
}
class FengToFbfMapper(object):
def __init__(self, nspines=NSPINES, nleaves=NLEAVES,
max_subs_per_leaf=MAX_SUBS_PER_LEAF,
host_to_leaf_map=HOST_TO_LEAF_MAP):
self._h2l_map = host_to_leaf_map
self._nspines = nspines
self._max_subs_per_leaf = max_subs_per_leaf
self._subscriptions = np.zeros((nspines, nleaves))
self._subscription_sets = {}
def validate_ip_ranges(self, ip_ranges):
log.debug("Validating IP ranges")
for ip_range in ip_ranges:
if ip_range.count != 4:
log.error("Count for IP range was not 4")
raise Exception(
"All stream must span 4 consecutive multicast groups")
def group_to_spine(self, group):
subnet = struct.unpack("B"*4, group.packed)[-1]
return subnet % self._nspines
def worker_to_leaf(self, worker):
return self._h2l_map[worker.hostname]
def validate_workers(self, workers):
log.debug("Validating worker servers")
for worker in workers:
if worker.hostname not in self._h2l_map:
log.error(("Could not determine leaf switch ID "
"for worker server: {}").format(
worker.hostname))
raise Exception(
"Worker '{}' does not map to a leaf switch".format(
worker))
def subscribe(self, ordered_ip_ranges, available_workers, subarray_id):
log.debug("Determining safe F-engine subscriptions")
available_workers = available_workers[:]
self.validate_workers(available_workers)
self.validate_ip_ranges(ordered_ip_ranges)
if subarray_id in self._subscription_sets:
raise Exception(
"Subarray {} already has a subscription mapping".format(
subarray_id))
used_workers = []
unallocated_ranges = []
all_indexes = []
mapping = []
for ip_range in ordered_ip_ranges:
log.debug("Attempting to allocate range: {}".format(
ip_range.format_katcp()))
for worker in available_workers:
leaf_idx = self.worker_to_leaf(worker)
can_subscribe = True
indexes = []
for group in ip_range:
spine_idx = self.group_to_spine(group)
indexes.append((spine_idx, leaf_idx))
if self._subscriptions[spine_idx, leaf_idx] >= self._max_subs_per_leaf:
can_subscribe = False
if can_subscribe:
for x, y in indexes:
self._subscriptions[x, y] += 1
mapping.append((worker, ip_range))
all_indexes.extend(indexes)
available_workers.remove(worker)
used_workers.append(worker)
log.info("Allocated {} to {}".format(
ip_range.format_katcp(), worker))
break
else:
continue
            else:
                # for/else: no worker could take this range without pushing
                # some spine/leaf pair past max_subs_per_leaf
                log.warning("Unable to allocate {}".format(
                    ip_range.format_katcp()))
                unallocated_ranges.append(ip_range)
self._subscription_sets[subarray_id] = all_indexes
log.debug(self.render_spine_status())
return mapping, available_workers, unallocated_ranges
def unsubscribe(self, subarray_id):
log.debug("Removing subscriptions from subarray: {}".format(
subarray_id))
for x, y in self._subscription_sets[subarray_id]:
self._subscriptions[x, y] -= 1
del self._subscription_sets[subarray_id]
log.debug(self.render_spine_status())
def render_spine_status(self):
status = "Subscription count matrix:\n"
status += "Leaf: 0 | 1 | 2 | 3 \n"
status += "-----------------------\n"
for ii, row in enumerate(self._subscriptions):
status += "Spine {:02d}: {}\n".format(
ii, " | ".join(map(str, map(int, row))))
return status
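# --- Hedged usage sketch (not part of the original module) ---
# Exercises subscribe()/unsubscribe() with stand-in objects that expose
# only the attributes the mapper touches: a worker with a .hostname, and
# an IP range with .count, iteration over groups (each with .packed) and
# format_katcp(). _FakeWorker and _FakeRange are hypothetical helpers;
# Python 3's ipaddress module is assumed.
import ipaddress

class _FakeWorker(object):
    def __init__(self, hostname):
        self.hostname = hostname

class _FakeRange(object):
    def __init__(self, base, count=4):
        self._base = ipaddress.IPv4Address(base)
        self.count = count

    def __iter__(self):
        # Yield the 'count' consecutive multicast groups in the range
        return (self._base + ii for ii in range(self.count))

    def format_katcp(self):
        return "{}+{}".format(self._base, self.count - 1)

if __name__ == '__main__':
    mapper = FengToFbfMapper()
    workers = [_FakeWorker("fbfpn00.mpifr-be.mkat.karoo.kat.ac.za")]
    ranges = [_FakeRange("239.11.1.0")]
    mapping, spare, failed = mapper.subscribe(ranges, workers, "array_1")
    print(mapper.render_spine_status())   # four spine/leaf cells now at 1
    mapper.unsubscribe("array_1")         # counts return to zero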
|
[
"ewan.d.barr@googlemail.com"
] |
ewan.d.barr@googlemail.com
|
2edc2b5179347ab0c63389d5e6b5df02fa39f18f
|
fe62dbd83ac715d640e740e21bf68d9041baab31
|
/api/repository/repository.py
|
16d007ecaa4231c60d357e2aad26266b069d04df
|
[] |
no_license
|
chandler767/flask_api_example
|
ccf2d7ff13a69d1f9a823074e14d7f1073ed215e
|
a5525fbc537d12f1d2492a6174bf66a3662b9969
|
refs/heads/master
| 2021-01-18T10:19:27.456774
| 2016-05-25T00:40:52
| 2016-05-25T00:40:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
from ..database.database_model import *
## Repository layer will handle database transactions.
class UserRepository():
def signup():
pass
def get_userinfo_id(id):
pass
def get_userinfo_username(username):
pass
    def does_exist(username):
        # Returns 1 when the username is still available, 0 when taken.
        check = db.session.query(User).filter(User.username == username).first()
        if check is None:
            return 1
        else:
            return 0
class PostRepository():
def post():
pass
|
[
"joshuadparkin@gmail.com"
] |
joshuadparkin@gmail.com
|
51e1436fc9fa4d880c435775e809bd50dfb571fe
|
a045055cb41f7d53e1b103c3655a17dc4cd18d40
|
/python-master/kubernetes/test/test_policy_v1beta1_supplemental_groups_strategy_options.py
|
ef060d462ef5635af99dded85243dfd48b74b671
|
[] |
no_license
|
18271693176/copy
|
22f863b180e65c049e902de0327f1af491736e5a
|
ff2511441a2df03817627ba8abc6b0e213878023
|
refs/heads/master
| 2020-04-01T20:20:28.048995
| 2018-11-05T02:21:53
| 2018-11-05T02:21:53
| 153,599,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.policy_v1beta1_supplemental_groups_strategy_options import PolicyV1beta1SupplementalGroupsStrategyOptions
class TestPolicyV1beta1SupplementalGroupsStrategyOptions(unittest.TestCase):
""" PolicyV1beta1SupplementalGroupsStrategyOptions unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testPolicyV1beta1SupplementalGroupsStrategyOptions(self):
"""
Test PolicyV1beta1SupplementalGroupsStrategyOptions
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.policy_v1beta1_supplemental_groups_strategy_options.PolicyV1beta1SupplementalGroupsStrategyOptions()
pass
if __name__ == '__main__':
unittest.main()
|
[
"906317366@qq.com"
] |
906317366@qq.com
|
ec7a4da280ef858b1fa27fea0877fec4c261e8e5
|
6d63aa2f237c0d2662a71b2065026cebad56829f
|
/netease-musicbox-git/lilac.py
|
e466f53e2bda5ee027883177b90d7dd65eaa1518
|
[] |
no_license
|
MaskRay/repo
|
dbc407f5a30ac69d9aad27592e71337a4c194fe7
|
e8def5b474d55dfbabdefd20c64cf1a12e3d950c
|
refs/heads/master
| 2021-01-13T11:09:18.404283
| 2018-05-05T08:11:03
| 2018-05-05T08:11:03
| 77,249,750
| 0
| 0
| null | 2016-12-23T20:44:24
| 2016-12-23T20:44:24
| null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
#!/usr/bin/env python3
from lilaclib import *
build_prefix = 'extra-x86_64'
pre_build = vcs_update
def post_build():
git_add_files("PKGBUILD")
git_commit()
update_aur_repo()
if __name__ == '__main__':
single_main()
|
[
"farseerfc@gmail.com"
] |
farseerfc@gmail.com
|
28e13bc91b63fc55d62a0d4a6677303b7db657ad
|
6e8d58340f2be5f00d55e2629052c0bbc9dcf390
|
/eggs/mercurial-2.1.2-py2.6-linux-x86_64-ucs4.egg/mercurial/lock.py
|
cc2c533a2b2474daea45ead213c5e4f8b3c81b16
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
JCVI-Cloud/galaxy-tools-prok
|
e57389750d33ac766e1658838cdb0aaf9a59c106
|
3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c
|
refs/heads/master
| 2021-05-02T06:23:05.414371
| 2014-03-21T18:12:43
| 2014-03-21T18:12:43
| 6,092,693
| 0
| 2
|
NOASSERTION
| 2020-07-25T20:38:17
| 2012-10-05T15:57:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,642
|
py
|
# lock.py - simple advisory locking scheme for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, error
import errno, os, socket, time
import warnings
class lock(object):
'''An advisory lock held by one process to control access to a set
of files. Non-cooperating processes or incorrectly written scripts
can ignore Mercurial's locking scheme and stomp all over the
repository, so don't do that.
Typically used via localrepository.lock() to lock the repository
store (.hg/store/) or localrepository.wlock() to lock everything
else under .hg/.'''
# lock is symlink on platforms that support it, file on others.
# symlink is used because create of directory entry and contents
# are atomic even over nfs.
# old-style lock: symlink to pid
# new-style lock: symlink to hostname:pid
_host = None
def __init__(self, file, timeout=-1, releasefn=None, desc=None):
self.f = file
self.held = 0
self.timeout = timeout
self.releasefn = releasefn
self.desc = desc
self.postrelease = []
self.lock()
def __del__(self):
if self.held:
warnings.warn("use lock.release instead of del lock",
category=DeprecationWarning,
stacklevel=2)
# ensure the lock will be removed
# even if recursive locking did occur
self.held = 1
self.release()
def lock(self):
timeout = self.timeout
while True:
try:
self.trylock()
return 1
except error.LockHeld, inst:
if timeout != 0:
time.sleep(1)
if timeout > 0:
timeout -= 1
continue
raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
inst.locker)
def trylock(self):
if self.held:
self.held += 1
return
if lock._host is None:
lock._host = socket.gethostname()
lockname = '%s:%s' % (lock._host, os.getpid())
while not self.held:
try:
util.makelock(lockname, self.f)
self.held = 1
except (OSError, IOError), why:
if why.errno == errno.EEXIST:
locker = self.testlock()
if locker is not None:
raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
locker)
else:
raise error.LockUnavailable(why.errno, why.strerror,
why.filename, self.desc)
def testlock(self):
"""return id of locker if lock is valid, else None.
If old-style lock, we cannot tell what machine locker is on.
with new-style lock, if locker is on this machine, we can
see if locker is alive. If locker is on this machine but
not alive, we can safely break lock.
The lock file is only deleted when None is returned.
"""
locker = util.readlock(self.f)
try:
host, pid = locker.split(":", 1)
except ValueError:
return locker
if host != lock._host:
return locker
try:
pid = int(pid)
except ValueError:
return locker
if util.testpid(pid):
return locker
# if locker dead, break lock. must do this with another lock
# held, or can race and break valid lock.
try:
l = lock(self.f + '.break', timeout=0)
util.unlink(self.f)
l.release()
except error.LockError:
return locker
def release(self):
"""release the lock and execute callback function if any
        If the lock has been acquired multiple times, the actual release is
        delayed until the last release call."""
if self.held > 1:
self.held -= 1
elif self.held == 1:
self.held = 0
if self.releasefn:
self.releasefn()
try:
util.unlink(self.f)
except OSError:
pass
for callback in self.postrelease:
callback()
def release(*locks):
for lock in locks:
if lock is not None:
lock.release()
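# --- Hedged usage sketch (not part of the original module) ---
# Typical acquire/release pattern for the advisory lock above; the path,
# timeout and description below are illustrative values only. The
# constructor acquires the lock, so release() is paired via try/finally.
def _locked_update(lockfile):
    l = lock(lockfile, timeout=5, desc='demo lock')
    try:
        pass  # ... read and rewrite the files guarded by the lock ...
    finally:
        l.release()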
|
[
"root@ip-10-118-137-129.ec2.internal"
] |
root@ip-10-118-137-129.ec2.internal
|
066de1d07307922afb7ea23dfc46e85906ab1c9f
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r9/Gen/DecFiles/options/12113086.py
|
ce8a5b592a5552f40e8070fd8db1fbe8f91f91d7
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/12113086.py generated: Fri, 27 Mar 2015 16:10:07
#
# Event Type: 12113086
#
# ASCII decay Descriptor: [B+ -> K+ (Higgs0 -> mu+ mu-) ]cc
#
from Configurables import Generation
Generation().EventType = 12113086
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_KDarkBoson2MuMu,m=2000MeV,t=100ps,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
from Gauss.Configuration import *
from Configurables import LHCb__ParticlePropertySvc as ParticlePropertySvc
from Configurables import Gauss, PrintMCTree, PrintMCDecayTreeTool, HistogramPersistencySvc, NTupleSvc, DumpHepMCDecay, DumpHepMCTree, GaussMonitor__CheckLifeTimeHepMC, GaussMonitor__CheckLifeTimeMC, GiGa, GiGaPhysListModular, GiGaHiggsParticles, GenerationToSimulation, PythiaProduction
ParticlePropertySvc().Particles = [ "H_10 87 25 0.0 2.0 1.0000e-10 Higgs0 25 0.000000e+000" ]
ApplicationMgr().ExtSvc += [ ParticlePropertySvc() ]
gigaHiggsPart = GiGaHiggsParticles()
gigaHiggsPart.Higgses = ["H_10"] # H_10, H_20, H_30
GiGaPhysListModular("ModularPL").PhysicsConstructors += [ gigaHiggsPart ]#
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12113086
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
c9af53687ffb088e04769d9ad518028fef96b976
|
c8ae7695a26ec273a04e8043b7cf6fff8e1d6f71
|
/supervised_learning/0x11-attention/11-transformer.py
|
c85826c82e240f06cbaf00a0ad0414cdb76df3e6
|
[] |
no_license
|
xica369/holbertonschool-machine_learning
|
479e1c1675f8a256375bc65470233a261daf0039
|
4a7a8ff0c4f785656a395d0abf4f182ce1fef5bc
|
refs/heads/master
| 2020-12-22T00:04:16.702927
| 2020-10-08T21:32:44
| 2020-10-08T21:32:44
| 236,605,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
#!/usr/bin/env python3
"""
class Transformer
inherits from tensorflow.keras.Model
to create a transformer network
"""
import tensorflow as tf
Encoder = __import__('9-transformer_encoder').Encoder
Decoder = __import__('10-transformer_decoder').Decoder
class Transformer(tf.keras.Model):
"""
Transformer Network
"""
def __init__(self, N, dm, h, hidden, input_vocab, target_vocab,
max_seq_input, max_seq_target, drop_rate=0.1):
"""
Class constructor
- N: the number of blocks in the encoder and decoder
- dm: the dimensionality of the model
- h: the number of heads
- hidden: the number of hidden units in the fully connected layers
- input_vocab: the size of the input vocabulary
- target_vocab: the size of the target vocabulary
- max_seq_input: the maximum sequence length possible for the input
- max_seq_target: the maximum sequence length possible for the target
- drop_rate: the dropout rate
Public instance attributes:
- encoder: the encoder layer
- decoder: the decoder layer
- linear: a final Dense layer with target_vocab units
"""
super(Transformer, self).__init__()
self.encoder = Encoder(N, dm, h, hidden, input_vocab,
max_seq_input, drop_rate)
self.decoder = Decoder(N, dm, h, hidden, target_vocab,
max_seq_target, drop_rate)
self.linear = tf.keras.layers.Dense(target_vocab)
def call(self, inputs, target, training, encoder_mask, look_ahead_mask,
decoder_mask):
"""
- inputs: a tensor of shape (batch, input_seq_len, dm)
containing the inputs
- target: a tensor of shape (batch, target_seq_len, dm)
containing the target
- training: a boolean to determine if the model is training
- encoder_mask: the padding mask to be applied to the encoder
- look_ahead_mask: the look ahead mask to be applied to the decoder
- decoder_mask: the padding mask to be applied to the decoder
Returns:
a tensor of shape (batch, target_seq_len, target_vocab)
containing the transformer output
"""
# encoder_output.shape = (batch_size, inp_seq_len, d_model)
encoder_output = self.encoder(inputs, training, encoder_mask)
# decoder_output.shape = (batch_size, tar_seq_len, d_model)
decoder_output = self.decoder(target, encoder_output, training,
look_ahead_mask, decoder_mask)
output = self.linear(decoder_output)
return output
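# --- Hedged usage sketch (not part of the original module) ---
# One forward pass through a small Transformer, assuming the imported
# '9-transformer_encoder' and '10-transformer_decoder' modules are on the
# path and that the encoder/decoder embed integer token ids internally
# (as elsewhere in this project). Every size below is illustrative.
if __name__ == '__main__':
    model = Transformer(N=2, dm=128, h=4, hidden=256,
                        input_vocab=1000, target_vocab=1000,
                        max_seq_input=40, max_seq_target=40)
    inputs = tf.random.uniform((8, 30), maxval=1000, dtype=tf.int32)
    target = tf.random.uniform((8, 25), maxval=1000, dtype=tf.int32)
    output = model(inputs, target, True, None, None, None)
    print(output.shape)  # expected: (8, 25, 1000)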
|
[
"761@holbertonschool.com"
] |
761@holbertonschool.com
|
93a584d292f25e782b1154a567808936ade4b16a
|
6237a1d1aa61be65f1a71bb645ec6d8689f24715
|
/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/utilities/lambdify.py
|
81ba6af14496dd3d20f877144929107ab2954d76
|
[] |
no_license
|
Tkizzy/PythonistaAppTemplate
|
cf8485dc8e9e45c65af624916d71b38e2e11ce56
|
f196581cfee4a1c10168892a20ddcf16270c91b0
|
refs/heads/master
| 2021-01-16T00:17:04.628848
| 2016-03-13T08:15:35
| 2016-03-13T08:15:35
| 52,062,119
| 1
| 0
| null | 2016-02-19T04:48:36
| 2016-02-19T04:48:35
| null |
UTF-8
|
Python
| false
| false
| 16,291
|
py
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
from sympy.external import import_module
from sympy.core.compatibility import exec_, is_sequence, iterable, string_types
import inspect
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"Abs": "fabs",
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"Matrix": "matrix",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"Abs": "abs",
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Matrix": "matrix",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"Max": "amax",
"Min": "amin",
"oo": "inf",
"re": "real",
}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from sympy.mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
}
def _import(module, reload=False):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
def lambdify(args, expr, modules=None, printer=None, use_imps=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, SymPy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order. To change this behavior, the "modules"
argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted.
If you want to view the lambdified function or provide "sympy" as the
module, you should probably set dummify=False.
Usage
=====
(1) Use one of the provided modules:
>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>> import numpy
>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>> f = lambdify((x,y), tan(x*y), "numpy")
>> f(1, 2)
-2.18503986326
>> from numpy import array
>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use own dictionaries:
>> def my_cool_function(x): ...
>> dic = {"sin" : my_cool_function}
>> f = lambdify(x, sin(x), dic)
Now f would look like:
>> lambda x: my_cool_function(x)
Examples
========
>>> from sympy.utilities.lambdify import implemented_function, lambdify
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> f = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> f(1, 2)
Matrix([[1, 3]])
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
try:
_import("numpy")
except ImportError:
pass
else:
modules.insert(1, "numpy")
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
        #Try to extract symbols from the expression.
        #Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=True)
return eval(lstr, namespace)
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
flatten = lambda *n: (e for a in n for e in
(flatten(*a) if iterable(a) else (a,)))
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
if isinstance(args, Function):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
        except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
--------
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of sympy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
--------
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
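# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the dictionary-module mechanism from the lambdify
# docstring: a user-supplied mapping takes priority over the standard
# namespaces listed after it. my_sin is a hypothetical stand-in for a
# custom numerical routine.
if __name__ == '__main__':
    from sympy.abc import x
    from sympy import sin

    def my_sin(arg):
        return 0.0  # stub implementation

    f = lambdify(x, sin(x), modules=[{'sin': my_sin}, 'math'])
    print(f(1.0))  # 0.0 -- my_sin shadows math.sin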
|
[
"olezorn@gmx.net"
] |
olezorn@gmx.net
|
a36d44be1afafe9974039f5346e460075beb65c3
|
adea9fc9697f5201f4cb215571025b0493e96b25
|
/napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes_/delay_metric/state/__init__.py
|
71a2f010109896f0136ecf7c9f1861f51899b598
|
[
"Apache-2.0"
] |
permissive
|
andyjsharp/napalm-yang
|
d8a8b51896ef7c6490f011fe265db46f63f54248
|
ef80ebbfb50e188f09486380c88b058db673c896
|
refs/heads/develop
| 2021-09-09T02:09:36.151629
| 2018-03-08T22:44:04
| 2018-03-08T22:44:04
| 114,273,455
| 0
| 0
| null | 2018-03-08T22:44:05
| 2017-12-14T16:33:35
|
Python
|
UTF-8
|
Python
| false
| false
| 19,242
|
py
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-internal-reachability/prefixes/prefixes/delay-metric/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of delay-metric.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__metric','__flags',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
self.__flags = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'ipv4-internal-reachability', u'prefixes', u'prefixes', u'delay-metric', u'state']
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """metric must be of a type compatible with oc-isis-types:narrow-metric""",
'defined-type': "oc-isis-types:narrow-metric",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)""",
})
self.__metric = t
if hasattr(self, '_set'):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
YANG Description: ISIS Delay Metric Flags.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: ISIS Delay Metric Flags.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flags must be of a type compatible with isis-metric-flags""",
'defined-type': "openconfig-network-instance:isis-metric-flags",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)""",
})
self.__flags = t
if hasattr(self, '_set'):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
metric = __builtin__.property(_get_metric)
flags = __builtin__.property(_get_flags)
_pyangbind_elements = {'metric': metric, 'flags': flags, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-internal-reachability/prefixes/prefixes/delay-metric/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of delay-metric.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__metric','__flags',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
self.__flags = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'ipv4-internal-reachability', u'prefixes', u'prefixes', u'delay-metric', u'state']
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/metric (oc-isis-types:narrow-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: ISIS delay metric value. This metric measures the transit delay of
the associated circuit. It is an optional metric, which if assigned
to a circuit shall have a positive integral value. Higher values
indicate a longer transit delay.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """metric must be of a type compatible with oc-isis-types:narrow-metric""",
'defined-type': "oc-isis-types:narrow-metric",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)""",
})
self.__metric = t
if hasattr(self, '_set'):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..63']}), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:narrow-metric', is_config=False)
def _get_flags(self):
"""
Getter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
YANG Description: ISIS Delay Metric Flags.
"""
return self.__flags
def _set_flags(self, v, load=False):
"""
Setter method for flags, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_internal_reachability/prefixes/prefixes/delay_metric/state/flags (isis-metric-flags)
If this variable is read-only (config: false) in the
source YANG file, then _set_flags is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flags() directly.
YANG Description: ISIS Delay Metric Flags.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flags must be of a type compatible with isis-metric-flags""",
'defined-type': "openconfig-network-instance:isis-metric-flags",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)""",
})
self.__flags = t
if hasattr(self, '_set'):
self._set()
def _unset_flags(self):
self.__flags = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INTERNAL': {}, u'UNSUPPORTED': {}},)), is_leaf=False, yang_name="flags", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='isis-metric-flags', is_config=False)
metric = __builtin__.property(_get_metric)
flags = __builtin__.property(_get_flags)
_pyangbind_elements = {'metric': metric, 'flags': flags, }
|
[
"dbarrosop@dravetech.com"
] |
dbarrosop@dravetech.com
|
8654792c1a49543b79c8d56cef1ace102e942f3b
|
802040662d6b0978480f8c72e0bd91c8c08201a9
|
/clindmri/registration/fsl/__init__.py
|
13241d900a67e2e7dafbb9da9ea434bb656fcd66
|
[
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
neurospin/caps-clindmri
|
a07fa214f5b6f7adf0f0f0e558830727bd7087ac
|
3105d2b1e4458c3be398391436be54bf59949a34
|
refs/heads/master
| 2022-06-16T19:05:51.125370
| 2016-03-30T08:28:14
| 2016-03-30T08:28:14
| 38,047,302
| 0
| 10
|
NOASSERTION
| 2022-05-19T10:16:34
| 2015-06-25T12:14:17
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
#! /usr/bin/env python
##########################################################################
# NSAP - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
from .flirt import flirt
|
[
"antoine.grigis@cea.fr"
] |
antoine.grigis@cea.fr
|
4fd25df95f1c71fd0fc0a613ae9326102b596302
|
3693e668644ca63ac02d442ab93a63c4ca2ac125
|
/webfiles/models.py
|
321ae90a6255639a09f393dbe89e410f10c038ea
|
[] |
no_license
|
akumulouisa/cs-final
|
32e4c0965c0464e3fafd90f1f4c8c3fad220c7e1
|
5dc2eee8c251ede97f14370ddfe0f7e9c5410314
|
refs/heads/main
| 2023-06-10T06:56:13.124862
| 2021-07-07T05:32:34
| 2021-07-07T05:32:34
| 382,359,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,132
|
py
|
import sqlalchemy
from webfiles import db, login_manager
from webfiles import bcrypt
from flask_login import UserMixin
from sqlalchemy.sql import exists
from sqlalchemy.ext.automap import automap_base
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id=db.Column(db.Integer(), primary_key=True)
username=db.Column(db.String(length=30), nullable=False, unique=True)
email = db.Column(db.String(length=50), nullable=False, unique=True)
password_hash = db.Column(db.String(length=60), nullable=False)
creditcardamount = db.Column(db.Integer(),default=100000)
contractaccountbalance = db.Column(db.Integer(),default=100000)
    @property
    def password(self):
        # Plain-text passwords are never stored; expose the bcrypt hash
        # rather than recursing into this property.
        return self.password_hash
@password.setter
def password(self, plain_text_password):
        self.password_hash = bcrypt.generate_password_hash(plain_text_password).decode('utf-8')  # decode the bcrypt hash bytes to store as text
def check_password_correction(self,attempted_password):
return bcrypt.check_password_hash(self.password_hash,attempted_password)#returns true or false
def can_purchase(self,itemobj):
return self.creditcardamount>=itemobj.devamount
def can_purchasecontract(self,itemobjcontract):
return self.contractaccountbalance>=itemobjcontract.devamount
def can_purchaselaptop(self,itemobject):
return self.creditcardamount>=itemobject.lapamount
def can_purchasecontractlaptop(self,itemobjectcontract):
return self.contractaccountbalance>=itemobjectcontract.lapamount
def can_purchaseaudio(self,itemobjects):
return self.creditcardamount>=itemobjects.audioamount
def can_purchasecontractaudio(self,itemobjectscontract):
return self.contractaccountbalance>=itemobjectscontract.audioamount
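# --- Hedged usage sketch (not part of the original models) ---
# Shows the write-only password flow: assigning .password stores only a
# bcrypt hash, and check_password_correction() verifies against it. The
# helper and credentials are illustrative; run inside the Flask app
# context this package configures.
def _demo_password_flow():
    u = User(username='demo', email='demo@example.com')
    u.password = 'plain-text-secret'   # hashed by the setter above
    assert u.check_password_correction('plain-text-secret')
    assert not u.check_password_correction('wrong-guess')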
class Employee(db.Model, UserMixin):
id= db.Column(db.Integer(), primary_key=True,nullable=False)
username=db.Column(db.Text(), nullable = False, unique=True)
email = db.Column(db.Text(), nullable = False, unique=True)
password_hash = db.Column(db.Text(), nullable = False)
employeecode = db.Column(db.Text(), nullable = False)
    @property
    def password(self):
        # Plain-text passwords are never stored; expose the bcrypt hash
        # rather than recursing into this property.
        return self.password_hash
@password.setter
def password(self, plain_text_password):
        self.password_hash = bcrypt.generate_password_hash(plain_text_password).decode('utf-8')  # decode the bcrypt hash bytes to store as text
def check_password_correction(self,attempted_password):
return bcrypt.check_password_hash(self.password_hash,attempted_password)#returns true or false
Base = automap_base()
Base.prepare(db.engine, reflect = True)
Company = Base.classes.company
Model = Base.classes.model
class Devices(db.Model):
devid = db.Column(db.Integer(), primary_key=True)
class Smartphones(db.Model):
devid = db.Column(db.Integer(), db.ForeignKey('devices.devid'),primary_key=True)
devname = db.Column(db.Text(), nullable = False)
devcolor = db.Column(db.Text(), nullable = False)
devstorage = db.Column(db.Text(),nullable = False)
devamount = db.Column(db.Integer(), nullable = False)
owner = db.Column(db.Integer(), db.ForeignKey('user.id'))
def buy(self,user):
self.owner = user.id
user.creditcardamount -= self.devamount
db.session.commit()
def buycontract(self,user):
self.owner = user.id
user.contractaccountbalance -= self.devamount
db.session.commit()
class Laptops(db.Model):
devid = db.Column(db.Integer(), db.ForeignKey('devices.devid'),primary_key=True)
lapname = db.Column(db.Text(), nullable = False)
lapcolor = db.Column(db.Text(), nullable = False)
lapram = db.Column(db.Text(), nullable = False)
lapprocessor = db.Column(db.Text(), nullable = False)
lapstorage = db.Column(db.Text(),nullable = False)
lapamount = db.Column(db.Integer(), nullable = False)
lapowner = db.Column(db.Integer(), db.ForeignKey('user.id'))
def buylap(self,user):
self.lapowner = user.id
user.creditcardamount -= self.lapamount
db.session.commit()
def buylapcontract(self,user):
self.lapowner = user.id
user.contractaccountbalance -= self.lapamount
db.session.commit()
class Audio(db.Model):
devid = db.Column(db.Integer(), db.ForeignKey('devices.devid'),primary_key=True)
audioname = db.Column(db.Text(), nullable = False)
audiocolor = db.Column(db.Text(), nullable = False)
audioamount = db.Column(db.Integer(), nullable = False)
adowner = db.Column(db.Integer(), db.ForeignKey('user.id'))
def buyad(self,user):
self.adowner = user.id
user.creditcardamount -= self.audioamount
db.session.commit()
def buyadcontract(self,user):
self.adowner = user.id
user.contractaccountbalance -= self.audioamount
db.session.commit()
class Madeby(db.Model):
companyname = db.Column(db.Text(), db.ForeignKey('company.companyname'), primary_key=True, nullable = False)
    modelname = db.Column(db.Text(), db.ForeignKey('model.modelname'), primary_key=True, nullable = False)
class Deliverycompany(db.Model):
delid = db.Column(db.Integer(), primary_key=True,nullable = False)
deltrackno = db.Column(db.Integer(), primary_key=True,nullable = False)
delname = db.Column(db.Text(), nullable = False)
class Of(db.Model):
devid = db.Column(db.Integer(), db.ForeignKey('devices.devid'), primary_key=True, nullable = False)
modelname = db.Column(db.Text(), db.ForeignKey('model.modelname'), primary_key=True, nullable = False)
class Creditcard(db.Model):
creditcardid = db.Column(db.Integer(), primary_key=True, nullable = False)
creditcardnumber = db.Column(db.Integer(), unique=True , nullable = False)#like name of card
creditcardcode = db.Column(db.Integer(), nullable = False)#like password for card
creditcardservice = db.Column(db.Text(), nullable = False)
class Debitcard(db.Model):
debitid = db.Column(db.Integer(), primary_key=True, nullable = False)
creditcardid = db.Column(db.Integer(), db.ForeignKey('creditcard.creditcardid'),nullable = False)
regularuser = db.Column(db.Integer(), db.ForeignKey('user.id'), nullable = False)
datedebited = db.Column(db.Date(), nullable = False)
class Regularuser(db.Model):
regid = db.Column(db.Integer(), primary_key=True, nullable = False)
regularuser = db.Column(db.Integer(), db.ForeignKey('user.id'),nullable = False)
class Contractaccount(db.Model):
contractaccountid = db.Column(db.Integer(), primary_key=True, nullable = False)
password_hash = db.Column(db.Text(), nullable = False)
datecreated = db.Column(db.Date(), nullable = False)
names = db.Column(db.Text(), unique=True , nullable = False)
dateend = db.Column(db.Date(), nullable = False)
    @property
    def password(self):
        # Same write-only guard as Employee.password: returning self.password
        # here would recurse forever.
        raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, plain_text_password):
self.password_hash = bcrypt.generate_password_hash(plain_text_password).decode('utf-8')#decode generated password
def check_password_correction(self,attempted_password1):
return bcrypt.check_password_hash(self.password_hash,attempted_password1)
class Contractuser(db.Model):
contractno = db.Column(db.Integer(), primary_key=True, nullable = False)
contractuserid = db.Column(db.Integer(), db.ForeignKey('user.id'),nullable = False)
class Bill(db.Model):
billid = db.Column(db.Integer(), primary_key=True, nullable = False)
contractaccountid = db.Column(db.Integer(), db.ForeignKey('contractaccount.contractaccountid'), nullable = False)
contractuser = db.Column(db.Integer(), db.ForeignKey('user.id'), nullable = False)
datebilled = db.Column(db.Date(), nullable = False)
class Purchase(db.Model):
purchaseid = db.Column(db.Integer(), primary_key=True, nullable = False,autoincrement = True)
purchaseamount = db.Column(db.Integer(), nullable = False)
class Recordedin(db.Model):
devid = db.Column(db.Integer(), db.ForeignKey('devices.devid'), primary_key=True, nullable = False)
purchaseid = db.Column(db.Integer(), db.ForeignKey('purchase.purchaseid'), primary_key=True, nullable = False)
class Doneby(db.Model):
purchaseid = db.Column(db.Integer(), db.ForeignKey('purchase.purchaseid'), primary_key=True, nullable = False)
userid = db.Column(db.Integer(), db.ForeignKey('user.id'), primary_key=True, nullable = False)
class Sentto(db.Model):
purchaseid = db.Column(db.Integer(), db.ForeignKey('purchase.purchaseid'), primary_key=True, nullable = False)
delid = db.Column(db.Integer(), db.ForeignKey('deliverycompany.delid'), primary_key=True, nullable = False)
class Hevadaelectronics(db.Model):
deltrackno = db.Column(db.Integer(), nullable = False)
orderuserid = db.Column(db.Integer(), nullable = False)
orderid = db.Column(db.Integer(),primary_key=True, nullable = False)
orderdate = db.Column(db.Date(), nullable = False)
devid = db.Column(db.Integer(),db.ForeignKey('devices.devid'), nullable = False)
class Store(db.Model):
orderid = db.Column(db.Integer(), db.ForeignKey('hevadaelectronics.orderid'), primary_key=True, nullable = False)
deltrackno = db.Column(db.Integer(), db.ForeignKey('deliverycompany.deltrackno'),nullable = False)
class With(db.Model):
userid = db.Column(db.Integer(), db.ForeignKey('user.id'), nullable = False)
deliverdetailid = db.Column(db.Integer(), db.ForeignKey('userdeliverydetails.deliverdetailid'), primary_key=True, nullable = False)
class Userdeliverydetails(db.Model):
userdeliverid = db.Column(db.Integer(),nullable = False)
useraddress = db.Column(db.Text(), nullable = False)
deliverdetailid = db.Column(db.Integer(),primary_key=True, nullable = False)
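def example_purchase_flow(user):
    # Hedged usage sketch (not part of the original models): assumes an
    # active Flask application context and a User instance exposing the
    # `creditcardamount` balance that Smartphones.buy() debits above.
    # 'Example Phone' is a made-up device name.
    phone = Smartphones.query.filter_by(devname='Example Phone').first()
    if phone is not None and user.creditcardamount >= phone.devamount:
        phone.buy(user)  # assigns ownership and debits the credit card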
|
[
"admin@Admins-MacBook-Pro.local"
] |
admin@Admins-MacBook-Pro.local
|
246bbf69992559ed5836a1bd059223841ff94817
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02801/s643704920.py
|
e6c533210a3087a40548c3ca4a17dec387ba724e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
C=input()
A=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
for i in range(len(A)-1):
if A[i]==C:
print(A[i+1])
break
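def next_letter(c):
    # Alternative sketch: lowercase letters occupy consecutive code points,
    # so chr/ord can replace the list scan (assumes c is never 'z').
    return chr(ord(c) + 1)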
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5abd60fd92bb98ae630bdbd52647696582f27caa
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_091/ch39_2020_10_07_03_48_28_137004.py
|
0d991cb66ed650d17f8e64e0156902c94b3b9ed1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
d = 1
termos = 1
maior = 1
resultado = 1
while d < 1000:
a = d
termos = 1
while a != 1:
if a % 2 == 0:
a = a/2
termos += 1
else:
a = 3*a + 1
termos += 1
if a == 1:
if termos > maior:
maior = termos
resultado = d
d += 1
else:
d += 1
print(resultado)
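def collatz_len(n, cache={1: 1}):
    # Memoized variant sketch: caching chain lengths avoids recomputing the
    # shared tails of different sequences; the mutable default dict persists
    # across calls and acts as the cache.
    if n not in cache:
        cache[n] = 1 + collatz_len(n // 2 if n % 2 == 0 else 3 * n + 1)
    return cache[n]

# e.g. max(range(1, 1000), key=collatz_len) yields the same `resultado`.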
|
[
"you@example.com"
] |
you@example.com
|
35d2a07f62d4095ba2f43918c7ac2da2ecf3d934
|
37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7
|
/035.py
|
33832baca3cd3737190cce0c0e3ffe86590269e5
|
[] |
no_license
|
Jane11111/Leetcode2021
|
d9f4987792938597bf89ff72ba6bbcb4a3f9d081
|
a95b871578aae0103066962c33b8c0f4ec22d0f2
|
refs/heads/master
| 2023-07-14T21:29:41.196752
| 2021-08-23T03:28:02
| 2021-08-23T03:28:02
| 344,804,297
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021-03-02 13:43
# @Author : zxl
# @FileName: 035.py
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
i = 0
while i < len(nums) and nums[i]<target:
i += 1
return i
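def search_insert_binary(nums, target):
    # O(log n) alternative sketch: for a sorted list, bisect_left returns
    # the same insertion point as the linear scan above.
    from bisect import bisect_left
    return bisect_left(nums, target)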
|
[
"791057615@qq.com"
] |
791057615@qq.com
|
bd1567cacdd578097bce86eceb5a80609d8254db
|
5db44def243996321c33a9961de82b9d6f6aafd3
|
/rkmt/engines/converter.py
|
5698f65f1f53a058bb3f40ecb821ec6f3f2fe508
|
[
"MIT"
] |
permissive
|
BokyLiu/rknn-model-tools
|
fa010b17b0a1f35fdee5f29d47cb6bbceffd3bdd
|
8af9c062ea4955a76ba9986a6cab6f771c9e678a
|
refs/heads/master
| 2022-04-09T12:49:29.417800
| 2020-02-25T13:48:38
| 2020-02-25T13:48:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
#!/usr/bin/env python3
import os
import sys
import shutil
from rknn.api import RKNN
from rkmt.engines.base import BaseEngine
from rkmt.utils.util import check_success
class Converter(BaseEngine):
def __init__(self, opt) -> None:
super().__init__(opt)
# Create RKNN object
self.rknn = RKNN(opt.verbose)
def convert(self) -> None:
"""Convert models form other platforms into RKNN format."""
opt = self.opt
# Config model
print('--> Configuring model')
self.rknn.config(channel_mean_value=opt.channel_mean_value,
reorder_channel=opt.reorder_channel)
print('done')
# Load model
print('--> Loading model...')
if opt.platform == 'tensorflow':
ret = self.rknn.load_tensorflow(
tf_pb=opt.model_file_path,
inputs=opt.inputs,
outputs=opt.outputs,
input_size_list=opt.input_size_list)
elif opt.platform == 'tflite':
ret = self.rknn.load_tflite(model=opt.model_file_path)
elif opt.platform == 'caffe':
ret = self.rknn.load_caffe(model=opt.graph_file_path,
proto='caffe',
blobs=opt.model_file_path)
elif opt.platform == 'onnx':
ret = self.rknn.load_onnx(model=opt.model_file_path)
elif opt.platform == 'darknet':
ret = self.rknn.load_darknet(model=opt.graph_file_path,
weight=opt.model_file_path)
elif opt.platform == 'pytorch':
ret = self.rknn.load_pytorch(model=opt.model_file_path,
input_size_list=opt.input_size_list)
elif opt.platform == 'mxnet':
ret = self.rknn.load_mxnet(symbol=opt.graph_file_path,
params=opt.model_file_path,
input_size_list=opt.input_size_list)
else:
raise RuntimeError('Unsupported platform: {} !'.format(
opt.platform))
check_success(ret, 'load model failed.')
print('done')
# Build model
print('--> Building model')
ret = self.rknn.build(do_quantization=not opt.no_quantization,
pre_compile=not opt.no_pre_compile,
dataset=opt.dataset_file_path)
check_success(ret, 'build model failed.')
print('done')
# Analyse model
if not opt.no_quantization and opt.analyse_accuracy:
print('--> Analyse model')
analysis_results_dir = '/tmp/accuracy_analysis/{}'.format(opt.name)
if os.path.exists(analysis_results_dir):
shutil.rmtree(analysis_results_dir)
os.makedirs(analysis_results_dir, exist_ok=True)
ret = self.rknn.accuracy_analysis(
inputs=opt.dataset_for_analysis_file_path
or opt.dataset_file_path,
output_dir=analysis_results_dir,
calc_qnt_error=True)
check_success(ret, 'analyse model failed.')
print('done')
# Export RKNN model
print('--> Export RKNN model')
ret = self.rknn.export_rknn(opt.output_path)
check_success(ret, 'export model failed.')
print('done')
if __name__ == '__main__':
    # Minimal CLI sketch: build the option namespace Converter.convert()
    # expects; every value below is an assumption (un-quantized ONNX case).
    from types import SimpleNamespace
    opt = SimpleNamespace(
        verbose=False, name='model', platform='onnx', no_quantization=True,
        analyse_accuracy=False, channel_mean_value=None, reorder_channel=None,
        dataset_file_path=None, model_file_path=sys.argv[1],
        output_path=sys.argv[2],
        no_pre_compile=sys.argv[3] not in ['true', '1', 'True'])
    Converter(opt).convert()
|
[
"xxdsox@gmail.com"
] |
xxdsox@gmail.com
|
a0fd9f8124403e36d3014d05f4728d5c9eb92625
|
4a31bfe6ebbf6d474b0c05ae4db55183acee2c25
|
/run/gram_ctc/cnn/test.py
|
421536d06b4b2cbf61ea021689cb836af1aa5f35
|
[] |
no_license
|
musyoku/chainer-speech-recognition
|
3c1a939d259abf6ff41faff7a81d109b93407e7a
|
de83fc497ec3f629ff43431ef863d45e8a9cdf68
|
refs/heads/master
| 2021-01-21T19:12:34.873720
| 2017-09-25T07:49:39
| 2017-09-25T07:49:39
| 92,125,978
| 11
| 1
| null | 2017-06-28T07:00:14
| 2017-05-23T03:40:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,922
|
py
|
# coding: utf8
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import sys, argparse, time, cupy, math, os
import chainer
import numpy as np
import chainer.functions as F
from chainer import optimizers, cuda, serializers
sys.path.append("../../")
import config
from error import compute_minibatch_error
from dataset import wav_path_test, trn_path_test, cache_path, get_vocab, AugmentationOption, TestMinibatchIterator
from model import load_model
from util import stdout, print_bold
def main():
	# Load the data
vocab, vocab_inv, BLANK = get_vocab()
vocab_size = len(vocab)
	# Exclude anything for which a minibatch cannot be formed
	# Sized for a single GTX 1080
batchsizes = [96, 64, 64, 64, 64, 64, 64, 64, 48, 48, 48, 32, 32, 24, 24, 24, 24, 24, 24, 24, 24, 24]
augmentation = AugmentationOption()
if args.augmentation:
augmentation.change_vocal_tract = True
augmentation.change_speech_rate = True
augmentation.add_noise = True
model = load_model(args.model_dir)
assert model is not None
if args.gpu_device >= 0:
chainer.cuda.get_device(args.gpu_device).use()
model.to_gpu(args.gpu_device)
xp = model.xp
	# Test
with chainer.using_config("train", False):
iterator = TestMinibatchIterator(wav_path_test, trn_path_test, cache_path, batchsizes, BLANK, buckets_limit=args.buckets_limit, option=augmentation, gpu=args.gpu_device >= 0)
buckets_errors = []
for batch in iterator:
x_batch, x_length_batch, t_batch, t_length_batch, bucket_idx, progress = batch
if args.filter_bucket_id and bucket_idx != args.filter_bucket_id:
continue
sys.stdout.write("\r" + stdout.CLEAR)
sys.stdout.write("computing CER of bucket {} ({} %)".format(bucket_idx + 1, int(progress * 100)))
sys.stdout.flush()
y_batch = model(x_batch, split_into_variables=False)
y_batch = xp.argmax(y_batch.data, axis=2)
error = compute_minibatch_error(y_batch, t_batch, BLANK, print_sequences=True, vocab=vocab_inv)
while bucket_idx >= len(buckets_errors):
buckets_errors.append([])
buckets_errors[bucket_idx].append(error)
avg_errors = []
for errors in buckets_errors:
avg_errors.append(sum(errors) / len(errors))
sys.stdout.write("\r" + stdout.CLEAR)
sys.stdout.flush()
print_bold("bucket CER")
for bucket_idx, error in enumerate(avg_errors):
print("{} {}".format(bucket_idx + 1, error * 100))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-device", "-g", type=int, default=0)
parser.add_argument("--model-dir", "-m", type=str, default="model")
parser.add_argument("--buckets-limit", type=int, default=None)
parser.add_argument("--filter-bucket-id", type=int, default=None)
parser.add_argument("--seed", "-seed", type=int, default=0)
parser.add_argument("--augmentation", "-augmentation", default=False, action="store_true")
args = parser.parse_args()
main()
|
[
"musyoku@users.noreply.github.com"
] |
musyoku@users.noreply.github.com
|
32a051a44ceb309b3121ec4546c25eb2f786ead4
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/test/test_manhwa.py
|
a0e51954cc4efc701f2fe99e296fb3d938a402b6
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024
| 2020-05-12T23:22:54
| 2020-05-12T23:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,197
|
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import dbpedia
from dbpedia.models.manhwa import Manhwa # noqa: E501
from dbpedia.rest import ApiException
class TestManhwa(unittest.TestCase):
"""Manhwa unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Manhwa
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = dbpedia.models.manhwa.Manhwa() # noqa: E501
if include_optional :
return Manhwa(
previous_work = [
None
],
coden = [
'0'
],
translator = [
None
],
alternative_title = [
'0'
],
description = [
'0'
],
subsequent_work = [
None
],
chief_editor = [
None
],
music_composer = [
None
],
last_publication_date = [
'0'
],
type = [
'0'
],
lcc = [
'0'
],
lccn = [
'0'
],
main_character = [
None
],
id = '0',
literary_genre = [
None
],
based_on = [
None
],
first_publisher = [
None
],
first_publication_date = [
'0'
],
film_version = [
None
],
release_date = [
'0'
],
number_of_volumes = [
56
],
composer = [
None
],
author = [
None
],
preface_by = [
None
],
runtime = [
None
],
production_company = [
None
],
label = [
'0'
],
original_language = [
None
],
license = [
None
],
subject_term = [
'0'
],
original_title = [
'0'
],
circulation = [
56
],
oclc = [
'0'
],
producer = [
None
],
starring = [
None
],
completion_date = [
'0'
],
writer = [
None
],
magazine = [
None
]
)
else :
return Manhwa(
)
def testManhwa(self):
"""Test Manhwa"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
6bbd64d5e2f22efb216a8983e5526f60a4225e0c
|
6c05654182024f5e4590d5d5b428a4e9016e27b4
|
/InvenTree/build/test_build.py
|
b560a4f9c9b12d38b7a70f5a3a2ca8086c5109c0
|
[
"MIT"
] |
permissive
|
amishHammer/InvenTree
|
13fc53dd002dfbbbf14c616308966321e045594e
|
fc54a9d195d0ffa2308b9333e6fc376adde753d7
|
refs/heads/master
| 2021-07-08T00:17:37.316432
| 2020-11-06T14:51:25
| 2020-11-06T14:51:25
| 212,488,242
| 0
| 0
|
MIT
| 2020-10-27T17:48:43
| 2019-10-03T03:13:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 9,437
|
py
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from build.models import Build, BuildItem
from stock.models import StockItem
from part.models import Part, BomItem
from InvenTree import status_codes as status
class BuildTest(TestCase):
"""
Run some tests to ensure that the Build model is working properly.
"""
def setUp(self):
"""
Initialize data to use for these tests.
"""
# Create a base "Part"
self.assembly = Part.objects.create(
name="An assembled part",
description="Why does it matter what my description is?",
assembly=True,
trackable=True,
)
self.sub_part_1 = Part.objects.create(
name="Widget A",
description="A widget",
component=True
)
self.sub_part_2 = Part.objects.create(
name="Widget B",
description="A widget",
component=True
)
# Create BOM item links for the parts
BomItem.objects.create(
part=self.assembly,
sub_part=self.sub_part_1,
quantity=10
)
BomItem.objects.create(
part=self.assembly,
sub_part=self.sub_part_2,
quantity=25
)
# Create a "Build" object to make 10x objects
self.build = Build.objects.create(
title="This is a build",
part=self.assembly,
quantity=10
)
# Create some build output (StockItem) objects
self.output_1 = StockItem.objects.create(
part=self.assembly,
quantity=5,
is_building=True,
build=self.build
)
self.output_2 = StockItem.objects.create(
part=self.assembly,
quantity=5,
is_building=True,
build=self.build,
)
# Create some stock items to assign to the build
self.stock_1_1 = StockItem.objects.create(part=self.sub_part_1, quantity=1000)
self.stock_1_2 = StockItem.objects.create(part=self.sub_part_1, quantity=100)
self.stock_2_1 = StockItem.objects.create(part=self.sub_part_2, quantity=5000)
def test_init(self):
# Perform some basic tests before we start the ball rolling
self.assertEqual(StockItem.objects.count(), 5)
# Build is PENDING
self.assertEqual(self.build.status, status.BuildStatus.PENDING)
# Build has two build outputs
self.assertEqual(self.build.output_count, 2)
# None of the build outputs have been completed
for output in self.build.get_build_outputs().all():
self.assertFalse(self.build.isFullyAllocated(output))
self.assertFalse(self.build.isPartFullyAllocated(self.sub_part_1, self.output_1))
self.assertFalse(self.build.isPartFullyAllocated(self.sub_part_2, self.output_2))
self.assertEqual(self.build.unallocatedQuantity(self.sub_part_1, self.output_1), 50)
self.assertEqual(self.build.unallocatedQuantity(self.sub_part_1, self.output_2), 50)
self.assertEqual(self.build.unallocatedQuantity(self.sub_part_2, self.output_1), 125)
self.assertEqual(self.build.unallocatedQuantity(self.sub_part_2, self.output_2), 125)
self.assertFalse(self.build.is_complete)
def test_build_item_clean(self):
# Ensure that dodgy BuildItem objects cannot be created
stock = StockItem.objects.create(part=self.assembly, quantity=99)
        # Create a BuildItem which points to an invalid StockItem
b = BuildItem(stock_item=stock, build=self.build, quantity=10)
with self.assertRaises(ValidationError):
b.save()
# Create a BuildItem which has too much stock assigned
b = BuildItem(stock_item=self.stock_1_1, build=self.build, quantity=9999999)
with self.assertRaises(ValidationError):
b.clean()
# Negative stock? Not on my watch!
b = BuildItem(stock_item=self.stock_1_1, build=self.build, quantity=-99)
with self.assertRaises(ValidationError):
b.clean()
# Ok, what about we make one that does *not* fail?
b = BuildItem(stock_item=self.stock_1_1, build=self.build, install_into=self.output_1, quantity=10)
b.save()
def test_duplicate_bom_line(self):
# Try to add a duplicate BOM item - it should fail!
with self.assertRaises(IntegrityError):
BomItem.objects.create(
part=self.assembly,
sub_part=self.sub_part_1,
quantity=99
)
def allocate_stock(self, q11, q12, q21, output):
# Assign stock to this build
if q11 > 0:
BuildItem.objects.create(
build=self.build,
stock_item=self.stock_1_1,
quantity=q11,
install_into=output
)
if q12 > 0:
BuildItem.objects.create(
build=self.build,
stock_item=self.stock_1_2,
quantity=q12,
install_into=output
)
if q21 > 0:
BuildItem.objects.create(
build=self.build,
stock_item=self.stock_2_1,
quantity=q21,
install_into=output,
)
# Attempt to create another identical BuildItem
b = BuildItem(
build=self.build,
stock_item=self.stock_2_1,
quantity=q21
)
with self.assertRaises(ValidationError):
b.clean()
def test_partial_allocation(self):
"""
Partially allocate against output 1
"""
self.allocate_stock(50, 50, 200, self.output_1)
self.assertTrue(self.build.isFullyAllocated(self.output_1))
self.assertFalse(self.build.isFullyAllocated(self.output_2))
self.assertTrue(self.build.isPartFullyAllocated(self.sub_part_1, self.output_1))
self.assertTrue(self.build.isPartFullyAllocated(self.sub_part_2, self.output_1))
self.assertFalse(self.build.isPartFullyAllocated(self.sub_part_1, self.output_2))
self.assertFalse(self.build.isPartFullyAllocated(self.sub_part_2, self.output_2))
# Check that the part has been allocated
self.assertEqual(self.build.allocatedQuantity(self.sub_part_1, self.output_1), 100)
self.build.unallocateStock(output=self.output_1)
self.assertEqual(BuildItem.objects.count(), 0)
# Check that the part has been unallocated
self.assertEqual(self.build.allocatedQuantity(self.sub_part_1, self.output_1), 0)
def test_auto_allocate(self):
"""
Test auto-allocation functionality against the build outputs
"""
allocations = self.build.getAutoAllocations(self.output_1)
self.assertEqual(len(allocations), 1)
self.build.autoAllocate(self.output_1)
self.assertEqual(BuildItem.objects.count(), 1)
# Check that one part has been fully allocated to the build output
self.assertTrue(self.build.isPartFullyAllocated(self.sub_part_2, self.output_1))
# But, the *other* build output has not been allocated against
self.assertFalse(self.build.isPartFullyAllocated(self.sub_part_2, self.output_2))
def test_cancel(self):
"""
Test cancellation of the build
"""
# TODO
"""
self.allocate_stock(50, 50, 200, self.output_1)
self.build.cancelBuild(None)
self.assertEqual(BuildItem.objects.count(), 0)
"""
pass
def test_complete(self):
"""
Test completion of a build output
"""
self.allocate_stock(50, 50, 250, self.output_1)
self.allocate_stock(50, 50, 250, self.output_2)
self.assertTrue(self.build.isFullyAllocated(self.output_1))
self.assertTrue(self.build.isFullyAllocated(self.output_2))
self.build.completeBuildOutput(self.output_1, None)
self.assertFalse(self.build.can_complete)
self.build.completeBuildOutput(self.output_2, None)
self.assertTrue(self.build.can_complete)
self.build.complete_build(None)
self.assertEqual(self.build.status, status.BuildStatus.COMPLETE)
# the original BuildItem objects should have been deleted!
self.assertEqual(BuildItem.objects.count(), 0)
# New stock items should have been created!
self.assertEqual(StockItem.objects.count(), 4)
A = StockItem.objects.get(pk=self.stock_1_1.pk)
# This stock item has been depleted!
with self.assertRaises(StockItem.DoesNotExist):
StockItem.objects.get(pk=self.stock_1_2.pk)
C = StockItem.objects.get(pk=self.stock_2_1.pk)
# Stock should have been subtracted from the original items
self.assertEqual(A.quantity, 900)
self.assertEqual(C.quantity, 4500)
# And 10 new stock items created for the build output
outputs = StockItem.objects.filter(build=self.build)
self.assertEqual(outputs.count(), 2)
for output in outputs:
self.assertFalse(output.is_building)
|
[
"oliver.henry.walters@gmail.com"
] |
oliver.henry.walters@gmail.com
|
6256896c95eb5d80d7e6e914a3902189aa23c3a6
|
dfab6798ece135946aebb08f93f162c37dd51791
|
/core/aokuang/aokuang/core/actors/htmldocument/Basic.py
|
7a2947db8a0a531affbd985cdd932ba6a3fffc75
|
[] |
no_license
|
yxqd/luban
|
405f5f7dcf09015d214079fe7e23d644332be069
|
00f699d15c572c8bf160516d582fa37f84ac2023
|
refs/heads/master
| 2020-03-20T23:08:45.153471
| 2012-05-18T14:52:43
| 2012-05-18T14:52:43
| 137,831,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import luban
from ....DemoPanelActor import Actor as base
class Actor(base):
title='A html document'
description = [
]
def createDemoPanel(self, **kwds):
text = '''
<h1>Title here</h1>
<p>
Some more items
</p>
<ul>
<li> a </li>
<li> b </li>
</ul>
<p>a paragraph with a <a href="http://a.b.com" target="_blank">link</a> </p>
<p>©</p>
'''
return luban.e.htmldocument(text=text)
# End of file
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
e05684095f5f0f3229aa98005b7dd348d30b3d27
|
fdef3562ae7ef1d4bccdfe8f44173308ca40476a
|
/user/apps.py
|
54252ad03bba63e4c93b8b31cc8588abdfaf2f96
|
[
"MIT"
] |
permissive
|
leeyoshinari/TestPlatform
|
72abb5cbc5c081b940b1754319c18120f8147d49
|
07011d22d0b525abb1dfc0667d3af4018de3e4c8
|
refs/heads/master
| 2020-11-26T12:16:05.548741
| 2020-10-18T15:26:42
| 2020-10-18T15:26:42
| 229,067,589
| 3
| 2
|
MIT
| 2020-10-18T15:26:43
| 2019-12-19T14:08:03
|
Python
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
from django.apps import AppConfig
class UserConfig(AppConfig):
name = 'user'
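# Hedged usage note: Django only loads this config when the app is listed in
# settings.INSTALLED_APPS, e.g.:
#     INSTALLED_APPS = [..., 'user.apps.UserConfig']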
|
[
"lee_jc@outlook.com"
] |
lee_jc@outlook.com
|
4935ff00986227c99934d3901139ca001f13bea9
|
f0157f79b147d3ecd9c5aba32c4dc6c6ac48e43b
|
/emop/emop_run.py
|
1bbf3d2793fd4b17ec8bc4f6509a2ae91dec1f3a
|
[] |
no_license
|
idhmc-tamu/emop-controller
|
5bdea6454da588f9b46161cd925e41f8059230ed
|
b5e299d92afa4ef187a5a80f0970de8b849ce6d5
|
refs/heads/master
| 2021-01-19T06:05:55.359480
| 2015-03-27T02:52:59
| 2015-03-27T02:52:59
| 28,823,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,669
|
py
|
import json
import logging
import signal
import sys
from emop.lib.emop_base import EmopBase
from emop.lib.emop_payload import EmopPayload
from emop.lib.emop_job import EmopJob
from emop.lib.emop_scheduler import EmopScheduler
from emop.lib.processes.tesseract import Tesseract
from emop.lib.processes.xml_to_text import XML_To_Text
from emop.lib.processes.denoise import Denoise
from emop.lib.processes.multi_column_skew import MultiColumnSkew
from emop.lib.processes.page_evaluator import PageEvaluator
from emop.lib.processes.page_corrector import PageCorrector
from emop.lib.processes.juxta_compare import JuxtaCompare
from emop.lib.processes.retas_compare import RetasCompare
logger = logging.getLogger('emop')
job_ids = []
instance = None
def signal_exit(signum, frame):
"""Signal handler
This function will mark all non-completed jobs as failed
and exit. This is intended to catch SIGUSR1 signals that indicate
a job is nearing its time limit.
"""
for job_id in job_ids:
if job_id not in instance.jobs_completed:
results = "%s JOB %s: time limit reached" % (instance.scheduler.name, instance.scheduler.job_id)
logger.error(results)
instance.jobs_failed.append({"id": job_id, "results": results})
current_results = instance.get_results()
instance.payload.save_output(data=current_results, overwrite=True)
sys.exit(1)
class EmopRun(EmopBase):
def __init__(self, config_path, proc_id):
""" Initialize EmopRun object and attributes
Args:
config_path (str): path to application config file
proc_id (str or int): proc-id of this run
"""
        super(EmopRun, self).__init__(config_path)
self.proc_id = proc_id
self.payload = EmopPayload(self.settings, proc_id)
self.scheduler = EmopScheduler.get_scheduler_instance(name=self.settings.scheduler, settings=self.settings)
self.results = {}
self.jobs_completed = []
self.jobs_failed = []
self.page_results = []
self.postproc_results = []
def append_result(self, job, results, failed=False):
"""Append a page's results to job's results payload
The results are saved to the output JSON file so that the status
of each page is saved upon failure or success.
Args:
job (EmopJob): EmopJob object
results (str): The error output of a particular process
failed (bool, optional): Sets if the result is a failure
"""
if failed:
results_ext = "%s JOB %s: %s" % (self.scheduler.name, self.scheduler.job_id, results)
logger.error(results_ext)
self.jobs_failed.append({"id": job.id, "results": results_ext})
else:
self.jobs_completed.append(job.id)
# TODO: Do we need to handle adding page_results and postproc_results differently??
if job.page_result.has_data():
self.page_results.append(job.page_result.to_dict())
if job.postproc_result.has_data():
self.postproc_results.append(job.postproc_result.to_dict())
current_results = self.get_results()
self.payload.save_output(data=current_results, overwrite=True)
def get_results(self):
"""Get this object's results
Returns:
dict: Results to be used as payload to API
"""
job_queues_data = {
"completed": self.jobs_completed,
"failed": self.jobs_failed,
}
data = {
"job_queues": job_queues_data,
"page_results": self.page_results,
"postproc_results": self.postproc_results,
}
return data
@EmopBase.run_timing
def do_process(self, obj, job, **kwargs):
""" Run a process
This function is intended to handle calling and getting the
success or failure of a job's post process.
If a process does not return an exitcode of 0 then a failure has occurred
and the stderr is added to the job's results.
Args:
obj (object): The class of a process
job (EmopJob): EmopJob object
**kwargs: Arbitrary keyword arguments.
Returns:
bool: True if successful, False otherwise.
"""
klass = obj.__class__.__name__
if self.settings.controller_skip_existing and not obj.should_run():
logger.info("Skipping %s job [%s]" % (klass, job.id))
return True
result = obj.run(**kwargs)
if result.exitcode != 0:
err = "%s Failed: %s" % (klass, result.stderr)
# TODO need to rework so failed doesn't mean done
self.append_result(job=job, results=err, failed=True)
return False
else:
return True
@EmopBase.run_timing
def do_ocr(self, job):
"""Run the OCR
The actual OCR class is called from here. Based on the value
of the ocr_engine, a different class will be called.
The ocr_results returned by the OCR class are used to determine if
the ocr was successful and the results are appended to global results.
Args:
job (EmopJob): EmopJob object
Returns:
bool: True if successful, False otherwise.
"""
logger.info(
"Got job [%s] - Batch: %s JobType: %s OCR Engine: %s" %
(job.id, job.batch_job.name, job.batch_job.job_type, job.batch_job.ocr_engine)
)
# OCR #
ocr_engine = job.batch_job.ocr_engine
if ocr_engine == "tesseract":
ocr = Tesseract(job=job)
else:
ocr_engine_err = "OCR with %s not yet supported" % ocr_engine
self.append_result(job=job, results=ocr_engine_err, failed=True)
return False
if self.settings.controller_skip_existing and not ocr.should_run():
logger.info("Skipping OCR job [%s]" % job.id)
return True
ocr_result = ocr.run()
if ocr_result.exitcode != 0:
ocr_err = "%s OCR Failed: %s" % (ocr_engine, ocr_result.stderr)
self.append_result(job=job, results=ocr_err, failed=True)
return False
else:
return True
def do_postprocesses(self, job):
"""Run the post processes
Each post process class is called from here.
Currently the steps are executed in the following order:
* Denoise
* MultiColumnSkew
* XML_To_Text
* PageEvaluator
* PageCorrector
* JuxtaCompare (postprocess)
* JuxtaCompare - COMMENTED OUT
* RetasCompare (postprocess)
* RetasCompare - COMMENTED OUT
If any step fails, the function terminates and returns False.
Args:
job (EmopJob): EmopJob object
Returns:
bool: True if successful, False otherwise.
"""
# DeNoise #
denoise = Denoise(job=job)
denoise_proc = self.do_process(obj=denoise, job=job)
if not denoise_proc:
return False
# MultiColumnSkew #
if self.settings.multi_column_skew_enabled:
multi_column_skew = MultiColumnSkew(job=job)
multi_column_skew_proc = self.do_process(obj=multi_column_skew, job=job)
if not multi_column_skew_proc:
return False
# _IDHMC.xml to _IDHMC.txt #
xml_to_text = XML_To_Text(job=job)
xml_to_text_proc = self.do_process(obj=xml_to_text, job=job)
if not xml_to_text_proc:
return False
# PageEvaluator #
page_evaluator = PageEvaluator(job=job)
page_evaluator_proc = self.do_process(obj=page_evaluator, job=job)
if not page_evaluator_proc:
return False
# PageCorrector #
page_corrector = PageCorrector(job=job)
page_corrector_proc = self.do_process(obj=page_corrector, job=job)
if not page_corrector_proc:
return False
# JuxtaCompare postprocess and OCR output #
juxta_compare = JuxtaCompare(job=job)
juxta_compare_proc_pp = self.do_process(obj=juxta_compare, job=job, postproc=True)
if not juxta_compare_proc_pp:
return False
# juxta_compare_proc = self.do_process(obj=juxta_compare, job=job, postproc=False)
# if not juxta_compare_proc:
# return False
# RetasCompare postprocess and OCR output #
# retas_compare = RetasCompare(job=job)
# retas_compare_proc_pp = self.do_process(obj=retas_compare, job=job, postproc=True)
# if not retas_compare_proc_pp:
# return False
# retas_compare_proc = self.do_process(obj=retas_compare, job=job, postproc=False)
# if not retas_compare_proc:
# return False
return True
@EmopBase.run_timing
def do_job(self, job):
"""Execute the parts of a page's job
Args:
job (EmopJob): EmopJob object
Returns:
bool: True if successful, False otherwise.
"""
if not self.do_ocr(job=job):
return False
if not self.do_postprocesses(job=job):
return False
return True
@EmopBase.run_timing
def run(self, force=False):
"""Run the EmopJob
This function is intended to be what's called by external scripts
like emop.py to start all work.
Based on the payload's data, all pages are iterated over from here.
Once the loop of all jobs is complete the final results are saved
to a file as completed payload
Args:
force (bool): Run even if output file exists.
Returns:
bool: True if successful, False otherwise.
"""
global instance
global job_ids
data = self.payload.load_input()
logger.debug("Payload: \n%s" % json.dumps(data, sort_keys=True, indent=4))
if not data:
logger.error("No payload data to load.")
return False
if not force:
if self.payload.output_exists():
logger.error("Output file %s already exists." % self.payload.output_filename)
return False
if self.payload.completed_output_exists():
logger.error("Output file %s already exists." % self.payload.completed_output_filename)
return False
# Assign global variables and respond to signals
for job in data:
job_ids.append(job["id"])
instance = self
signal.signal(signal.SIGUSR1, signal_exit)
# Loop over jobs to perform actual work
for job in data:
emop_job = EmopJob(job_data=job, settings=self.settings, scheduler=self.scheduler)
if emop_job.batch_job.job_type == "ocr":
                job_successful = self.do_job(job=emop_job)
                if not job_successful:
continue
# Append successful completion of page #
self.append_result(job=emop_job, results=None, failed=False)
# TODO
# elif batch_job.job_type == "ground truth compare":
else:
logger.error("JobType of %s is not yet supported." % emop_job.batch_job.job_type)
return False
logger.debug("Payload: \n%s" % json.dumps(self.get_results(), sort_keys=True, indent=4))
self.payload.save_completed_output(data=self.get_results(), overwrite=force)
return True
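def _example_run():
    # Hedged driver sketch mirroring how the docstrings above describe an
    # external script (e.g. emop.py) using this class; the config path and
    # proc-id here are placeholders, not values from the original project.
    runner = EmopRun(config_path='config.ini', proc_id=1)
    return runner.run(force=False)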
|
[
"treydock@tamu.edu"
] |
treydock@tamu.edu
|
ad4355bf766f6babe97522e5183c3ee460733155
|
ef0d8fd55fbdb526e20d6c2b05e601f1d86587c5
|
/frappe/core/doctype/page/test_page.py
|
9ad215c1df1a7c2bb10a3b584693420811e33145
|
[
"MIT"
] |
permissive
|
indictranstech/v4_frappe
|
8976e84c14346196b8895ad6274740dca7fd6504
|
dba708c8aa83f503b9f4a264850307111a2b5f19
|
refs/heads/master
| 2021-09-26T12:26:29.994294
| 2018-10-30T06:09:36
| 2018-10-30T06:09:36
| 103,262,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Page')
class TestPage(unittest.TestCase):
pass
|
[
"sagarshiragawakar@gmail.com"
] |
sagarshiragawakar@gmail.com
|
2add2a167b6f6ecc71809a1c88703d1a559ddd69
|
8f15e2170d08e61b4ac70f75ab755967b0009338
|
/mason/util/exception.py
|
2770ab632ea621e7b2718484c56eb27af1ca7de3
|
[
"Apache-2.0"
] |
permissive
|
malave/mason
|
eb86d60b96b16b6e49482097474c05c9805b5f24
|
bf45672124ef841bc16216c293034f4ccc506621
|
refs/heads/master
| 2023-06-12T21:59:46.858046
| 2021-06-11T16:07:18
| 2021-06-11T16:07:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import traceback
def message(e: Exception):
return ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
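if __name__ == '__main__':
    # Usage sketch: format any caught exception, traceback included. Note
    # that message() relies on the etype/value/tb keywords, which
    # traceback.format_exception only accepts on Python < 3.10.
    try:
        1 / 0
    except Exception as e:
        print(message(e))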
|
[
"kprifogle1@gmail.com"
] |
kprifogle1@gmail.com
|
943202dc3b2c3630faa41c6e214b2888d8ef7e92
|
37a7fcb84fb33e28680fc7b0ddb50aa18985a66f
|
/seleniumbase/translate/spanish.py
|
7260bbf117b6c209eaf82b970d224aa136ba3d0a
|
[
"MIT"
] |
permissive
|
dc-avasilev/SeleniumBase
|
99004ec7f978424ff2a7f8d3e9c8f84a185832bf
|
a854d75b6b0c4c5a6bd77ef634742a3b2eefa581
|
refs/heads/master
| 2022-08-28T07:33:47.560383
| 2020-05-22T21:13:44
| 2020-05-22T21:13:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,215
|
py
|
# Spanish / Español - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class CasoDePrueba(BaseCase):
def abrir_url(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def haga_clic(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def doble_clic(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def haga_clic_lentamente(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def haga_clic_en_el_texto_del_enlace(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def actualizar_texto(self, *args, **kwargs):
# update_text(selector, new_value)
return self.update_text(*args, **kwargs)
def agregar_texto(self, *args, **kwargs):
# add_text(selector, new_value)
return self.add_text(*args, **kwargs)
def obtener_texto(self, *args, **kwargs):
# get_text(selector, new_value)
return self.get_text(*args, **kwargs)
def verificar_texto(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def verificar_texto_exacto(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def verificar_texto_del_enlace(self, *args, **kwargs):
# assert_link_text(link_text)
return self.assert_link_text(*args, **kwargs)
def verificar_elemento(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def verificar_elemento_se_muestre(self, *args, **kwargs):
# assert_element_visible(selector) # Same as self.assert_element()
return self.assert_element_visible(*args, **kwargs)
def verificar_elemento_no_se_muestre(self, *args, **kwargs):
# assert_element_not_visible(selector)
return self.assert_element_not_visible(*args, **kwargs)
def verificar_elemento_presente(self, *args, **kwargs):
# assert_element_present(selector)
return self.assert_element_present(*args, **kwargs)
def verificar_elemento_ausente(self, *args, **kwargs):
# assert_element_absent(selector)
return self.assert_element_absent(*args, **kwargs)
def verificar_título(self, *args, **kwargs): # noqa
# assert_title(title)
return self.assert_title(*args, **kwargs)
def verificar_verdad(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def verificar_falso(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def verificar_igual(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def verificar_diferente(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def actualizar_la_página(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def obtener_url_actual(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def obtener_html_de_la_página(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def volver(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def adelante(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def se_muestra_el_texto(self, *args, **kwargs):
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def se_muestra_el_elemento(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def está_presente_el_elemento(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def espera_el_texto(self, *args, **kwargs):
# wait_for_text(text, selector)
return self.wait_for_text(*args, **kwargs)
def espera_el_elemento(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def espera_el_elemento_se_muestre(self, *args, **kwargs):
# wait_for_element_visible(selector) # Same as wait_for_element()
return self.wait_for_element_visible(*args, **kwargs)
def espera_el_elemento_no_se_muestre(self, *args, **kwargs):
# wait_for_element_not_visible(selector)
return self.wait_for_element_not_visible(*args, **kwargs)
def espera_el_elemento_presente(self, *args, **kwargs):
# wait_for_element_present(selector)
return self.wait_for_element_present(*args, **kwargs)
def espera_el_elemento_ausente(self, *args, **kwargs):
# wait_for_element_absent(selector)
return self.wait_for_element_absent(*args, **kwargs)
def dormir(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def espera(self, *args, **kwargs):
# wait(seconds) # Same as sleep(seconds)
return self.wait(*args, **kwargs)
def enviar(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def js_haga_clic(self, *args, **kwargs):
# js_click(selector)
return self.js_click(*args, **kwargs)
def comprobar_html(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def guardar_captura_de_pantalla(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def seleccionar_archivo(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def ejecutar_script(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def bloquear_anuncios(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def saltar(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def verificar_si_hay_enlaces_rotos(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def verificar_si_hay_errores_js(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def cambiar_al_marco(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(*args, **kwargs)
def cambiar_al_contenido_predeterminado(self, *args, **kwargs):
# switch_to_default_content()
return self.switch_to_default_content(*args, **kwargs)
def abrir_una_nueva_ventana(self, *args, **kwargs):
# open_new_window()
return self.open_new_window(*args, **kwargs)
def cambiar_a_la_ventana(self, *args, **kwargs):
# switch_to_window(window)
return self.switch_to_window(*args, **kwargs)
def cambiar_a_la_ventana_predeterminada(self, *args, **kwargs):
# switch_to_default_window()
return self.switch_to_default_window(*args, **kwargs)
def resalte(self, *args, **kwargs):
# highlight(selector)
return self.highlight(*args, **kwargs)
def resalte_clic(self, *args, **kwargs):
# highlight_click(selector)
return self.highlight_click(*args, **kwargs)
def desplazarse_a(self, *args, **kwargs):
# scroll_to(selector)
return self.scroll_to(*args, **kwargs)
def desplazarse_a_la_parte_superior(self, *args, **kwargs):
# scroll_to_top()
return self.scroll_to_top(*args, **kwargs)
def desplazarse_hasta_la_parte_inferior(self, *args, **kwargs):
# scroll_to_bottom()
return self.scroll_to_bottom(*args, **kwargs)
def pasar_el_ratón_y_hacer_clic(self, *args, **kwargs):
# hover_and_click(hover_selector, click_selector)
return self.hover_and_click(*args, **kwargs)
def está_seleccionado(self, *args, **kwargs):
# is_selected(selector)
return self.is_selected(*args, **kwargs)
def presione_la_flecha_hacia_arriba(self, *args, **kwargs):
# press_up_arrow(selector="html", times=1)
return self.press_up_arrow(*args, **kwargs)
def presione_la_flecha_hacia_abajo(self, *args, **kwargs):
# press_down_arrow(selector="html", times=1)
return self.press_down_arrow(*args, **kwargs)
def presione_la_flecha_izquierda(self, *args, **kwargs):
# press_left_arrow(selector="html", times=1)
return self.press_left_arrow(*args, **kwargs)
def presione_la_flecha_derecha(self, *args, **kwargs):
# press_right_arrow(selector="html", times=1)
return self.press_right_arrow(*args, **kwargs)
def haga_clic_en_elementos_visibles(self, *args, **kwargs):
# click_visible_elements(selector)
return self.click_visible_elements(*args, **kwargs)
def seleccionar_opción_por_texto(self, *args, **kwargs):
# select_option_by_text(dropdown_selector, option)
return self.select_option_by_text(*args, **kwargs)
def seleccionar_opción_por_índice(self, *args, **kwargs):
# select_option_by_index(dropdown_selector, option)
return self.select_option_by_index(*args, **kwargs)
def seleccionar_opción_por_valor(self, *args, **kwargs):
# select_option_by_value(dropdown_selector, option)
return self.select_option_by_value(*args, **kwargs)
def crear_una_gira(self, *args, **kwargs):
# create_tour(name=None, theme=None)
return self.create_tour(*args, **kwargs)
def crear_una_gira_shepherd(self, *args, **kwargs):
# create_shepherd_tour(name=None, theme=None)
return self.create_shepherd_tour(*args, **kwargs)
def crear_una_gira_bootstrap(self, *args, **kwargs):
# create_bootstrap_tour(name=None, theme=None)
return self.create_bootstrap_tour(*args, **kwargs)
def crear_una_gira_hopscotch(self, *args, **kwargs):
# create_hopscotch_tour(name=None, theme=None)
return self.create_hopscotch_tour(*args, **kwargs)
def crear_una_gira_introjs(self, *args, **kwargs):
# create_introjs_tour(name=None, theme=None)
return self.create_introjs_tour(*args, **kwargs)
def agregar_paso_a_la_gira(self, *args, **kwargs):
# add_tour_step(message, selector=None, name=None,
# title=None, theme=None, alignment=None)
return self.add_tour_step(*args, **kwargs)
def reproducir_la_gira(self, *args, **kwargs):
# play_tour(name=None)
return self.play_tour(*args, **kwargs)
def exportar_la_gira(self, *args, **kwargs):
# export_tour(name=None, filename="my_tour.js", url=None)
return self.export_tour(*args, **kwargs)
def fallar(self, *args, **kwargs):
# fail(msg=None) # Inherited from "unittest"
return self.fail(*args, **kwargs)
def obtener_url(self, *args, **kwargs):
# get(url) # Same as open(url)
return self.get(*args, **kwargs)
def visita_url(self, *args, **kwargs):
# visit(url) # Same as open(url)
return self.visit(*args, **kwargs)
def obtener_elemento(self, *args, **kwargs):
# get_element(selector) # Element can be hidden
return self.get_element(*args, **kwargs)
def encontrar_elemento(self, *args, **kwargs):
# find_element(selector) # Element must be visible
return self.find_element(*args, **kwargs)
def obtener_atributo(self, *args, **kwargs):
# get_attribute(selector, attribute)
return self.get_attribute(*args, **kwargs)
def establecer_atributo(self, *args, **kwargs):
# set_attribute(selector, attribute, value)
return self.set_attribute(*args, **kwargs)
def establecer_atributos(self, *args, **kwargs):
# set_attributes(selector, attribute, value)
return self.set_attributes(*args, **kwargs)
def entrada(self, *args, **kwargs):
# input(selector, new_value) # Same as update_text()
return self.type(*args, **kwargs)
def escribir(self, *args, **kwargs):
# write(selector, new_value) # Same as update_text()
return self.write(*args, **kwargs)
def imprimir(self, *args, **kwargs):
# _print(msg) # Same as Python print()
return self._print(*args, **kwargs)
class MasterQA_Español(MasterQA, CasoDePrueba):
def verificar(self, *args, **kwargs):
# "Manual Check"
self.DEFAULT_VALIDATION_TITLE = "Comprobación manual"
# "Does the page look good?"
self.DEFAULT_VALIDATION_MESSAGE = "¿Se ve bien la página?"
# verify(QUESTION)
return self.verify(*args, **kwargs)
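# Hedged example (not part of the original module): a test written with the
# Spanish method names above; the URL and selector are placeholders.
class MiPruebaDeEjemplo(CasoDePrueba):
    def test_ejemplo(self):
        self.abrir_url("https://example.com")
        self.verificar_elemento("h1")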
|
[
"mdmintz@gmail.com"
] |
mdmintz@gmail.com
|
3cff2276a49a7356bbc1bbd7c7052699a5e74df7
|
3876b862c61d64c9c225eb8a6305853a2ac16325
|
/pal/writer/access_mechanism/none.py
|
246e92b2175a0a072f4958ee21826c2b4c011205
|
[
"MIT"
] |
permissive
|
connojd/pal
|
0d122de7e9b6fe659e35dd44310b9783830c4838
|
800f8bd6de0004313d4208da619b4ef98d2e1e76
|
refs/heads/master
| 2020-09-11T06:17:08.837738
| 2019-11-21T19:18:31
| 2019-11-21T19:23:23
| 221,967,287
| 0
| 0
|
MIT
| 2019-11-15T16:56:22
| 2019-11-15T16:56:22
| null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
from pal.writer.access_mechanism.access_mechanism \
import AccessMechanismWriter
class NoneAccessMechanismWriter(AccessMechanismWriter):
def declare_access_mechanism_dependencies(self, outfile, register):
pass
def call_readable_access_mechanism(self, outfile, register,
access_mechanism, var):
pass
def call_writable_access_mechanism(self, outfile, register,
access_mechanism, value):
pass
|
[
"jared.wright12@gmail.com"
] |
jared.wright12@gmail.com
|
fd03014fc14d457010bc0bdcd28a64aefbdd894e
|
f72fe33d1a181f89d2464cc07744dbd275a7d071
|
/CNNectome/postprocessing/partner_annotations/luigi_pipeline_spec_dir/prepare_luigi.py
|
280ade177af970bf8a531ddad83d057ed3b73873
|
[
"BSD-2-Clause"
] |
permissive
|
saalfeldlab/CNNectome
|
6c8d44d8cc2e161a91b10abb7b4a425d7fc64d1b
|
c043e3111ff5ec6707a68edffae54eb902a1652d
|
refs/heads/master
| 2023-04-03T15:11:36.586030
| 2022-06-15T14:12:17
| 2022-06-15T14:12:17
| 124,144,317
| 8
| 10
|
BSD-2-Clause
| 2023-03-24T22:16:04
| 2018-03-06T22:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
import luigi
import os
class CheckCheckpoint(luigi.ExternalTask):
it = luigi.IntParameter()
path = luigi.Parameter()
@property
def priority(self):
if int(self.it) % 10000 == 0:
return 1.0 / int(self.it)
else:
return 0.0
def output(self):
base = os.path.join(self.path, "unet_checkpoint_" + str(self.it))
return [
luigi.LocalTarget(base + ".data-00000-of-00001"),
luigi.LocalTarget(base + ".index"),
luigi.LocalTarget(base + ".meta"),
]
class MakeItFolder(luigi.ExternalTask):
it = luigi.IntParameter()
path = luigi.Parameter()
data_eval = luigi.TupleParameter()
@property
def priority(self):
return self.it
def requires(self):
return CheckCheckpoint(self.it, self.path)
def output(self):
base = os.path.dirname(self.input()[0].fn)
return luigi.LocalTarget(
os.path.join(base, "evaluation", str(self.it), self.data_eval[-1])
)
def run(self):
# make the folders
base = os.path.dirname(self.input()[0].fn)
for de in self.data_eval:
if not os.path.exists(os.path.join(base, "evaluation", str(self.it), de)):
os.makedirs(os.path.join(base, "evaluation", str(self.it), de))
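# Hedged run sketch: tasks like these are usually launched via luigi.build();
# the iteration, checkpoint path, and data_eval values are placeholders.
# luigi.build(
#     [MakeItFolder(it=10000, path="/path/to/checkpoints",
#                   data_eval=("setup01", "setup02"))],
#     local_scheduler=True,
# )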
|
[
"heinrichl@janelia.hhmi.org"
] |
heinrichl@janelia.hhmi.org
|
69b6aec08c2ed56cfcd0213106d16764a708984a
|
ec1deb682fb96a1f937f2fca5f161aa951462876
|
/pythonTextBook/exercises/files/exTenThree.py
|
edbaf04d420fb624cf41f51648403964fd6d11d4
|
[] |
no_license
|
AnatoliKosarev/Python-beginner-course--Teclado-
|
31d82f5e9a1f39e2970323bed9de1fd539990565
|
fa91199938d6975b5874341585343566caaf3600
|
refs/heads/main
| 2023-06-30T12:14:33.779827
| 2021-07-24T11:16:19
| 2021-07-24T11:16:19
| 376,371,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
def save_guest_name():
name = input("Enter your name: ")
if name:
with open("guest.txt", "a") as f:
f.write(f"{name}\n")
if __name__ == "__main__":
save_guest_name()
|
[
"anatoli.kosarev@gmail.com"
] |
anatoli.kosarev@gmail.com
|
6d5b1b017e641d25258e4a6f3936bcc0de658724
|
c2acac76572d0784f29d1a0cc529c0f753aac184
|
/apmec/db/migration/alembic_migrations/versions/6e56d4474b2a_blob_to_json_text.py
|
8469f5fbc095fe33ef7934b322650f3f92daf624
|
[
"Apache-2.0"
] |
permissive
|
openMECPlatform/apmec
|
6d1d8380385beda7c2e59539c5001dffe9da8672
|
1046bf4730d2bdf95e3cd7efe487cb3fbf1fcd22
|
refs/heads/master
| 2023-03-26T13:52:43.647511
| 2021-03-31T16:40:03
| 2021-03-31T16:40:03
| 353,424,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""blob-to-json-text
Revision ID: 6e56d4474b2a
Revises: f958f58e5daa
Create Date: 2016-06-01 09:50:46.296206
"""
import json
import pickle
from alembic import op
import sqlalchemy as sa
from apmec.db import types
# revision identifiers, used by Alembic.
revision = '6e56d4474b2a'
down_revision = 'f958f58e5daa'
def _migrate_data(table, column_name):
meta = sa.MetaData(bind=op.get_bind())
t = sa.Table(table, meta, autoload=True)
for r in t.select().execute():
stmt = t.update().where(t.c.id == r.id).values(
{column_name: json.dumps(pickle.loads(getattr(r, column_name)))})
op.execute(stmt)
op.alter_column(table,
column_name,
type_=types.Json)
def upgrade(active_plugins=None, options=None):
_migrate_data('vims', 'placement_attr')
_migrate_data('vimauths', 'vim_project')
_migrate_data('vimauths', 'auth_cred')
_migrate_data('devices', 'placement_attr')
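# Hedged usage note: this runs as part of the normal Alembic flow (e.g.
# `alembic upgrade head`); _migrate_data assumes each table has an `id`
# column, which holds for all four tables migrated above.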
|
[
"tung.doan_van@tu-dresden.de"
] |
tung.doan_van@tu-dresden.de
|
7f1679ac1ba910f2b2bf2f711c9cb9d730731be5
|
a14ec6e367e6a471bfc74c066fb958ef585bc269
|
/2020/01/a.py
|
2d6e886022516fb0bde3df14250fdb6fd3e78fad
|
[] |
no_license
|
jimhendy/AoC
|
90641814ed431f46a8500ff0f022c6c957567563
|
a1727f88bc2e6f739d65902dce188377966b3fb4
|
refs/heads/master
| 2023-09-02T14:48:39.860352
| 2023-08-28T08:09:19
| 2023-08-28T08:09:19
| 225,152,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import os
def run(inputs):
nums = list(map(int, inputs.split(os.linesep)))
for i, n_i in enumerate(nums[:-1]):
for n_j in nums[i + 1 :]:
if n_i + n_j == 2020:
return n_i * n_j
return None
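def run_set(inputs):
    # O(n) alternative sketch: record seen values and look up each number's
    # complement to 2020 instead of scanning all pairs.
    seen = set()
    for n in map(int, inputs.split(os.linesep)):
        if 2020 - n in seen:
            return n * (2020 - n)
        seen.add(n)
    return None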
|
[
"jimhendy88@gmail.com"
] |
jimhendy88@gmail.com
|
b7e3d0e691bf434020466185cae8f091158ce029
|
4863a76461ebdb2abf384df672bf07170b5cc4ce
|
/scanner.py
|
3ca86d2aea1e70a868641fc142691d5e07527b56
|
[
"MIT"
] |
permissive
|
squidgameholders/Website-Scanner
|
73a3db8ea39ab5b91c309da6491db60a1cdc83a7
|
ee27f265913957c5357ba673b61338336c222672
|
refs/heads/master
| 2023-03-19T04:12:29.324705
| 2015-02-17T18:20:53
| 2015-02-17T18:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
#!/usr/bin/env python
import difflib
import hashlib
import requests
bags_urls = [
'http://www.mansurgavriel.com/collections/all',
]
def get_content():
#r = requests.get("http://www.mansurgavriel.com/collections/all")
r = requests.get("http://new.yancao.me")
return r.content
def hash_obj(content):
hash_obj = hashlib.md5(content)
return hash_obj.hexdigest()
def diff(old, new):
    """
    Helper function. Returns a string containing the unified diff of two multiline strings.
    """
    old = old.splitlines(True)
    new = new.splitlines(True)
    diff = difflib.unified_diff(old, new)
    return ''.join(diff)
# c1 = get_content()
# print hash_obj(c1)
# from time import sleep
# sleep(10)
# c2 = get_content()
# print hash_obj(c2)
# print diff(c1, c2)
# print hash_obj(c1) == hash_obj(c2)
r = requests.get("http://new.yancao.me")
|
[
"cyandterry@hotmail.com"
] |
cyandterry@hotmail.com
|
298dca1095a9f092a9bd979bec50465e9fcc6f65
|
5406d6bce007c90ca36e7557e060296e04b7c1b6
|
/scripts/spp02/spp.py
|
a5f22d94ac733a722292942abc919a18f06f3ec2
|
[] |
no_license
|
darraghdog/dfake
|
10df7c19acc3447c1833ab2754ec61b5699b9a05
|
e128d0d907b0cb2d4e5d841de4bcfb0ddb0af276
|
refs/heads/master
| 2022-04-12T02:46:02.272497
| 2020-04-05T19:18:30
| 2020-04-05T19:18:30
| 229,479,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,128
|
py
|
import os
import sys
import glob
import json
import cv2
from PIL import Image
import numpy as np
import pandas as pd
#import dlib
import torch
from itertools import product
from time import time
import datetime
import collections
from tqdm import tqdm
import skvideo.io
import skvideo.datasets
import random
import optparse
import itertools
#from facenet_pytorch import MTCNN, InceptionResnetV1
import matplotlib.pylab as plt
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn.functional as F
from torch import nn
import torch.optim as optim
from albumentations import Compose, ShiftScaleRotate, Resize
from albumentations.pytorch import ToTensor
from torch.utils.data import Dataset
from sklearn.metrics import log_loss
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
from albumentations import (Cutout, Compose, Normalize, RandomRotate90, HorizontalFlip, RandomBrightnessContrast,
VerticalFlip, ShiftScaleRotate, Transpose, OneOf, IAAAdditiveGaussianNoise,
GaussNoise, RandomGamma, RandomContrast, RandomBrightness, HueSaturationValue,
RandomBrightnessContrast, Lambda, NoOp, CenterCrop, Resize
)
from tqdm import tqdm
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
# Print info about environments
parser = optparse.OptionParser()
parser.add_option('-a', '--seed', action="store", dest="seed", help="model seed", default="1234")
parser.add_option('-b', '--fold', action="store", dest="fold", help="Fold for split", default="0")
parser.add_option('-c', '--rootpath', action="store", dest="rootpath", help="root directory", default="")
parser.add_option('-d', '--vidpath', action="store", dest="vidpath", help="root directory", default="data/mount/video/train")
parser.add_option('-e', '--imgpath', action="store", dest="imgpath", help="root directory", default="data/mount/npimg/train")
parser.add_option('-f', '--wtspath', action="store", dest="wtspath", help="root directory", default="weights")
parser.add_option('-g', '--fps', action="store", dest="fps", help="Frames per second", default="8")
parser.add_option('-i', '--size', action="store", dest="size", help="image size", default="224")
parser.add_option('-j', '--metafile', action="store", dest="metafile", help="Meta file", default="trainmeta.csv.gz")
parser.add_option('-k', '--batchsize', action="store", dest="batchsize", help="Batch size", default="8")
parser.add_option('-l', '--epochs', action="store", dest="epochs", help="epochs", default="10")
parser.add_option('-m', '--lr', action="store", dest="lr", help="learning rate", default="0.0001")
parser.add_option('-n', '--decay', action="store", dest="decay", help="Weight Decay", default="0.0")
parser.add_option('-o', '--lrgamma', action="store", dest="lrgamma", help="Scheduler Learning Rate Gamma", default="1.0")
parser.add_option('-p', '--start', action="store", dest="start", help="Start epochs", default="0")
parser.add_option('-q', '--infer', action="store", dest="infer", help="root directory", default="TRN")
parser.add_option('-r', '--lrmult', action="store", dest="lrmult", help="learning rate multiplier", default="4")
parser.add_option('-s', '--accum', action="store", dest="accum", help="accumulation steps", default="1")
options, args = parser.parse_args()
INPATH = options.rootpath
#INPATH='/Users/dhanley2/Documents/Personal/dfake'
sys.path.append(os.path.join(INPATH, 'utils' ))
from logs import get_logger
from utils import dumpobj, loadobj, GradualWarmupScheduler, chunks, pilimg, SpatialDropout
from sort import *
from sppnet import SPPNet
# Print info about environments
logger = get_logger('Video to image :', 'INFO')
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Device : {}'.format(torch.cuda.get_device_name(0)))
logger.info('Cuda available : {}'.format(torch.cuda.is_available()))
n_gpu = torch.cuda.device_count()
logger.info('Cuda n_gpus : {}'.format(n_gpu ))
logger.info('Load params')
for (k,v) in options.__dict__.items():
logger.info('{}{}'.format(k.ljust(20), v))
SEED = int(options.seed)
SIZE = int(options.size)
FOLD = int(options.fold)
BATCHSIZE = int(options.batchsize)
METAFILE = os.path.join(INPATH, 'data', options.metafile)
WTSFILES = os.path.join(INPATH, options.wtspath)
WTSPATH = os.path.join(INPATH, options.wtspath)
IMGDIR = os.path.join(INPATH, options.imgpath)
EPOCHS = int(options.epochs)
START = int(options.start)
LR=float(options.lr)
LRGAMMA=float(options.lrgamma)
DECAY=float(options.decay)
INFER=options.infer
ACCUM=int(options.accum)
LRMULT=float(options.lrmult)
# METAFILE='/Users/dhanley2/Documents/Personal/dfake/data/trainmeta.csv.gz'
metadf = pd.read_csv(METAFILE)
logger.info('Full video file shape {} {}'.format(*metadf.shape))
# https://www.kaggle.com/bminixhofer/speed-up-your-rnn-with-sequence-bucketing
class SPPSeqNet(nn.Module):
def __init__(self, backbone, embed_size, pool_size=(1, 2, 6), pretrained=True, \
dense_units = 256, dropout = 0.2):
        # Only resnet is supported in this version; note that the `backbone`
        # argument is currently ignored and resnet34 is hardcoded below
        super(SPPSeqNet, self).__init__()
        self.sppnet = SPPNet(backbone=34, pool_size=pool_size, folder=WTSPATH)
self.dense_units = dense_units
self.lstm1 = nn.LSTM(embed_size, self.dense_units, bidirectional=True, batch_first=True)
self.linear1 = nn.Linear(self.dense_units*2, self.dense_units*2)
self.linear_out = nn.Linear(self.dense_units*2, 1)
self.embedding_dropout = SpatialDropout(dropout)
def forward(self, x):
# Input is batch of image sequences
batch_size, seqlen = x.size()[:2]
# Flatten to make a single long list of frames
x = x.view(batch_size * seqlen, *x.size()[2:])
# Pass each frame thru SPPNet
emb = self.sppnet(x.permute(0,3,1,2))
# Split back out to batch
emb = emb.view(batch_size, seqlen, emb.size()[1])
emb = self.embedding_dropout(emb)
# Pass batch thru sequential model(s)
h_lstm1, _ = self.lstm1(emb)
max_pool, _ = torch.max(h_lstm1, 1)
h_pool_linear = F.relu(self.linear1(max_pool))
# Max pool and linear layer
hidden = max_pool + h_pool_linear
# Classifier
out = self.linear_out(hidden)
return out
# IMGDIR='/Users/dhanley2/Documents/Personal/dfake/data/npimg'
# https://www.kaggle.com/alexanderliao/image-augmentation-demo-with-albumentation/notebook
def augment(aug, image):
return aug(image=image)['image']
class DFakeDataset(Dataset):
def __init__(self, df, imgdir, aug_ratio = 5, train = True, labels = True, maxlen = 37):
self.data = df.copy()
logger.info('Full data shape {} {}'.format(*self.data.shape))
self.data.label = (self.data.label == 'FAKE').astype(np.int8)
self.imgdir = imgdir
self.framels = os.listdir(IMGDIR)
self.labels = labels
self.data = self.data[self.data.video.str.replace('.mp4', '.npz').isin(self.framels)]
logger.info('Fitered on frames on disk {} {}'.format(*self.data.shape))
        # Oversample the REAL class aug_ratio times (a hardcoded 5 here would
        # silently ignore the constructor argument)
        self.data = pd.concat([self.data.query('label == 0')]*aug_ratio+\
                              [self.data.query('label == 1')])
self.data = self.data.sample(frac=1).reset_index(drop=True)
# self.data = pd.concat([ self.data[self.data.video.str.contains('qirlrtrxba')], self.data[:500].copy() ]).reset_index(drop=True)
self.maxlen = maxlen
logger.info('Expand the REAL class {} {}'.format(*self.data.shape))
meanimg = [0.4258249 , 0.31385377, 0.29170314]
stdimg = [0.22613944, 0.1965406 , 0.18660679]
self.augflip = Compose([HorizontalFlip(p=1.)])
self.augbrcn = Compose([RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.7)])
self.augnorm = Compose([ Normalize(mean=meanimg, std=stdimg,
max_pixel_value=255.0, p=1.0), ToTensor()])
self.train = train
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
vid = self.data.loc[idx]
# Apply constant augmentation on combined frames
fname = os.path.join(self.imgdir, vid.video.replace('mp4', 'npz'))
try:
frames = np.load(fname)['arr_0']
d0,d1,d2,d3 = frames.shape
# logger.info('Vid shape {}'.format(frames.shape))
# logger.info(15*'__')
frames = frames.reshape(d0*d1, d2, d3)
# Augment and normalise; renadom brightness on real images only for now
if self.train:
frames = augment(self.augflip, frames)
if vid.label==0: frames = augment(self.augbrcn, frames)
frames = augment(self.augnorm, frames)
frames = frames.reshape(d0,d1,d2,d3)
            # Cut the frames to max 37 with a sliding window
            if d0>self.maxlen:
                xtra = frames.shape[0]-self.maxlen
                shift = random.randint(0, xtra)
                # take an explicit maxlen-long window; a [...:-shift] slice
                # is empty when shift == 0
                frames = frames[shift:shift + self.maxlen]
if self.train:
labels = torch.tensor(vid.label)
return {'frames': frames, 'idx': idx, 'labels': labels}
else:
return {'frames': frames, 'idx': idx}
        except Exception:
logger.info('Failed to load numpy array {}'.format(fname))
def collatefn(batch):
# Remove error reads
batch = [b for b in batch if b is not None]
seqlen = torch.tensor([l['frames'].shape[0] for l in batch])
ids = torch.tensor([l['idx'] for l in batch])
maxlen = seqlen.max()
# get shapes
d0,d1,d2,d3 = batch[0]['frames'].shape
# Pad with zero frames
x_batch = [l['frames'] if l['frames'].shape[0] == maxlen else \
torch.cat((l['frames'], torch.zeros((maxlen-sl,d1,d2,d3))), 0)
for l,sl in zip(batch, seqlen)]
x_batch = torch.cat([x.unsqueeze(0) for x in x_batch])
if 'labels' in batch[0]:
y_batch = torch.tensor([l['labels'] for l in batch])
return {'frames': x_batch, 'ids': ids, 'seqlen': seqlen, 'labels': y_batch}
else:
return {'frames': x_batch, 'ids': ids, 'seqlen': seqlen}
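# Shape sketch for collatefn (illustrative numbers): two clips with 30 and 37
# frame-groups of shape (d1, d2, d3) give maxlen = 37; the shorter clip gains
# 7 zero groups, so x_batch stacks to (2, 37, d1, d2, d3).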
logger.info('Create loaders...')
# IMGDIR='/Users/dhanley2/Documents/Personal/dfake/data/npimg'
# BATCHSIZE=2
trndf = metadf.query('fold != @FOLD').reset_index(drop=True)
valdf = metadf.query('fold == @FOLD').reset_index(drop=True)
trndataset = DFakeDataset(trndf, IMGDIR, labels=True, train = True)
valdataset = DFakeDataset(valdf, IMGDIR, labels=True, train = False)
trnloader = DataLoader(trndataset, batch_size=BATCHSIZE, shuffle=False, num_workers=8, collate_fn=collatefn)
valloader = DataLoader(valdataset, batch_size=BATCHSIZE, shuffle=False, num_workers=8, collate_fn=collatefn)
logger.info('Create model')
poolsize=(1, 2, 6)
embedsize = 512*sum(i**2 for i in poolsize)
model = SPPSeqNet(backbone=50, pool_size=poolsize, dense_units = 256, \
dropout = 0.2, embed_size = embedsize)
model = model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
plist = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': DECAY},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optim.Adam(plist, lr=LR)
scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, EPOCHS)
scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=LRMULT, total_epoch=1, after_scheduler=scheduler_cosine)
# scheduler = StepLR(optimizer, 1, gamma=LRGAMMA, last_epoch=-1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
criterion = torch.nn.BCEWithLogitsLoss()
for tt, epoch in enumerate(range(EPOCHS)):
logger.info('Epoch {}/{}'.format(epoch, EPOCHS - 1))
scheduler_warmup.step()
tr_loss = 0.
for param_group in optimizer.param_groups:
logger.info('Epoch: {} lr: {}'.format(epoch+1, param_group['lr']))
logger.info('-' * 10)
if epoch<START:
        input_model_file = 'weights/sppnet_epoch{}_fold{}_accum{}.bin'.format(epoch, FOLD, ACCUM)
model.load_state_dict(torch.load(input_model_file))
model.to(device)
continue
if INFER not in ['TST', 'EMB', 'VAL']:
for param in model.parameters():
param.requires_grad = True
model.train()
for step, batch in enumerate(trnloader):
x = batch['frames'].to(device, dtype=torch.float)
y = batch['labels'].to(device, dtype=torch.float)
x = torch.autograd.Variable(x, requires_grad=True)
y = torch.autograd.Variable(y)
y = y.unsqueeze(1)
out = model(x)
# Get loss
loss = criterion(out, y)
tr_loss += loss.item()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if step % ACCUM == ACCUM -1 : # Wait for several backward steps
optimizer.step() # Now we can do an optimizer step
optimizer.zero_grad()
if step%100==0:
logger.info('Trn step {} of {} trn lossavg {:.5f}'. \
format(step, len(trnloader), (tr_loss/(1+step))))
        # keep the checkpoint name consistent with the resume/inference loads
        output_model_file = 'weights/sppnet_epoch{}_fold{}_accum{}.bin'.format(epoch, FOLD, ACCUM)
        torch.save(model.state_dict(), output_model_file)
else:
        input_model_file = 'weights/sppnet_epoch{}_fold{}_accum{}.bin'.format(epoch, FOLD, ACCUM)
model.load_state_dict(torch.load(input_model_file))
model.to(device)
if INFER in ['VAL', 'TRN']:
model.eval()
ypredval = []
valids = []
with torch.no_grad():
for step, batch in enumerate(valloader):
x = batch['frames'].to(device, dtype=torch.float)
out = model(x)
out = torch.sigmoid(out)
ypredval.append(out.cpu().detach().numpy())
valids.append(batch['ids'].cpu().detach().numpy())
if step%200==0:
logger.info('Val step {} of {}'.format(step, len(valloader)))
ypredval = np.concatenate(ypredval).flatten()
valids = np.concatenate(valids).flatten()
yactval = valdataset.data.iloc[valids].label.values
valloss = log_loss(yactval, ypredval.clip(.00001,.99999))
logger.info('Epoch {} val logloss {:.5f}'.format(epoch, valloss))
logger.info('Write out bagged prediction to preds folder')
yvaldf = valdataset.data.iloc[valids][['video', 'label']]
yvaldf['pred'] = ypredval
yvaldf.to_csv('preds/dfake_sppnet_sub_epoch{}.csv.gz'.format(epoch), \
index = False, compression = 'gzip')
|
[
"darragh.hanley@gmail.com"
] |
darragh.hanley@gmail.com
|
cf5e67e597f03b6997a0db48a15f099ec801cb3b
|
6e5bd4eb27bbd2e4cd248334d4eeeeda3954a541
|
/trainer.py
|
f333469c056ea81a666efe51bfe2a38df79cb28e
|
[
"Apache-2.0"
] |
permissive
|
arunava-de-e3172/JointBERT
|
c85053aff52f195b8d02872d234707c1909416cb
|
1eee7c09f141c4649addf09d3db37072e9860fc1
|
refs/heads/master
| 2021-05-21T15:54:22.810359
| 2020-04-03T08:53:12
| 2020-04-03T08:53:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,589
|
py
|
import os
import logging
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertConfig, AdamW, get_linear_schedule_with_warmup
from utils import MODEL_CLASSES, set_seed, compute_metrics, get_intent_labels, get_slot_labels
logger = logging.getLogger(__name__)
class Trainer(object):
def __init__(self, args, train_dataset=None, dev_dataset=None, test_dataset=None):
self.args = args
self.train_dataset = train_dataset
self.dev_dataset = dev_dataset
self.test_dataset = test_dataset
self.intent_label_lst = get_intent_labels(args)
self.slot_label_lst = get_slot_labels(args)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
self.pad_token_label_id = args.ignore_index
self.config_class, self.model_class, _ = MODEL_CLASSES[args.model_type]
self.bert_config = self.config_class.from_pretrained(args.model_name_or_path, finetuning_task=args.task)
self.model = self.model_class.from_pretrained(args.model_name_or_path,
config=self.bert_config,
args=args,
intent_label_lst=self.intent_label_lst,
slot_label_lst=self.slot_label_lst)
# GPU or CPU
self.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
self.model.to(self.device)
def train(self):
train_sampler = RandomSampler(self.train_dataset)
train_dataloader = DataLoader(self.train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size)
if self.args.max_steps > 0:
t_total = self.args.max_steps
self.args.num_train_epochs = self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs
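            # Worked example: 1000 batches, gradient accumulation 2, 3 epochs
            # -> t_total = 1000 // 2 * 3 = 1500 optimizer steps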
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay},
{'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.train_dataset))
logger.info(" Num Epochs = %d", self.args.num_train_epochs)
logger.info(" Total train batch size = %d", self.args.train_batch_size)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Logging steps = %d", self.args.logging_steps)
logger.info(" Save steps = %d", self.args.save_steps)
global_step = 0
tr_loss = 0.0
self.model.zero_grad()
train_iterator = trange(int(self.args.num_train_epochs), desc="Epoch")
set_seed(self.args)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
self.model.train()
batch = tuple(t.to(self.device) for t in batch) # GPU or CPU
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'intent_label_ids': batch[3],
'slot_labels_ids': batch[4]}
if self.args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2]
outputs = self.model(**inputs)
loss = outputs[0]
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
self.model.zero_grad()
global_step += 1
if self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0:
self.evaluate("dev")
if self.args.save_steps > 0 and global_step % self.args.save_steps == 0:
self.save_model()
if 0 < self.args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < self.args.max_steps < global_step:
train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(self, mode):
if mode == 'test':
dataset = self.test_dataset
elif mode == 'dev':
dataset = self.dev_dataset
else:
raise Exception("Only dev and test dataset available")
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation on %s dataset *****", mode)
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", self.args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
intent_preds = None
slot_preds = None
out_intent_label_ids = None
out_slot_labels_ids = None
self.model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'intent_label_ids': batch[3],
'slot_labels_ids': batch[4]}
if self.args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2]
outputs = self.model(**inputs)
tmp_eval_loss, (intent_logits, slot_logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
# Intent prediction
if intent_preds is None:
intent_preds = intent_logits.detach().cpu().numpy()
out_intent_label_ids = inputs['intent_label_ids'].detach().cpu().numpy()
else:
intent_preds = np.append(intent_preds, intent_logits.detach().cpu().numpy(), axis=0)
out_intent_label_ids = np.append(
out_intent_label_ids, inputs['intent_label_ids'].detach().cpu().numpy(), axis=0)
# Slot prediction
if slot_preds is None:
if self.args.use_crf:
# decode() in `torchcrf` returns list with best index directly
slot_preds = np.array(self.model.crf.decode(slot_logits))
else:
slot_preds = slot_logits.detach().cpu().numpy()
out_slot_labels_ids = inputs["slot_labels_ids"].detach().cpu().numpy()
else:
if self.args.use_crf:
slot_preds = np.append(slot_preds, np.array(self.model.crf.decode(slot_logits)), axis=0)
else:
slot_preds = np.append(slot_preds, slot_logits.detach().cpu().numpy(), axis=0)
out_slot_labels_ids = np.append(out_slot_labels_ids, inputs["slot_labels_ids"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
results = {
"loss": eval_loss
}
# Intent result
intent_preds = np.argmax(intent_preds, axis=1)
# Slot result
if not self.args.use_crf:
slot_preds = np.argmax(slot_preds, axis=2)
slot_label_map = {i: label for i, label in enumerate(self.slot_label_lst)}
out_slot_label_list = [[] for _ in range(out_slot_labels_ids.shape[0])]
slot_preds_list = [[] for _ in range(out_slot_labels_ids.shape[0])]
for i in range(out_slot_labels_ids.shape[0]):
for j in range(out_slot_labels_ids.shape[1]):
if out_slot_labels_ids[i, j] != self.pad_token_label_id:
out_slot_label_list[i].append(slot_label_map[out_slot_labels_ids[i][j]])
slot_preds_list[i].append(slot_label_map[slot_preds[i][j]])
total_result = compute_metrics(intent_preds, out_intent_label_ids, slot_preds_list, out_slot_label_list)
results.update(total_result)
logger.info("***** Eval results *****")
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results
def save_model(self):
# Save model checkpoint (Overwrite)
if not os.path.exists(self.args.model_dir):
os.makedirs(self.args.model_dir)
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
model_to_save.save_pretrained(self.args.model_dir)
# Save training arguments together with the trained model
torch.save(self.args, os.path.join(self.args.model_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", self.args.model_dir)
def load_model(self):
# Check whether model exists
if not os.path.exists(self.args.model_dir):
raise Exception("Model doesn't exists! Train first!")
try:
self.model = self.model_class.from_pretrained(self.args.model_dir)
self.model.to(self.device)
logger.info("***** Model Loaded *****")
        except Exception:
raise Exception("Some model files might be missing...")
|
[
"adieujw@gmail.com"
] |
adieujw@gmail.com
|
5ea20db9e23c11fc2b7e25e17da92ee9a931ec95
|
1eed777d0174fecc14fee1bf1ca6e47c412666cd
|
/virtual/bin/chardetect
|
2097c185eb4a4d78969f5693629a1c542ed3b987
|
[] |
no_license
|
ALKEMIA-CHARLES/Postify
|
9ff3d36b8962660febc56ef0de139b792f074756
|
a915da3a9b9c2011f47e7a31e6cf2e4d75d4fd39
|
refs/heads/master
| 2022-12-21T21:28:03.813680
| 2020-03-30T06:50:40
| 2020-03-30T06:50:40
| 243,502,216
| 0
| 0
| null | 2022-12-08T03:41:52
| 2020-02-27T11:23:19
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
#!/home/charles/Documents/moringa-school-projects/myposts/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"charlesmtawaliJr@gmail.com"
] |
charlesmtawaliJr@gmail.com
|
|
80b11878a88ff37497c211fb5f2bd12f6c51e000
|
616c3c02be31b9ae4d06bd7c5a8d4a2e7c446aa1
|
/796.旋转字符串.py
|
2a3f2c04330031f2887eb2ce899980a55c92aab2
|
[] |
no_license
|
L1nwatch/leetcode-python
|
8b7c47c04ee9400d50d8b0764a544a0463df8f06
|
0484cbc3273ada25992c72105658cd67411c5d39
|
refs/heads/master
| 2023-01-11T14:53:15.339276
| 2023-01-11T05:24:43
| 2023-01-11T05:24:43
| 194,516,548
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
#
# @lc app=leetcode.cn id=796 lang=python3
#
# [796] 旋转字符串
#
# @lc code=start
class Solution:
def rotateString(self, s: str, goal: str) -> bool:
if len(s) != len(goal):
return False
for i in range(len(s)):
if s[i:]+s[:i] == goal:
return True
return False
# @lc code=end
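# A common O(n)-space alternative: goal is a rotation of s iff it occurs in
# s doubled. Sketch only, not the submitted solution:
#
#     return len(s) == len(goal) and goal in s + s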
|
[
"watch1602@gmail.com"
] |
watch1602@gmail.com
|
c821fc32436da400c7d10c1e029fc1b0c73e5cfc
|
037a03d4b8b81bc39dc41cb4f3726f8297c8b672
|
/0348.py
|
4cabe4d37e2f40b1fd122a1cc55f51f9e02481ec
|
[] |
no_license
|
Agchai52/Leetcode1
|
ee3433ef6f6c3ddd800204c25a456dc7c3fd0053
|
9535d038bee690b7c7aeca352a4ab32d188684bb
|
refs/heads/master
| 2021-08-22T02:59:45.632548
| 2020-05-21T00:31:45
| 2020-05-21T00:31:45
| 185,273,962
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
class TicTacToe(object):
def __init__(self, n):
"""
Initialize your data structure here.
:type n: int
"""
self.grid = [['']*n for _ in range(n)]
def move(self, row, col, player):
"""
Player {player} makes a move at ({row}, {col}).
@param row The row of the board.
@param col The column of the board.
@param player The player, can be either 1 or 2.
@return The current winning condition, can be either:
0: No one wins.
1: Player 1 wins.
2: Player 2 wins.
:type row: int
:type col: int
:type player: int
:rtype: int
"""
if player == 1:
mark = 'X'
else:
mark = 'O'
n = len(self.grid)
self.grid[row][col] = mark
        sum_of_row = sum([self.grid[row][c] == mark for c in range(n)])
        sum_of_col = sum([self.grid[r][col] == mark for r in range(n)])
        sum_of_left_d = sum([self.grid[i][i] == mark for i in range(n)])
        sum_of_right_d = sum([self.grid[i][n-1-i] == mark for i in range(n)])
        if sum_of_row == n or sum_of_col == n or sum_of_left_d == n or sum_of_right_d == n:
return player
else:
return 0
#https://blog.csdn.net/danspace1/article/details/86616981
# Your TicTacToe object will be instantiated and called as such:
# obj = TicTacToe(n)
# param_1 = obj.move(row,col,player)
|
[
"noreply@github.com"
] |
Agchai52.noreply@github.com
|
7ad5eaabf7279f414bab5d88d5d8c71193018ee8
|
c6fea702b817b719d9774b66d76c7cbaf1369d7d
|
/plilja-python3/day10/day10.py
|
81c932996f18e594549035d1a6645113a8d9b5c3
|
[] |
no_license
|
piksel/advent_of_code_2016
|
eae359228372b53f88430360b38e48210ac6da40
|
996fe2a999949fab420115474b32b40ed8ba8414
|
refs/heads/master
| 2021-01-12T10:12:19.865894
| 2016-12-18T16:55:58
| 2016-12-18T16:55:58
| 76,386,900
| 1
| 0
| null | 2016-12-13T18:29:15
| 2016-12-13T18:29:15
| null |
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
from collections import *
import sys
def solve(inp):
value_to_bot = {}
bot_to_values = defaultdict(set)
outputs = defaultdict(list)
giveaways = []
Giveaway = namedtuple('Giveaway', 'bot low_to low_type high_to high_type')
for s in inp:
instruction = s.split()
if instruction[0] == 'value':
value = int(instruction[1])
bot = int(instruction[5])
bot_to_values[bot] |= {value}
value_to_bot[value] = bot
else:
assert instruction[0] == 'bot'
bot = int(instruction[1])
low_type = instruction[5]
low_to = int(instruction[6])
high_type = instruction[10]
high_to = int(instruction[11])
giveaways += [(bot, Giveaway(bot, low_to, low_type, high_to, high_type))]
while giveaways:
if 61 in value_to_bot and 17 in value_to_bot and value_to_bot[61] == value_to_bot[17]:
step1 = value_to_bot[61]
for i in range(0, len(giveaways)):
(bot, giveaway) = giveaways[i]
if len(bot_to_values[bot]) == 2:
low = min(bot_to_values[bot])
high = max(bot_to_values[bot])
bot_to_values[bot] -= {low, high}
value_to_bot.pop(low)
value_to_bot.pop(high)
if giveaway.low_type == 'bot':
bot_to_values[giveaway.low_to] |= {low}
value_to_bot[low] = giveaway.low_to
else:
outputs[giveaway.low_to] += [low]
if giveaway.high_type == 'bot':
bot_to_values[giveaway.high_to] |= {high}
value_to_bot[high] = giveaway.high_to
else:
outputs[giveaway.high_to] += [high]
giveaways = giveaways[:i] + giveaways[i + 1:]
break
step2 = outputs[0][0] * outputs[1][0] * outputs[2][0]
return (step1, step2)
inp = sys.stdin.readlines()
(step1, step2) = solve(inp)
print(step1)
print(step2)
|
[
"patlil@kth.se"
] |
patlil@kth.se
|
914e6f7c89aa39a56fd10010b11450b11fe0aa41
|
7a1b88d06ea18772b065b43d775cec6dd2acdf80
|
/1769.py
|
335ddf10102d99cbef71424085f8d7fa2bffbf8f
|
[] |
no_license
|
skaurl/baekjoon-online-judge
|
28144cca45168e79b1ae0baa9a351f498f8d19ab
|
1620d298c2f429e03c5f9387d8aca13763f5c731
|
refs/heads/master
| 2023-07-26T10:07:29.724066
| 2021-09-07T09:21:02
| 2021-09-07T09:21:02
| 299,019,978
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def asdf(n):
result = 0
for i in n:
result+=int(i)
return str(result)
n = input()
cnt = 0
if int(asdf(n))%3==0:
check="YES"
else:
check="NO"
while len(n)!=1:
n = asdf(n)
cnt+=1
print(cnt)
print(check)
|
[
"dr_lunars@naver.com"
] |
dr_lunars@naver.com
|
5c0876753f3a3beecafacee2e309160167af9ad7
|
073e5e503e01b44881edffc81e6ad1efe04f4520
|
/python/collibra-core/collibra_core/model/paged_response_workflow_task.py
|
56ad3e4d40f3107fd98e85069d58a6a87c47f06c
|
[] |
no_license
|
AaronCWacker/collibra
|
f251d3556192c03b2be5acb0101608b4f2d87c9d
|
8bd1de3eecc5835bc96feacc17c6dd86ed70ac85
|
refs/heads/main
| 2023-03-05T05:00:48.940201
| 2021-01-20T03:03:17
| 2021-01-29T21:17:49
| 416,506,152
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,369
|
py
|
"""
Collibra Data Governance Center Core API
<p>The Core REST API allows you to create your own integrations with Collibra Data Governance Center.</p><p><i>Create custom applications to help users get access to the right data.</i></p> # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from collibra_core.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from collibra_core.model.workflow_task import WorkflowTask
globals()['WorkflowTask'] = WorkflowTask
class PagedResponseWorkflowTask(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'total': (int,), # noqa: E501
'offset': (int,), # noqa: E501
'limit': (int,), # noqa: E501
'results': ([WorkflowTask],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'total': 'total', # noqa: E501
'offset': 'offset', # noqa: E501
'limit': 'limit', # noqa: E501
'results': 'results', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PagedResponseWorkflowTask - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
total (int): The total number of results.. [optional] # noqa: E501
offset (int): The offset for the results.. [optional] # noqa: E501
limit (int): The maximum number of results to be returned.. [optional] # noqa: E501
results ([WorkflowTask]): The list of results.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"srid@streamsets.com"
] |
srid@streamsets.com
|
7919fddec59e58186fdf0241675e0b0acd49cd6e
|
a63410602c5bb9ffbf4d37769a1df1a271d7ff8f
|
/src/sentry/nodestore/riak/backend.py
|
539d2d182d500d29d5564f4b69c0f3a94e6125cc
|
[
"BSD-2-Clause"
] |
permissive
|
meituan/sentry
|
d824d5e9096fe4e8604cebbea0a6c939ce12ac44
|
cd35f2345aaef1346e95b4ce5fed12fb0b648db7
|
refs/heads/master
| 2023-06-07T06:43:20.007879
| 2013-12-27T05:18:12
| 2013-12-27T05:18:12
| 15,473,792
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
"""
sentry.nodestore.riak.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import riak
import riak.resolver
from sentry.nodestore.base import NodeStorage
class RiakNodeStorage(NodeStorage):
"""
A Riak-based backend for storing node data.
>>> RiakNodeStorage(nodes=[{'host':'127.0.0.1','http_port':8098}])
"""
def __init__(self, nodes, bucket='nodes',
resolver=riak.resolver.last_written_resolver, **kwargs):
self.conn = riak.RiakClient(
nodes=nodes, resolver=resolver, **kwargs)
self.bucket = self.conn.bucket(bucket)
super(RiakNodeStorage, self).__init__(**kwargs)
def create(self, data):
obj = self.bucket.new(data=data)
obj.store()
return obj.key
def delete(self, id):
obj = self.bucket.new(key=id)
obj.delete()
def get(self, id):
# just fetch it from a random backend, we're not aiming for consistency
obj = self.bucket.get(key=id, r=1)
if not obj:
return None
return obj.data
def get_multi(self, id_list, r=1):
result = self.bucket.multiget(id_list)
return dict(
(obj.key, obj.data)
for obj in result
)
def set(self, id, data):
obj = self.bucket.new(key=id, data=data)
obj.store()
def cleanup(self, cutoff_timestamp):
# TODO(dcramer): we should either index timestamps or have this run
# a map/reduce (probably the latter)
raise NotImplementedError
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
78637d9daeacf6d6c1a53731dbbbe95ed1f0eb3f
|
2d93403fac1645fdbf1727f0d17fbea6eeef470a
|
/decorators/demo.py
|
bf5bfdf65750333628011b83fb028c9a19c0a484
|
[
"MIT"
] |
permissive
|
Minkov/python-oop-2020-02
|
d13c8c8feaa9ad41c524fc82887a98745115ac57
|
d2acb1504c1a135cded2ae6ff42acccb303d9ab1
|
refs/heads/master
| 2021-02-04T00:43:14.997404
| 2020-03-26T18:21:03
| 2020-03-26T18:21:03
| 243,588,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# def f1():
# print(1)
#
#
# def execute_operation(func):
# print(f'Started execution of {func.__name__}')
# func()
# print(f'Execution of {func.__name__} ended')
#
#
# execute_operation(f1)
# execute_operation(lambda: print(2))
def sum2(x):
    def sum_internal(y):
        # x is captured from the enclosing call to sum2 (a closure)
        return x + y
    return sum_internal
sum3 = sum2(3)
sum4 = sum2(4)
print(sum3(2))
print(sum4(2))
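# A minimal decorator sketch in the spirit of the commented-out
# execute_operation above; `log_execution` is an illustrative name:
def log_execution(func):
    def wrapper(*args, **kwargs):
        print(f'Started execution of {func.__name__}')
        result = func(*args, **kwargs)
        print(f'Execution of {func.__name__} ended')
        return result
    return wrapper

@log_execution
def f2():
    print(2)

f2()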
|
[
"DonchoMinkov@gmail.com"
] |
DonchoMinkov@gmail.com
|
27415cb1d4b42809e8acaf23400ac4b0155a6eba
|
b1cf54e4d6f969d9084160fccd20fabc12c361c2
|
/dsa/recursion/reverse_string.py
|
0f8c0fc52f7f09d9560c02f86b5d4e92a141f047
|
[] |
no_license
|
zarkle/code_challenges
|
88a53477d6f9ee9dd71577678739e745b9e8a694
|
85b7111263d4125b362184df08e8a2265cf228d5
|
refs/heads/master
| 2021-06-10T11:05:03.048703
| 2020-01-23T06:16:41
| 2020-01-23T06:16:41
| 136,668,643
| 0
| 1
| null | 2019-02-07T23:35:59
| 2018-06-08T21:44:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
"""
Reverse a String
This interview question requires you to reverse a string using recursion. Make sure to think of the base case here.
Again, make sure you use recursion to accomplish this. Do not slice (e.g. string[::-1]) or use iteration, there must be a recursive call for the function.
"""
def reverse(s):
    # Base case: an empty or single-character string is its own reverse
    if len(s) <= 1:
        return s
    # Recursive case: reverse the remainder, then append the first character
    return reverse(s[1:]) + s[0]
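# Quick sanity checks for the sketch above (expected values in comments):
print(reverse('hello world'))  # dlrow olleh
print(reverse('a'))            # a
print(reverse(''))             # (empty string)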
|
[
"beverly.pham@gmail.com"
] |
beverly.pham@gmail.com
|
cad321d76d8a31d63c2ff825f3c60e5531e36aed
|
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
|
/gmn/src/d1_gmn/tests/test_proxy_mode.py
|
02187cb5bb6855cde698e87f8f984985f130fdc4
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/d1_python
|
5e685f1af0c356190f2d6df45d1ac849e2f56972
|
d72a9461894d9be7d71178fb7310101b8ef9066a
|
refs/heads/master
| 2023-08-29T03:16:38.131760
| 2023-06-27T21:59:37
| 2023-06-27T21:59:37
| 60,103,877
| 15
| 12
|
Apache-2.0
| 2023-09-06T18:27:53
| 2016-05-31T16:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,028
|
py
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GMN can handle storage of the object bytes itself, or it can defer storage of the
object bytes to another web server (proxy mode).
The mode is selectable on a per-object basis.
"""
import base64
import json
import re
import django.test
import freezegun
import pytest
import requests
import responses
import d1_common.type_conversions
import d1_common.types.exceptions
import d1_common.url
import d1_gmn.app.proxy
import d1_gmn.app.sciobj_store
import d1_gmn.tests.gmn_test_case
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.mock_api.catch_all
import d1_test.mock_api.get
import d1_gmn.tests.gmn_mock
AUTH_USERNAME = "Auth user name"
AUTH_PASSWORD = "Auth user password !@#$%"
@d1_test.d1_test_case.reproducible_random_decorator("TestProxyMode")
@freezegun.freeze_time("1999-09-09")
class TestProxyMode(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def create_and_check_proxy_obj(self, client, do_redirect, use_invalid_url=False):
"""Create a sciobj that wraps object bytes stored on a 3rd party server. We use
Responses to simulate the 3rd party server.
If ``do_redirect`` is True, a 302 redirect operation is added. This tests that
GMN is able to follow redirects when establishing the proxy stream.
"""
# Use the MNRead.get() mock API to simulate a remote 3rd party server that holds
# proxy objects.
d1_test.mock_api.get.add_callback(
d1_test.d1_test_case.MOCK_REMOTE_BASE_URL
)
# Create a proxy object.
pid = d1_test.instance_generator.identifier.generate_pid()
if not use_invalid_url:
proxy_url = self.get_remote_sciobj_url(pid, client)
else:
proxy_url = self.get_invalid_sciobj_url(pid, client)
pid, sid, sciobj_bytes, sysmeta_pyxb = self.create_obj(
client, pid, sid=True, vendor_dict=self.vendor_proxy_mode(proxy_url)
)
# Check that object was not stored locally
assert not d1_gmn.app.sciobj_store.is_existing_sciobj_file(pid)
# Retrieve the proxy object and check it
response = self.call_d1_client(client.get, pid)
recv_sciobj_bytes = response.content
assert recv_sciobj_bytes == sciobj_bytes
return response
def get_remote_sciobj_url(self, pid, client):
return d1_common.url.joinPathElements(
d1_test.d1_test_case.MOCK_REMOTE_BASE_URL,
d1_common.type_conversions.get_version_tag_by_pyxb_binding(
client.pyxb_binding
),
"object",
d1_common.url.encodePathElement(pid),
)
def get_invalid_sciobj_url(self, pid, client):
return d1_common.url.joinPathElements(
d1_test.d1_test_case.MOCK_INVALID_BASE_URL,
d1_common.type_conversions.get_version_tag_by_pyxb_binding(
client.pyxb_binding
),
"object",
d1_common.url.encodePathElement(pid),
)
def get_remote_sciobj_bytes(self, pid, client):
sciobj_url = self.get_remote_sciobj_url(pid, client)
return requests.get(sciobj_url).content
def decode_basic_auth(self, basic_auth_str):
"""Decode a Basic Authentication header to (username, password)."""
m = re.match(r"Basic (.*)", basic_auth_str)
return (
base64.standard_b64decode(m.group(1).encode("utf-8"))
.decode("utf-8")
.split(":")
)
def test_1000(self, gmn_client_v1_v2):
"""create(): Proxy mode: Create and retrieve proxy object, no redirect."""
self.create_and_check_proxy_obj(gmn_client_v1_v2, do_redirect=False)
def test_1020(self, gmn_client_v1_v2):
"""create(): Proxy mode: Create and retrieve proxy object with redirect."""
self.create_and_check_proxy_obj(gmn_client_v1_v2, do_redirect=True)
def test_1040(self):
"""create(): Proxy mode: Passing invalid url raises InvalidRequest."""
with pytest.raises(d1_common.types.exceptions.InvalidRequest):
            self.create_and_check_proxy_obj(
                self.client_v2,
                do_redirect=False,
                use_invalid_url=True,
            )
@django.test.override_settings(
PROXY_MODE_BASIC_AUTH_ENABLED=False,
PROXY_MODE_BASIC_AUTH_USERNAME=AUTH_USERNAME,
PROXY_MODE_BASIC_AUTH_PASSWORD=AUTH_PASSWORD,
PROXY_MODE_STREAM_TIMEOUT=30,
)
def test_1050(self):
"""get(): Authentication headers: Not passed to remote server when
AUTH_ENABLED=False.
We check this implicitly by checking that the method that generates the
Authentication header IS NOT called.
"""
with d1_gmn.tests.gmn_mock.detect_proxy_auth() as m:
self.create_and_check_proxy_obj(self.client_v2, do_redirect=False)
assert m.call_count == 0
@django.test.override_settings(
PROXY_MODE_BASIC_AUTH_ENABLED=True,
PROXY_MODE_BASIC_AUTH_USERNAME=AUTH_USERNAME,
PROXY_MODE_BASIC_AUTH_PASSWORD=AUTH_PASSWORD,
PROXY_MODE_STREAM_TIMEOUT=30,
)
def test_1060(self):
"""get(): Authentication headers: Passed to remote server when
AUTH_ENABLED=True.
We check this implicitly by checking that the method that generates the
Authentication header IS called.
"""
with d1_gmn.tests.gmn_mock.detect_proxy_auth() as m:
self.create_and_check_proxy_obj(self.client_v2, do_redirect=False)
        assert m.call_count == 1
@django.test.override_settings(
PROXY_MODE_BASIC_AUTH_ENABLED=True,
PROXY_MODE_BASIC_AUTH_USERNAME=AUTH_USERNAME,
PROXY_MODE_BASIC_AUTH_PASSWORD=AUTH_PASSWORD,
PROXY_MODE_STREAM_TIMEOUT=30,
)
def test_1070(self):
"""_mk_http_basic_auth_header(): Returns a correctly encoded basic auth
header value.
"""
auth_str = d1_gmn.app.proxy._mk_http_basic_auth_header()["Authorization"]
user_str, pw_str = self.decode_basic_auth(auth_str)
assert user_str == AUTH_USERNAME
assert pw_str == AUTH_PASSWORD
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
d4bceb1aa1d9448ab60826ebb39c69fc254e7129
|
b1e7fecc6b177acc0730d5b882d2ed9418233f01
|
/wedding/rsvp/admin.py
|
0a685d61961822a0f056f9a1cd26eb4a5d5bc86a
|
[] |
no_license
|
mavroskardia/wedding
|
0607a3905456c017b0c1e4d9cc051a13fa20f849
|
e8909ceaa6c7665966344994c641c0a997705714
|
refs/heads/master
| 2020-05-20T16:46:47.343124
| 2014-09-03T17:21:28
| 2014-09-03T17:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
from django.contrib import admin
from rsvp.models import Guest
admin.site.register(Guest)
|
[
"chryso@gmail.com"
] |
chryso@gmail.com
|
1f6b679170bc33780d485262b7c0cf107349373d
|
f6cb3563a412f148a8a9f47204ac1e2226ae7b2e
|
/examples/svc/metrics/attention.py
|
096c433cdc7662bd0095ddcc4c570cfe3686cbe0
|
[
"MIT"
] |
permissive
|
sundogrd/tensorflow_end2end_speech_recognition
|
424789888a54d7149aa9a35a68e781df553abfd9
|
61e4a65fb5c9f3d9f690d713dcd77a48b1de0a14
|
refs/heads/master
| 2020-05-17T19:21:34.206076
| 2019-04-28T14:03:12
| 2019-04-28T14:03:12
| 183,913,207
| 0
| 0
|
MIT
| 2019-04-28T13:40:36
| 2019-04-28T13:40:36
| null |
UTF-8
|
Python
| false
| false
| 12,373
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Define evaluation method for Attention-based model (SVC corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from tqdm import tqdm
from experiments.svc.metrics.ctc import read_trans
def do_eval_fmeasure(session, decode_op, model, dataset,
eval_batch_size=None, progressbar=False):
"""Evaluate trained model by F-measure.
Args:
session: session of training model
decode_op: operation for decoding
model: the model to evaluate
dataset: An instance of a `Dataset' class
label_type (string): phone39 or phone48 or phone61
is_test (bool, optional): set to True when evaluating by the test set
eval_batch_size (int, optional): the batch size when evaluating the model
progressbar (bool, optional): if True, visualize the progressbar
Return:
fmean (float): mean of f-measure of laughter and filler
"""
batch_size_original = dataset.batch_size
# Reset data counter
dataset.reset()
# Set batch size in the evaluation
if eval_batch_size is not None:
dataset.batch_size = eval_batch_size
tp_l, fp_l, fn_l = 0, 0, 0
tp_f, fp_f, fn_f = 0, 0, 0
if progressbar:
pbar = tqdm(total=len(dataset))
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, labels_seq_len, _ = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
batch_size = inputs[0].shape[0]
# Decode
labels_pred = session.run(decode_op, feed_dict=feed_dict)
for i_batch in range(batch_size):
detected_l_num = np.sum(np.array(labels_pred[i_batch]) == 1)
detected_f_num = np.sum(np.array(labels_pred[i_batch]) == 2)
true_l_num = np.sum(labels_true[0][i_batch] == 1)
true_f_num = np.sum(labels_true[0][i_batch] == 2)
# Laughter
if detected_l_num <= true_l_num:
tp_l += detected_l_num
fn_l += true_l_num - detected_l_num
else:
tp_l += true_l_num
fp_l += detected_l_num - true_l_num
# Filler
if detected_f_num <= true_f_num:
tp_f += detected_f_num
fn_f += true_f_num - detected_f_num
else:
tp_f += true_f_num
fp_f += detected_f_num - true_f_num
if progressbar:
pbar.update(1)
if is_new_epoch:
break
# Compute F-measure
p_l = tp_l / (tp_l + fp_l) if (tp_l + fp_l) != 0 else 0
r_l = tp_l / (tp_l + fn_l) if (tp_l + fn_l) != 0 else 0
f_l = 2 * r_l * p_l / (r_l + p_l) if (r_l + p_l) != 0 else 0
r_f = tp_f / (tp_f + fn_f) if (tp_f + fn_f) != 0 else 0
p_f = tp_f / (tp_f + fp_f) if (tp_f + fp_f) != 0 else 0
f_f = 2 * r_f * p_f / (r_f + p_f) if (r_f + p_f) != 0 else 0
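    # Recall the definitions: P = TP / (TP + FP), R = TP / (TP + FN),
    # F = 2PR / (P + R); the guards above return 0 for empty denominators.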
# confusion_l = [tp_l, fp_l, fn_l, tp_l + fp_l + fn_l]
# confusion_f = [tp_f, fp_f, fn_f, tp_f + fp_f + fn_f]
acc_l = [p_l, r_l, f_l]
acc_f = [p_f, r_f, f_f]
mean = [(p_l + p_f) / 2., (r_l + r_f) / 2., (f_l + f_f) / 2.]
# df_confusion = pd.DataFrame({'Laughter': confusion_l, 'Filler': confusion_f},
# columns=['Laughter', 'Filler'],
# index=['TP', 'FP', 'FN', 'Sum'])
# print(df_confusion)
df_acc = pd.DataFrame({'Laughter': acc_l, 'Filler': acc_f, 'Mean': mean},
columns=['Laughter', 'Filler', 'Mean'],
index=['Precision', 'Recall', 'F-measure'])
# print(df_acc)
# Register original batch size
if eval_batch_size is not None:
dataset.batch_size = batch_size_original
return mean[2], df_acc
def do_eval_fmeasure_time(session, decode_op, attention_weights_op, model, dataset,
eval_batch_size=None, progressbar=False):
"""Evaluate trained model by F-measure.
Args:
session: session of training model
decode_op: operation for decoding
attention_weights_op: operation for computing attention weights
model: the model to evaluate
dataset: An instance of a `Dataset' class
label_type (string): phone39 or phone48 or phone61
is_test (bool, optional): set to True when evaluating by the test set
eval_batch_size (int, optional): the batch size when evaluating the model
progressbar (bool, optional): if True, visualize the progressbar
Returns:
fmean (float): mean of f-measure of laughter and filler
"""
threshold_l = threshold_f = 0.5
# Load ground truth labels
utterance_dict = read_trans(
label_path='/n/sd8/inaguma/corpus/svc/data/labels.txt')
batch_size_original = dataset.batch_size
# Reset data counter
dataset.reset()
# Set batch size in the evaluation
if eval_batch_size is not None:
dataset.batch_size = eval_batch_size
tp_l, fp_l, fn_l = 0, 0, 0
tp_f, fp_f, fn_f = 0, 0, 0
if progressbar:
pbar = tqdm(total=len(dataset))
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, labels_seq_len, input_names = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
        batch_size = inputs[0].shape[0]
        max_frame_num = inputs[0].shape[1]
        attention_weights_list = session.run(
            [attention_weights_op], feed_dict=feed_dict)
        # Frame-level posteriors are not derived from the attention weights
        # yet, so execution always stops here; the loop below references an
        # undefined `posteriors` and is unreachable until that is implemented.
        raise NotImplementedError
for i_batch in range(batch_size):
# posteriors of each class
posteriors_index = np.array([i_batch + (batch_size * j)
for j in range(max_frame_num)])
posteriors_each = posteriors[posteriors_index]
posteriors_l = posteriors_each[:, 1]
posteriors_f = posteriors_each[:, 2]
predict_frames_l = np.where(posteriors_l >= threshold_l)[0]
predict_frames_f = np.where(posteriors_f >= threshold_f)[0]
# summarize consecutive frames in each spike
predict_frames_l_summary = []
predict_frames_f_summary = []
for i_frame in range(len(predict_frames_l)):
# not last frame
if i_frame != len(predict_frames_l) - 1:
# not consecutive
if predict_frames_l[i_frame] + 1 != predict_frames_l[i_frame + 1]:
predict_frames_l_summary.append(
predict_frames_l[i_frame])
else:
predict_frames_l_summary.append(predict_frames_l[i_frame])
for i_frame in range(len(predict_frames_f)):
# not last frame
if i_frame != len(predict_frames_f) - 1:
# not consecutive
if predict_frames_f[i_frame] + 1 != predict_frames_f[i_frame + 1]:
predict_frames_f_summary.append(
predict_frames_f[i_frame])
else:
predict_frames_f_summary.append(predict_frames_f[i_frame])
# compute true interval of each class
utt_info_list = utterance_dict[input_names[i_batch]]
true_frames_l = np.zeros((max_frame_num,))
true_frames_f = np.zeros((max_frame_num,))
for i_label in range(len(utt_info_list)):
start_frame = utt_info_list[i_label][1]
end_frame = utt_info_list[i_label][2]
if utt_info_list[i_label][0] == 'laughter':
true_frames_l[start_frame:end_frame] = 1
elif utt_info_list[i_label][0] == 'filler':
true_frames_f[start_frame:end_frame] = 1
detect_l_num = len(predict_frames_l_summary)
detect_f_num = len(predict_frames_f_summary)
true_l_num = np.sum(labels_true[i_batch] == 1)
true_f_num = np.sum(labels_true[i_batch] == 2)
####################
# laughter
####################
for frame in predict_frames_l_summary:
# prediction is true
if true_frames_l[frame] == 1:
# TODO: まだ予測してない
tp_l += 1
# TODO: すでに予測してたら無視
else:
fp_l += 1
# could not predict
if true_l_num > detect_l_num:
fn_l += true_l_num - detect_l_num
####################
# filler
####################
for frame in predict_frames_f_summary:
# prediction is true
if true_frames_f[frame] == 1:
# TODO: まだ予測してない
tp_f += 1
# TODO: すでに予測してたら無視
else:
fp_f += 1
# could not predict
if true_f_num > detect_f_num:
fn_f += true_f_num - detect_f_num
if progressbar:
pbar.update(1)
p_l = tp_l / (tp_l + fp_l) if (tp_l + fp_l) != 0 else 0
r_l = tp_l / (tp_l + fn_l) if (tp_l + fn_l) != 0 else 0
f_l = 2 * r_l * p_l / (r_l + p_l) if (r_l + p_l) != 0 else 0
r_f = tp_f / (tp_f + fn_f) if (tp_f + fn_f) != 0 else 0
p_f = tp_f / (tp_f + fp_f) if (tp_f + fp_f) != 0 else 0
f_f = 2 * r_f * p_f / (r_f + p_f) if (r_f + p_f) != 0 else 0
# confusion_l = [tp_l, fp_l, fn_l, tp_l + fp_l + fn_l]
# confusion_f = [tp_f, fp_f, fn_f, tp_f + fp_f + fn_f]
acc_l = [p_l, r_l, f_l]
acc_f = [p_f, r_f, f_f]
mean = [(p_l + p_f) / 2., (r_l + r_f) / 2., (f_l + f_f) / 2.]
# df_confusion = pd.DataFrame({'Laughter': confusion_l, 'Filler': confusion_f},
# columns=['Laughter', 'Filler'],
# index=['TP', 'FP', 'FN', 'Sum'])
# print(df_confusion)
df_acc = pd.DataFrame({'Laughter': acc_l, 'Filler': acc_f, 'Mean': mean},
columns=['Laughter', 'Filler', 'Mean'],
index=['Precision', 'Recall', 'F-measure'])
# print(df_acc)
# Register original batch size
if eval_batch_size is not None:
dataset.batch_size = batch_size_original
return mean[2], df_acc
def do_eval_ler(session, ler_op, model, dataset, progressbar=False):
"""Evaluate trained model by Label Error Rate.
Args:
session: session of training model
ler_op: operation for computing label error rate
model: the model to evaluate
dataset: An instance of a `Dataset` class
progressbar (bool, optional): if True, visualize the progressbar
Returns:
ler_mean (float): An average of LER
"""
ler_mean = 0
if progressbar:
pbar = tqdm(total=len(dataset))
for data, is_new_epoch in dataset:
# create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, _, _ = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
batch_size = inputs[0].shape[0]
ler_batch = session.run(ler_op, feed_dict=feed_dict)
ler_mean += ler_batch * batch_size
if progressbar:
pbar.update(batch_size)
ler_mean /= dataset.data_num
return ler_mean
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
a75c0da167a87c4e4c4d0f4d60cba2a79742103d
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_None/trend_ConstantTrend/cycle_0/ar_12/test_artificial_1024_None_ConstantTrend_0_12_100.py
|
f6d41bf4691b38fb611f12d410521c6f82596ac1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
fa3a11c01c620e678186a5d1ba7fb254f3c4cfd8
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/image_os_disk.py
|
db287a53c829cd6de75306f14adce5f33c0b6c14
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageOSDisk(Model):
"""Describes an Operating System disk.
All required parameters must be populated in order to send to Azure.
:param os_type: Required. This property allows you to specify the type of
the OS that is included in the disk if creating a VM from a custom image.
<br><br> Possible values are: <br><br> **Windows** <br><br> **Linux**.
Possible values include: 'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2018_06_01.models.OperatingSystemTypes
:param os_state: Required. The OS State. Possible values include:
'Generalized', 'Specialized'
:type os_state: str or
~azure.mgmt.compute.v2018_06_01.models.OperatingSystemStateTypes
:param snapshot: The snapshot.
:type snapshot: ~azure.mgmt.compute.v2018_06_01.models.SubResource
:param managed_disk: The managedDisk.
:type managed_disk: ~azure.mgmt.compute.v2018_06_01.models.SubResource
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2018_06_01.models.CachingTypes
:param disk_size_gb: Specifies the size of empty data disks in gigabytes.
This element can be used to overwrite the name of the disk in a virtual
machine image. <br><br> This value cannot be larger than 1023 GB
:type disk_size_gb: int
:param storage_account_type: Specifies the storage account type for the
managed disk. UltraSSD_LRS cannot be used with OS Disk. Possible values
include: 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'
:type storage_account_type: str or
~azure.mgmt.compute.v2018_06_01.models.StorageAccountTypes
"""
_validation = {
'os_type': {'required': True},
'os_state': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'os_state': {'key': 'osState', 'type': 'OperatingSystemStateTypes'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ImageOSDisk, self).__init__(**kwargs)
self.os_type = kwargs.get('os_type', None)
self.os_state = kwargs.get('os_state', None)
self.snapshot = kwargs.get('snapshot', None)
self.managed_disk = kwargs.get('managed_disk', None)
self.blob_uri = kwargs.get('blob_uri', None)
self.caching = kwargs.get('caching', None)
self.disk_size_gb = kwargs.get('disk_size_gb', None)
self.storage_account_type = kwargs.get('storage_account_type', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
f58135f4e2c1093cf3d082d8408c503226f4a87e
|
9a55969cdf85b30873f33aae1410be1cdb91fca5
|
/gym_wrapper.py
|
a9056fcd2d3e3dbef297b98a9edc2e3102534c8b
|
[
"Apache-2.0"
] |
permissive
|
StepNeverStop/RL-TF1
|
65f296bce00ba00185df080c7f770d59ef92e4ed
|
c9e75819504a8db4c587e2aa3e4c9c8845fd9f08
|
refs/heads/master
| 2022-12-11T18:04:06.306955
| 2020-09-26T03:47:01
| 2020-09-26T03:47:01
| 223,076,782
| 5
| 2
|
Apache-2.0
| 2022-12-08T06:16:32
| 2019-11-21T03:06:17
|
Python
|
UTF-8
|
Python
| false
| false
| 8,020
|
py
|
import gym
import numpy as np
import threading
class FakeMultiThread(threading.Thread):
def __init__(self, func, args=()):
super().__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
try:
return self.result
except Exception:
return None
class gym_envs(object):
def __init__(self, gym_env_name, n, render_mode='first'):
'''
Input:
            gym_env_name: gym training environment id, e.g. CartPole-v0
            n: number of environments
            render_mode: mode of rendering, optional: first, last, all, random_[num] (e.g. random_2), [list] (e.g. [0, 2, 4])
        '''
        self.n = n  # number of environments
self.envs = [gym.make(gym_env_name) for _ in range(self.n)]
# process observation
self.obs_space = self.envs[0].observation_space
if isinstance(self.obs_space, gym.spaces.box.Box):
self.obs_high = self.obs_space.high
self.obs_low = self.obs_space.low
self.obs_type = 'visual' if len(self.obs_space.shape) == 3 else 'vector'
        self.reward_threshold = self.envs[0].env.spec.reward_threshold  # reward threshold at which the task counts as solved
# process action
self.action_space = self.envs[0].action_space
if isinstance(self.action_space, gym.spaces.box.Box):
self.action_type = 'continuous'
self.action_high = self.action_space.high
self.action_low = self.action_space.low
elif isinstance(self.action_space, gym.spaces.tuple.Tuple):
self.action_type = 'Tuple(Discrete)'
else:
self.action_type = 'discrete'
self.action_mu, self.action_sigma = self._get_action_normalize_factor()
self._get_render_index(render_mode)
def _get_render_index(self, render_mode):
'''
        get the list of render window indices, e.g. [0, 1] when there are 4 training environments.
'''
assert isinstance(render_mode, (list, str)), 'render_mode must have type of str or list.'
if isinstance(render_mode, list):
assert all([isinstance(i, int) for i in render_mode]), 'items in render list must have type of int'
            assert min(render_mode) >= 0, 'render index must be non-negative'
            assert max(render_mode) <= self.n, 'render index cannot be larger than the environment number.'
self.render_index = render_mode
elif isinstance(render_mode, str):
if render_mode == 'first':
self.render_index = [0]
elif render_mode == 'last':
self.render_index = [-1]
elif render_mode == 'all':
self.render_index = [i for i in range(self.n)]
else:
a, b = render_mode.split('_')
if a == 'random' and 0 < int(b) <= self.n:
import random
self.render_index = random.sample([i for i in range(self.n)], int(b))
else:
raise Exception('render_mode must be first, last, all, [list] or random_[num]')
def render(self):
'''
render game windows.
'''
[self.envs[i].render() for i in self.render_index]
def close(self):
'''
close all environments.
'''
[env.close() for env in self.envs]
def sample_action(self):
'''
        generate random actions for all training environments.
'''
return np.array([env.action_space.sample() for env in self.envs])
def reset(self):
self.dones_index = []
threadpool = []
for i in range(self.n):
th = FakeMultiThread(self.envs[i].reset, args=())
threadpool.append(th)
for th in threadpool:
th.start()
for th in threadpool:
threading.Thread.join(th)
obs = np.array([threadpool[i].get_result() for i in range(self.n)])
obs = self._maybe_one_hot(obs)
return obs
# if self.obs_type == 'visual':
# return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.n)])
# else:
# return np.array([threadpool[i].get_result() for i in range(self.n)])
def step(self, actions, scale=True):
        if scale:
actions = self.action_sigma * actions + self.action_mu
if self.action_type == 'discrete':
actions = actions.reshape(-1,)
elif self.action_type == 'Tuple(Discrete)':
actions = actions.reshape(self.n, -1).tolist()
threadpool = []
for i in range(self.n):
th = FakeMultiThread(self.envs[i].step, args=(actions[i], ))
threadpool.append(th)
for th in threadpool:
th.start()
for th in threadpool:
threading.Thread.join(th)
results = [threadpool[i].get_result() for i in range(self.n)]
# if self.obs_type == 'visual':
# results = [
# [threadpool[i].get_result()[0][np.newaxis, :], *threadpool[i].get_result()[1:]]
# for i in range(self.n)]
# else:
# results = [threadpool[i].get_result() for i in range(self.n)]
obs, reward, done, info = [np.array(e) for e in zip(*results)]
obs = self._maybe_one_hot(obs)
self.dones_index = np.where(done)[0]
return obs, reward, done, info
def partial_reset(self):
threadpool = []
for i in self.dones_index:
th = FakeMultiThread(self.envs[i].reset, args=())
threadpool.append(th)
for th in threadpool:
th.start()
for th in threadpool:
threading.Thread.join(th)
obs = np.array([threadpool[i].get_result() for i in range(self.dones_index.shape[0])])
obs = self._maybe_one_hot(obs, is_partial=True)
return obs
# if self.obs_type == 'visual':
# return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.dones_index.shape[0])])
# else:
# return np.array([threadpool[i].get_result() for i in range(self.dones_index.shape[0])])
def _get_action_normalize_factor(self):
'''
get action mu and sigma. mu: action bias. sigma: action scale
input:
self.action_low: [-2, -3],
self.action_high: [2, 6]
return:
mu: [0, 1.5],
sigma: [2, 4.5]
'''
if self.action_type == 'continuous':
return (self.action_high + self.action_low) / 2, (self.action_high - self.action_low) / 2
else:
return 0, 1
def _maybe_one_hot(self, obs, is_partial=False):
"""
Change discrete observation from list(int) to list(one_hot) format.
for example:
            observation: [[1, 0], [2, 1]]
observation space: [3, 4]
environment number: 2
then, output: [[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]
"""
obs_number = len(self.dones_index) if is_partial else self.n
if hasattr(self.obs_space, 'n'):
obs = obs.reshape(obs_number, -1)
if isinstance(self.obs_space.n, (int, np.int32)):
dim = [int(self.obs_space.n)]
else:
                dim = list(self.obs_space.n)  # in the CliffWalking-v0 environment its type is numpy.int32
multiplication_factor = dim[1:] + [1]
n = np.array(dim).prod()
ints = obs.dot(multiplication_factor)
x = np.zeros([obs.shape[0], n])
for i, j in enumerate(ints):
x[i, j] = 1
return x
else:
return obs
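# Editorial usage sketch (added for illustration; the env id and counts below
# are assumptions, not from the original file):
# envs = gym_envs('CartPole-v0', n=4, render_mode='first')
# obs = envs.reset()
# obs, reward, done, info = envs.step(envs.sample_action(), scale=False)
# obs_for_finished = envs.partial_reset()  # re-reset only the finished envs
# envs.close()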
|
[
"271668153@qq.com"
] |
271668153@qq.com
|
2ef9028b99e09a2662b0d9c7461782c15a37d51d
|
ebb63b057a82b8a10df305252cbcda4186ec02f7
|
/taichi_blend/bundle-packages/meltblend/__init__.py
|
86e44f3f0f3dde2931cfa10dc1339672c07d53d1
|
[] |
no_license
|
yjchoi1/taichi_blend
|
aa2d6f0129c8068b9a2c8bb5a7677b3c60923d5b
|
907fdbee6027375324c9605ffc14db16e590f992
|
refs/heads/master
| 2023-03-21T12:31:04.126621
| 2021-03-01T15:06:13
| 2021-03-01T15:06:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
bl_info = {
'name': 'Taichi Blend Physics',
    'description': 'Taichi Blender integration',
'author': 'Taichi Developers',
'version': (0, 0, 5),
'blender': (2, 81, 0),
'location': 'Taichi Blend Window',
'support': 'COMMUNITY',
'wiki_url': 'https://github.com/taichi-dev/taichi_blend/wiki',
'tracker_url': 'https://github.com/taichi-dev/taichi_blend/issues',
'category': 'Physics',
}
from . import node_system, user_iface
modules = [
node_system,
user_iface,
]
def register():
for module in modules:
module.register()
def unregister():
for module in reversed(modules):
module.unregister()
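# Editorial sketch (the module name is an assumption based on the package
# path; Blender 2.8x+ exposes addon enabling under bpy.ops.preferences):
# import bpy
# bpy.ops.preferences.addon_enable(module='meltblend')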
|
[
"1931127624@qq.com"
] |
1931127624@qq.com
|
8345cc43468edb997ff7b911356f1e9ae0cd0f9d
|
1bfa2c800b2d76787e224e5cf25d69fec3a9eff1
|
/tests/synapses/FI.py
|
3c5d7fb4185c1d5c6e3c707a5e73cf44a5af1738
|
[
"MIT"
] |
permissive
|
OpenSourceBrain/MiglioreEtAl14_OlfactoryBulb3D
|
fdd55b324695c82deb04b70c4f2f238af5e92285
|
edaf58abd6b3e0195125fb730e9654ae937d790b
|
refs/heads/master
| 2023-06-30T23:14:33.842015
| 2023-06-17T08:53:44
| 2023-06-17T08:53:44
| 31,535,489
| 2
| 2
| null | 2018-02-08T21:43:43
| 2015-03-02T10:37:44
|
OpenEdge ABL
|
UTF-8
|
Python
| false
| false
| 807
|
py
|
import sys; sys.path.insert(0,'..')
from tests.synapses.NEURONSynapseTest import NEURONSynapseTest
from tests.synapses.NeuroMLSynapseTest import NeuroMLSynapseTest
class NEURON(NEURONSynapseTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = "../NEURON/fi.mod"
self.label = "FI"
self.resultsFile = "results/synapses/FI/NEURON.json"
def prepare(self, h, soma, syn):
syn.gmax = 1
syn.tau2 = 100
class NeuroML(NeuroMLSynapseTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = "../NeuroML2/Synapses/FI.synapse.xml"
self.label = "FI"
self.resultsFile = "results/synapses/FI/NeuroML.json"
def prepare(self, h, soma, syn):
syn.gbase = 1
|
[
"jbirgio@gmail.com"
] |
jbirgio@gmail.com
|
6987d77bade5bae99b89fae4d7412d77288a691a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02973/s456069800.py
|
2c1e3100d68ffea4badc684c47be28c5e8d55461
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
from bisect import bisect_right
n = int(input())
a = [-int(input()) for _ in range(n)]
li = list()
for e in a:
i = bisect_right(li, e)
if i == len(li):
li.append(e)
else:
li[i] = e
ans = len(li)
print(ans)
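# Editorial note (not part of the submission): negating the input turns the
# task into finding the longest non-decreasing subsequence, which the
# bisect_right patience loop above measures; by Dilworth's theorem that length
# equals the minimum number of strictly increasing sequences covering the
# input. The same routine as a reusable, self-checked function:
from bisect import bisect_right

def min_increasing_cover(seq):
    # length of the longest non-increasing subsequence of seq
    tails = []
    for e in (-x for x in seq):
        i = bisect_right(tails, e)
        if i == len(tails):
            tails.append(e)
        else:
            tails[i] = e
    return len(tails)

assert min_increasing_cover([2, 1, 4, 5, 3]) == 2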
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
610b8f2dbd7222992ae2de1ebf4f382a854940ba
|
2d82d4c6574bd6d32f2cf1c781615f7951f55f66
|
/muntjac/addon/google_maps/overlay/polygon.py
|
8990b4e16f9d503dbd0e2cb1dc99ee6f8bc9eb97
|
[
"Apache-2.0"
] |
permissive
|
metaperl/muntjac
|
f83f745ee03942a61af92ee7fba7285aa9c46f3c
|
8db97712edd81b4d25deaaa48587d2a08010f2c8
|
refs/heads/master
| 2021-01-15T22:04:25.057862
| 2012-11-09T03:52:59
| 2012-11-09T03:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
from muntjac.addon.google_maps.overlay.poly_overlay \
import PolyOverlay
class Polygon(PolyOverlay):
def __init__(self, Id, points, strokeColor='#ffffff', strokeWeight=1,
strokeOpacity=1.0, fillColor='#777777', fillOpacity=0.2,
clickable=False):
super(Polygon, self).__init__(Id, points, strokeColor, strokeWeight,
strokeOpacity, clickable)
self._fillColor = fillColor
self._fillOpacity = fillOpacity
def getFillColor(self):
return self._fillColor
def setFillColor(self, fillColor):
self._fillColor = fillColor
def getFillOpacity(self):
return self._fillOpacity
def setFillOpacity(self, fillOpacity):
self._fillOpacity = fillOpacity
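# Editorial usage sketch (identifiers and values are illustrative assumptions;
# `points` is whatever point sequence PolyOverlay expects):
# poly = Polygon('area-1', points, fillColor='#ff0000', fillOpacity=0.5)
# poly.setFillOpacity(0.8)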
|
[
"r.w.lincoln@gmail.com"
] |
r.w.lincoln@gmail.com
|
08f8f65d622de581829d8817089108087adc12fb
|
46cdf1f348c1fe1cf46ea2e14ecbef9bf59006bd
|
/resolwe/flow/executors/null.py
|
1fac4c33744cd01297cfa7344e0b9ebf41374fca
|
[
"Apache-2.0"
] |
permissive
|
mzganec/resolwe
|
b08dd971f1b19f55052d857063eb43afc4a827dc
|
fd5bbbc459289811ae34ad263b96b498ba15ba7d
|
refs/heads/master
| 2021-06-27T11:30:09.883345
| 2017-09-26T10:11:00
| 2017-09-26T10:16:03
| 105,652,149
| 0
| 0
| null | 2017-10-03T13:00:16
| 2017-10-03T13:00:16
| null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
"""Local workflow executor."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from resolwe.flow.executors import BaseFlowExecutor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class FlowExecutor(BaseFlowExecutor): # pylint: disable=abstract-method
"""Null dataflow executor proxy.
This executor is intended to be used in tests where you want to save
the object to the database but don't need to run it.
"""
name = 'null'
def run(self, data_id, script, verbosity=1):
"""Do nothing :)."""
pass
|
[
"domen@blenkus.com"
] |
domen@blenkus.com
|
019915d58f638d00af71e64044703bf60cd85e7a
|
160b65e1bb780f0409f4a34cc0f0e8693c67ebc4
|
/test_haystack/elasticsearch5_tests/test_query.py
|
cbddc2d8db4b74e94a1bff03a20b32d2fda3d732
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
webbyfox/django-haystack
|
83fd8a8c36ebe3e62cd7a1b0972b913c37251f9a
|
4910ccb01c31d12bf22dcb000894eece6c26f74b
|
refs/heads/master
| 2020-03-22T05:14:13.137009
| 2018-06-18T19:42:35
| 2018-06-18T19:42:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,497
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import elasticsearch
from django.test import TestCase
from haystack import connections
from haystack.inputs import Exact
from haystack.models import SearchResult
from haystack.query import SQ, SearchQuerySet
from haystack.utils.geo import D, Point
from ..core.models import AnotherMockModel, MockModel
class Elasticsearch5SearchQueryTestCase(TestCase):
def setUp(self):
super(Elasticsearch5SearchQueryTestCase, self).setUp()
self.sq = connections["elasticsearch"].get_query()
def test_build_query_all(self):
self.assertEqual(self.sq.build_query(), "*:*")
def test_build_query_single_word(self):
self.sq.add_filter(SQ(content="hello"))
self.assertEqual(self.sq.build_query(), "(hello)")
def test_build_query_boolean(self):
self.sq.add_filter(SQ(content=True))
self.assertEqual(self.sq.build_query(), "(True)")
def test_regression_slash_search(self):
self.sq.add_filter(SQ(content="hello/"))
self.assertEqual(self.sq.build_query(), "(hello\\/)")
def test_build_query_datetime(self):
self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))
self.assertEqual(self.sq.build_query(), "(2009-05-08T11:28:00)")
def test_build_query_multiple_words_and(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_filter(SQ(content="world"))
self.assertEqual(self.sq.build_query(), "((hello) AND (world))")
def test_build_query_multiple_words_not(self):
self.sq.add_filter(~SQ(content="hello"))
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(self.sq.build_query(), "(NOT ((hello)) AND NOT ((world)))")
def test_build_query_multiple_words_or(self):
self.sq.add_filter(~SQ(content="hello"))
self.sq.add_filter(SQ(content="hello"), use_or=True)
self.assertEqual(self.sq.build_query(), "(NOT ((hello)) OR (hello))")
def test_build_query_multiple_words_mixed(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(content="hello"), use_or=True)
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(
self.sq.build_query(), "(((why) OR (hello)) AND NOT ((world)))"
)
def test_build_query_phrase(self):
self.sq.add_filter(SQ(content="hello world"))
self.assertEqual(self.sq.build_query(), "(hello AND world)")
self.sq.add_filter(SQ(content__exact="hello world"))
self.assertEqual(
self.sq.build_query(), '((hello AND world) AND ("hello world"))'
)
def test_build_query_boost(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_boost("world", 5)
self.assertEqual(self.sq.build_query(), "(hello) world^5")
def test_build_query_multiple_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00")))
self.sq.add_filter(SQ(author__gt="daniel"))
self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00")))
self.sq.add_filter(SQ(title__gte="B"))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(
self.sq.build_query(),
'((why) AND pub_date:([* TO "2009-02-10 01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12 12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))',
)
def test_build_query_multiple_filter_types_with_datetimes(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))
self.sq.add_filter(SQ(author__gt="daniel"))
self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))
self.sq.add_filter(SQ(title__gte="B"))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(
self.sq.build_query(),
'((why) AND pub_date:([* TO "2009-02-10T01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12T12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))',
)
def test_build_query_in_filter_multiple_words(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"]))
self.assertEqual(
self.sq.build_query(),
'((why) AND title:("A Famous Paper" OR "An Infamous Article"))',
)
def test_build_query_in_filter_datetime(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))
self.assertEqual(
self.sq.build_query(), '((why) AND pub_date:("2009-07-06T01:56:21"))'
)
def test_build_query_in_with_set(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in={"A Famous Paper", "An Infamous Article"}))
self.assertTrue("((why) AND title:(" in self.sq.build_query())
self.assertTrue('"A Famous Paper"' in self.sq.build_query())
self.assertTrue('"An Infamous Article"' in self.sq.build_query())
def test_build_query_wildcard_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__startswith="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack*))")
def test_build_query_fuzzy_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__fuzzy="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack~))")
def test_clean(self):
self.assertEqual(self.sq.clean("hello world"), "hello world")
self.assertEqual(self.sq.clean("hello AND world"), "hello and world")
self.assertEqual(
self.sq.clean(
'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / world'
),
'hello and or not to \\+ \\- \\&& \\|| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\" \\~ \\* \\? \\: \\\\ \\/ world',
)
self.assertEqual(
self.sq.clean("so please NOTe i am in a bAND and bORed"),
"so please NOTe i am in a bAND and bORed",
)
def test_build_query_with_models(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_model(MockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
self.sq.add_model(AnotherMockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
self.sq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))
# Reset to default.
self.sq.set_result_class(None)
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
def test_in_filter_values_list(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=[1, 2, 3]))
self.assertEqual(self.sq.build_query(), '((why) AND title:("1" OR "2" OR "3"))')
def test_narrow_sq(self):
sqs = SearchQuerySet(using="elasticsearch").narrow(SQ(foo="moof"))
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
self.assertEqual(sqs.query.narrow_queries.pop(), "foo:(moof)")
def test_build_query_with_dwithin_range(self):
backend = connections["elasticsearch"].get_backend()
search_kwargs = backend.build_search_kwargs(
"where",
dwithin={
"field": "location_field",
"point": Point(1.2345678, 2.3456789),
"distance": D(m=500),
},
)
self.assertEqual(
search_kwargs["query"]["bool"]["filter"]["geo_distance"],
{
"distance": "0.500000km",
"location_field": {"lat": 2.3456789, "lon": 1.2345678},
},
)
|
[
"chris@improbable.org"
] |
chris@improbable.org
|
3961d1b537a00c32618a754782ecee5157adcf74
|
b64f4a01f1b15d7128b1bd806bfe3d10c66f14d4
|
/shortner/admin.py
|
a3ef210f572ae7b0c4b969f0fd752aeca7192ab9
|
[] |
no_license
|
mahmoudzeyada/Cloned-Pastbin-webapp
|
2e85a92b852baf0253a169107e0dafd8f90a9d0c
|
c3fbb768707d2fee5a3bed7c99f64109d103707f
|
refs/heads/master
| 2022-05-15T15:28:46.681474
| 2019-06-03T21:39:00
| 2019-06-03T21:39:00
| 190,085,592
| 0
| 0
| null | 2022-04-22T21:26:07
| 2019-06-03T21:34:59
|
Python
|
UTF-8
|
Python
| false
| false
| 102
|
py
|
from django.contrib import admin
from .models import UrlShortener
admin.site.register(UrlShortener)
|
[
"mahmoudzeyada440@gmail.com"
] |
mahmoudzeyada440@gmail.com
|
fb78a7ba635cbda1a933a00eb4dd0da34c3334c4
|
1fdf7dbde0b8253ef164b8a8fdff958ecef6866e
|
/proyecto_tienda/carrito/urls.py
|
4720a39d71e7706be0f6f794d953f06a5cf2ed68
|
[] |
no_license
|
jkaalexkei/proyecto_tienda
|
b9b0315b05a956ab1bf619059b8ea839fb4c7093
|
6986ccce2f8aaffc6b1289090d274cbc43b36509
|
refs/heads/master
| 2023-08-25T07:21:40.781058
| 2021-11-05T02:17:18
| 2021-11-05T02:17:18
| 396,094,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from django.urls import path
from . import views
app_name = 'carrito'
urlpatterns = [
path('',views.carrito,name='carrito'),
path('agregar/',views.agregar,name='agregar'),
path('eliminar/',views.remove,name='remove'),
]
|
[
"jkaalexkei@gmail.com"
] |
jkaalexkei@gmail.com
|
74dfc36af985a5ffa5a9f34e9e7f893fc514bef3
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/surface/compute/instance_groups/managed/delete.py
|
368808d0b8bcc4f36cd56c4ef8184a148eacee2a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 7,090
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting managed instance group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import managed_instance_groups_utils
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import text
from six.moves import zip
class Delete(base.DeleteCommand):
"""Delete Google Compute Engine managed instance group."""
@staticmethod
def Args(parser):
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGERS_ARG.AddArgument(
parser, operation_type='delete')
def _GenerateAutoscalerDeleteRequests(self, holder, project, mig_requests):
"""Generates Delete requestes for autoscalers attached to instance groups.
Args:
holder: ComputeApiHolder, object encapsulating compute api.
project: str, project this request should apply to.
mig_requests: Messages which will be sent to delete instance group
managers.
Returns:
Messages, which will be sent to delete autoscalers.
"""
mig_requests = list(zip(*mig_requests))[2] if mig_requests else []
zone_migs = [(request.instanceGroupManager, 'zone',
managed_instance_groups_utils.CreateZoneRef(
holder.resources, request)) for request in mig_requests
if hasattr(request, 'zone') and request.zone is not None]
region_migs = [(request.instanceGroupManager, 'region',
managed_instance_groups_utils.CreateRegionRef(
holder.resources, request)) for request in mig_requests
if hasattr(request, 'region') and request.region is not None]
zones = list(zip(*zone_migs))[2] if zone_migs else []
regions = list(zip(*region_migs))[2] if region_migs else []
client = holder.client.apitools_client
messages = client.MESSAGES_MODULE
autoscalers_to_delete = managed_instance_groups_utils.AutoscalersForMigs(
migs=zone_migs + region_migs,
autoscalers=managed_instance_groups_utils.AutoscalersForLocations(
zones=zones,
regions=regions,
client=holder.client))
requests = []
for autoscaler in autoscalers_to_delete:
if autoscaler.zone:
service = client.autoscalers
request = messages.ComputeAutoscalersDeleteRequest(
zone=path_simplifier.Name(autoscaler.zone))
else:
service = client.regionAutoscalers
request = messages.ComputeRegionAutoscalersDeleteRequest(
region=path_simplifier.Name(autoscaler.region))
request.autoscaler = autoscaler.name
request.project = project
requests.append((service, 'Delete', request))
return requests
def _GetCommonScopeNameForRefs(self, refs):
"""Gets common scope for references."""
has_zone = any(hasattr(ref, 'zone') for ref in refs)
has_region = any(hasattr(ref, 'region') for ref in refs)
if has_zone and not has_region:
return 'zone'
elif has_region and not has_zone:
return 'region'
else:
return None
def _CreateDeleteRequests(self, client, igm_refs):
"""Returns a list of delete messages for instance group managers."""
messages = client.MESSAGES_MODULE
requests = []
for ref in igm_refs:
if ref.Collection() == 'compute.instanceGroupManagers':
service = client.instanceGroupManagers
request = messages.ComputeInstanceGroupManagersDeleteRequest(
instanceGroupManager=ref.Name(),
project=ref.project,
zone=ref.zone)
elif ref.Collection() == 'compute.regionInstanceGroupManagers':
service = client.regionInstanceGroupManagers
request = messages.ComputeRegionInstanceGroupManagersDeleteRequest(
instanceGroupManager=ref.Name(),
project=ref.project,
region=ref.region)
else:
raise ValueError('Unknown reference type {0}'.format(ref.Collection()))
requests.append((service, 'Delete', request))
return requests
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
project = properties.VALUES.core.project.Get(required=True)
igm_refs = (
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGERS_ARG.
ResolveAsResource)(
args, holder.resources, default_scope=compute_scope.ScopeEnum.ZONE,
scope_lister=flags.GetDefaultScopeLister(holder.client, project))
scope_name = self._GetCommonScopeNameForRefs(igm_refs)
utils.PromptForDeletion(
igm_refs, scope_name=scope_name, prompt_title=None)
requests = list(self._CreateDeleteRequests(
holder.client.apitools_client, igm_refs))
resources = []
# Delete autoscalers first.
errors = []
autoscaler_delete_requests = self._GenerateAutoscalerDeleteRequests(
holder, project, mig_requests=requests)
if autoscaler_delete_requests:
with progress_tracker.ProgressTracker(
'Deleting ' + text.Pluralize(
len(autoscaler_delete_requests), 'autoscaler'),
autotick=False,
) as tracker:
resources = holder.client.MakeRequests(
autoscaler_delete_requests,
errors,
progress_tracker=tracker)
if errors:
utils.RaiseToolException(errors)
# Now delete instance group managers.
errors = []
with progress_tracker.ProgressTracker(
'Deleting ' + text.Pluralize(len(requests), 'Managed Instance Group'),
autotick=False,
) as tracker:
resources += holder.client.MakeRequests(
requests, errors, progress_tracker=tracker)
if errors:
utils.RaiseToolException(errors)
return resources
Delete.detailed_help = {
'brief': 'Delete Google Compute Engine managed instance groups',
'DESCRIPTION': """\
*{command}* deletes one or more Google Compute Engine managed instance
groups.
""",
}
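# Editorial note: on the command line this surface is typically reached as,
# for example (group name and zone are illustrative):
#   gcloud compute instance-groups managed delete example-group --zone us-central1-a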
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
fa45d061dd6710a66d3b365dc1f3104878de0bd9
|
66ab8fac9fb19e5ff470be0fa7b2b73600231f16
|
/pyble/osx/console.py
|
f823488bb6edf4f68be5844e217d58b1b79c62e0
|
[
"MIT"
] |
permissive
|
bgromov/PyBLEWrapper
|
e97bbc2299f880838d246a8c6fdb27b05cb72af1
|
8a5d016e65b3c259391ddc97c371ab4b1b5c61b5
|
refs/heads/master
| 2020-03-25T21:41:43.702666
| 2018-08-12T23:38:16
| 2018-08-12T23:38:16
| 144,185,816
| 0
| 0
|
MIT
| 2018-08-09T17:50:12
| 2018-08-09T17:50:12
| null |
UTF-8
|
Python
| false
| false
| 6,054
|
py
|
from objc import *
from Foundation import *
import cmd
import os
import logging
import time
from pprint import pformat
try:
from queue import Queue, Empty
except:
from Queue import Queue, Empty
from pyble.patterns import LoggerObject
class OSXCmd(cmd.Cmd, LoggerObject):
def __init__(self, history_size=10):
        # both cmd.Cmd and LoggerObject need to be initialized.
cmd.Cmd.__init__(self)
LoggerObject.__init__(self)
self.cmdqueue = Queue()
self.history_size = history_size
def registerKeyboardInterrupt(self):
stdin = NSFileHandle.fileHandleWithStandardInput().retain()
handle = objc.selector(self.keyboardHandler_, signature='v@:@')
NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(self, handle, NSFileHandleReadCompletionNotification, stdin)
stdin.readInBackgroundAndNotify()
def unregisterKeyboardInterrupt(self):
NSNotificationCenter.defaultCenter().removeObserver_(self)
def keyboardHandler_(self, notification):
data = notification.userInfo().objectForKey_(NSFileHandleNotificationDataItem)
line = NSString.alloc().initWithData_encoding_(data, NSUTF8StringEncoding).autorelease()
if len(line):
self.cmdqueue.put(line)
stdin = NSFileHandle.fileHandleWithStandardInput().retain()
stdin.readInBackgroundAndNotify()
def cmdloop(self, intro=None):
# customized for python & OSX co-existence
# use OSX framework to read input from keyboard interrupt
self.preloop()
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro) + "\n")
# the main loop
stop = None
showPrompt = True
while not stop:
if showPrompt:
self.stdout.write(self.prompt)
self.stdout.flush()
showPrompt = False
try:
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantPast())
line = self.cmdqueue.get_nowait()
if not len(line):
line = "EOF"
else:
line = line.strip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.cmdqueue.task_done()
showPrompt = True
except Empty:
continue
except KeyboardInterrupt:
break
except Exception as e:
import traceback
                print(traceback.format_exc())
break
# cleanup
self.postloop()
def preloop(self):
# cmd history
self._history = []
# OSX
self.osx_pool = NSAutoreleasePool.alloc().init()
self.registerKeyboardInterrupt()
def postloop(self):
self.unregisterKeyboardInterrupt()
del self.osx_pool
def endloop(self):
self.cmdqueue.put("exit")
def precmd(self, line):
self._history += [ line.strip() ]
if len(self._history) > self.history_size:
self._history = self._history[-(self.history_size):]
self.unregisterKeyboardInterrupt()
return line
def postcmd(self, stop, line):
try:
self.stdout.flush()
except:
pass
self.registerKeyboardInterrupt()
return stop
def emptyline(self):
pass
def do_shell(self, args):
"""Execute shell command
"""
os.system(args)
def do_debug(self, args):
"""Enable/disable debugging information
"""
if not hasattr(self, 'debug'):
return
option = args.strip()
if option == "":
pass
elif option == "True":
self.debug = True
elif option == "False":
self.debug = False
else:
self.stdout.write("Only accept True/False\n")
ans = "%s is %sin debug mode.\n"
cls_name = self.__class__.__name__
if self.debug:
ans = ans % (cls_name, "")
else:
ans = ans % (cls_name, "not ")
self.stdout.write(ans)
self.stdout.flush()
def default(self, line):
if len(line.strip()):
self.do_eval(line)
def do_eval(self, args):
"""Evaluate a single line python statement
"""
line = args.strip()
if len(line) == 0:
return
output = ""
oldstdout = self.stdout
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import StringIO  # Python 3
import ast
buffer = StringIO()
self.stdout = buffer
try:
code = compile(line, "<string>", "single")
exec(code)
except NameError as e:
self.logger.debug(e)
cmd, args, line = self.parseline(line)
self.commandNotFound(cmd)
except SyntaxError as e:
self.logger.debug(e)
cmd, args, line = self.parseline(line)
self.commandNotFound(cmd)
except Exception as e:
self.logger.debug(e)
self.stdout.write(pformat(e) + "\n")
finally:
self.stdout = oldstdout
self.stdout.write(buffer.getvalue())
def commandNotFound(self, cmd):
self.stdout.write("Command: '%s' is not yet support by %s\n" % (cmd, self.__class__.__name__))
def do_hist(self, args):
"""Show last N command history
"""
length = len(self._history)
try:
length = int(args.strip())
except:
pass
self._history.pop()
for cmd in self._history[-length:]:
self.stdout.write(cmd)
self.stdout.write('\n')
self.stdout.flush()
def do_exit(self, args):
"""Exit
"""
return True
if __name__ == "__main__":
app = OSXCmd()
app.cmdloop()
|
[
"brett.chien@gmail.com"
] |
brett.chien@gmail.com
|
00e012b55ec43e614adb4687ff49bb5f5b807e97
|
7c551e749064b25af706b9167211050f8c6ad0a9
|
/signatures/windows/disables_browserwarn.py
|
3fb78aceaba0966a66cdcad065787f4ccc8ebbb4
|
[] |
no_license
|
dashjuvi/Cuckoo-Sandbox-vbox-win7
|
fa382828b4895c5e1ee60b37a840edd395bf1588
|
a3a26b539b06db15176deadeae46fc0476e78998
|
refs/heads/master
| 2020-03-12T08:33:06.231245
| 2019-01-14T23:09:02
| 2019-01-14T23:09:02
| 130,529,882
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
# Copyright (C) 2015 Optiv, Inc. (brad.spengler@optiv.com), Kevin Ross, Updated 2016 for Cuckoo 2.0
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class DisablesBrowserWarn(Signature):
name = "disables_browser_warn"
description = "Attempts to disable browser security warnings"
severity = 3
categories = ["generic", "banker", "clickfraud"]
authors = ["Optiv", "Kevin Ross"]
minimum = "2.0"
regkeys_re = [
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\WarnOnBadCertRecving",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\WarnOnBadCertSending",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\WarnOnHTTPSToHTTPRedirect",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\WarnOnZoneCrossing",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\WarnOnPostRedirect",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\IEHardenIENoWarn",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\NoProtectedModeBanner",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\IE9RunOncePerInstall",
]
def on_complete(self):
for indicator in self.regkeys_re:
for regkey in self.check_key(pattern=indicator, regex=True, actions=["regkey_written"], all=True):
self.mark_ioc("registry", regkey)
return self.has_marks()
|
[
"diegovm14@gmail.com"
] |
diegovm14@gmail.com
|
a4febf7096cdd80c0178f06210bc98d127790e7b
|
a884039e1a8b0ab516b80c2186e0e3bad28d5147
|
/Livros/Livro-Desenvolvimento web com Flask/Capitulo03/Nível 01/exemplo14a.py
|
fb031bc2512a683fa44f48441a518deee804b3f6
|
[
"MIT"
] |
permissive
|
ramonvaleriano/python-
|
6e744e8bcd58d07f05cd31d42a5092e58091e9f0
|
ada70918e945e8f2d3b59555e9ccc35cf0178dbd
|
refs/heads/main
| 2023-04-10T14:04:24.497256
| 2021-04-22T18:49:11
| 2021-04-22T18:49:11
| 340,360,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# Program: exemplo14a.py
# Author: Ramon R. Valeriano
# Description: Chapter 3 program, written to reinforce the material
# Developed: 05/03/2020 - 20:05
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from datetime import datetime
app = Flask(__name__)
bootstrap = Bootstrap(app)
moment = Moment(app)
@app.route('/')
def index():
return render_template('hellonew1.html', current_time=datetime.utcnow())
@app.route('/user/<name>')
def usuario(name):
return render_template('user3.html', name=name)
@app.errorhandler(404)
def paginaNaoEncontrada(e):
    return render_template('404.html'), 404
@app.errorhandler(500)
def erroNoServidor(e):
    return render_template('500.html'), 500
app.run(debug=True)
|
[
"rrvaleriano@gmail.com"
] |
rrvaleriano@gmail.com
|
46e27f7215b2e25256ad1950e0f41b1e35267578
|
9587c6e58ef0ef4959898454c1a7c3d8fc963530
|
/blog/migrations/0001_initial.py
|
2df14f34fc1416652b5fb0330668df6e589da01d
|
[] |
no_license
|
okielife/okie.life
|
cfa5450be85c4076985156793a48ee4e68c1610d
|
6be9ed7178cddd300b00adb263b1962a7987ac11
|
refs/heads/master
| 2020-05-26T11:45:40.831311
| 2018-01-31T15:52:45
| 2018-01-31T15:52:45
| 84,996,176
| 0
| 0
| null | 2018-01-31T15:52:46
| 2017-03-14T20:54:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-04-09 12:47
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('body', models.TextField()),
('posted', models.DateField(auto_now_add=True, db_index=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(db_index=True, max_length=100)),
('slug', models.SlugField(max_length=100)),
],
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
),
]
|
[
"leeed2001@gmail.com"
] |
leeed2001@gmail.com
|
81219fa1bf7140d19dfa21d75f40241648410d07
|
c733d8d610a2f00b128abc25e4cdf79212ce4e63
|
/photologue/migrations/0017_remove_photo_admin_orig_image_tag.py
|
cd1025dd49ec8530e2cab6c30842f42d30bb112e
|
[
"BSD-3-Clause"
] |
permissive
|
pbarton666/photologue
|
301f56010208e068fd2c29d24862e529ceade6f1
|
fe47e6eb7830dbcfecfd059294dfbee30b94c4f9
|
refs/heads/master
| 2020-12-24T18:51:07.411561
| 2016-04-11T17:35:05
| 2016-04-11T17:35:05
| 55,991,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-01 18:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('photologue', '0016_photo_admin_orig_image_tag'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='admin_orig_image_tag',
),
]
|
[
"barton.pj@gmail.com"
] |
barton.pj@gmail.com
|
28a524703a77756b184f0f4bfe85f853572665a8
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Math Game/mathgame/constants.py
|
512e7167a39e24c0b56398f58226c05d5c9119a5
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4cb8c44068f19e31f8a933330313b35f4f809635c3f596eef01c16fd342dacd6
size 2243
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
ddf805539d408d7e2034a60ca30138c7b2a902ad
|
7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d
|
/packages/autorest.python/test/vanilla/version-tolerant/AcceptanceTests/test_security.py
|
46f859bb9858d9cdcf2f59faebdd3fdd493eee3f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.python
|
cc4bfbf91ae11535731cad37cedd6b733edf1ebd
|
a00d7aaa3753ef05cb5a0d38c664a90869478d44
|
refs/heads/main
| 2023-09-03T06:58:44.246200
| 2023-08-31T20:11:51
| 2023-08-31T20:11:51
| 100,315,955
| 47
| 40
|
MIT
| 2023-09-14T21:00:21
| 2017-08-14T22:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,189
|
py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from securityaadswaggerversiontolerant import AutorestSecurityAad
from securitykeyswaggerversiontolerant import AutorestSecurityKey
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline.policies import AzureKeyCredentialPolicy
from azure.core.pipeline.policies import BearerTokenCredentialPolicy
def test_security_aad_swagger(credential):
client = AutorestSecurityAad(credential=credential)
assert isinstance(client._config.authentication_policy, BearerTokenCredentialPolicy)
client.head(enforce_https=False)
def test_security_key_swagger():
# the key value shall keep same with https://github.com/Azure/autorest.testserver/tree/main/src/test-routes/security.ts
client = AutorestSecurityKey(credential=AzureKeyCredential('123456789'))
assert isinstance(client._config.authentication_policy, AzureKeyCredentialPolicy)
client.head()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
6ce782e1de0a0f55c0d164d33e68d359bb9bd33d
|
5050cb4aa00de443d3f9dfeddd4b3d70389386c0
|
/site-packages/sugar_network/toolkit/network.py
|
a0ba49914efda6ddccf4108d45178f41d6278200
|
[] |
no_license
|
sugar-activities/4619-activity
|
0f72e6c64fd0f98ac3e7d6011a75fb2ddd27490a
|
f6bc2dc64f30de57d3c2f50ac9795ac2faf5ac9e
|
refs/heads/master
| 2021-01-19T23:15:29.516534
| 2017-04-21T05:03:58
| 2017-04-21T05:03:58
| 88,936,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# Copyright (C) 2012 Aleksey Lim
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ctypes
import logging
from ctypes.util import find_library
_logger = logging.getLogger('network')
def res_init():
"""Reset resolving cache.
    Calling this function forces libc to stop using a stale resolver cache
    after getting [re]connected. For example, if the application process was
    launched when no DNS servers were available, call `res_init()` after
    getting connected so that newly appeared DNS servers are picked up.
"""
try:
lib_name = find_library('c')
libc = ctypes.CDLL(lib_name)
getattr(libc, '__res_init')(None)
except Exception:
_logger.exception('Failed to call res_init()')
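# Editorial usage sketch (import path inferred from this file's location in
# the tree; treat it as an assumption):
# from sugar_network.toolkit import network
# network.res_init()  # call after (re)connecting so new DNS servers are used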
|
[
"ignacio@sugarlabs.org"
] |
ignacio@sugarlabs.org
|