Dataset schema:

| Field | Type | Range / Values |
|---|---|---|
| blob_id | string | fixed length 40 |
| directory_id | string | fixed length 40 |
| path | string | length 3 to 616 |
| content_id | string | fixed length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | fixed length 40 |
| revision_id | string | fixed length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes, nullable |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes, nullable |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |

blob_id: 864caf90e1baee8f519c896c8212eb0d3e3e2ae8 | directory_id: 3c000380cbb7e8deb6abf9c6f3e29e8e89784830
path: /venv/Lib/site-packages/cobra/modelimpl/cloud/rsctxprofiletoregion.py
content_id: f5884b03f3bd5c4ccb68561a4aa42a56060d1fcf | detected_licenses: [] | license_type: no_license
repo_name: bkhoward/aciDOM | branch_name: refs/heads/master
snapshot_id: 91b0406f00da7aac413a81c8db2129b4bfc5497b | revision_id: f2674456ecb19cf7299ef0c5a0887560b8b315d0
visit_date: 2023-03-27T23:37:02.836904 | revision_date: 2021-03-26T22:07:54 | committer_date: 2021-03-26T22:07:54
github_id: 351,855,399 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,817 | extension: py
content:
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsCtxProfileToRegion(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = SourceRelationMeta("cobra.model.cloud.RsCtxProfileToRegion", "cobra.model.cloud.Region")
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "cloudRsCtxProfileToRegion"
meta.rnFormat = "rsctxProfileToRegion"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Attachment to Cloud Region"
meta.writeAccessMask = 0x1002001
meta.readAccessMask = 0x1002001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.parentClasses.add("cobra.model.cloud.CtxProfile")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsctxProfileToRegion', False),
]
prop = PropMeta("str", "annotation", "annotation", 51540, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 51541, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 51474, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 51221, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 14804
prop.defaultValueStr = "cloudRegion"
prop._addConstant("cloudRegion", None, 14804)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 51220, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudAppViaEPg", "From cloud context profile to application profile", "cobra.model.cloud.App"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudExtEPg", "From cloudCtxProfile to CloudExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudRouterP", "From cloudCtxProfile to cloudRouterP", "cobra.model.cloud.RouterP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToHcloudEndPoint", "From cloudCtxProfile to HcloudEndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToHcloudCtx", "From cloudCtxProfile to HcloudCtx", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudRegion", "From cloudCtxProfile to CloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudEPg", "From cloudCtxProfile to CloudEPg", "cobra.model.cloud.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToFvCtx", "From cloudCtxProfile to fvCtx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudCtxProfileToCloudCidr", "From cloudCtxProfile to cloudCidr", "cobra.model.cloud.Cidr"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
authors: ["bkhoward@live.com"] | author_id: bkhoward@live.com

blob_id: 6d18cbe38eee616d5462100561105c64e781a985 | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3
path: /Python_codes/p02959/s005038203.py
content_id: 72072b6b09a863006aae80c7da70edb873d1591c | detected_licenses: [] | license_type: no_license
repo_name: Aasthaengg/IBMdataset | branch_name: refs/heads/main
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 243 | extension: py
content:
n = int(input())
a = [int(i) for i in input().split()]
b = [int(i) for i in input().split()]
cnt = 0
for i in range(n):
dm = min(a[i], b[i])
b[i] -= dm; cnt += dm
dmn = min(a[i + 1], b[i])
a[i + 1] -= dmn; cnt += dmn
print(cnt)
authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com
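
A minimal sketch of the same greedy wrapped in a testable function (hypothetical wrapper; the original reads identical values from stdin, and `a` needs n+1 entries because the loop touches a[i + 1]):

def count_pairs(n, a, b):
    a, b = list(a), list(b)           # keep the caller's lists intact
    cnt = 0
    for i in range(n):
        dm = min(a[i], b[i])          # settle b[i] against a[i] first
        b[i] -= dm
        cnt += dm
        dmn = min(a[i + 1], b[i])     # spill any remainder onto a[i + 1]
        a[i + 1] -= dmn
        cnt += dmn
    return cnt

assert count_pairs(2, [3, 5, 2], [4, 5]) == 9   # traced by hand: 3 + 1 + 4 + 1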

blob_id: 2045f838ced5cbb44e3acff6b5588a986d821932 | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d
path: /Code/CodeRecords/2153/60634/269290.py
content_id: 4a9e1e2f2528705a00fe22bb4edfda1efd0a27a3 | detected_licenses: [] | license_type: no_license
repo_name: AdamZhouSE/pythonHomework | branch_name: refs/heads/master
snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24
github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 276 | extension: py
content:
def over(num):
result = 0
while num > 0:
result *= 10
result += num%10
num = int(num/10)
return result
num = int(input())
if num < 10 and num >= 0:
print(num)
elif num < 0:
print(-1*over(-1*num))
else:
print(over(num))
authors: ["1069583789@qq.com"] | author_id: 1069583789@qq.com
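
Quick hand-checked calls for over() above (hypothetical; call the helper directly, since the module itself blocks on input()): it reverses the digits of a non-negative integer, and the surrounding branches handle sign and single-digit inputs.

assert over(123) == 321
assert over(1200) == 21      # trailing zeros disappear on reversal
assert -1 * over(45) == -54  # what the negative branch computes for num = -45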

blob_id: e97e2648f03a2b000541078198a7bd237e410cbf | directory_id: 780b6cca690a213ac908b1cd5faef5366a18dc4e
path: /314_print_names_to_columns/save3_nopass.py
content_id: 9877032426300f773ec84c56b3d2a2c1653a6085 | detected_licenses: [] | license_type: no_license
repo_name: katkaypettitt/pybites-all | branch_name: refs/heads/main
snapshot_id: 899180a588e460b343c00529c6a742527e4ea1bc | revision_id: 391c07ecac0d92d5dc7c537bcf92eb6c1fdda896
visit_date: 2023-08-22T16:33:11.171732 | revision_date: 2021-10-24T17:29:44 | committer_date: 2021-10-24T17:29:44
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 331 | extension: py
content:
from typing import List # not needed when we upgrade to 3.9
def print_names_to_columns(names: List[str], cols: int = 2) -> None:
name_list = [f'| {name:{10}}' for name in names]
output = ''
for i in range(0, len(name_list), cols):
output += ' '.join(name_list[i: i + cols]) + '\n'
print(output)
authors: ["70788275+katrinaalaimo@users.noreply.github.com"] | author_id: 70788275+katrinaalaimo@users.noreply.github.com
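
A usage sketch for the helper above (hypothetical names; the :{10} field width means columns only stay aligned while names are under 10 characters):

print_names_to_columns(["Julian", "Bob", "PyBites", "Dante"], cols=2)
# | Julian     | Bob
# | PyBites    | Dante   (trailing padding trimmed here)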

blob_id: 961463af7f72a271cabbeb12200888b42613eece | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3
path: /Python_codes/p02820/s467587633.py
content_id: fa3163e453745247ba7d8f8b2cd48d58de4cc18e | detected_licenses: [] | license_type: no_license
repo_name: Aasthaengg/IBMdataset | branch_name: refs/heads/main
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 750 | extension: py
content:
n,k=map(int,input().split())
R,S,P=map(int,input().split())
T=input()
t=""
count=0
for i in range(n):
if i>k-1:
if T[i]=="r":
if t[i-k]!="p":
t=t+"p"
count+=P
else:
t=t+" "
if T[i]=="s":
if t[i-k]!="r":
t=t+"r"
count+=R
else:
t=t+" "
if T[i]=="p":
if t[i-k]!="s":
t=t+"s"
count+=S
else:
t=t+" "
else:
if T[i]=="r":
t=t+"p"
count+=P
if T[i]=="p":
t=t+"s"
count+=S
if T[i]=="s":
t=t+"r"
count+=R
print(count)
authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com
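
The same greedy in a compact, testable form (hypothetical rewrite, not the submission above): gain maps the opponent's hand to the points for beating it, and a round is skipped only when it would repeat our own winning hand from k rounds earlier.

def max_score(n, k, r, s, p, t):
    gain = {"r": p, "s": r, "p": s}   # beat rock with paper, scissors with rock, paper with scissors
    total = 0
    won = [False] * n                 # rounds where we played the winning hand
    for i, hand in enumerate(t):
        if i >= k and won[i - k] and t[i - k] == hand:
            continue                  # same opponent hand k rounds ago: our winning hand is blocked
        won[i] = True
        total += gain[hand]
    return total

assert max_score(5, 2, 8, 7, 6, "rsrpr") == 27   # traced by hand: 6 + 8 + 0 + 7 + 6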

blob_id: 320588077a9f70d6444751783bb3c54e23696683 | directory_id: 62e58c051128baef9452e7e0eb0b5a83367add26
path: /x12/4051/826004051.py
content_id: c80b8f5999f99f8b050d0f6bd2743ff2c9af953f | detected_licenses: [] | license_type: no_license
repo_name: dougvanhorn/bots-grammars | branch_name: refs/heads/master
snapshot_id: 2eb6c0a6b5231c14a6faf194b932aa614809076c | revision_id: 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
visit_date: 2021-05-16T12:55:58.022904 | revision_date: 2019-05-17T15:22:23 | committer_date: 2019-05-17T15:22:23
github_id: 105,274,633 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2017-09-29T13:21:21 | gha_created_at: 2017-09-29T13:21:21 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,776 | extension: py
content:
from bots.botsconfig import *
from records004051 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'TI',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BTI', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'YNQ', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
{ID: 'TFS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'YNQ', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
{ID: 'FGS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'YNQ', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
authors: ["jason.capriotti@gmail.com"] | author_id: jason.capriotti@gmail.com

blob_id: b4d71cbd9222d51470dd154f85cbec6495bd4db4 | directory_id: 52b5773617a1b972a905de4d692540d26ff74926
path: /.history/prefixSums_20200729113651.py
content_id: f9d6c16ff0f9dc4d781e07a32e438e70210e2a34 | detected_licenses: [] | license_type: no_license
repo_name: MaryanneNjeri/pythonModules | branch_name: refs/heads/master
snapshot_id: 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | revision_id: f4e56b1e4dda2349267af634a46f6b9df6686020
visit_date: 2022-12-16T02:59:19.896129 | revision_date: 2020-09-11T12:05:22 | committer_date: 2020-09-11T12:05:22
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 89 | extension: py
content:
def prefix(A):
n = len(A)
p = [0] * (n+1)
for k in range(1,n+1):
        p[k] = p[k - 1] + A[k - 1]  # running sum of the first k elements
    return p
authors: ["mary.jereh@gmail.com"] | author_id: mary.jereh@gmail.com
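
prefix() then returns the standard cumulative-sum array, so any slice sum falls out by subtraction (hypothetical check):

p = prefix([1, 2, 3, 4])
assert p == [0, 1, 3, 6, 10]
assert p[4] - p[1] == 2 + 3 + 4   # sum(A[1:4]) in O(1)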

blob_id: e5ea9b1ddf5e93da6d0a8e8a1c2d4abac86bd9bf | directory_id: 441b37974ac5f999001a773caa3fbf7584f82cc8
path: /Walmart Trip Type Classification/walmart_cv.py
content_id: 208e036fd9d80b50bb13c7b6c0a95cddc512663d | detected_licenses: [] | license_type: no_license
repo_name: lvraikkonen/Kaggle | branch_name: refs/heads/master
snapshot_id: bcdb653c774c211ae9e5a35fdacdb1205e81bebe | revision_id: dbeac80d645619dc519819d4ed2c45f383dd1206
visit_date: 2021-01-23T03:33:17.804886 | revision_date: 2016-09-03T11:51:11 | committer_date: 2016-09-03T11:51:11
github_id: 24,451,963 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,327 | extension: py
content:
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
from sklearn.metrics import log_loss
from hyperopt import fmin, tpe, hp, STATUS_OK
# These functions define the metric which we are trying to
# optimize.
def objective1(params):
print "Training model1 with parameters: "
print params
watchlist1 = [(dtrain1, 'train'), (dtestCV1, 'eval')]
model = xgb.train(params=params,
dtrain=dtrain1,
num_boost_round=1000,
early_stopping_rounds=10,
evals=watchlist1)
score = log_loss(dtestCV1.get_label(), model.predict(dtestCV1))
print "\tScore {0}\n\n".format(score)
return {'loss': score, 'status': STATUS_OK}
def objective2(params):
print "Training model2 with parameters: "
print params
watchlist2 = [(dtrain2, 'train'), (dtestCV2, 'eval')]
model = xgb.train(params=params,
                      dtrain=dtrain2,  # fixed: objective2 must train on dtrain2, not dtrain1
num_boost_round=1000,
early_stopping_rounds=10,
evals=watchlist2)
score = log_loss(dtestCV2.get_label(), model.predict(dtestCV2))
print "\tScore {0}\n\n".format(score)
return {'loss': score, 'status': STATUS_OK}
# Load data from buffer files
dtrain1 = xgb.DMatrix('data/dtrain1.buffer')
dtestCV1 = xgb.DMatrix('data/dtestCV1.buffer')
dtrain2 = xgb.DMatrix('data/dtrain2.buffer')
dtestCV2 = xgb.DMatrix('data/dtestCV2.buffer')
# Define the hyperparameter space
space = {'eta': hp.quniform('eta', 0.025, 0.5, 0.025),
'max_depth': hp.quniform('max_depth', 1, 15, 1),
'min_child_weight': hp.quniform('min_child_weight', 1, 6, 1),
'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
'gamma': hp.quniform('gamma', 0.5, 1, 0.05),
'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
'num_class': 38,
'eval_metric': 'mlogloss',
'objective': 'multi:softprob'}
# Evaluate the function fmin over the hyperparameter space, and
# print the best hyperparameters.
best1 = fmin(objective1, space=space, algo=tpe.suggest, max_evals=250)
print "Optimal parameters for dtrain1 are: ", best1
#
best2 = fmin(objective2, space=space, algo=tpe.suggest, max_evals=250)
print "Optimal parameters for dtrain2 are: ", best2
#
authors: ["claus.lv@hotmail.com"] | author_id: claus.lv@hotmail.com
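
One practical caveat with the space above: hp.quniform samples floats (7.0, not 7), and xgboost rejects a float max_depth. A hedged sketch of the usual workaround (helper name is ours, not the script's), applied inside each objective before calling xgb.train:

def as_int_params(params):
    params = dict(params)
    params['max_depth'] = int(params['max_depth'])   # quniform yields e.g. 7.0; xgboost wants 7
    return params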

blob_id: 4fdf5b55ca8b34dbe7b97e293d8dace35dd2c25c | directory_id: ce13eba2d3d1e7267b44cd322d309c0e1f3e6785
path: /pb_file_generation.py
content_id: 26d7bf10581c85f22b12aec9ff2e341b9465e57b | detected_licenses: [] | license_type: no_license
repo_name: parthnatekar/Brain-tumor-segmentation | branch_name: refs/heads/master
snapshot_id: fdfc9ba41d410a3618947c0b6d784ff013ded4a7 | revision_id: 88aecfea58bf551457c2c8622cc23e74e48db7e7
visit_date: 2022-02-22T03:31:28.981931 | revision_date: 2019-08-30T06:53:16 | committer_date: 2019-08-30T06:53:16
github_id: 192,159,231 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,089 | extension: py
content:
import tensorflow as tf
import keras.backend as K
resnet_model_path = 'trained_models/U_resnet/ResUnet.h5'
resnet_weights_path = 'trained_models/U_resnet/ResUnet.15_0.491.hdf5'
resnet_pb_path = 'trained_models/U_resnet/resnet.pb'
sunet_model_path = 'trained_models/SimUnet/FCN.h5'
sunet_weights_path = 'trained_models/SimUnet/SimUnet.40_0.060.hdf5'
sunet_pb_path = 'trained_models/SimUnet/SUnet.pb'
dense_model_path = 'trained_models/densenet_121/densenet121.h5'
dense_weights_path = 'trained_models/densenet_121/densenet.55_0.522.hdf5'
dense_pb_path = 'trained_models/densenet_121/densenet.pb'
shallow_model_path = 'trained_models/shallowunet/shallow_unet.h5'
shallow_weights_path = 'trained_models/shallowunet/shallow_weights.hdf5'
shallow_pb_path = 'trained_models/shallowunet/shallow_unet.pb'
from keras.models import load_model
from models import *
from losses import *
def load_seg_model(model_='shallow'):
# model = unet_densenet121_imagenet((240, 240), weights='imagenet12')
# model.load_weights(weights_path)
if model_ == 'uresnet':
model = load_model(resnet_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
model.load_weights(resnet_weights_path)
return model, resnet_weights_path, resnet_pb_path
elif model_ == 'fcn':
model = load_model(sunet_model_path, custom_objects={'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
model.load_weights(sunet_weights_path)
return model, sunet_weights_path, sunet_pb_path
elif model_ == 'dense':
model = load_model(dense_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
model.load_weights(dense_weights_path)
return model, dense_weights_path, dense_pb_path
elif model_ == 'shallow':
model = load_model(shallow_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
model.load_weights(shallow_weights_path)
return model, shallow_weights_path, shallow_pb_path
def save_frozen_graph(filename):
output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
session,
K.get_session().graph.as_graph_def(),
['conv2d_32/BiasAdd']
)
with open(filename, "wb") as f:
f.write(output_graph_def.SerializeToString())
with tf.Session(graph=K.get_session().graph) as session:
session.run(tf.global_variables_initializer())
model_res, weights_path, pb_path = load_seg_model()
print (model_res.summary())
save_frozen_graph(pb_path)
import tensorflow as tf
graph_def = tf.GraphDef()
with open(pb_path, "rb") as f:
graph_def.ParseFromString(f.read())
for node in graph_def.node:
print(node.name)
authors: ["koriavinash1@gmail.com"] | author_id: koriavinash1@gmail.com

blob_id: 2e83ec6d1e2949ecaaf7d1bb3de03ea892f66966 | directory_id: 1c6e5c808c1a3e6242e40b15ae711574e670c3b6
path: /food_management/views/update_meal_schedule/request_response_mocks.py
content_id: c10259d419f9263a235901ef89852547ecc07f81 | detected_licenses: [] | license_type: no_license
repo_name: KatakamVedaVandhana/smart_food_management-vandhana | branch_name: refs/heads/master
snapshot_id: dbe195994c110471d0ae7a5a53adef1441e86466 | revision_id: 19e410a2aa792b22889a2dfed599312ba6b5a7ad
visit_date: 2023-07-09T05:43:17.491313 | revision_date: 2020-06-15T06:44:00 | committer_date: 2020-06-15T06:44:00
github_id: 269,609,923 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 226 | extension: py
content:
REQUEST_BODY_JSON = """
{
"meal_type": "Breakfast",
"date": "string",
"items_list": [
{
"item_id": 1,
"meal_course": "Half-meal",
"quantity": 1
}
]
}
"""
authors: ["vandhanakatakam@gmail.com"] | author_id: vandhanakatakam@gmail.com
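
Because the mock above is a JSON string, a round-trip through json.loads is a cheap validity check (hypothetical snippet):

import json

payload = json.loads(REQUEST_BODY_JSON)
assert payload["meal_type"] == "Breakfast"
assert payload["items_list"][0]["quantity"] == 1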

blob_id: 3dcf93313868e6a333acf59112cec9cc100db628 | directory_id: 20db5a27f2a8b2d324085f5e1ec6c46ad7c1e8c3
path: /djangoMovie/wsgi.py
content_id: bd36ec73a578e75dac2e61871a123692e83408c3 | detected_licenses: [] | license_type: no_license
repo_name: mortadagzar/djangoMovie | branch_name: refs/heads/master
snapshot_id: dae326fc83a31e485792b1ee42fa89b7d681049d | revision_id: e83904c0c1ecc45992eed7516cb483bd2c97590b
visit_date: 2020-04-01T22:32:28.246877 | revision_date: 2018-10-19T02:41:22 | committer_date: 2018-10-19T02:41:22
github_id: 153,713,907 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 399 | extension: py
content:
"""
WSGI config for djangoMovie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoMovie.settings')
application = get_wsgi_application()
authors: ["mortadagzar@gmail.com"] | author_id: mortadagzar@gmail.com

blob_id: 016f142fbb09daf43f5feeb03bd08f1a32783e55 | directory_id: 478071aed3612b8eefb5dc521b8fe18f95eaffdd
path: /Existing paper reading/model/GATA.py
content_id: 105813c869a5edc310f98773b833ce8591293d86 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: leiloong/PaperRobot | branch_name: refs/heads/master
snapshot_id: f913918671d758ae7e9d4098fe42cad19fbbbc6d | revision_id: 070972dc1548571c28d89d2c54fb379e87d172c7
visit_date: 2020-05-30T11:39:11.814416 | revision_date: 2019-07-18T08:25:00 | committer_date: 2019-07-18T08:25:00
github_id: 189,710,771 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-07-18T08:25:01 | gha_created_at: 2019-06-01T08:47:51 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,782 | extension: py
content:
# --------- Link Prediction Model with both TAT and GAT contained -----------
import torch.nn as nn
import torch
from .GAT import GAT
from .TAT import TAT
class GATA(nn.Module):
def __init__(self, emb_dim, hid_dim, out_dim, num_voc, num_heads, num_ent, num_rel, dropout, alpha, **kwargs):
super(GATA, self).__init__()
self.ent_embedding = nn.Embedding(num_ent, emb_dim)
self.rel_embedding = nn.Embedding(num_rel, emb_dim)
self.graph = GAT(nfeat=emb_dim, nhid=hid_dim, dropout=dropout, nheads=num_heads, alpha=alpha)
self.text = TAT(emb_dim, num_voc)
self.gate = nn.Embedding(num_ent, out_dim)
def forward(self, nodes, adj, pos, shifted_pos, h_sents, h_order, h_lengths, t_sents, t_order, t_lengths):
node_features = self.ent_embedding(nodes)
graph = self.graph(node_features, adj)
head_graph = graph[[shifted_pos[:, 0].squeeze()]]
tail_graph = graph[[shifted_pos[:, 1].squeeze()]]
head_text = self.text(h_sents, h_order, h_lengths, node_features[[shifted_pos[:, 0].squeeze()]])
tail_text = self.text(t_sents, t_order, t_lengths, node_features[[shifted_pos[:, 1].squeeze()]])
r_pos = self.rel_embedding(pos[:, 2].squeeze())
gate_head = self.gate(pos[:, 0].squeeze())
gate_tail = self.gate(pos[:, 1].squeeze())
score_pos = self._score(head_graph, head_text, tail_graph, tail_text, r_pos, gate_head, gate_tail)
return score_pos
def _score(self, hg, ht, tg, tt, r, gh, gt):
gate_h = torch.sigmoid(gh)
gate_t = torch.sigmoid(gt)
head = gate_h * hg + (1-gate_h) * ht
tail = gate_t * tg + (1-gate_t) * tt
s = torch.abs(head + r - tail)
return s
authors: ["dalewanghz@gmail.com"] | author_id: dalewanghz@gmail.com
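
The gate in _score is a plain convex blend: the sigmoid decides how much of the graph encoding versus the text encoding survives. A tiny numeric illustration (hypothetical tensors, independent of the model):

import torch

g = torch.sigmoid(torch.tensor([0.0]))   # 0.5: an equal mix
hg = torch.tensor([2.0])                 # stand-in graph encoding
ht = torch.tensor([4.0])                 # stand-in text encoding
print(g * hg + (1 - g) * ht)             # tensor([3.])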

blob_id: 50ac7d9499d215fdeee98e4acab4c2ba61d65aa5 | directory_id: a704c91ba38fb9f733102506f3bbf1325ab0e73b
path: /loans/asgi.py
content_id: e0a7f28c0e79d8818c407d48e9c27dbbbb8509f2 | detected_licenses: [] | license_type: no_license
repo_name: Nyakinyua/Loans- | branch_name: refs/heads/master
snapshot_id: e2b69ef00118ab2831df5b12a9e9987944bd23a2 | revision_id: fd9d9d51cfb02905001921f7c989ea11be0f68e4
visit_date: 2023-04-03T03:12:29.134845 | revision_date: 2021-04-13T09:26:44 | committer_date: 2021-04-13T09:26:44
github_id: 348,429,796 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 387 | extension: py
content:
"""
ASGI config for loans project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'loans.settings')
application = get_asgi_application()
authors: ["wanyakinyua968@gmail.com"] | author_id: wanyakinyua968@gmail.com

blob_id: 81500922d96e1dcf88f6749557790f570cda92ca | directory_id: cf1431d3d4843fda317ec9c1d39cceaa0cbe69e2
path: /gewittergefahr/gg_utils/time_periods.py
content_id: 102edab472e111ff3dd03896c662b6a0a7d926a4 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: theweathermanda/GewitterGefahr | branch_name: refs/heads/master
snapshot_id: 9dad0f5d4595db647d511a7b179b159201dff4f2 | revision_id: b8bcbf4c22457b3aa4613ff2c07b32a6e71068e2
visit_date: 2020-04-26T02:20:40.434434 | revision_date: 2019-02-17T03:42:31 | committer_date: 2019-02-17T03:42:31
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,831 | extension: py
content:
"""Methods for handling time periods."""
import numpy
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import error_checking
def range_and_interval_to_list(start_time_unix_sec=None, end_time_unix_sec=None,
time_interval_sec=None, include_endpoint=True):
"""Converts time period from range and interval to list of exact times.
N = number of exact times
:param start_time_unix_sec: Start time (Unix format).
:param end_time_unix_sec: End time (Unix format).
:param time_interval_sec: Interval (seconds) between successive exact times.
:param include_endpoint: Boolean flag. If True, endpoint will be included
in list of time steps. If False, endpoint will be excluded.
:return: unix_times_sec: length-N numpy array of exact times (Unix format).
"""
error_checking.assert_is_integer(start_time_unix_sec)
error_checking.assert_is_not_nan(start_time_unix_sec)
error_checking.assert_is_integer(end_time_unix_sec)
error_checking.assert_is_not_nan(end_time_unix_sec)
error_checking.assert_is_integer(time_interval_sec)
error_checking.assert_is_boolean(include_endpoint)
if include_endpoint:
error_checking.assert_is_geq(end_time_unix_sec, start_time_unix_sec)
else:
error_checking.assert_is_greater(end_time_unix_sec, start_time_unix_sec)
start_time_unix_sec = int(rounder.floor_to_nearest(
float(start_time_unix_sec), time_interval_sec))
end_time_unix_sec = int(rounder.ceiling_to_nearest(
float(end_time_unix_sec), time_interval_sec))
if not include_endpoint:
end_time_unix_sec -= time_interval_sec
    num_time_steps = 1 + (end_time_unix_sec -
                          start_time_unix_sec) // time_interval_sec  # floor division keeps this an int for numpy.linspace
return numpy.linspace(start_time_unix_sec, end_time_unix_sec,
num=num_time_steps, dtype=int)
def time_and_period_length_to_range(unix_time_sec, period_length_sec):
"""Converts single time and period length to range (start/end of period).
:param unix_time_sec: Single time (Unix format).
:param period_length_sec: Length of time period (seconds).
:return: start_time_unix_sec: Beginning of time period (Unix format).
:return: end_time_unix_sec: End of time period (Unix format).
"""
error_checking.assert_is_integer(unix_time_sec)
error_checking.assert_is_not_nan(unix_time_sec)
error_checking.assert_is_integer(period_length_sec)
start_time_unix_sec = int(rounder.floor_to_nearest(
float(unix_time_sec), period_length_sec))
return start_time_unix_sec, start_time_unix_sec + period_length_sec
def time_and_period_length_and_interval_to_list(unix_time_sec=None,
period_length_sec=None,
time_interval_sec=None,
include_endpoint=True):
"""Converts single time, period length, and interval to list of exact times.
:param unix_time_sec: Single time (Unix format).
:param period_length_sec: Length of time period (seconds).
:param time_interval_sec: Interval (seconds) between successive exact times.
:param include_endpoint: Boolean flag. If True, endpoint will be included
in list of time steps. If False, endpoint will be excluded.
:return: unix_times_sec: length-N numpy array of exact times (Unix format).
"""
(start_time_unix_sec, end_time_unix_sec) = time_and_period_length_to_range(
unix_time_sec, period_length_sec)
return range_and_interval_to_list(
start_time_unix_sec=start_time_unix_sec,
end_time_unix_sec=end_time_unix_sec,
time_interval_sec=time_interval_sec, include_endpoint=include_endpoint)
authors: ["ryan.lagerquist@ou.edu"] | author_id: ryan.lagerquist@ou.edu
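
A worked example of the two helpers above (hypothetical values; assumes the gewittergefahr package is importable): a 900-second period around t = 1000000000 floors to 999999900, and a 300-second interval then yields four exact times.

start_sec, end_sec = time_and_period_length_to_range(1000000000, 900)
# (999999900, 1000000800)
times = range_and_interval_to_list(
    start_time_unix_sec=start_sec, end_time_unix_sec=end_sec,
    time_interval_sec=300)
# array([ 999999900, 1000000200, 1000000500, 1000000800])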

blob_id: cda754c843996deb186c2e23dde533e8fee2c7e6 | directory_id: 4a6ee62745aaad67326bf6e3bb2001f5ef84b8ab
path: /music/admin.py
content_id: a2ccca151a409e344792bd5071c78414ad43b21b | detected_licenses: [] | license_type: no_license
repo_name: sandeep201451066/MusicAlbum | branch_name: refs/heads/master
snapshot_id: 437c79599fa56a6b21354e3d376a21d0b48f47a9 | revision_id: bb3b1c176eb2f2bccdd88547fc1f04ee8262ac28
visit_date: 2021-01-12T02:36:59.740581 | revision_date: 2017-01-07T06:57:05 | committer_date: 2017-01-07T06:57:05
github_id: 78,078,547 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 171 | extension: py
content:
from django.contrib import admin
# Register your models here.
from music.models import AlbumList, SongList
admin.site.register(AlbumList)
admin.site.register(SongList)
authors: ["you@example.com"] | author_id: you@example.com

blob_id: 201cb18806e64a90eab4c2b766ae1aae7d263ec3 | directory_id: 598548d1364ea8e4810404d6aa17d5a6653b4d1c
path: /torba/client/basetransaction.py
content_id: a022bfa2ba10da401397daa64c9763199ed493c5 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: productinfo/torba | branch_name: refs/heads/master
snapshot_id: 844d362ac8791715bcb16e883f994b5b5ed67109 | revision_id: 45669dad336f7b74dd7c04d3f826620ed2f777c7
visit_date: 2020-04-19T18:33:44.480802 | revision_date: 2019-01-29T17:51:59 | committer_date: 2019-01-29T18:17:35
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 18,238 | extension: py
content:
import logging
import typing
from typing import List, Iterable, Optional
from binascii import hexlify
from torba.client.basescript import BaseInputScript, BaseOutputScript
from torba.client.baseaccount import BaseAccount
from torba.client.constants import COIN, NULL_HASH32
from torba.client.bcd_data_stream import BCDataStream
from torba.client.hash import sha256, TXRef, TXRefImmutable
from torba.client.util import ReadOnlyList
if typing.TYPE_CHECKING:
from torba.client import baseledger
log = logging.getLogger()
class TXRefMutable(TXRef):
__slots__ = ('tx',)
def __init__(self, tx: 'BaseTransaction') -> None:
super().__init__()
self.tx = tx
@property
def id(self):
if self._id is None:
self._id = hexlify(self.hash[::-1]).decode()
return self._id
@property
def hash(self):
if self._hash is None:
self._hash = sha256(sha256(self.tx.raw))
return self._hash
@property
def height(self):
return self.tx.height
def reset(self):
self._id = None
self._hash = None
class TXORef:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef, position: int) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def id(self):
return '{}:{}'.format(self.tx_ref.id, self.position)
@property
def is_null(self):
return self.tx_ref.is_null
@property
def txo(self) -> Optional['BaseOutput']:
return None
class TXORefResolvable(TXORef):
__slots__ = ('_txo',)
def __init__(self, txo: 'BaseOutput') -> None:
assert txo.tx_ref is not None
assert txo.position is not None
super().__init__(txo.tx_ref, txo.position)
self._txo = txo
@property
def txo(self):
return self._txo
class InputOutput:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def size(self) -> int:
""" Size of this input / output in bytes. """
stream = BCDataStream()
self.serialize_to(stream)
return len(stream.get_bytes())
def get_fee(self, ledger):
return self.size * ledger.fee_per_byte
def serialize_to(self, stream, alternate_script=None):
raise NotImplementedError
class BaseInput(InputOutput):
script_class = BaseInputScript
NULL_SIGNATURE = b'\x00'*72
NULL_PUBLIC_KEY = b'\x00'*33
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
def __init__(self, txo_ref: TXORef, script: BaseInputScript, sequence: int = 0xFFFFFFFF,
tx_ref: TXRef = None, position: int = None) -> None:
super().__init__(tx_ref, position)
self.txo_ref = txo_ref
self.sequence = sequence
self.coinbase = script if txo_ref.is_null else None
self.script = script if not txo_ref.is_null else None
@property
def is_coinbase(self):
return self.coinbase is not None
@classmethod
def spend(cls, txo: 'BaseOutput') -> 'BaseInput':
""" Create an input to spend the output."""
assert txo.script.is_pay_pubkey_hash, 'Attempting to spend unsupported output.'
script = cls.script_class.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
return cls(txo.ref, script)
@property
def amount(self) -> int:
""" Amount this input adds to the transaction. """
if self.txo_ref.txo is None:
raise ValueError('Cannot resolve output to get amount.')
return self.txo_ref.txo.amount
@property
def is_my_account(self) -> Optional[bool]:
""" True if the output this input spends is yours. """
if self.txo_ref.txo is None:
return False
return self.txo_ref.txo.is_my_account
@classmethod
def deserialize_from(cls, stream):
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
position = stream.read_uint32()
script = stream.read_string()
sequence = stream.read_uint32()
return cls(
TXORef(tx_ref, position),
cls.script_class(script) if not tx_ref.is_null else script,
sequence
)
def serialize_to(self, stream, alternate_script=None):
stream.write(self.txo_ref.tx_ref.hash)
stream.write_uint32(self.txo_ref.position)
if alternate_script is not None:
stream.write_string(alternate_script)
else:
if self.is_coinbase:
stream.write_string(self.coinbase)
else:
stream.write_string(self.script.source)
stream.write_uint32(self.sequence)
class BaseOutputEffectiveAmountEstimator:
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
def __init__(self, ledger: 'baseledger.BaseLedger', txo: 'BaseOutput') -> None:
self.txo = txo
self.txi = ledger.transaction_class.input_class.spend(txo)
self.fee: int = self.txi.get_fee(ledger)
self.effective_amount: int = txo.amount - self.fee
def __lt__(self, other):
return self.effective_amount < other.effective_amount
class BaseOutput(InputOutput):
script_class = BaseOutputScript
estimator_class = BaseOutputEffectiveAmountEstimator
__slots__ = 'amount', 'script', 'is_change', 'is_my_account'
def __init__(self, amount: int, script: BaseOutputScript,
tx_ref: TXRef = None, position: int = None,
is_change: Optional[bool] = None, is_my_account: Optional[bool] = None
) -> None:
super().__init__(tx_ref, position)
self.amount = amount
self.script = script
self.is_change = is_change
self.is_my_account = is_my_account
def update_annotations(self, annotated):
if annotated is None:
self.is_change = False
self.is_my_account = False
else:
self.is_change = annotated.is_change
self.is_my_account = annotated.is_my_account
@property
def ref(self):
return TXORefResolvable(self)
@property
def id(self):
return self.ref.id
def get_address(self, ledger):
return ledger.hash160_to_address(
self.script.values['pubkey_hash']
)
def get_estimator(self, ledger):
return self.estimator_class(ledger, self)
@classmethod
def pay_pubkey_hash(cls, amount, pubkey_hash):
return cls(amount, cls.script_class.pay_pubkey_hash(pubkey_hash))
@classmethod
def deserialize_from(cls, stream):
return cls(
amount=stream.read_uint64(),
script=cls.script_class(stream.read_string())
)
def serialize_to(self, stream, alternate_script=None):
stream.write_uint64(self.amount)
stream.write_string(self.script.source)
class BaseTransaction:
input_class = BaseInput
output_class = BaseOutput
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
height: int = -2, position: int = -1) -> None:
self._raw = raw
self.ref = TXRefMutable(self)
self.version = version
self.locktime = locktime
self._inputs: List[BaseInput] = []
self._outputs: List[BaseOutput] = []
self.is_verified = is_verified
# Height Progression
# -2: not broadcast
# -1: in mempool but has unconfirmed inputs
# 0: in mempool and all inputs confirmed
# +num: confirmed in a specific block (height)
self.height = height
self.position = position
if raw is not None:
self._deserialize()
@property
def is_broadcast(self):
return self.height > -2
@property
def is_mempool(self):
return self.height in (-1, 0)
@property
def is_confirmed(self):
return self.height > 0
@property
def id(self):
return self.ref.id
@property
def hash(self):
return self.ref.hash
@property
def raw(self):
if self._raw is None:
self._raw = self._serialize()
return self._raw
def _reset(self):
self._raw = None
self.ref.reset()
@property
def inputs(self) -> ReadOnlyList[BaseInput]:
return ReadOnlyList(self._inputs)
@property
def outputs(self) -> ReadOnlyList[BaseOutput]:
return ReadOnlyList(self._outputs)
def _add(self, new_ios: Iterable[InputOutput], existing_ios: List) -> 'BaseTransaction':
for txio in new_ios:
txio.tx_ref = self.ref
txio.position = len(existing_ios)
existing_ios.append(txio)
self._reset()
return self
def add_inputs(self, inputs: Iterable[BaseInput]) -> 'BaseTransaction':
return self._add(inputs, self._inputs)
def add_outputs(self, outputs: Iterable[BaseOutput]) -> 'BaseTransaction':
return self._add(outputs, self._outputs)
@property
def size(self) -> int:
""" Size in bytes of the entire transaction. """
return len(self.raw)
@property
def base_size(self) -> int:
""" Size of transaction without inputs or outputs in bytes. """
return (
self.size
- sum(txi.size for txi in self._inputs)
- sum(txo.size for txo in self._outputs)
)
@property
def input_sum(self):
return sum(i.amount for i in self.inputs if i.txo_ref.txo is not None)
@property
def output_sum(self):
return sum(o.amount for o in self.outputs)
@property
def net_account_balance(self) -> int:
balance = 0
for txi in self.inputs:
if txi.txo_ref.txo is None:
continue
if txi.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
elif txi.is_my_account:
balance -= txi.amount
for txo in self.outputs:
if txo.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
elif txo.is_my_account:
balance += txo.amount
return balance
@property
def fee(self) -> int:
return self.input_sum - self.output_sum
def get_base_fee(self, ledger) -> int:
""" Fee for base tx excluding inputs and outputs. """
return self.base_size * ledger.fee_per_byte
def get_effective_input_sum(self, ledger) -> int:
""" Sum of input values *minus* the cost involved to spend them. """
return sum(txi.amount - txi.get_fee(ledger) for txi in self._inputs)
def get_total_output_sum(self, ledger) -> int:
""" Sum of output values *plus* the cost involved to spend them. """
return sum(txo.amount + txo.get_fee(ledger) for txo in self._outputs)
def _serialize(self, with_inputs: bool = True) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
if with_inputs:
stream.write_compact_size(len(self._inputs))
for txin in self._inputs:
txin.serialize_to(stream)
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
return stream.get_bytes()
def _serialize_for_signature(self, signing_input: int) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
stream.write_compact_size(len(self._inputs))
for i, txin in enumerate(self._inputs):
if signing_input == i:
assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source)
else:
txin.serialize_to(stream, b'')
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
return stream.get_bytes()
def _deserialize(self):
if self._raw is not None:
stream = BCDataStream(self._raw)
self.version = stream.read_uint32()
input_count = stream.read_compact_size()
self.add_inputs([
self.input_class.deserialize_from(stream) for _ in range(input_count)
])
output_count = stream.read_compact_size()
self.add_outputs([
self.output_class.deserialize_from(stream) for _ in range(output_count)
])
self.locktime = stream.read_uint32()
@classmethod
def ensure_all_have_same_ledger(cls, funding_accounts: Iterable[BaseAccount],
change_account: BaseAccount = None) -> 'baseledger.BaseLedger':
ledger = None
for account in funding_accounts:
if ledger is None:
ledger = account.ledger
if ledger != account.ledger:
raise ValueError(
'All funding accounts used to create a transaction must be on the same ledger.'
)
if change_account is not None and change_account.ledger != ledger:
raise ValueError('Change account must use same ledger as funding accounts.')
if ledger is None:
raise ValueError('No ledger found.')
return ledger
@classmethod
async def create(cls, inputs: Iterable[BaseInput], outputs: Iterable[BaseOutput],
funding_accounts: Iterable[BaseAccount], change_account: BaseAccount):
""" Find optimal set of inputs when only outputs are provided; add change
outputs if only inputs are provided or if inputs are greater than outputs. """
tx = cls() \
.add_inputs(inputs) \
.add_outputs(outputs)
ledger = cls.ensure_all_have_same_ledger(funding_accounts, change_account)
# value of the outputs plus associated fees
cost = (
tx.get_base_fee(ledger) +
tx.get_total_output_sum(ledger)
)
# value of the inputs less the cost to spend those inputs
payment = tx.get_effective_input_sum(ledger)
try:
for _ in range(5):
if payment < cost:
deficit = cost - payment
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
if not spendables:
raise ValueError('Not enough funds to cover this transaction.')
payment += sum(s.effective_amount for s in spendables)
tx.add_inputs(s.txi for s in spendables)
cost_of_change = (
tx.get_base_fee(ledger) +
cls.output_class.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
)
if payment > cost:
change = payment - cost
if change > cost_of_change:
change_address = await change_account.change.get_or_create_usable_address()
change_hash160 = change_account.ledger.address_to_hash160(change_address)
change_amount = change - cost_of_change
change_output = cls.output_class.pay_pubkey_hash(change_amount, change_hash160)
change_output.is_change = True
tx.add_outputs([cls.output_class.pay_pubkey_hash(change_amount, change_hash160)])
if tx._outputs:
break
else:
# this condition and the outer range(5) loop cover an edge case
# whereby a single input is just enough to cover the fee and
# has some change left over, but the change left over is less
# than the cost_of_change: thus the input is completely
# consumed and no output is added, which is an invalid tx.
# to be able to spend this input we must increase the cost
# of the TX and run through the balance algorithm a second time
# adding an extra input and change output, making tx valid.
# we do this 5 times in case the other UTXOs added are also
# less than the fee, after 5 attempts we give up and go home
cost += cost_of_change + 1
await tx.sign(funding_accounts)
except Exception as e:
log.exception('Failed to create transaction:')
await ledger.release_outputs(tx.outputs)
raise e
return tx
@staticmethod
def signature_hash_type(hash_type):
return hash_type
async def sign(self, funding_accounts: Iterable[BaseAccount]):
ledger = self.ensure_all_have_same_ledger(funding_accounts)
for i, txi in enumerate(self._inputs):
assert txi.script is not None
assert txi.txo_ref.txo is not None
txo_script = txi.txo_ref.txo.script
if txo_script.is_pay_pubkey_hash:
address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
private_key = await ledger.get_private_key_for_address(address)
tx = self._serialize_for_signature(i)
txi.script.values['signature'] = \
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
txi.script.generate()
else:
raise NotImplementedError("Don't know how to spend this output.")
self._reset()
authors: ["lex@damoti.com"] | author_id: lex@damoti.com
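
The height conventions documented inside BaseTransaction.__init__ map directly onto its three status properties; a quick illustration (hypothetical values, using the constructor defaults):

tx = BaseTransaction()   # height defaults to -2: not broadcast
assert not tx.is_broadcast
tx.height = 0            # in mempool with all inputs confirmed
assert tx.is_broadcast and tx.is_mempool and not tx.is_confirmed
tx.height = 650000       # confirmed in block 650000
assert tx.is_confirmed and not tx.is_mempool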

blob_id: 49f6372be4dbd4d5b5afa73800df88ec46448170 | directory_id: e035e042c425b7abbaee50b8f331b1d4ebcbd388
path: /test/testapikeyfilegenerator.py
content_id: 4447a5f57529b553266526308950cbab3a2cfe21 | detected_licenses: [] | license_type: no_license
repo_name: Archanciel/C2 | branch_name: refs/heads/master
snapshot_id: 6ae2589c23c1499db3ca3d362459afecb52283ed | revision_id: 8abf29d6798743d06d1949e22ff5c4bece2cd85d
visit_date: 2020-03-21T11:19:38.298586 | revision_date: 2018-09-01T10:23:50 | committer_date: 2018-09-01T10:23:50
github_id: 138,500,734 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2018-07-28T18:33:25 | gha_created_at: 2018-06-24T17:09:59 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,318 | extension: py
content:
import inspect
import os
import sys
import unittest
import pickle
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.insert(0,currentdir) # this instruction is necessary for successful importation of utilityfortest module when
# the test is executed standalone
from apikey.apikeyfilegenerator import ApiKeyFileGenerator
class TestApiKeyFileGenerator(unittest.TestCase):
def testCreateKeyFile(self):
        '''
        This test demonstrates how to test a command line script using argparse.
        :return:
        '''
ap = ApiKeyFileGenerator()
ap.createKeyFile(['-a', 'key', '-s', 'secret key', '-f', 'testfile', '-pw', 'monpw'])
with open(ap.FILE_PATH + 'testfile.bin', 'rb') as handle:
encryptedKeyList = pickle.load(handle)
self.assertEqual(['w5jDlMOn', 'w6DDlMORw6LDnMOhwo_DmcOVw7A='], encryptedKeyList)
self.assertEqual('key', ap.decode('monpw', encryptedKeyList[0]))
self.assertEqual('secret key', ap.decode('monpw', encryptedKeyList[1]))
def testCreateKeyFileNoArgs(self):
ap = ApiKeyFileGenerator()
with self.assertRaises(SystemExit):
ap.createKeyFile([])
authors: ["jp.schnyder@gmail.com"] | author_id: jp.schnyder@gmail.com

blob_id: a6f3acb6b119c6a7b83bda654ccc2e610af29885 | directory_id: af4eb8204923b5848fce3158c6f8a89a480ea1d8
path: /script/AGNSS/AGNSS_Test_0084.py
content_id: afe06efc62851c32680c7b868f5dba30c5625c8c | detected_licenses: [] | license_type: no_license
repo_name: wanghaoplus/gatog | branch_name: refs/heads/master
snapshot_id: 4ab0c77d4f9eb93da136ad3933a68cbf0b5c5bca | revision_id: 8935e20a426638462cd1cc7bc048a16751287a2f
visit_date: 2022-04-10T21:36:20.395304 | revision_date: 2020-03-26T10:09:28 | committer_date: 2020-03-26T10:09:28
github_id: 248,264,601 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,425 | extension: py
content:
# -*- coding: utf-8 -*-
# @Time : 2020/02/21 22:14
# @Author : wangdelei
# @Site :
# @File : AGNSS_Test_0084.py
# @Software: PyCharm
from aw.LbsTestCase import LbsTestCase
import time
from aw.core.Input import *
import threading
class AGNSS_Test_0084(LbsTestCase):
def __init__(self):
super(AGNSS_Test_0084, self).__init__()
self.TestCaseList = ["AGNSS_Test_0084"]
def setup(self):
self.setupStep('labsat')
super(AGNSS_Test_0084, self).setup()
self.aw_initLabsat()
loopTimes = self.data.LoopTimes
sceneId = self.data.sceneId
print(self.sceneData)
def AGNSS_Test_0084(self):
self.testStep("开始测试")
self.testStep("播放labsat场景")
self.labsat.aw_labsatPlay(self.sceneData["fileName"], self.sceneData['startTime'], self.sceneData['duarTime'])
self.labsat.aw_labsatATTN(20)
time.sleep(self.sceneData['duarTime'])
self.testStep("停止labsat播放")
self.labsat.aw_labsatStopPlay()
self.testStep("停止串口读取")
self.lbs.aw_stopReadPort()
self.testStep("分析Nmea数据")
self.lbs.aw_nmeanalysis(self.sceneData['utcStartTime'], self.sceneData['utcEndTime'], sceneId=self.sceneData['sceneId'])
def teardown(self):
        self.teardownStep("Test finished")
authors: ["418816179@qq.com"] | author_id: 418816179@qq.com

blob_id: fc9a91f8832a9e9583dfed7a9b8dbfdde0d1adee | directory_id: e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
path: /a10sdk/core/counter/counter_vtep_stats.py
content_id: f46f33a278bb54b92bf117eca714b56cd163fb28 | detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: amwelch/a10sdk-python | branch_name: refs/heads/master
snapshot_id: 4179565afdc76cdec3601c2715a79479b3225aef | revision_id: 3e6d88c65bd1a2bf63917d14be58d782e06814e6
visit_date: 2021-01-20T23:17:07.270210 | revision_date: 2015-08-13T17:53:23 | committer_date: 2015-08-13T17:53:23
github_id: 40,673,499 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2015-08-13T17:51:35 | gha_created_at: 2015-08-13T17:51:34 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,169 | extension: py
content:
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param rx_bad_inner_ipv4_len_pkts: {"description": "Packets received with Bad Inner IPv4 Payload length", "format": "counter", "type": "number", "oid": "34", "optional": true, "size": "2"}
:param rx_pkts: {"description": "In Total Packets", "format": "counter", "type": "number", "oid": "15", "optional": true, "size": "8"}
:param tx_encap_missing_pkts: {"description": "Remote Vtep unreachable: Drop Tx", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "2"}
:param rx_mcast_pkts: {"description": "Out Multicast Packets", "format": "counter", "type": "number", "oid": "19", "optional": true, "size": "2"}
:param cfg_vtep_error: {"description": "Config Error: Drop Packet", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "2"}
:param rx_reassembled_pkts: {"description": "Reassembled Packets", "format": "counter", "type": "number", "oid": "33", "optional": true, "size": "2"}
:param rx_ucast_pkts: {"description": "In Unicast Packets", "format": "counter", "type": "number", "oid": "17", "optional": true, "size": "8"}
:param rx_lif_uninit: {"description": "Lif not UP: Drop Rx", "format": "counter", "type": "number", "oid": "36", "optional": true, "size": "2"}
:param rx_lif_invalid: {"description": "Invalid Lif: Drop Rx", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "2"}
:param rx_dot1q_ptks: {"description": "Dot1q Packet: Drop Rx", "format": "counter", "type": "number", "oid": "31", "optional": true, "size": "2"}
    :param rx_bad_checksum_pkts: {"description": "Packet received with Bad Inner checksum", "format": "counter", "type": "number", "oid": "22", "optional": true, "size": "2"}
:param tx_bcast_pkts: {"description": "Out Broadcast Packets", "format": "counter", "type": "number", "oid": "27", "optional": true, "size": "8"}
:param tx_fragmented_pkts: {"description": "Fragmented Packets", "format": "counter", "type": "number", "oid": "32", "optional": true, "size": "2"}
:param rx_host_learned: {"description": "Number of Host =", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "2"}
:param rx_unhandled_pkts: {"description": "Unhandled Packets: Drop Rx", "format": "counter", "type": "number", "oid": "13", "optional": true, "size": "2"}
:param tx_arp_req_sent_pkts: {"description": "Number of Arp Requests Sent", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "2"}
:param rx_host_learn_error: {"description": "Number of Host =", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "2"}
:param rx_encap_miss_pkts: {"description": "Remote Vtep unreachable: Drop Tx", "format": "counter", "type": "number", "oid": "21", "optional": true, "size": "2"}
:param rx_requeued_pkts: {"description": "Packets requeued to another CPU", "format": "counter", "type": "number", "oid": "23", "optional": true, "size": "2"}
:param tx_lif_invalid: {"description": "Invalid Lif: Drop Tx", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "2"}
:param rx_vtep_unknown: {"description": "Vtep unknown: Drop Rx", "format": "counter", "type": "number", "oid": "12", "optional": true, "size": "2"}
:param rx_dropped_pkts: {"description": "In Dropped Packets", "format": "counter", "type": "number", "oid": "20", "optional": true, "size": "2"}
:param tx_flood_pkts: {"description": "Out Flooded Packets", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
:param rx_bad_inner_ipv6_len_pkts: {"description": "Packets received with Bad Inner IPv6 Payload length", "format": "counter", "type": "number", "oid": "35", "optional": true, "size": "2"}
:param rx_pkts_too_large: {"description": "Packet too large: Drop Rx", "format": "counter", "type": "number", "oid": "30", "optional": true, "size": "2"}
:param tx_bytes: {"description": "Out Total Octets", "format": "counter", "type": "number", "oid": "25", "optional": true, "size": "8"}
:param tx_mcast_pkts: {"description": "Out Multicast Packets", "format": "counter", "type": "number", "oid": "28", "optional": true, "size": "2"}
:param tx_vtep_unknown: {"description": "Vtep unknown: Drop Tx", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "2"}
:param tx_encap_unresolved_pkts: {"description": "Remote Vtep unreachable: Drop Tx", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "2"}
:param tx_encap_bad_pkts: {"description": "Remote Vtep unreachable: Drop Tx", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "2"}
:param rx_bcast_pkts: {"description": "In Broadcast Packets", "format": "counter", "type": "number", "oid": "18", "optional": true, "size": "8"}
:param tx_unhandled_pkts: {"description": "Unhandled Packets: Drop Tx", "format": "counter", "type": "number", "oid": "14", "optional": true, "size": "2"}
:param tx_dropped_pkts: {"description": "Out Dropped Packets", "format": "counter", "type": "number", "oid": "29", "optional": true, "size": "2"}
:param tx_ucast_pkts: {"description": "Out Unicast Packets", "format": "counter", "type": "number", "oid": "26", "optional": true, "size": "8"}
:param tx_pkts: {"description": "Out Total Packets", "format": "counter", "type": "number", "oid": "24", "optional": true, "size": "8"}
:param rx_bytes: {"description": "In Total Octets", "format": "counter", "type": "number", "oid": "16", "optional": true, "size": "8"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.rx_bad_inner_ipv4_len_pkts = ""
self.rx_pkts = ""
self.tx_encap_missing_pkts = ""
self.rx_mcast_pkts = ""
self.cfg_vtep_error = ""
self.rx_reassembled_pkts = ""
self.rx_ucast_pkts = ""
self.rx_lif_uninit = ""
self.rx_lif_invalid = ""
self.rx_dot1q_ptks = ""
self.rx_bad_checksum_pkts = ""
self.tx_bcast_pkts = ""
self.tx_fragmented_pkts = ""
self.rx_host_learned = ""
self.rx_unhandled_pkts = ""
self.tx_arp_req_sent_pkts = ""
self.rx_host_learn_error = ""
self.rx_encap_miss_pkts = ""
self.rx_requeued_pkts = ""
self.tx_lif_invalid = ""
self.rx_vtep_unknown = ""
self.rx_dropped_pkts = ""
self.tx_flood_pkts = ""
self.rx_bad_inner_ipv6_len_pkts = ""
self.rx_pkts_too_large = ""
self.tx_bytes = ""
self.tx_mcast_pkts = ""
self.tx_vtep_unknown = ""
self.tx_encap_unresolved_pkts = ""
self.tx_encap_bad_pkts = ""
self.rx_bcast_pkts = ""
self.tx_unhandled_pkts = ""
self.tx_dropped_pkts = ""
self.tx_ucast_pkts = ""
self.tx_pkts = ""
self.rx_bytes = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Vtep(A10BaseClass):
"""Class Description::
Statistics for the object vtep.
Class vtep supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/counter/vtep/{sampling_enable}/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "sampling_enable"]
self.b_key = "vtep"
self.a10_url="/axapi/v3/counter/vtep/{sampling_enable}/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
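# Usage sketch (an assumption based on the docstring above, not verified
# against the A10 SDK; `device_proxy` is a hypothetical session object from
# common/device_proxy.py):
#
#   vtep = Vtep(sampling_enable="all", DeviceProxy=device_proxy)
#   # GET https://<host>/axapi/v3/counter/vtep/all/stats
#   print(vtep.stats)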
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
2241a6690333ae6509e556b65fccb35af2f114c3
|
18a6b272d4c55b24d9c179ae1e58959674e53afe
|
/tf_rl/examples/NerveNet/scripts/ppos/mujoco/test_env.py
|
66cb7b6d26ae7b5ddd7b2ed0f8b11ee777bf4a40
|
[
"MIT"
] |
permissive
|
Rowing0914/TF2_RL
|
6cce916f409b3d4ef2a5a40a0611908f20d08b2c
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
refs/heads/master
| 2022-12-10T09:58:57.456415
| 2021-05-23T02:43:21
| 2021-05-23T02:43:21
| 233,476,950
| 9
| 1
|
MIT
| 2022-12-08T07:02:42
| 2020-01-12T23:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 621
|
py
|
import gym
# you can choose other environments.
# possible environments: Ant-v2, HalfCheetah-v2, Hopper-v2, Humanoid-v2,
# HumanoidStandup-v2, InvertedPendulum-v2, Reacher-v2, Swimmer-v2, Walker2d-v2
env = gym.make("Walker2d-v2")
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print('state size:', num_inputs)
print('action size:', num_actions)
env.reset()
for _ in range(1000):
env.render()
state, reward, done, _ = env.step(env.action_space.sample())
# print('state:', state)
# reward = forward velocity - sum(action^2) + live_bonus
print('reward:', reward)
|
[
"kosakaboat@gmail.com"
] |
kosakaboat@gmail.com
|
8929de1dd4441865c55d4dcb7a063a6b3ee7b872
|
637bb3f080ff18001a732d9bf607ef962b09c5dd
|
/AtiviadeMeioDisciplina/marte.py
|
f9f948a67a836eaaa4301207c7e9bf074231a62a
|
[] |
no_license
|
magnoazneto/IFPI_Algoritmos
|
995296fa22445c57981a1fad43e1ef7a8da83e5e
|
3b5e79c79b7a1fb7a08206719fd418fba1b39691
|
refs/heads/master
| 2022-02-27T10:59:17.123895
| 2019-11-17T13:51:35
| 2019-11-17T13:51:35
| 186,868,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
def main():
msg = input()
    print('Altered messages:', identify_mars(msg))
def identify_mars(string):
default = 'HELP'
    length = len(default)
i = 0
def_idx = 0
infected_msgs = 0
while i < len(string):
if string[i] == default[def_idx]:
def_idx += 1
i += 1
if def_idx == 4:
def_idx = 0
else:
infected_msgs += 1
            i += length - def_idx
def_idx = 0
return infected_msgs
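# Worked examples, traced by hand against the loop above (illustrative):
#   identify_mars('HELPHELP') -> 0   # two intact 'HELP' messages
#   identify_mars('HELLO')    -> 2   # mismatch at index 3, then again at index 4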
main()
|
[
"magnoazneto@gmail.com"
] |
magnoazneto@gmail.com
|
1b2beebcfce2d6669e1ab5bf05f8a9f49f94fa6b
|
a7b07e14f58008e4c9567a9ae67429cedf00e1dc
|
/lib/jnpr/healthbot/swagger/models/device_schema.py
|
1952a7576fcf36a41d947010eca0a51bf076602f
|
[
"Apache-2.0"
] |
permissive
|
dmontagner/healthbot-py-client
|
3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9
|
0952e0a9e7ed63c9fe84879f40407c3327735252
|
refs/heads/master
| 2020-08-03T12:16:38.428848
| 2019-09-30T01:57:24
| 2019-09-30T01:57:24
| 211,750,200
| 0
| 0
|
Apache-2.0
| 2019-09-30T01:17:48
| 2019-09-30T01:17:47
| null |
UTF-8
|
Python
| false
| false
| 11,136
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from jnpr.healthbot.swagger.models.device_schema_i_agent import DeviceSchemaIAgent # noqa: F401,E501
from jnpr.healthbot.swagger.models.device_schema_openconfig import DeviceSchemaOpenconfig # noqa: F401,E501
from jnpr.healthbot.swagger.models.device_schema_snmp import DeviceSchemaSnmp # noqa: F401,E501
from jnpr.healthbot.swagger.models.device_schema_variable import DeviceSchemaVariable # noqa: F401,E501
from jnpr.healthbot.swagger.models.device_schema_vendor import DeviceSchemaVendor # noqa: F401,E501
from jnpr.healthbot.swagger.models.devicegroup_schema_authentication import DevicegroupSchemaAuthentication # noqa: F401,E501
class DeviceSchema(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'authentication': 'DevicegroupSchemaAuthentication',
'description': 'str',
'device_id': 'str',
'host': 'str',
'i_agent': 'DeviceSchemaIAgent',
'open_config': 'DeviceSchemaOpenconfig',
'snmp': 'DeviceSchemaSnmp',
'system_id': 'str',
'variable': 'list[DeviceSchemaVariable]',
'vendor': 'DeviceSchemaVendor'
}
attribute_map = {
'authentication': 'authentication',
'description': 'description',
'device_id': 'device-id',
'host': 'host',
'i_agent': 'iAgent',
'open_config': 'open-config',
'snmp': 'snmp',
'system_id': 'system-id',
'variable': 'variable',
'vendor': 'vendor'
}
def __init__(self, authentication=None, description=None, device_id=None, host=None, i_agent=None, open_config=None, snmp=None, system_id=None, variable=None, vendor=None): # noqa: E501
"""DeviceSchema - a model defined in Swagger""" # noqa: E501
self._authentication = None
self._description = None
self._device_id = None
self._host = None
self._i_agent = None
self._open_config = None
self._snmp = None
self._system_id = None
self._variable = None
self._vendor = None
self.discriminator = None
if authentication is not None:
self.authentication = authentication
if description is not None:
self.description = description
self.device_id = device_id
self.host = host
if i_agent is not None:
self.i_agent = i_agent
if open_config is not None:
self.open_config = open_config
if snmp is not None:
self.snmp = snmp
if system_id is not None:
self.system_id = system_id
if variable is not None:
self.variable = variable
if vendor is not None:
self.vendor = vendor
@property
def authentication(self):
"""Gets the authentication of this DeviceSchema. # noqa: E501
:return: The authentication of this DeviceSchema. # noqa: E501
:rtype: DevicegroupSchemaAuthentication
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this DeviceSchema.
:param authentication: The authentication of this DeviceSchema. # noqa: E501
:type: DevicegroupSchemaAuthentication
"""
self._authentication = authentication
@property
def description(self):
"""Gets the description of this DeviceSchema. # noqa: E501
Description about the device # noqa: E501
:return: The description of this DeviceSchema. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this DeviceSchema.
Description about the device # noqa: E501
:param description: The description of this DeviceSchema. # noqa: E501
:type: str
"""
self._description = description
@property
def device_id(self):
"""Gets the device_id of this DeviceSchema. # noqa: E501
Identifier for the device. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:return: The device_id of this DeviceSchema. # noqa: E501
:rtype: str
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this DeviceSchema.
Identifier for the device. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:param device_id: The device_id of this DeviceSchema. # noqa: E501
:type: str
"""
if device_id is None:
raise ValueError("Invalid value for `device_id`, must not be `None`") # noqa: E501
if device_id is not None and len(device_id) > 64:
raise ValueError("Invalid value for `device_id`, length must be less than or equal to `64`") # noqa: E501
if device_id is not None and not re.search('^[a-zA-Z0-9]([a-zA-Z0-9_-]*\\.*)*$', device_id): # noqa: E501
raise ValueError("Invalid value for `device_id`, must be a follow pattern or equal to `/^[a-zA-Z0-9]([a-zA-Z0-9_-]*\\.*)*$/`") # noqa: E501
self._device_id = device_id
@property
def host(self):
"""Gets the host of this DeviceSchema. # noqa: E501
        Name or IP of the device  # noqa: E501
:return: The host of this DeviceSchema. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this DeviceSchema.
        Name or IP of the device  # noqa: E501
:param host: The host of this DeviceSchema. # noqa: E501
:type: str
"""
if host is None:
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def i_agent(self):
"""Gets the i_agent of this DeviceSchema. # noqa: E501
:return: The i_agent of this DeviceSchema. # noqa: E501
:rtype: DeviceSchemaIAgent
"""
return self._i_agent
@i_agent.setter
def i_agent(self, i_agent):
"""Sets the i_agent of this DeviceSchema.
:param i_agent: The i_agent of this DeviceSchema. # noqa: E501
:type: DeviceSchemaIAgent
"""
self._i_agent = i_agent
@property
def open_config(self):
"""Gets the open_config of this DeviceSchema. # noqa: E501
:return: The open_config of this DeviceSchema. # noqa: E501
:rtype: DeviceSchemaOpenconfig
"""
return self._open_config
@open_config.setter
def open_config(self, open_config):
"""Sets the open_config of this DeviceSchema.
:param open_config: The open_config of this DeviceSchema. # noqa: E501
:type: DeviceSchemaOpenconfig
"""
self._open_config = open_config
@property
def snmp(self):
"""Gets the snmp of this DeviceSchema. # noqa: E501
:return: The snmp of this DeviceSchema. # noqa: E501
:rtype: DeviceSchemaSnmp
"""
return self._snmp
@snmp.setter
def snmp(self, snmp):
"""Sets the snmp of this DeviceSchema.
:param snmp: The snmp of this DeviceSchema. # noqa: E501
:type: DeviceSchemaSnmp
"""
self._snmp = snmp
@property
def system_id(self):
"""Gets the system_id of this DeviceSchema. # noqa: E501
ID which is sent in the JTI UDP messages # noqa: E501
:return: The system_id of this DeviceSchema. # noqa: E501
:rtype: str
"""
return self._system_id
@system_id.setter
def system_id(self, system_id):
"""Sets the system_id of this DeviceSchema.
ID which is sent in the JTI UDP messages # noqa: E501
:param system_id: The system_id of this DeviceSchema. # noqa: E501
:type: str
"""
self._system_id = system_id
@property
def variable(self):
"""Gets the variable of this DeviceSchema. # noqa: E501
Playbook variable configuration # noqa: E501
:return: The variable of this DeviceSchema. # noqa: E501
:rtype: list[DeviceSchemaVariable]
"""
return self._variable
@variable.setter
def variable(self, variable):
"""Sets the variable of this DeviceSchema.
Playbook variable configuration # noqa: E501
:param variable: The variable of this DeviceSchema. # noqa: E501
:type: list[DeviceSchemaVariable]
"""
self._variable = variable
@property
def vendor(self):
"""Gets the vendor of this DeviceSchema. # noqa: E501
:return: The vendor of this DeviceSchema. # noqa: E501
:rtype: DeviceSchemaVendor
"""
return self._vendor
@vendor.setter
def vendor(self, vendor):
"""Sets the vendor of this DeviceSchema.
:param vendor: The vendor of this DeviceSchema. # noqa: E501
:type: DeviceSchemaVendor
"""
self._vendor = vendor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeviceSchema):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
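# Usage sketch (illustrative, not part of the generated file): device_id and
# host are the only fields the setters above require to be non-None; the
# example values are hypothetical.
#
#   device = DeviceSchema(device_id='edge-router-1', host='10.0.0.1',
#                         description='Lab device')
#   print(device.to_str())   # pretty-printed dict of the populated fields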
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
f6b74de65dfb9b450d827c0b8c8a01263f7b6766
|
64b135891387dac3a4bb29f3001a524830d0e4e4
|
/news/forms.py
|
d5177748c1fd666136f6e163c39fd7bca1f0fca6
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
dynamicguy/treeio
|
9ad52802722b64a212e22710c04dbb0bb50d831e
|
4f674898cff2331711639a9b5f6812c874a2cb25
|
refs/heads/master
| 2021-08-28T11:25:41.504635
| 2014-01-31T17:16:22
| 2014-01-31T17:16:22
| 11,323,559
| 0
| 0
|
NOASSERTION
| 2021-08-16T20:18:53
| 2013-07-10T20:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,878
|
py
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
News module forms
"""
from django import forms
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from treeio.core.conf import settings
from treeio.core.models import UpdateRecord, ModuleSetting, Object
class UpdateRecordForm(forms.ModelForm):
""" UpdateRecord form """
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(UpdateRecordForm, self).__init__(*args, **kwargs)
self.fields['body'].required = True
self.fields['body'].label = _("Details")
self.fields['recipients'].help_text = ""
self.fields['recipients'].required = False
self.fields['recipients'].widget.attrs.update({'class': 'multicomplete',
'callback': reverse('identities_ajax_access_lookup')})
# get default permissions from settings
try:
conf = ModuleSetting.get_for_module('treeio.core', 'default_permissions')[0]
default_permissions = conf.value
        except Exception:
default_permissions = settings.HARDTREE_DEFAULT_PERMISSIONS
if self.user and 'userallgroups' in default_permissions:
self.fields['recipients'].initial = [i.id for i in self.user.other_groups.all().only('id')]
self.fields['recipients'].initial.append(self.user.default_group.id)
elif self.user and 'usergroup' in default_permissions:
self.fields['recipients'].initial = [self.user.default_group.id]
class Meta:
"TaskRecordForm"
model = UpdateRecord
fields = ['body', 'recipients']
class UpdateRecordFilterForm(forms.ModelForm):
""" Filter form definition """
def __init__(self, user, *args, **kwargs):
super(UpdateRecordFilterForm, self).__init__(*args, **kwargs)
self.fields['author'].label = _("Author")
self.fields['about'].label = _("About")
self.fields['author'].required = False
self.fields['author'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_user_lookup')})
self.fields['about'].queryset = Object.filter_permitted(user, Object.objects, mode='x')
self.fields['about'].required = False
self.fields['about'].null = True
self.fields['about'].help_text = ""
self.fields['about'].widget.attrs.update({'class': 'multicomplete',
'callback': reverse('core_ajax_object_lookup')})
class Meta:
"Filter"
model = UpdateRecord
fields = ('author', 'about')
|
[
"letoosh@gmail.com"
] |
letoosh@gmail.com
|
ebe61823f230bf6d45bdfab7c10060e7919519bb
|
76cba124f60bf963b2e6bf4dbf03e74bfdb37899
|
/democode/comet.py
|
54d41fbd8968dc7de0a04921458475eae9bd6b57
|
[] |
no_license
|
gasman/exogenesis
|
b51d553bdb20fe523fe00f5fe585f6d7aaa84b72
|
511af1c990bb82d5364d6f950125c057610c8404
|
refs/heads/master
| 2021-01-22T11:51:35.771421
| 2013-09-08T10:53:59
| 2013-09-08T10:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
from democode.antialias import Antialiaser
import draw
import math
class CometScene(object):
def __init__(self, lp):
self.aa = Antialiaser(lp)
def tick(self, pattern, beat):
self.aa.clear()
for x in range(2, 16):
b = beat - x / 4
head_y = max(2, 14 - (b / 4)) - 2 * math.sin(b * math.pi / 4)
greenness = (16 - x) / 16.0
draw.disc(self.aa.screen, x, head_y, greenness * 2, (1, greenness))
self.aa.render()
class CircleCometScene(object):
def __init__(self, lp):
self.aa = Antialiaser(lp)
def tick(self, pattern, beat):
self.aa.clear()
for i in range(16, 2, -1):
b = beat - i * 0.8
a = math.pi * b / 12
r = 7 - (beat / 16)
x = 9 + r * math.sin(a)
y = 9 + r * math.cos(a)
greenness = (16 - i) / 16.0
draw.disc(self.aa.screen, x, y, greenness * 2, (1, greenness))
self.aa.render()
|
[
"matt@west.co.tt"
] |
matt@west.co.tt
|
53a918e3da7373feaf7ba73412451df8825f8a1a
|
729ac731cc69ffb331e1ad86ec09946f6e210da3
|
/welcome.py
|
a48ea96191811a0c14eccb4ee05f94eb1dd2eaa2
|
[] |
no_license
|
sadiqulislam/Python-Practice-All-
|
1fb31095653137ea954ddf3d7173261b8e86a872
|
f4a0305c0c4363ada6459bf55ee3ef267dcea53f
|
refs/heads/master
| 2020-07-26T12:39:50.437859
| 2020-02-10T15:41:12
| 2020-02-10T15:41:12
| 208,645,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
message = "Hello's World"
print (message)
s = 'Amar Vai Tomar Vai Shishir Vai'
print(s)
|
[
"sishishir2015@gmail.com"
] |
sishishir2015@gmail.com
|
16127aa13974427bc0f3874250f37ea6074014bc
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/tree-big-712.py
|
2e6cbaabd1989e69d23b731561665ff903d049e3
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,287
|
py
|
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
            if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
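# (Added note) k advances by modular multiplication (k = k * 37813 % 37831),
# so each run inserts the same fixed pseudo-random sequence of keys.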
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
eec486626a15cfc2fe84cee4502784e8873d9f69
|
3fb660ec514a7e2d2f225313991f16b58974f708
|
/Learning/assert-keyword.py
|
434293e5a78caed67cee76c60559b793cca2795a
|
[] |
no_license
|
jwatson-CO-edu/py_info
|
e2c89bbe05b411d2086c182630d165f87a99ec3f
|
297f9f5733fe256e5c96f2da82f49d82c2a4ba9d
|
refs/heads/master
| 2022-02-06T13:03:14.178557
| 2022-01-22T03:01:46
| 2022-01-22T03:01:46
| 230,357,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
"""
RESULT: If the conditional following the 'assert' keyword evaluates to False, then an 'AssertionError' is raised, just like in C++.
You can optionally provide a message string to give more information about how the assertion was violated, which is a good idea anyway.
"""
def picky_arg( arg ):
""" 'arg' must be greater than 5 """
assert arg > 5 , "'arg' was too small!"
print "You make a compelling argument"
# picky_arg( 4 ) # AssertionError: 'arg' was too small! , program crashes with unhandled exception
picky_arg( 6 ) # "You make a compelling argument"
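# Hedged addition: like any exception, AssertionError can be caught, so the
# program need not crash (same Python 2 print syntax as above):
try:
    picky_arg( 4 )
except AssertionError as err:
    print "Caught:", err # Caught: 'arg' was too small!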
|
[
"james.r.watson@utah.edu"
] |
james.r.watson@utah.edu
|
b0737e53bab3a189f26c79793e6107c969e82108
|
7004661440b908a622ccc01809864971ed6f6d7b
|
/main.py
|
d594a99dc4f2b81f4a0b3a1fce2710a201520858
|
[] |
no_license
|
hanj2270/E-HentaiCrawler
|
a00aa91d4fae2fe1082eebc0daa30ad152ca9f41
|
46f778db61da2166c35bcf03aaf930177c3acd8d
|
refs/heads/master
| 2021-01-19T02:33:13.803416
| 2017-03-08T08:34:18
| 2017-03-08T08:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
import threading
from multiprocessing import Queue, Process
from Proxy.IPPool import getIP
from config import PROCESS_MAX, THREAD_MAX
from crawler.indexgeter import indexgeter
from crawler.webDataGeter import webdatageter
from database.data_writer import data_writer
from error.error_handling import error_handing
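# Pipeline overview (summary added for clarity): indexgeter fills qindex with
# index pages; webdatageter worker processes consume qindex, pushing parsed
# rows to qdata and failures to qerror while drawing proxies from qip;
# data_writer and error_handing drain qdata/qerror on background threads.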
if __name__ == "__main__":
    # index-page queue
qindex = Queue()
    # data queue
qdata = Queue()
    # error queue
qerror = Queue()
    # proxy queue
qip = Queue()
    # start the index generator process
Process(target=indexgeter, args=(qindex,)).start()
    # start the crawler processes
n = 0
while n < PROCESS_MAX:
Process(target=webdatageter, args=(qindex, qdata, qerror, qip)).start()
n += 1
    # fetch THREAD_MAX proxies to start the crawler threads
n = 0
while n < THREAD_MAX:
try:
qip.put(getIP())
n += 1
except BaseException:
break
    # data writer thread
threading.Thread(target=data_writer, args=(qdata,)).start()
    # error handling thread
threading.Thread(target=error_handing, args=(qdata, qerror, qip, n)).start()
|
[
"shuiqukeyou@gmail.com"
] |
shuiqukeyou@gmail.com
|
2665af70a535b15961a85fcd7f63751772321701
|
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
|
/MY_REPOS/awesome-4-new-developers/OVERFLOW/DS-ALGO-OFFICIAL/temp/algorithms/algorithms/maths/base_conversion.py
|
b538f2b3bfc0b89f2a4c708831f88b8ba847a996
|
[] |
no_license
|
Portfolio-Projects42/UsefulResourceRepo2.0
|
1dccc8961a09347f124d3ed7c27c6d73b9806189
|
75b1e23c757845b5f1894ebe53551a1cf759c6a3
|
refs/heads/master
| 2023-08-04T12:23:48.862451
| 2021-09-15T12:51:35
| 2021-09-15T12:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
"""
Integer base conversion algorithms:
int_to_base(5, 2) returns '101'.
base_to_int('F', 16) returns 15.
"""
import string
def int_to_base(n, base):
"""
:type n: int
:type base: int
:rtype: str
"""
is_negative = False
if n == 0:
return "0"
elif n < 0:
is_negative = True
n *= -1
digit = string.digits + string.ascii_uppercase
res = ""
while n > 0:
res += digit[n % base]
n //= base
if is_negative:
return "-" + res[::-1]
else:
return res[::-1]
def base_to_int(s, base):
"""
    Note: you can use the built-in int() function instead of this.
:type s: str
:type base: int
:rtype: int
"""
digit = {}
for i, c in enumerate(string.digits + string.ascii_uppercase):
digit[c] = i
multiplier = 1
res = 0
for c in s[::-1]:
res += digit[c] * multiplier
multiplier *= base
return res
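# Round-trip sanity checks (added illustration; safe to run as a script):
if __name__ == "__main__":
    assert int_to_base(5, 2) == "101"
    assert base_to_int("F", 16) == 15
    assert int_to_base(-255, 16) == "-FF"
    assert base_to_int(int_to_base(937, 36), 36) == 937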
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
cf6364cfe0b6485a6cb715e366ab55a7804f6714
|
dd7e2cb3366855584fc27bbb2e0dc7ed63977117
|
/vida/vida/migrations/0019_auto_20160204_1447.py
|
f82aefc2ee0ce4b604551cdbd531c2578fedde9d
|
[
"MIT"
] |
permissive
|
ProminentEdge/flintlock
|
3d3eb6efbe4a2d0e731f8a26e69ffcd314e25cb9
|
271c897b332f0c24e00a23c1fe86f5172fb9dd30
|
refs/heads/master
| 2021-01-17T07:58:49.650273
| 2016-03-05T02:06:59
| 2016-03-05T02:06:59
| 50,030,647
| 1
| 1
| null | 2016-03-04T05:10:45
| 2016-01-20T13:06:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0018_auto_20160204_1131'),
]
operations = [
migrations.AlterModelOptions(
name='report',
options={'ordering': ['-timestamp'], 'get_latest_by': 'timestamp'},
),
migrations.AlterField(
model_name='report',
name='geom',
field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, blank=True),
),
]
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
56071c9dbe2ba7e34e38e15fca15143a1dcf295b
|
db9140a12939db6226d68624eecc3cc3fdadf3dd
|
/adage/node.py
|
417b8feaaa05c5f55453175c310e5311253af50e
|
[] |
no_license
|
nextiams/adage
|
dedb09a64bdbd714d7043a00a51f556b6224e129
|
57525f7ed67d765525009b639bb355c74b1054e1
|
refs/heads/master
| 2022-09-08T23:25:24.163519
| 2017-10-09T08:13:33
| 2017-10-09T08:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
import time
import uuid
import logging
import adage.nodestate as nodestate
log = logging.getLogger(__name__)
class Node(object):
def __init__(self,name,task,identifier = None, define_time = None):
self.identifier = identifier or str(uuid.uuid4())
self.name = name
self.task = task
# the timestamps
self.define_time = define_time or time.time()
self.submit_time = None
self.ready_by_time = None
# backend to update state against
self.backend = None
# relevant state data
self.resultproxy = None
self._result = None
self._state = nodestate.DEFINED
def __repr__(self):
return '<Node name: {} id: {} state: {}>'.format(self.name,self.identifier,self.state)
def update_state(self):
#if we do not have a result object
#that means it's not submitted yet
if not self.resultproxy:
self._state = nodestate.DEFINED
return
#if we have a resultobject
#but the result is not ready
#the node is still running
if not self.backend.ready(self.resultproxy):
self._state = nodestate.RUNNING
return
#if it's ready it's either successful
#or failed
if self.backend.successful(self.resultproxy):
self._state = nodestate.SUCCESS
self._result = self.backend.result(self.resultproxy)
else:
self._state = nodestate.FAILED
#it's ready so set time stamp it not already set
if not self.ready_by_time:
self.ready_by_time = time.time()
log.info('node ready %s',self)
@property
def result(self):
return self._result
@property
def state(self):
return self._state
def ready(self):
return self.state in [nodestate.SUCCESS, nodestate.FAILED]
def successful(self):
return self.state == nodestate.SUCCESS
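# Hedged sketch of the backend interface update_state() relies on; the class
# is hypothetical, with method names inferred from the calls above:
class _EchoBackend(object):
    def ready(self, proxy):       # has the submitted task finished?
        return True
    def successful(self, proxy):  # did it finish without error?
        return True
    def result(self, proxy):      # retrieve the task's return value
        return proxy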
|
[
"lukas.heinrich@gmail.com"
] |
lukas.heinrich@gmail.com
|
52c025692aaad19bacf308a26578cc9bf8277340
|
57c570d1b5a621158d8763f935e2069be6b8c90a
|
/tykj-operation/tykj-operation/service/service/estoreservice/api/tests.py
|
5be1dda2c5f44a1c0e04cdf10ddf55e7dc9f2c39
|
[] |
no_license
|
liuliainio/liuli
|
e011decf45f7eca7009a12ad4a96f33a17055945
|
203fbf4f135efb6432c77b937633003ce2f2c9a2
|
refs/heads/master
| 2021-01-10T20:35:08.070770
| 2018-08-21T05:52:59
| 2018-08-21T05:52:59
| 25,625,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
from __future__ import print_function, division, absolute_import
from bson.objectid import ObjectId
from django.test import TestCase
from estorecore.servemodels.push import PushMongodbStorage
from estoreservice import settings
import logging
import os
import time
from estorecore.test import get_all_test_cases
logger = logging.getLogger('django')
# import all test cases from test directory
test_case_dir = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', 'test'))
test_cases = get_all_test_cases(test_case_dir)
for pkg, mod in test_cases:
exec 'from %s.%s import *' % (pkg, mod)
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
def dtest_push_message_update_perf():
push_db = PushMongodbStorage(settings.MONGODB_CONF)
# cond = {'_id': ObjectId("51064adb9813f3ea9cc702bc")}
cond = {'id': 73}
n_round = 10
message_coll = push_db._db.messages
for msg in message_coll.find(cond):
print(msg)
total_time = 0.0
for _ in range(n_round):
start = time.time()
message_coll.update(cond, {'$inc': {'sent_count': 1}})
total_time += (time.time() - start) * 1000.0
print('inc sent_count sync took: %0.3f ms' % (total_time / n_round))
total_time = 0.0
for _ in range(n_round):
start = time.time()
message_coll.update(cond, {'$inc': {'sent_count': 1}}, w=0)
total_time += (time.time() - start) * 1000.0
print('inc sent_count async took: %0.3f ms' % (total_time / n_round))
# Revert above changes.
message_coll.update(cond, {'$inc': {'sent_count': -1 * 2 * n_round}})
if __name__ == '__main__':
dtest_push_message_update_perf()
|
[
"liuliainio@163.com"
] |
liuliainio@163.com
|
f0fe4ca2175284e073366e4bb5c3fd85bbe2a82c
|
6b7aef6254b5a3535e2540792299ca52fadeed68
|
/src/pi_django_app/views.py
|
bfca293c7929a11d4f77963b2ccb8ce4d3c6245d
|
[] |
no_license
|
asfcarter/pi_django
|
1137d4443778e7218776fcc89aef94fcf74627ba
|
4c1671bb2b3668c0ef77c1ad231aa864e69cf97e
|
refs/heads/master
| 2021-06-17T19:20:05.994607
| 2021-03-24T15:33:33
| 2021-03-24T15:33:33
| 42,591,711
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request,"index2.html",{})
|
[
"pi@raspberrypi.(none)"
] |
pi@raspberrypi.(none)
|
2835c6426469a8c114fb1a60b7567e563589339c
|
cca53e4b21ca8d31f4b66519ac678e37ebce47a8
|
/exercises/ex8.py
|
90790acc57dae4156d8660a53d5b61fb7b4b6265
|
[] |
no_license
|
hancush/pynotes
|
b115a92ac9eb9b9d4e410177da855a828487adbd
|
be27097a8726d4cc22740b1b96398a7a77289d22
|
refs/heads/master
| 2021-01-19T00:47:23.246090
| 2015-09-15T21:20:30
| 2015-09-15T21:20:30
| 40,318,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
"I had this thing.",
"That 'you' could type up right.",
"But it didn't sing.", # shows up w double quotes bc single quote in string?
"So I said goodnight."
)
|
[
"hannah.cushman@gmail.com"
] |
hannah.cushman@gmail.com
|
291249ca174379bb84d42e63c78fdf78cee4b58c
|
27e2b4b14d8217fcad3b57ef8918cb857931d89f
|
/learning/modules/cuda_module.py
|
abc88462975e47882fa8c9515c8385d895769c71
|
[
"BSD-2-Clause"
] |
permissive
|
jhu-lcsr/good_robot
|
205b31e9a2399032488ffa094d9f1e0d07592aa4
|
61217d65f040d536e54804150ce8abcf97343410
|
refs/heads/master
| 2022-03-31T23:49:04.958796
| 2022-03-25T18:14:14
| 2022-03-25T18:14:14
| 198,902,668
| 95
| 24
|
BSD-2-Clause
| 2022-02-18T20:45:13
| 2019-07-25T21:21:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
import torch
from torch import nn as nn
"""
class CudaModule(torch.nn.Module):
def __init__(self):
super(CudaModule, self).__init__()
self.is_cuda = False
self.cuda_device = None
def cuda(self, device=None):
nn.Module.cuda(self, device)
self.is_cuda = True
self.cuda_device = device
return self
"""
|
[
"elias.stengel@gmail.com"
] |
elias.stengel@gmail.com
|
62dffbb21ad8bfd2fb90e766057c1a6e49c6ac04
|
10fddce056973c339b1d939110ca2b29591e77f7
|
/wc_utils/workbook/__init__.py
|
57e2da10440dbb50fab80104f1190e0ddc100808
|
[
"MIT"
] |
permissive
|
KarrLab/wc_utils
|
4ed3bdfa558171ab32293a452f9e6e02b3fc16f1
|
a4c0e2e8b9bd88356729e38faf5c0d09d61ff921
|
refs/heads/master
| 2022-05-01T06:17:54.910791
| 2020-09-28T22:38:30
| 2020-09-28T22:38:30
| 69,289,809
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
from .core import (Workbook, Worksheet, Row, Formula,
WorkbookDifference, WorksheetDifference,
RowDifference, CellDifference)
from . import io
|
[
"jonrkarr@gmail.com"
] |
jonrkarr@gmail.com
|
05b1f77092060e63d75e668a1221e319952163a7
|
e7a9bac3b02a3849c1ab5d6990012510b8592c47
|
/src/briefcase/commands/__init__.py
|
5ed9108309d7fddad2a7683abe59158b365c852f
|
[
"BSD-3-Clause"
] |
permissive
|
saroad2/briefcase
|
3d15dabfa6462a3b123053042532d0ae482b689d
|
afbe8ed499c08afbeaa837ea032fa24d20b320a5
|
refs/heads/main
| 2023-03-06T12:33:52.929614
| 2022-10-04T22:27:03
| 2022-10-04T22:27:03
| 245,388,096
| 1
| 0
|
BSD-3-Clause
| 2023-02-26T20:59:27
| 2020-03-06T10:12:29
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
from .build import BuildCommand # noqa
from .create import CreateCommand # noqa
from .dev import DevCommand # noqa
from .new import NewCommand # noqa
from .open import OpenCommand # noqa
from .package import PackageCommand # noqa
from .publish import PublishCommand # noqa
from .run import RunCommand # noqa
from .update import UpdateCommand # noqa
from .upgrade import UpgradeCommand # noqa
|
[
"russell@keith-magee.com"
] |
russell@keith-magee.com
|
68a9e00c01783e8032ebfa06d4436747cfec88b8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02612/s636347083.py
|
580b43933e3e425618cddf0df173aca1748fc2f8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
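# Hedged reading of this one-liner: it prints the change received when an
# N-yen price is paid with 1000-yen bills (0 when N is already a multiple of
# 1000), i.e. the gap between N and the next multiple of 1000.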
N = int(input())
ans = (1000 - N % 1000) % 1000
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ebc22e699980c3f5b7b8234f2aad220fa055e67a
|
7f0548b7191b7589712af19baebafddae1d0505f
|
/dojoassignments/python/django/full_stack_django/bad_travel_buddy/apps/login_registration/migrations/0004_auto_20170627_1628.py
|
cb00a717c150b7df258a975ef221794b3969c1e7
|
[] |
no_license
|
mtjhartley/codingdojo
|
dd8eab1bd61fb847e44766e89fe3db2340468102
|
65dc558d19adbe62f85ad61c32cb1c392b56567c
|
refs/heads/master
| 2022-12-14T23:06:11.927445
| 2017-08-16T21:08:35
| 2017-08-16T21:08:35
| 92,218,728
| 1
| 5
| null | 2022-12-07T23:59:48
| 2017-05-23T20:46:03
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-27 16:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login_registration', '0003_remove_user_birthday'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='first_name',
new_name='name',
),
migrations.RenameField(
model_name='user',
old_name='last_name',
new_name='user_name',
),
]
|
[
"mtjhartley@gmail.com"
] |
mtjhartley@gmail.com
|
9fb38590c5f71af6d4e4a18577416948774e05a2
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/test_forge_onn22R.py
|
cf5f4f0b31136df8e8cedf859b2a06b0447a6563
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTestonn22R(TestCase):
def setUp(self):
self.maxDiff = None
def test_forge_onn22R(self):
expected = get_data(
path='operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/forged.hex')
actual = forge_operation_group(get_data(
path='operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/unsigned.json'))
self.assertEqual(expected, actual)
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
2d37dc59f7b514b78dcc1c30bee95be85ca763e3
|
0728a2e165808cfe5651693a6e7f47804bfb085f
|
/bot/pymorphy/pymorphy/contrib/lastnames_ru.py
|
446291d75d74e00a2d96ae2e019b13b9ac7170de
|
[
"MIT"
] |
permissive
|
testTemtProj/OLD_PROJECT
|
5b026e072017f5135159b0940370fda860241d39
|
9e5b165f4e8acf9003536e05dcefd33a5ae46890
|
refs/heads/master
| 2020-05-18T15:30:24.543319
| 2013-07-23T15:17:32
| 2013-07-23T15:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,149
|
py
|
#-*- coding: UTF-8
from __future__ import unicode_literals
import re
from pymorphy.morph import GramForm
# Order matters: ЯНЦ must come before ЯН for the pattern to match correctly.
LASTNAME_PATTERN = re.compile(r'(.*('
r'ОВ|ИВ|ЕВ'
r'|ИН'
r'|СК|ЦК'
r'|ИЧ'
r'|ЮК|УК'
r'|ИС|ЭС|УС'
r'|ЫХ|ИХ'
r'|ЯНЦ|ЕНЦ|АН|ЯН'
# Surnames without an explicit suffix (-ок, -ия, -иа) must not be listed here - decline() will try to guess the lemma.
# Indeclinable surnames must not be listed either - they have no ignorable part after the suffix.
# Surnames with the -ых/-их suffixes are included because these endings form the CASES_OV plural.
r'))',
re.UNICODE | re.VERBOSE)
# XXX: is Й deliberately missing here?
CONSONANTS = 'БВГДЖЗКЛМНПРСТФХЦЧШЩ'
# http://www.gramota.ru/spravka/letters/?rub=rubric_482
# http://planeta-imen.narod.ru/slovar-smolenskich-familij/struktura-familij.html
# 13.1.1, 13.1.3
CASES_OV = {
'мр': ('', 'А', 'У', 'А', 'ЫМ', 'Е'),
'жр': ('А', 'ОЙ', 'ОЙ', 'У', 'ОЙ', 'ОЙ'),
}
# 13.1.2
CASES_SK = {
'мр': ('ИЙ', 'ОГО', 'ОМУ', 'ОГО', 'ИМ', 'ОМ'),
'жр': ('АЯ', 'ОЙ', 'ОЙ', 'УЮ', 'ОЙ', 'ОЙ'),
}
# Belarusian surnames in -ич, Ukrainian ones in -ук, -юк
CASES_CH = {
'мр': ('', 'А', 'У', 'А', 'ЕМ', 'Е'),
'жр': ('', '', '', '', '', ''),
}
# Surnames ending in -ок
CASES_OK = {
'мр': ('ОК', 'КА', 'КУ', 'КА', 'КОМ', 'КЕ'),
'жр': ('ОК', 'ОК', 'ОК', 'ОК', 'ОК', 'ОК'),
}
# Surnames ending in -ец
CASES_EC = {
'мр': ('ЕЦ', 'ЦА', 'ЦУ', 'ЦА', 'ЦОМ', 'ЦЕ'),
'жр': ('ЕЦ', 'ЕЦ', 'ЕЦ', 'ЕЦ', 'ЕЦ', 'ЕЦ'),
}
# Lithuanian, Estonian, and some Armenian surnames
CASES_IS = {
'мр': ('', 'А', 'У', 'А', 'ОМ', 'Е'),
'жр': ('', '', '', '', '', ''),
}
# 13.1.12 (not really)
CASES_IA = {
'мр': ('ИЯ', 'ИЮ', 'ИИ', 'ИЮ', 'ИЕЙ', 'ИИ'),
'жр': ('ИЯ', 'ИЮ', 'ИИ', 'ИЮ', 'ИЕЙ', 'ИИ'),
}
# 13.1.6, 13.1.10
INDECLINABLE_CASES = {
'мр': ('', '', '', '', '', ''),
'жр': ('', '', '', '', '', ''),
}
# Plural declension: suffixes shared by both genders
# Plural forms for CASES_OV
PLURAL_OV = ('Ы', 'ЫХ', 'ЫМ', 'ЫХ', 'ЫМИ', 'ЫХ')
# for CASES_SK
PLURAL_SK = ('ИЕ', 'ИХ', 'ИМ', 'ИХ', 'ИМИ', 'ИХ')
# for CASES_OK
PLURAL_OK = ('КИ', 'КОВ', 'КАМ', 'КОВ', 'КАМИ', 'КАХ')
# for CASES_EC
PLURAL_EC = ('ЦЫ', 'ЦОВ', 'ЦАМ', 'ЦОВ', 'ЦАМИ', 'ЦАХ')
# for INDECLINABLE_CASES (and surnames that do not decline in the plural)
PLURAL_INDECLINABLE_CASES = ('', '', '', '', '', '')
# Suffix -> (singular declension, plural declension)
CASEMAP = {
'ОВ': (CASES_OV, PLURAL_OV),
'ИВ': (CASES_OV, PLURAL_OV),
'ЕВ': (CASES_OV, PLURAL_OV),
'ИН': (CASES_OV, PLURAL_OV),
'СК': (CASES_SK, PLURAL_SK),
'ЦК': (CASES_SK, PLURAL_SK),
'ИЧ': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'ЮК': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'УК': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'ИС': (CASES_IS, PLURAL_INDECLINABLE_CASES),
'ЭС': (CASES_IS, PLURAL_INDECLINABLE_CASES),
'УС': (CASES_IS, PLURAL_INDECLINABLE_CASES),
    # Some Armenian surnames decline the same way as Lithuanian ones
'АН': (CASES_IS, PLURAL_INDECLINABLE_CASES),
'ЯН': (CASES_IS, PLURAL_INDECLINABLE_CASES),
    # Other Armenian surnames decline like Ukrainian surnames
    # ending in ИЧ
'УНЦ': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'ЯНЦ': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'ЕНЦ': (CASES_CH, PLURAL_INDECLINABLE_CASES),
'ИЯ': (CASES_IA, PLURAL_INDECLINABLE_CASES),
'ОК': (CASES_OK, PLURAL_OK),
    # The -ец declension is similar to surnames with the -ок suffix
'ЕЦ': (CASES_EC, PLURAL_EC),
'ЫХ': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'ИХ': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'КО': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'АГО': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'ЯГО': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'ИА': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
'ХНО': (INDECLINABLE_CASES, PLURAL_INDECLINABLE_CASES),
}
def decline(lastname, gram_form=''):
    ''' Declines a surname and returns all possible forms '''
    # The presumed lemma is extracted from the surname (Табуретов -> Табуретов,
    # Табуретовым -> Табуретов), then the lemma is declined by the surname declension rules
def guess_lemma(name):
'''
        Try to guess a hard-to-decline surname (Цапок, Бегунец, Берия).
        Returns a pair (name=lemma+suffix, lemma) or (None, None).
'''
name_len = len(name)
        # Try to guess a declined surname from case 13.1.12 ("Берией")
if name_len > 2 and name[-2:] in ('ИИ', 'ИЮ',):
return (lastname[:-2] + 'ИЯ', lastname[:-2])
elif name_len > 3 and name[-3:] in ('ИЕЙ',):
return (lastname[:-3] + 'ИЯ', lastname[:-3])
        # Try to guess a declined surname ending in -ок ("Цапка").
        # Works only if the letter before the ending is a consonant.
        # The consonant check is done to exclude declined surnames
        # in -ак ("Собчака")
if name_len > 3 and name[-2:] in ('КА', 'КУ', 'КЕ',) and name[-3] in CONSONANTS:
return (lastname[:-2] + 'ОК', lastname[:-2])
elif name_len > 4 and name[-3:] in ('КОМ',) and name[-4] in CONSONANTS:
return (lastname[:-3] + 'ОК', lastname[:-3])
        # Try to guess a declined surname ending in -ец ("Бегунец")
        # FIXME: a check for collisions with other surnames is needed (as in
        # the "Цапок" case)
if name_len > 3 and name[-2:] in ('ЦА', 'ЦУ', 'ЦЕ',):
return (lastname[:-2] + 'ЕЦ', lastname[:-2])
return (None, None)
match = LASTNAME_PATTERN.search(lastname)
lemma = name = match.group(1) if match else lastname # name is lemma + suffix
name_len = len(name)
guessed_name, guessed_lemma = guess_lemma(name)
if guessed_name and guessed_lemma:
name, lemma = guessed_name, guessed_lemma
cases, plural_cases = {}, ()
if name_len > 2:
cases, plural_cases = CASEMAP.get(name[-2:], ({}, ()))
if cases:
lemma = name[:-2]
if not cases and name_len > 3:
cases, plural_cases = CASEMAP.get(name[-3:], ({}, ()))
if cases:
lemma = name[:-3]
    # In case 13.1.12 the lemma consists of the surname minus
    # its last two letters
if cases is CASES_IA or cases is CASES_OK:
lemma = name = name[:-2]
if not cases:
return []
expected_form = GramForm(gram_form)
forms = []
for i, case in zip(range(6), ('им', 'рд', 'дт', 'вн', 'тв', 'пр',)):
for gender_tag in ('мр', 'жр',):
form = GramForm('%s,%s,фам,ед' % (case, gender_tag,))
if gram_form and not form.match(expected_form):
continue
forms.append({
'word': '%s%s' % (name, cases[gender_tag][i]),
'class': 'С',
'info': form.get_form_string(),
'lemma': name,
'method': 'decline_lastname (%s)' % lastname,
'norm': '%s%s' % (name, cases[gender_tag][0]),
})
plural_form = GramForm('%s,мр-жр,фам,мн' % (case,))
if gram_form and not plural_form.match(expected_form):
continue
forms.append({
'word': '%s%s' % (name, plural_cases[i]),
'class': 'С',
'info': plural_form.get_form_string(),
'lemma': name,
'method': 'decline_lastname (%s)' % lastname,
'norm': '%s%s' % (name, plural_cases[0]),
})
    # Decline recursively to handle a surname given in the plural.
    # Козловых -> фам,им; Козловых (pl.) -> Козлов -> фам,им
if lemma != name and LASTNAME_PATTERN.match(lemma):
refinement = decline(lemma)
if refinement:
return forms + refinement
return forms
def normalize(morph, lastname, hints=''):
    '''
    Return the normal form (nominative case) of a surname for the given gender
    Parameters:
    * hints - hints about the source form of the surname ('мр' or 'жр',
      'мр' is assumed by default)
    '''
hints_form = GramForm(hints)
gender_tag = (hints_form.match_string('жр') or 'мр')
    # FIXME: this function returns the form itself, while Morph.normalize
    # returns a set of possible forms; one of the two should be fixed.
return inflect(morph, lastname, 'им,ед,%s' % gender_tag)
def inflect(morph, lastname, gram_form):
    '''
    Return the variant of the surname that corresponds to the given
    grammatical form
    Parameters:
    * morph - a Morph object
    * lastname - the surname to decline
    * gram_form - desired grammatical form tags (if 'жр' is absent
      from this parameter, 'мр' is assumed by default, or 'мр-жр' if
      'мн' is specified)
    '''
expected_form = GramForm(gram_form)
gender_tag = ('мр-жр' if expected_form.match_string('мн') else None)
if not gender_tag:
gender_tag = (expected_form.match_string('жр') or 'мр')
    # In a single pass, check that the source word can be declined as a
    # surname and pick the form that matches gram_form
present_in_decline = False
accepted = {}
for item in decline(lastname):
form = GramForm(item.get('info', ''))
        # If declension did not reproduce the source form, it is a false positive.
        # The gender check is mandatory: declining in the opposite gender
        # may also reproduce the source form, but only a match in the
        # requested gender counts
if item.get('word', '') == lastname:
            # When declining into the plural, gender is ignored:
            # the gender of every plural surname is мр-жр.
if expected_form.match_string('мн') or form.match_string(gender_tag):
present_in_decline = True
expected = form.match(expected_form)
if expected and not accepted:
accepted = item
        # No break here: present_in_decline may still not have been
        # set to its correct value
    # If declension did not reproduce the source form,
    # fall back to declining it as an ordinary word
if present_in_decline and accepted:
return accepted.get('word', '')
else:
return morph.inflect_ru(lastname, gram_form, smart_guess=False)
def get_graminfo(lastname):
    '''Return grammatical information about a surname and its normal form'''
info = []
for item in decline(lastname):
if item.get('word', '') == lastname:
info.append(item)
return info
def pluralize(morph, lastname, gram_form=''):
    '''
    Return the surname in the plural.
    Parameters:
    * morph - a Morph object
    * lastname - the surname to decline
    * gram_form - desired grammatical form tags
    '''
expected_form = GramForm(gram_form)
    # Remove gender and number markers from the desired form
refined_form = GramForm(gram_form).clear_gender().clear_number()
    # If gram_form is given, inflect into the specified form
if refined_form.get_form_string():
return inflect(
morph,
lastname,
','.join((refined_form.get_form_string(), 'мн',)))
    # Otherwise find the form of the source surname and inflect into the same
    # form in the plural; a gender given in the desired form is used as a hint
gender_tag = (expected_form.match_string('жр') or 'мр')
for item in decline(lastname):
form = GramForm(item.get('info', ''))
        # Check for the source form in the given gender (as in inflect())
if item.get('word', '') == lastname and form.match_string(gender_tag):
for case in ('им', 'рд', 'дт', 'вн', 'тв', 'пр'):
if form.match_string(case):
return inflect(morph, lastname, 'мн,%s' % case)
    # On failure, pluralize it as an ordinary word
return morph.pluralize_ru(lastname, gram_form)
def pluralize_inflected(morph, lastname, num, hints=''):
    '''
    Return the surname in a form that agrees with the given number.
    For example: 1 Попугаев, 2 Попугаевых, 5 Попугаевых.
    Parameters:
    * morph - a Morph object
    * lastname - the surname to decline
    * num - the number
    * hints - hints about the source form of the surname ('мр' or 'жр')
    '''
if num == 1:
return normalize(morph, lastname, hints)
hints_form = GramForm(hints)
gender_tag = (hints_form.match_string('жр') or 'мр')
return pluralize(morph, lastname, 'мн,рд,%s' % gender_tag)
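# Illustrative usage, assuming a configured pymorphy Morph instance `morph`
# (hypothetical values; the real output depends on the loaded dictionaries):
#   decline('ИВАНОВ')                          # all singular and plural case forms
#   inflect(morph, 'ИВАНОВ', 'дт')             # -> 'ИВАНОВУ'
#   pluralize_inflected(morph, 'ПОПУГАЕВ', 5)  # -> 'ПОПУГАЕВЫХ'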
|
[
"Kyzmenko_Pavel@mail.ru"
] |
Kyzmenko_Pavel@mail.ru
|
a961e5012524167cb669c64c86afbaa4d9707ed2
|
c104dbd09a853725cb4f4b17df7c5dd59d47e04e
|
/test/opsgenie_swagger/models/amazon_ses_integration.py
|
270f9fd25ff796979da446388dc26a706331bae3
|
[
"Apache-2.0"
] |
permissive
|
bm-lab/opsgenie-python-sdk
|
5a64e2c24f1b9168ecadf482ba8084ba27a659fc
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
refs/heads/master
| 2021-10-09T03:18:48.101672
| 2018-12-15T01:03:36
| 2018-12-20T15:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,335
|
py
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.integration import Integration # noqa: F401,E501
from opsgenie_swagger.models.recipient import Recipient # noqa: F401,E501
from opsgenie_swagger.models.team_meta import TeamMeta # noqa: F401,E501
from opsgenie_swagger.models.token_based_incoming_feature import TokenBasedIncomingFeature # noqa: F401,E501
class AmazonSesIntegration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_write_access': 'bool'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_write_access': 'allowWriteAccess'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, feature_type=None, allow_configuration_access=None, allow_write_access=None): # noqa: E501
"""AmazonSesIntegration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_write_access = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this AmazonSesIntegration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this AmazonSesIntegration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this AmazonSesIntegration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this AmazonSesIntegration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this AmazonSesIntegration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this AmazonSesIntegration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this AmazonSesIntegration. # noqa: E501
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:return: The recipients of this AmazonSesIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this AmazonSesIntegration.
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:param recipients: The recipients of this AmazonSesIntegration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this AmazonSesIntegration. # noqa: E501
:return: The is_advanced of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this AmazonSesIntegration.
:param is_advanced: The is_advanced of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def feature_type(self):
"""Gets the feature_type of this AmazonSesIntegration. # noqa: E501
:return: The feature_type of this AmazonSesIntegration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this AmazonSesIntegration.
:param feature_type: The feature_type of this AmazonSesIntegration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this AmazonSesIntegration. # noqa: E501
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:return: The allow_configuration_access of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this AmazonSesIntegration.
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this AmazonSesIntegration. # noqa: E501
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:return: The allow_write_access of this AmazonSesIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this AmazonSesIntegration.
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:param allow_write_access: The allow_write_access of this AmazonSesIntegration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AmazonSesIntegration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"c.chary@criteo.com"
] |
c.chary@criteo.com
|
1f3bfae4db8913dfd24255d46803ea63e87bd277
|
e5e0d729f082999a9bec142611365b00f7bfc684
|
/tensorflow/python/estimator/canned/dnn.py
|
cde0d955dfddd3a9ea810c9a1d333d593cb03465
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/tensorflow
|
ed6294098c7354dfc9f09631fc5ae22dbc278138
|
7cbba04a2ee16d21309eefad5be6585183a2d5a9
|
refs/heads/r1.15.5+nv23.03
| 2023-08-16T22:25:18.037979
| 2023-08-03T22:09:23
| 2023-08-03T22:09:23
| 263,748,045
| 763
| 117
|
Apache-2.0
| 2023-07-03T15:45:19
| 2020-05-13T21:34:32
|
C++
|
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import dnn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn.__all__ = [s for s in dir(dnn) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.canned.dnn import *
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
36532ee5c85b919df895937c26f85572f220875a
|
6a4e1e4f320ae81a8069fbe2587d62420976dbf1
|
/mysite/polls/urls.py
|
949fd485050af7d1c38bb5918e4fc6677c444057
|
[] |
no_license
|
YuriiKhomych/First-Django-App
|
2f192ec00be6f84318905d4b18595b1cbad2b955
|
440a4618053adddf3b647975218bb6a003260243
|
refs/heads/master
| 2020-03-18T16:17:20.352467
| 2018-08-22T06:44:13
| 2018-08-22T06:44:13
| 134,957,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
[
"yuriykhomich@gmail.com"
] |
yuriykhomich@gmail.com
|
1a2a79c4c2018e104f9467715ad305e56df8cec2
|
ad6cd0aa5d96ef7a4116ec20737a3026082b9e16
|
/src/test/directory_lister_test.py
|
d3008e8dc00832476b28d24880a822d71170e5c7
|
[
"MIT"
] |
permissive
|
pgecsenyi/router-fs
|
0a00ad9f5cb5995048aa7fe08c20ee7eaf0621b7
|
1d4f579fb3cccd022fe1ab0e61aa00693e7234c1
|
refs/heads/master
| 2020-04-19T06:47:26.791739
| 2019-04-15T18:21:13
| 2019-04-15T18:21:45
| 168,028,417
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
import unittest
from unittest.mock import patch
from filesystem.transformation.directory_lister import DirectoryLister
class DirectoryListerTest(unittest.TestCase):
@patch('os.walk')
def test_list_directory(self, mock_walk):
dirpath = '/home/root/doc/fruits'
expected_files = [dirpath + '/apple.txt', dirpath + '/banana.txt']
mock_walk.return_value = [(dirpath, [], ['apple.txt', 'banana.txt'])]
directory_lister = DirectoryLister(dirpath)
result = [i for i in directory_lister.list_directory()]
mock_walk.assert_called_once_with(dirpath)
self.assertEqual(sorted(expected_files), sorted(result))
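# Standard unittest entry point so the test module can be run directly.
if __name__ == '__main__':
    unittest.main()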
|
[
"pgecsenyi@protonmail.com"
] |
pgecsenyi@protonmail.com
|
80c576d5d209206e6331ae90eb60ec6b46958211
|
2aba3c043ce4ef934adce0f65bd589268ec443c5
|
/atcoder/ABC/033/A.py
|
9aec5d2d637f9b56938a2a336fe5925fe380c571
|
[] |
no_license
|
kambehmw/algorithm_python
|
4f66593b77039d90515d1fcbecacdab8c811b92f
|
17222399dcc92fd8f908e5774a9883e2e89c486e
|
refs/heads/master
| 2020-06-02T12:44:11.322356
| 2020-05-18T13:22:05
| 2020-05-18T13:22:05
| 191,157,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
N = input()
if len(set(N)) == 1:
print("SAME")
else:
print("DIFFERENT")
|
[
"kanbe.hmw@gmail.com"
] |
kanbe.hmw@gmail.com
|
0b2d748e251d874ac3698205a67fcbe62158aaa9
|
1156b7cde01b0cc467c22cfb75cde0a74887da1a
|
/bin/mongrey_web_sqlite.py
|
13ddaaf630e27e50d70faab7f82756f995022afd
|
[] |
no_license
|
davidpolet/mongrey-build
|
d1ee2e12ffbbd8061f4f2faa753572b3e328291f
|
8782b0e87474f2662cf35b3cb545516d76d9340d
|
refs/heads/master
| 2021-01-21T09:34:07.283966
| 2015-05-26T19:00:31
| 2015-05-26T19:00:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itsdangerous
import passlib
import flask_babelex
import babel
import blinker
import six
import flask
import redis
import arrow
import pygeoip
import regex
from werkzeug.contrib import cache
import peewee
import playhouse
import wtforms
import wtfpeewee
import simplekv
import flask_kvsession
from mongrey.web import settings
from mongrey.web.manager import main
from mongrey.web import extensions
from mongrey.storage.sql import models
main()
|
[
"stephane.rault@radicalspam.org"
] |
stephane.rault@radicalspam.org
|
17e23071e1107ae08f77200b82db87c0be5f516e
|
925fa0208e07ac2aeb64f9201249a91f48b900fa
|
/LeetCode/DP/MaxProductSubarray.py
|
a7f24f03a8350ba8ed682a2ef192a351af047334
|
[] |
no_license
|
Sanchi02/Dojo
|
984eb3cba26e43a8f6f0ef9c93f7aed24527b3ae
|
b25288c42a67d8639195f3fddef698f5cd179aac
|
refs/heads/master
| 2022-03-18T02:58:27.506082
| 2022-03-13T12:11:18
| 2022-03-13T12:11:18
| 197,040,319
| 0
| 0
| null | 2019-07-15T17:14:20
| 2019-07-15T17:06:36
| null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# https://leetcode.com/problems/maximum-product-subarray/
# Given an integer array nums, find a contiguous non-empty subarray within the array that has the largest product, and return the product.
# The test cases are generated so that the answer will fit in a 32-bit integer.
# A subarray is a contiguous subsequence of the array.
# Example 1:
# Input: nums = [2,3,-2,4]
# Output: 6
# Explanation: [2,3] has the largest product 6.
# Example 2:
# Input: nums = [-2,0,-1]
# Output: 0
# Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
# Constraints:
# 1 <= nums.length <= 2 * 10^4
# -10 <= nums[i] <= 10
# The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
from typing import List
class Solution:
def maxProduct(self, nums: List[int]) -> int:
rmaxV = max(nums)
maxV = 1
minV = 1
for n in nums:
tmp = maxV
maxV = max(n,n*maxV,n*minV)
minV = min(n,n*tmp,n*minV)
rmaxV = max(maxV,rmaxV)
return rmaxV
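# Hypothetical quick check mirroring the examples above:
#   Solution().maxProduct([2, 3, -2, 4])  # -> 6
#   Solution().maxProduct([-2, 0, -1])    # -> 0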
|
[
"sanchibadkas@gmail.com"
] |
sanchibadkas@gmail.com
|
a4523d04d79a2270fc27804ff1d7958b47125de7
|
96e38b89fa057fa0c1cf34e498b4624041dfc6e2
|
/BOJ/Implementation/Python/9816.py
|
08902f566b9df267a88b9d25bfa3f4149acf31de
|
[] |
no_license
|
malkoG/polyglot-cp
|
66059246b01766da3c359dbd16f04348d3c7ecd2
|
584763144afe40d73e72dd55f90ee1206029ca8f
|
refs/heads/master
| 2021-11-24T13:33:49.625237
| 2019-10-06T07:42:49
| 2019-10-06T07:42:49
| 176,255,722
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
while True:
s=input()
if s=='-1':
break
print("N={}:".format(s))
if s==s[::-1]:
print("No!!")
continue
counter = 0
while True:
if s=='6174' or s=='0':
break
ss=sorted(list(s))
rs=reversed(ss)
n1=int(''.join(ss))
n2=int(''.join(rs))
print("{}-{}={}".format(n2,n1,n2-n1))
s = str(n2-n1)
counter += 1
print("Ok!! {} times".format(counter))
|
[
"rijgndqw012@gmail.com"
] |
rijgndqw012@gmail.com
|
4dfd047c301995c55f6708521634af7a8edb0fcf
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/azurestackhci/azure-mgmt-azurestackhci/generated_samples/put_extension.py
|
bfff134a861b2f0791e6f673fdd6721b86ca5f88
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.azurestackhci import AzureStackHCIClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-azurestackhci
# USAGE
python put_extension.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureStackHCIClient(
credential=DefaultAzureCredential(),
subscription_id="fd3c3665-1729-4b7b-9a38-238e83b0f98b",
)
response = client.extensions.begin_create(
resource_group_name="test-rg",
cluster_name="myCluster",
arc_setting_name="default",
extension_name="MicrosoftMonitoringAgent",
extension={
"properties": {
"extensionParameters": {
"protectedSettings": {"workspaceKey": "xx"},
"publisher": "Microsoft.Compute",
"settings": {"workspaceId": "xx"},
"type": "MicrosoftMonitoringAgent",
"typeHandlerVersion": "1.10",
}
}
},
).result()
print(response)
# x-ms-original-file: specification/azurestackhci/resource-manager/Microsoft.AzureStackHCI/preview/2021-09-01-preview/examples/PutExtension.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
c9e40167565b06b7278440ba15f8c57c606277f2
|
c65d512975feed7dfe74f1117cdd1337293d9d60
|
/python/my_py_notes_万物皆对象/db_and_数据持久化/sqlite3/hm_cards/cards_main.py
|
d1dfa4992d1e534df7edb53c99e41041e76f22ad
|
[] |
no_license
|
Rockyzsu/StudyRepo
|
e5c6420e325917c2df7dc51d606be5fa3c2ee1b8
|
385785c09bebb56df156fd149a088043f38d0aab
|
refs/heads/master
| 2022-12-09T13:45:38.332899
| 2020-09-15T09:56:09
| 2020-09-15T09:56:09
| 295,388,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# Main business logic of the app
# coding: utf-8
import os
import cards_utils
def init_menu():
while True:
print('欢迎使用<名片系统> v0.1beta'.center(35, '*'))
print('1.新建名片'.center(35, ' '))
print('2.显示全部'.center(35, ' '))
print('3.查询名片'.center(35, ' '))
print('4.删除名片'.center(35, ' '))
print('5.更改名片'.center(35, ' '))
print('0.退出'.center(35, ' '))
print(''.center(40, '*'))
msg = int(input('请输入功能编号:'))
if msg == 1:
cards_utils.new_card()
elif msg == 2:
os.system('clear')
cards_utils.show_all_cards()
input('请输入任意值继续')
os.system('clear')
elif msg == 3:
os.system('clear')
cards_utils.index_card()
input('请输入任意值继续')
elif msg == 4:
os.system('clear')
cards_utils.del_card()
input('请输入任意值继续')
elif msg == 5:
os.system('clear')
elif msg == 0:
# os.system('clear')
print('欢迎再次使用!')
break
else:
print('输入错误,请重新输入!!')
input('请输入任意值继续')
os.system('clear')
init_menu()
|
[
"jinweizsu@gmail.com"
] |
jinweizsu@gmail.com
|
cc52b3e3d8f14a3a38726bf19dbd85b7b8c7d351
|
fc948981497ccbf47dcc8f039845ffb153a41140
|
/03_Bigdata/02_Standardization_Analysis/2. Excel/2excel_introspect_workbook.py
|
31e4aaaa19941801c9629ec772eb955e4db193ec
|
[] |
no_license
|
jeongwoohong/iot_python2019
|
683b8d46b4035991700ae2caaee17d5db0743bd6
|
799733e845ab8eea3a6f9fa6a4c5acce66fce6f7
|
refs/heads/master
| 2022-03-04T17:17:15.456408
| 2019-11-08T00:10:57
| 2019-11-08T00:10:57
| 195,142,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
import sys
from xlrd import open_workbook
from xlwt import Workbook
input_file = sys.argv[1]
output_file = sys.argv[2]
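# Usage sketch (hypothetical file names):
#   python 2excel_introspect_workbook.py january_2013.xls introspected.xls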
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')
with open_workbook(input_file) as workbook:
worksheet = workbook.sheet_by_name('january_2013')
for row_index in range(worksheet.nrows):
for column_index in range(worksheet.ncols):
output_worksheet.write(row_index, column_index, worksheet.cell_value(row_index, column_index))
output_workbook.save(output_file)
|
[
"you@example.com"
] |
you@example.com
|
9216cdb40ef5e1eb0ed05d01e645f8aa233560ae
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/recovery/reconcilenode.py
|
f4bbdb7e9340f5a7a571edb9da7bfda459899c5c
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,756
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ReconcileNode(Mo):
"""
"""
meta = ClassMeta("cobra.model.recovery.ReconcileNode")
meta.moClassName = "recoveryReconcileNode"
meta.rnFormat = "recnode-%(nodeId)s"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.recovery.ReconcileCont")
meta.rnPrefixes = [
('recnode-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "mode", "mode", 35638, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "recover"
prop._addConstant("checker", "checker", 0)
prop._addConstant("fixer", "fixer", 1)
prop._addConstant("recover", "recover", 2)
meta.props.add("mode", prop)
prop = PropMeta("str", "nodeId", "nodeId", 21575, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16000)]
prop.defaultValue = 1
prop.defaultValueStr = "1"
meta.props.add("nodeId", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "nodeId"))
def __init__(self, parentMoOrDn, nodeId, markDirty=True, **creationProps):
namingVals = [nodeId]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
1279a0cef6482bd7c047fe05ad3f1b488ed3388f
|
6ceea2578be0cbc1543be3649d0ad01dd55072aa
|
/src/fipy/meshes/pyMesh/grid2D.py
|
193a3d348a7be54653662f81bb8ef78cfb142fc6
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
regmi/fipy
|
57972add2cc8e6c04fda09ff2faca9a2c45ad19d
|
eb4aacf5a8e35cdb0e41beb0d79a93e7c8aacbad
|
refs/heads/master
| 2020-04-27T13:51:45.095692
| 2010-04-09T07:32:42
| 2010-04-09T07:32:42
| 602,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,607
|
py
|
#!/usr/bin/env python
## -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "grid2D.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
"""2D rectangular Mesh
"""
__docformat__ = 'restructuredtext'
from fipy.tools import numerix
from fipy.meshes.pyMesh.mesh import Mesh
from fipy.meshes.pyMesh.vertex import Vertex
from fipy.meshes.pyMesh.face2D import Face2D
from fipy.meshes.pyMesh.cell import Cell
from fipy.tools.dimensions.physicalField import PhysicalField
class Grid2D(Mesh):
"""2D rectangular Mesh
Numbering system
nx=5
ny=3
Cells::
*************************************
* * * * * *
* 10 * 11 * 12 * 13 * 14 *
*************************************
* * * * * *
* 5 * 6 * 7 * 8 * 9 *
*************************************
* * * * * *
* 0 * 1 * 2 * 3 * 4 *
*************************************
Faces (before reordering)::
***15******16*****17******18****19***
* * * * * *
32 33 34 35 36 37
***10******11*****12******13*****14**
* * * * * *
26 27 28 29 30 31
***5*******6******7*******8******9***
* * * * * *
20 21 22 23 24 25
***0*******1******2*******3******4***
Faces (after reordering)::
***27******28*****29******30****31***
* * * * * *
34 18 19 20 21 37
***5*******6******7*******8******9***
* * * * * *
33 14 15 16 17 36
***0*******1******2*******3******4***
* * * * * *
32 10 11 12 13 35
***22******23*****24******25*****26**
Vertices::
18*****19*****20******21*****22****23
* * * * * *
* * * * * *
12*****13*****14******15*****16****17
* * * * * *
* * * * * *
6******7******8*******9******10****11
* * * * * *
* * * * * *
0******1******2*******3******4******5
"""
def __init__(self, dx, dy, nx, ny):
"""Grid2D is initialized by caller
:Parameters:
- `dx`: dimension of each cell in **x** direction
- `dy`: dimension of each cell in **y** direction
- `nx`: number of cells in **x** direction
- `ny`: number of cells in **y** direction
"""
self.nx=nx
self.ny=ny
self.dx=PhysicalField(value = dx)
self.dy=PhysicalField(value = dy)
self.scale = PhysicalField(value = 1, unit = self.dx.getUnit())
self.dx /= self.scale
self.dy /= self.scale
vertices = self._createVertices()
rowFaces,colFaces = self._createFaces(vertices)
cells = self._createCells(rowFaces,colFaces)
faces,interiorFaces = self._reorderFaces(rowFaces,colFaces)
Mesh.__init__(self, cells, faces, interiorFaces, vertices)
def _createVertices(self):
"""Return list of `Vertex` objects
"""
vertices = ()
ny=self.ny
nx=self.nx
dx=self.dx
dy=self.dy
for j in range(ny+1):
for i in range(nx+1):
vertices += (Vertex(numerix.array([i * dx, j * dy],'d')),)
## vertices += (Vertex(PhysicalField(value = [i * dx, j * dy])),)
return vertices
def _createFaces(self, vertices):
"""Return 2-`tuple` of `Face` objects bounded by `vertices`.
First `tuple` are the `Face` objects that separate rows of `Cell` objects.
        Second `tuple` are the `Face` objects that separate columns of `Cell`
        objects. These initial lists are laid out for efficiency of composing
and indexing into the lists to compose `Cell` objects. They will
subsequently be reordered for efficiency of computations.
"""
nx=self.nx
ny=self.ny
id = 0
rowFaces = ()
for j in range(ny+1):
oneRow = ()
for i in range(nx):
oneRow += (Face2D((vertices[i + j * (nx + 1)],vertices[i + 1 + j * (nx + 1)]),id),)
id += 1
rowFaces += (oneRow,)
colFaces = []
for j in range(ny):
oneCol = ()
for i in range(nx+1):
oneCol += (Face2D((vertices[i + j * (nx + 1)],vertices[i + (j + 1) * (nx + 1)]),id),)
id += 1
colFaces += (oneCol,)
return (rowFaces,colFaces)
def _reorderFaces(self,rowFaces,colFaces):
"""Return a `tuple` of `Face` objects ordered for best efficiency.
Composed from `rowFaces` and `colFaces` such that all interior faces
are listed contiguously, rows then columns, followed by all boundary
faces, rows then columns.
"""
interiorFaces = ()
for rowFace in rowFaces[1:-1]:
interiorFaces += rowFace
for colFace in colFaces:
interiorFaces += colFace[1:-1]
faces = interiorFaces
faces += rowFaces[0] + rowFaces[-1]
for colFace in colFaces:
faces += (colFace[0],)
for colFace in colFaces:
faces += (colFace[-1],)
id = 0
for face in faces:
face._setID(id)
id += 1
return (faces, interiorFaces)
def _createCells(self,rowFaces,colFaces):
"""Return list of `Cell` objects.
"""
nx=self.nx
ny=self.ny
cells = ()
for j in range(ny):
for i in range(nx):
id = j * nx + i
cells += (
Cell(
faces = (rowFaces[j][i],
rowFaces[j+1][i],
colFaces[j][i],
colFaces[j][i+1]),
faceOrientations = (-1,1,1,-1),
id = id
),
)
return cells
def _createInteriorFaces(self,faces):
"""Return list of faces that are not on boundary of Grid2D.
"""
interiorFaces = ()
for face in faces:
if len(face.getCells()) == 2:
interiorFaces += (face,)
return interiorFaces
def getFacesLeft(self):
"""
Return list of faces on left boundary of Grid2D with the
x-axis running from left to right.
"""
nx=self.nx
ny=self.ny
start = len(self.interiorFaces) + 2 * nx
return self.faces[start:start + ny]
def getFacesRight(self):
"""
Return list of faces on right boundary of Grid2D with the
x-axis running from left to right.
"""
nx=self.nx
ny=self.ny
start = len(self.interiorFaces) + 2 * nx + ny
return self.faces[start:start + ny]
def getFacesTop(self):
"""
Return list of faces on top boundary of Grid2D with the
y-axis running from bottom to top.
"""
nx=self.nx
start = len(self.interiorFaces) + nx
return self.faces[start:start + nx]
def getFacesBottom(self):
"""
Return list of faces on bottom boundary of Grid2D with the
y-axis running from bottom to top.
"""
nx=self.nx
start = len(self.interiorFaces)
return self.faces[start:start + nx]
def getShape(self):
"""Return cell dimensions `Grid2D`.
"""
return (self.nx,self.ny)
def getPhysicalShape(self):
"""Return physical dimensions of Grid2D.
"""
return PhysicalField(value = (self.nx * self.dx * self.getScale(), self.ny * self.dy * self.getScale()))
def _getMaxFacesPerCell(self):
return 4
def _getFaceAreas(self):
return Mesh._getFaceAreas(self) * self.getScale()
def getCellVolumes(self):
        if self.getScale() == 1:
return Mesh.getCellVolumes(self)
else:
return Mesh.getCellVolumes(self) * self.getScale() * self.getScale()
def getCellCenters(self):
        if self.getScale() == 1:
return Mesh.getCellCenters(self)
else:
return Mesh.getCellCenters(self) * self.getScale()
def _getCellDistances(self):
        if self.getScale() == 1:
return Mesh._getCellDistances(self)
else:
return Mesh._getCellDistances(self) * self.getScale()
def _getFaceToCellDistances(self):
        if self.getScale() == 1:
return Mesh._getFaceToCellDistances(self)
else:
return Mesh._getFaceToCellDistances(self) * self.getScale()
def _getMeshSpacing(self):
return PhysicalField(value = ((self.dx * self.getScale(),),(self.dy * self.getScale(),)))
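# Minimal usage sketch (assuming the fipy.meshes.pyMesh imports above resolve):
#   mesh = Grid2D(dx=1., dy=1., nx=5, ny=3)
#   mesh.getShape()             # -> (5, 3)
#   len(mesh.getFacesBottom())  # -> 5, one face per bottom-row cell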
|
[
"regmisk@gmail.com"
] |
regmisk@gmail.com
|
b3337f038c0d0beab33e9d4d79a6151c08668cc0
|
f47a1c59fb69e2005c6e87db254b156f2b49ad65
|
/trajan/core/urls.py
|
79f7c2ecaa96f671a939cb277bbeb7635c6e2a5b
|
[] |
no_license
|
wd5/trajan
|
ac1206345bd359b01aa8312641ed5f545d844dc3
|
cd7b48c38c31cf63dabf19cee3d76cd6c09d887d
|
HEAD
| 2016-09-10T03:25:50.199591
| 2012-12-26T19:39:49
| 2012-12-26T19:39:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.conf.urls.defaults import patterns, include, url
from trajan.core.api import PageResource
from tastypie.api import Api
from django.views.generic.simple import direct_to_template
v1_api = Api(api_name='v1')
v1_api.register(PageResource())
urlpatterns = patterns('',
url(r'^$', direct_to_template, {'template': 'core/home.html'}, name="homepage"),
url(r'^(?P<page_slug>[-\w]+)/$', 'trajan.core.views.render_page'),
url(r'^pages/api/', include(v1_api.urls)),
)
|
[
"dstegelman@gmail.com"
] |
dstegelman@gmail.com
|
89384c946bc98181b410bdbd2524b8ff13b12143
|
40af81296e8f07788f8b613643a62ae23b2063d6
|
/hw0_release/.env/share/doc/networkx-2.0/examples/graph/words.py
|
065a96678d591b179a1ffc957c0f82e9c26c53ad
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
ChienyehLin/Computer_Vision_CS131
|
97b6c89d86b747deb80b2d643bdb66f6f5432a4a
|
de23015ac08a899adfd03ff28485c7b6a74d4d1e
|
refs/heads/master
| 2022-11-28T08:49:06.345587
| 2020-02-28T07:29:54
| 2020-02-28T07:29:54
| 228,156,278
| 3
| 0
|
NOASSERTION
| 2022-11-22T02:10:30
| 2019-12-15T09:01:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
"""
=====
Words
=====
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile `words_dat.txt.gz`. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book (see [1]_ and [2]_).
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg (hagberg@lanl.gov),
# Brendt Wohlberg,
# hughdbrown@yahoo.com
# Copyright (C) 2004-2017 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import gzip
from string import ascii_lowercase as lowercase
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
G = nx.Graph(name="words")
lookup = dict((c, lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i + 1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j + 1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
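# e.g. 'chaos' and 'chats' differ only in their fourth letter, so
# generate_graph({'chaos', 'chats'}) returns a graph with that single edge.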
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
fh = gzip.open('words_dat.txt.gz', 'r')
words = set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w = str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
G = words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print("%d connected components" % nx.number_connected_components(G))
for (source, target) in [('chaos', 'order'),
('nodes', 'graph'),
('pound', 'marks')]:
print("Shortest path between %s and %s is" % (source, target))
try:
sp = nx.shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
[
"linchienyeh_jaden@outlook.com"
] |
linchienyeh_jaden@outlook.com
|
b0a123cf6c2621279d5a96f58ca86274001fbb83
|
2d4380518d9c591b6b6c09ea51e28a34381fc80c
|
/CIM16/CDPSM/Geographical/IEC61970/Wires/LoadBreakSwitch.py
|
f2ffb57bbe01797bc0ff4c7e1a391185331dd6af
|
[
"MIT"
] |
permissive
|
fran-jo/PyCIM
|
355e36ae14d1b64b01e752c5acd5395bf88cd949
|
de942633d966bdf2bd76d680ecb20517fc873281
|
refs/heads/master
| 2021-01-20T03:00:41.186556
| 2017-09-19T14:15:33
| 2017-09-19T14:15:33
| 89,480,767
| 0
| 1
| null | 2017-04-26T12:57:44
| 2017-04-26T12:57:44
| null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.CDPSM.Geographical.IEC61970.Core.ConductingEquipment import ConductingEquipment
class LoadBreakSwitch(ConductingEquipment):
"""A mechanical switching device capable of making, carrying, and breaking currents under normal operating conditions.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'LoadBreakSwitch' instance.
"""
super(LoadBreakSwitch, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
|
[
"fran_jo@hotmail.com"
] |
fran_jo@hotmail.com
|
821885c333f39e248c3b4d48680946323bb48106
|
0fb3b73f8e6bb9e931afe4dcfd5cdf4ba888d664
|
/myapi/fullfeblog/webdev/urls.py
|
975db698ce1d8db571ad43d6fed54ff0e576c3a5
|
[] |
no_license
|
mrpal39/ev_code
|
6c56b1a4412503604260b3346a04ef53a2ba8bf2
|
ffa0cf482fa8604b2121957b7b1d68ba63b89522
|
refs/heads/master
| 2023-03-24T03:43:56.778039
| 2021-03-08T17:48:39
| 2021-03-08T17:48:39
| 345,743,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
from django.conf.urls import url, include
# from .. import core
sitemaps={
'posts':PostSitemap,
}
urlpatterns = [
path('admin/', admin.site.urls, ),
path('',include('blog.urls')),
path('core/',include('core.urls')),
path('api/',include('api.urls')),
# path('oauth/',include('oauth.urls')),
path('accounts/', include('allauth.urls')),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rp9545416@gmail.com"
] |
rp9545416@gmail.com
|
ae78afbd5d7364b1b0f03645d1d1d6ca47cd0ec9
|
d6716aade568d34adc4152aa83be2b19df30b58f
|
/yt_pb71_cs/str_repr_diff.py
|
ee2311e00ec53b10a27d2e89e63b07b6c7c48594
|
[] |
no_license
|
salma-shaik/python-projects
|
a47e7ba79284b6ae9d3cf9489f1d21c12d573ce5
|
ba0234844e1ad938271486ec8c0aac0954326ad5
|
refs/heads/master
| 2021-01-19T22:46:46.433144
| 2018-09-03T19:43:42
| 2018-09-03T19:43:42
| 88,865,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# The goal of __repr__ is to be unambiguous
# The goal of __str__ is to be readable
# a = [1, 2, 3, 4]
# b = 'sample string'
#
# print(str(a))
# print(repr(a))
#
# print(str(b))
# print(repr(b))
import datetime
import pytz
a = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
b = str(a)
print('str(a): {}'.format(str(a)))
print('repr(a): {}'.format(repr(a)))
print()
print('str(b): {}'.format(str(b)))
print('repr(b): {}'.format(repr(b)))
print()
c = 3+2
d = str(c)
print('str(c): {}'.format(str(c)))
print('repr(c): {}'.format(repr(c)))
print()
print('str(d): {}'.format(str(d)))
print('repr(d): {}'.format(repr(d)))
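# Typical output for the datetime case (exact values vary with the current time):
#   str(a):  2018-09-03 19:43:42.123456+00:00
#   repr(a): datetime.datetime(2018, 9, 3, 19, 43, 42, 123456, tzinfo=<UTC>)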
|
[
"salmashaik1611@gmail.com"
] |
salmashaik1611@gmail.com
|
f805830a20c2ba67d1a4bd9b2b0a978cc9522401
|
8f736b5cc28cc1d46506abf1b001eb41cc1f9423
|
/apps/users/signals.py
|
77ba064c53a024e746d0ffa4db5b81f43702a965
|
[] |
no_license
|
tang1323/MxShop
|
6ac68502f59ae07b483b6145e1b557399192e3dd
|
831b5bdd8abdf7d6e547b0bd3fff9341261e4afa
|
refs/heads/master
| 2023-04-04T07:09:32.759476
| 2021-04-14T14:36:00
| 2021-04-14T14:36:00
| 357,937,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth import get_user_model
# Return this project's user model (a Django built-in helper)
User = get_user_model()
# This is a decorator; sender is the model whose signal we receive
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):  # created: whether the user is newly created
    # The password can only be hashed when the user is newly created
if created:
password = instance.password
        # instance is our User object; its set_password method hashes the password
instance.set_password(password)
        # After wiring up the signal, add "def ready(self): import users.signals" in apps.py
instance.save()
    # Since we use JWT, the DRF token is no longer used
# Token.objects.create(user=instance)
"""
使用django自己内置的 Model signals信号,它会自己发送
如果设置其它的信号,就要自己去发送,再接收
"""
|
[
"1171242903@qq.com"
] |
1171242903@qq.com
|
88e07364d82188f3d72bd929b38d3ef1008e5d7c
|
8b39393897cd4cdf47d6520607aac094ec535779
|
/workflows/tests/cosmo/celery.py
|
635c70b01d5db27cf9013967a775b89b3b12fea8
|
[] |
no_license
|
yishaibeeri/cosmo-manager
|
d4debde3ff20e82b70514aea6991c7fd036f6bf3
|
9bc2b80cc360098939d6a6efe726d5df59f6982a
|
refs/heads/master
| 2021-01-21T18:21:29.711024
| 2014-01-13T09:13:37
| 2014-01-13T09:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from celery import Celery
from cosmo import includes
from celery.signals import after_setup_task_logger
import logging
__author__ = 'idanmo'
celery = Celery('cosmo.celery',
broker='amqp://',
backend='amqp://',
include=includes)
# Optional configuration, see the application user guide.
celery.conf.update(
CELERY_TASK_SERIALIZER="json",
CELERY_DEFAULT_QUEUE="cloudify.management"
)
@after_setup_task_logger.connect
def setup_logger(loglevel=None, **kwargs):
logger = logging.getLogger("cosmo")
if not logger.handlers:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('| %(message)s'))
logger.addHandler(handler)
logger.setLevel(loglevel)
logger.propagate = True
if __name__ == '__main__':
celery.start()
|
[
"idan@gigaspaces.com"
] |
idan@gigaspaces.com
|
62c86946bd35096e59efdd673f88673fc50b9f53
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp/sblp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=80/params.py
|
805a7c663cc5294f124978200a747eb82a7c714a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.527714',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 80,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
fcb4615ea3d1c2f22d029cad3e4f73a185db48ae
|
3152fd9ec9ccd83b6e0d2ea40aa36a4b145aea2e
|
/temp/test_打印正方形.py
|
69d148e2251622402e2f8c77af081ab51a935b17
|
[] |
no_license
|
derekduan1028/hm_python
|
cf1b6037ac1cde8dcac393453a291c39b5a936c2
|
ae79f817a55d1b3bfdbdf1b50d5147946c8b7401
|
refs/heads/master
| 2023-01-21T00:33:00.927709
| 2020-11-27T00:00:35
| 2020-11-27T00:00:35
| 291,869,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/python
# coding:utf-8
"""
@author:derek
@contact:derek_duan@sina.com
@file: test_打印正方形.py
@time: 11/18/20 4:45 PM
"""
def print_lines(str1, str2, wide):
print(str1, end=" ")
for i in range(wide):
print(str2, end=" ")
print(str1, end=" ")
for i in range(wide):
print(str2, end=" ")
print(str1, end=" ")
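# Draws a 2x2 grid of squares: print_lines renders one text row with three
# vertical borders, and every fifth row (j = 0, 5, 10) is a horizontal border.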
a = 10
for j in range(0, 11):
if j % 5 == 0:
print_lines("+", "-", a)
print("\t")
else:
print_lines("|", " ", a)
print("\t")
|
[
"derek@Derek-Mbp"
] |
derek@Derek-Mbp
|
06dca00fa6a330d2a68438a2972b67d9f16a64a1
|
426521e1689f70732222efd5f98675014e361964
|
/youtube_dl/extractor/afreecatv.py
|
518c61f67eb0befa0ce59fb393d10d8ebd4dcc03
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
DalavanCloud/youtube-dl
|
8b6f34e8e8dc296df6ee7c12fdf91688092f2df7
|
c8f45f763cac3c0d0e4ca35ba072d8d321957e85
|
refs/heads/master
| 2020-04-13T06:36:38.023940
| 2016-09-27T16:03:00
| 2016-09-27T16:03:00
| 163,026,015
| 1
| 0
|
Unlicense
| 2018-12-24T22:04:42
| 2018-12-24T22:04:42
| null |
UTF-8
|
Python
| false
| false
| 4,991
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
xpath_element,
xpath_text,
)
class AfreecaTVIE(InfoExtractor):
IE_DESC = 'afreecatv.com'
_VALID_URL = r'''(?x)^
https?://(?:(live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
(?:
/app/(?:index|read_ucc_bbs)\.cgi|
/player/[Pp]layer\.(?:swf|html))
\?.*?\bnTitleNo=(?P<id>\d+)'''
_TESTS = [{
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
'info_dict': {
'id': '36164052',
'ext': 'mp4',
'title': '데일리 에이프릴 요정들의 시상식!',
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
'upload_date': '20160503',
}
}, {
'url': 'http://afbbs.afreecatv.com:8080/app/read_ucc_bbs.cgi?nStationNo=16711924&nTitleNo=36153164&szBjId=dailyapril&nBbsNo=18605867',
'info_dict': {
'id': '36153164',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
'uploader': 'dailyapril',
'uploader_id': 'dailyapril',
},
'playlist_count': 2,
'playlist': [{
'md5': 'd8b7c174568da61d774ef0203159bf97',
'info_dict': {
'id': '36153164_1',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'upload_date': '20160502',
},
}, {
'md5': '58f2ce7f6044e34439ab2d50612ab02b',
'info_dict': {
'id': '36153164_2',
'ext': 'mp4',
'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
'upload_date': '20160502',
},
}],
}, {
'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
'only_matching': True,
}]
@staticmethod
def parse_video_key(key):
video_key = {}
m = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key)
if m:
video_key['upload_date'] = m.group('upload_date')
video_key['part'] = m.group('part')
return video_key
def _real_extract(self, url):
video_id = self._match_id(url)
parsed_url = compat_urllib_parse_urlparse(url)
info_url = compat_urlparse.urlunparse(parsed_url._replace(
netloc='afbbs.afreecatv.com:8080',
path='/api/video/get_video_info.php'))
video_xml = self._download_xml(info_url, video_id)
if xpath_element(video_xml, './track/video/file') is None:
raise ExtractorError('Specified AfreecaTV video does not exist',
expected=True)
title = xpath_text(video_xml, './track/title', 'title')
uploader = xpath_text(video_xml, './track/nickname', 'uploader')
uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id')
duration = int_or_none(xpath_text(video_xml, './track/duration',
'duration'))
thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail')
entries = []
for i, video_file in enumerate(video_xml.findall('./track/video/file')):
video_key = self.parse_video_key(video_file.get('key', ''))
if not video_key:
continue
entries.append({
'id': '%s_%s' % (video_id, video_key.get('part', i + 1)),
'title': title,
'upload_date': video_key.get('upload_date'),
'duration': int_or_none(video_file.get('duration')),
'url': video_file.text,
})
info = {
'id': video_id,
'title': title,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'thumbnail': thumbnail,
}
if len(entries) > 1:
info['_type'] = 'multi_video'
info['entries'] = entries
elif len(entries) == 1:
info['url'] = entries[0]['url']
info['upload_date'] = entries[0].get('upload_date')
else:
raise ExtractorError(
'No files found for the specified AfreecaTV video, either'
' the URL is incorrect or the video has been made private.',
expected=True)
return info
|
[
"peter@pmrowla.com"
] |
peter@pmrowla.com
|
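parse_video_key above turns a file key such as `<yyyymmdd>_<id>_<part>` into the upload date and part number, returning an empty dict when the key does not match. A quick sanity check (the key values are invented to fit the regex):
# illustrative only: sample keys invented to match
# r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$'
from youtube_dl.extractor.afreecatv import AfreecaTVIE

assert AfreecaTVIE.parse_video_key('20160502_abc_2') == {
    'upload_date': '20160502', 'part': '2'}
assert AfreecaTVIE.parse_video_key('not-a-key') == {}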
4fe49b4538b78e6aef54b68010f7bf3670fe30d9
|
8a53b6e78ee6bc66bbf83d78fedef20e44e40809
|
/braceyourselfassignmentsarecoming/sudoku.py
|
00a3b61d94dbf5681019330623f67e0699604bba
|
[] |
no_license
|
chintanbetrabet/ChessAI
|
cc5c6dfa91c0ba5a0b6de1cc705092cf996bcdcb
|
3d9ebd96330623ab48f7f758cc8ad3b61eb79d55
|
refs/heads/master
| 2021-07-02T05:57:58.524808
| 2017-09-22T09:32:50
| 2017-09-22T09:32:50
| 104,456,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,450
|
py
|
import copy
import os
import time
class Square():
def __init__(self,show,row,col,legal):
self.show=show
self.row=row
self.col=col
self.legal=copy.deepcopy(legal)
def update(self):
if len(self.legal)==1:
self.show=str(self.legal[0])
else :
if self.show!='.':
self.legal=self.legal[0:0]
self.legal.append(int(self.show))
class Sudoku():
def __init__(self,fil):
self.Puzzle=[]
pos=0
univ_legal=[1,2,3,4,5,6,7,8,9]
f=open(fil,'r')
for i in range(9):
line=f.readline()
for j in range(9):
if line[j]=='.':
legal=copy.deepcopy(univ_legal)
else:
legal=[line[j]]
add=Square(line[j],i,j,legal)
self.Puzzle.append(add)
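    # NOTE: the second __init__ below silently replaces this file-based
    # constructor; only the string-based form is used by the driver at the
    # bottom of the file.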
def __init__(self,string):
self.Puzzle=[]
pos=0
univ_legal=[1,2,3,4,5,6,7,8,9]
line=string.split()
for i in range(9):
for j in range(9):
if line[9*i+j]=='.':
legal=copy.deepcopy(univ_legal)
else:
legal=[line[i*9+j]]
add=Square(line[i*9+j],i,j,legal)
self.Puzzle.append(add)
def ret_box(self,i,j):
start_i=i-i%3
end_i=start_i+2
start_j=j-j%3
end_j=start_j+2
return self.ret_lis(start_i,end_i,start_j,end_j)
def ret_col(self,i,j):
start_i=0
end_i=8
start_j=j
end_j=j
return self.ret_lis(start_i,end_i,start_j,end_j)
def ret_row(self,i,j):
start_i=i
end_i=i
start_j=0
end_j=8
return self.ret_lis(start_i,end_i,start_j,end_j)
def ret_lis(self,i,i1,j,j1):
start_i=i
start_j=j
lis=[]
while i<=i1:
j=start_j
while j<=j1:
if self.Puzzle[9*i+j].show!='.' and self.Puzzle[9*i+j].show!='0' :
lis.append(int(self.Puzzle[9*i+j].show))
j+=1
i+=1
return lis
    def upgrade(self,pos):
        if self.Puzzle[pos].show=='.' or self.Puzzle[pos].show=='0' :
            # iterate over copies of legal: removing from a list while
            # iterating over it skips elements
            lis=self.ret_col(pos/9,pos%9)
            for x in self.Puzzle[pos].legal[:]:
                if x in lis:
                    self.Puzzle[pos].legal.remove(int(x))
            lis=self.ret_row(pos/9,pos%9)
            for x in self.Puzzle[pos].legal[:]:
                if x in lis and len(lis)>1:
                    self.Puzzle[pos].legal.remove(int(x))
            lis=self.ret_box(pos/9,pos%9)
            for x in self.Puzzle[pos].legal[:]:
                if x in lis and len(lis)>1:
                    self.Puzzle[pos].legal.remove(int(x))
            self.Puzzle[pos].update()
def do_move(self,pos):
if self.Puzzle[pos].show=='.' or self.Puzzle[pos].show=='0':
self.move_col(pos/9,pos%9)
self.move_row(pos/9,pos%9)
self.move_box(pos/9,pos%9)
self.Puzzle[pos].update()
def print_legal_on_demand(self,i,i1,j,j1):
start_j=j
while i<=i1:
j=start_j
while j<=j1:
print self.Puzzle[9*i+j].legal
j+=1
i+=1
def show_puz(self):
pos=0
for i in range(9):
print
print ' '.join(self.Puzzle[9*i+j].show for j in range(9))
def show_puz2(self):
pos=0
for i in range(9):
#print
print ' '.join(self.Puzzle[9*i+j].show for j in range(9)),
print
def move_box(self,i,j):
start_i=i-i%3
end_i=start_i+2
start_j=j-j%3
end_j=start_j+2
return self.make_move(start_i,end_i,start_j,end_j)
def move_col(self,i,j):
start_i=0
end_i=8
start_j=j
end_j=j
return self.make_move(start_i,end_i,start_j,end_j)
def move_row(self,i,j):
start_i=i
end_i=i
start_j=0
end_j=8
return self.make_move(start_i,end_i,start_j,end_j)
def make_move(self,i,i1,j,j1):
start_i=i
start_j=j
special=0
for num in range(1,10):
count=0
move_pos=-1
i=start_i
while i<=i1:
j=start_j
while j<=j1:
pos=9*i+j
if len(self.Puzzle[pos].legal)==1 and int(self.Puzzle[pos].show)==num:
count=-100000
if len(self.Puzzle[pos].legal)!=1:
for x in self.Puzzle[pos].legal:
if int(x)==num:
if count==0:
move_pos=pos
count+=1
j+=1
i+=1
if count==1 and self.Puzzle[move_pos].show=='.':
self.Puzzle[move_pos].show=str(num)
                self.Puzzle[move_pos].legal=copy.deepcopy([num])  # fixed typo: was .level, an attribute Square never defines
self.Puzzle[move_pos].update()
for p in range(81):
self.upgrade(p)
#self.show_puz();
#raw_input();
def fil_count(pu):
count=0
for i in range(81):
if pu.Puzzle[i].show!='.':
count+=1
return count
t=input()
while(t>0):
t-=1
x=raw_input()
x1=""
for i in x:
if i =='0':
x1+='.'
else:
x1+=i
#print x1
#t=time.clock()
#sud=Sudoku("sud.txt")
sud=Sudoku(x1)
#sud.show_puz()
last=fil_count(sud)
j=0
while last!=81:
#print "last=%d"%last
for i in range(81):
sud.upgrade(i)
#sud.show_puz()
if j>0 and last==fil_count(sud):
for i in range(81):
sud.do_move(i)
last=fil_count(sud)
j+=1
#t=time.clock()-t1
#print "after time %f"%t
sud.show_puz2()
#raw_input("donne")
|
[
"chintanbetrabet@gmail.com"
] |
chintanbetrabet@gmail.com
|
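The string constructor above expects 81 whitespace-separated cells, and the driver converts '0' to '.' before building the puzzle. A minimal Python 2 sketch of constructing a puzzle directly, assuming the interactive driver at the bottom of the file is removed; the grid is a standard published sample, not from the original source:
# sample grid (Project Euler problem 96, grid 1); '0' marks blanks
grid = ("003020600900305001001806400"
        "008102900700000008006708200"
        "002609500800203009005010300")
sud = Sudoku(" ".join(grid.replace("0", ".")))
sud.show_puz2()
print fil_count(sud)  # count of pre-filled cells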
62204aa625906842ccced44fdf50596c95ec552b
|
de4d88db6ea32d20020c169f734edd4b95c3092d
|
/aiotdlib/api/functions/add_network_statistics.py
|
b94694668600633ed7ff2c3fce21c7ade2450981
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
thiagosm/aiotdlib
|
5cc790a5645f7e4cc61bbd0791433ed182d69062
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
refs/heads/main
| 2023-08-15T05:16:28.436803
| 2021-10-18T20:41:27
| 2021-10-18T20:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
from ..types import NetworkStatisticsEntry
class AddNetworkStatistics(BaseObject):
"""
Adds the specified data to data usage statistics. Can be called before authorization
:param entry: The network statistics entry with the data to be added to statistics
:type entry: :class:`NetworkStatisticsEntry`
"""
ID: str = Field("addNetworkStatistics", alias="@type")
entry: NetworkStatisticsEntry
@staticmethod
def read(q: dict) -> AddNetworkStatistics:
return AddNetworkStatistics.construct(**q)
|
[
"pylakey@protonmail.com"
] |
pylakey@protonmail.com
|
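The generated read() helper above rehydrates the request from a raw dict via pydantic's construct(), which skips validation entirely. A minimal sketch; the field names inside the entry payload are assumptions, not taken from the original file:
# construct() bypasses validation, so the nested entry stays exactly what the
# dict contained; "sent_bytes"/"received_bytes" are illustrative assumptions
raw = {"entry": {"sent_bytes": 1024, "received_bytes": 4096}}
req = AddNetworkStatistics.read(raw)
print(req.ID)     # "addNetworkStatistics" (field default)
print(req.entry)  # {'sent_bytes': 1024, 'received_bytes': 4096}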
b77c2a2d23ce3a8fe3f2d0a6917cb7d07d0047ff
|
835e3bd2afd6ea11ef4c42b0155e78d1a37ac13e
|
/courses/machine_learning/asl/open_project/cloud_composer_automated_ml_pipeline_taxifare/airflow/dags/taxifare_multi.py
|
0c384a3e3f87199089cf46cf1ce6037ae8f9a271
|
[
"Apache-2.0"
] |
permissive
|
kartik-nighania/training-data-analyst
|
e1e5ae96d3d506894a4a8692d979f54baf14b1eb
|
ba1dea1646ef0f5197a89e32d8c0bb158897a5b4
|
refs/heads/master
| 2020-07-26T11:57:39.811468
| 2019-09-15T19:14:12
| 2019-09-15T19:14:12
| 208,635,891
| 1
| 1
|
Apache-2.0
| 2019-09-15T18:09:21
| 2019-09-15T18:09:21
| null |
UTF-8
|
Python
| false
| false
| 16,014
|
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAG definition for taxifare automated pipeline."""
import airflow
from airflow import DAG
# Reference for all available airflow operators:
# https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator, MLEngineModelOperator, MLEngineVersionOperator
from airflow.models import TaskInstance
import datetime
import logging
def _get_project_id():
"""Get project ID from default GCP connection."""
extras = BaseHook.get_connection("google_cloud_default").extra_dejson
key = "extra__google_cloud_platform__project"
if key in extras:
project_id = extras[key]
else:
raise ("Must configure project_id in google_cloud_default "
"connection from Airflow Console")
return project_id
PROJECT_ID = _get_project_id()
# Data set constants, used in BigQuery tasks. You can change these
# to conform to your data.
# Specify your source BigQuery project, dataset, and table names
SOURCE_BQ_PROJECT = "nyc-tlc"
SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",")
# Specify your destination BigQuery dataset
DESTINATION_DATASET = "taxifare"
# GCS bucket names and region, can also be changed.
BUCKET = "gs://" + PROJECT_ID + "-bucket"
REGION = "us-east1"
# # The code package name comes from the model code in the wals_ml_engine
# # directory of the solution code base.
PACKAGE_URI = BUCKET + "/taxifare/code/taxifare-0.1.tar.gz"
JOB_DIR = BUCKET + "/jobs"
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": airflow.utils.dates.days_ago(2),
"email": ["airflow@example.com"],
"email_on_failure": True,
"email_on_retry": False,
"retries": 5,
"retry_delay": datetime.timedelta(minutes=5)
}
# Default schedule interval using cronjob syntax - can be customized here
# or in the Airflow console.
# Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)
# Reference: https://airflow.apache.org/scheduler.html
schedule_interval = "00 21 * * *"
# Title your DAG
dag = DAG(
"taxifare_multi",
default_args=default_args,
schedule_interval=None
)
dag.doc_md = __doc__
#
#
# Task Definition
#
#
for model in SOURCE_DATASET_TABLE_NAMES:
# BigQuery data query
bql="""
SELECT
(tolls_amount + fare_amount) AS fare_amount,
EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek,
EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key
FROM
`{0}.{1}`
WHERE
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
AND rand() < 0.00001
"""
bql = bql.format(SOURCE_BQ_PROJECT, model)
bql_train = "SELECT * EXCEPT (key) FROM({0}) WHERE MOD(ABS(FARM_FINGERPRINT(key)), 5) < 4".format(bql)
bql_eval = "SELECT * EXCEPT (key) FROM({0}) WHERE MOD(ABS(FARM_FINGERPRINT(key)), 5) = 4".format(bql)
# Complete the BigQueryOperator task to truncate the table if it already exists before writing
# Reference: https://airflow.apache.org/integration.html#bigqueryoperator
bq_train_data_op = BigQueryOperator(
task_id="bq_train_data_{}_task".format(model.replace(".","_")),
bql=bql_train,
destination_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")),
write_disposition="WRITE_TRUNCATE", # specify to truncate on writes
use_legacy_sql=False,
dag=dag
)
bq_eval_data_op = BigQueryOperator(
task_id="bq_eval_data_{}_task".format(model.replace(".","_")),
bql=bql_eval,
destination_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")),
write_disposition="WRITE_TRUNCATE", # specify to truncate on writes
use_legacy_sql=False,
dag=dag
)
sql = """
SELECT
COUNT(*)
FROM
[{0}:{1}.{2}]
"""
# Check to make sure that the data tables won"t be empty
bq_check_train_data_op = BigQueryCheckOperator(
task_id="bq_check_train_data_{}_task".format(model.replace(".","_")),
sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_train_data"),
dag=dag
)
bq_check_eval_data_op = BigQueryCheckOperator(
task_id="bq_check_eval_data_{}_task".format(model.replace(".","_")),
sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_eval_data"),
dag=dag
)
# BigQuery training data export to GCS
bash_remove_old_data_op = BashOperator(
task_id="bash_remove_old_data_{}_task".format(model.replace(".","_")),
bash_command="if gsutil ls {0}/taxifare/data/{1} 2> /dev/null; then gsutil -m rm -rf {0}/taxifare/data/{1}/*; else true; fi".format(BUCKET, model.replace(".","_")),
dag=dag
)
# Takes a BigQuery dataset and table as input and exports it to GCS as a CSV
train_files = BUCKET + "/taxifare/data/"
bq_export_gcs_train_csv_op = BigQueryToCloudStorageOperator(
task_id="bq_export_gcs_train_csv_{}_task".format(model.replace(".","_")),
source_project_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")),
destination_cloud_storage_uris=[train_files + "{}/train-*.csv".format(model.replace(".","_"))],
export_format="CSV",
print_header=False,
dag=dag
)
eval_files = BUCKET + "/taxifare/data/"
bq_export_gcs_eval_csv_op = BigQueryToCloudStorageOperator(
task_id="bq_export_gcs_eval_csv_{}_task".format(model.replace(".","_")),
source_project_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")),
destination_cloud_storage_uris=[eval_files + "{}/eval-*.csv".format(model.replace(".","_"))],
export_format="CSV",
print_header=False,
dag=dag
)
# ML Engine training job
job_id = "taxifare_{}_{}".format(model.replace(".","_"), datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
output_dir = BUCKET + "/taxifare/trained_model/{}".format(model.replace(".","_"))
job_dir = JOB_DIR + "/" + job_id
training_args = [
"--job-dir", job_dir,
"--train_data_paths", train_files,
"--eval_data_paths", eval_files,
"--output_dir", output_dir,
"--train_steps", str(500),
"--train_batch_size", str(32),
"--eval_steps", str(500),
"--eval_batch_size", str(32),
"--nbuckets", str(8),
"--hidden_units", "128,32,4"
]
# Reference: https://airflow.apache.org/integration.html#cloud-ml-engine
ml_engine_training_op = MLEngineTrainingOperator(
task_id="ml_engine_training_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
job_id=job_id,
package_uris=[PACKAGE_URI],
training_python_module="trainer.task",
training_args=training_args,
region=REGION,
scale_tier="BASIC",
runtime_version="1.13",
python_version="3.5",
dag=dag
)
MODEL_NAME = "taxifare_"
MODEL_VERSION = "v1"
MODEL_LOCATION = BUCKET + "/taxifare/saved_model/"
bash_remove_old_saved_model_op = BashOperator(
task_id="bash_remove_old_saved_model_{}_task".format(model.replace(".","_")),
bash_command="if gsutil ls {0} 2> /dev/null; then gsutil -m rm -rf {0}/*; else true; fi".format(MODEL_LOCATION + model.replace(".","_")),
dag=dag
)
bash_copy_new_saved_model_op = BashOperator(
task_id="bash_copy_new_saved_model_{}_task".format(model.replace(".","_")),
bash_command="gsutil -m rsync -d -r `gsutil ls {0}/export/exporter/ | tail -1` {1}".format(output_dir, MODEL_LOCATION + model.replace(".","_")),
dag=dag
)
# Create model on ML-Engine
bash_ml_engine_models_list_op = BashOperator(
task_id="bash_ml_engine_models_list_{}_task".format(model.replace(".","_")),
xcom_push=True,
bash_command="gcloud ml-engine models list --filter='name:{0}'".format(MODEL_NAME + model.replace(".","_")),
dag=dag
)
def check_if_model_already_exists(templates_dict, **kwargs):
cur_model = templates_dict["model"].replace(".","_")
ml_engine_models_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_models_list_{}_task".format(cur_model))
logging.info("check_if_model_already_exists: {}: ml_engine_models_list = \n{}".format(cur_model, ml_engine_models_list))
create_model_task = "ml_engine_create_model_{}_task".format(cur_model)
dont_create_model_task = "dont_create_model_dummy_branch_{}_task".format(cur_model)
if len(ml_engine_models_list) == 0 or ml_engine_models_list == "Listed 0 items.":
return create_model_task
return dont_create_model_task
check_if_model_already_exists_op = BranchPythonOperator(
task_id="check_if_model_already_exists_{}_task".format(model.replace(".","_")),
templates_dict={"model": model.replace(".","_")},
python_callable=check_if_model_already_exists,
provide_context=True,
dag=dag
)
ml_engine_create_model_op = MLEngineModelOperator(
task_id="ml_engine_create_model_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
model={"name": MODEL_NAME + model.replace(".","_")},
operation="create",
dag=dag
)
create_model_dummy_op = DummyOperator(
task_id="create_model_dummy_{}_task".format(model.replace(".","_")),
trigger_rule="all_done",
dag=dag
)
dont_create_model_dummy_branch_op = DummyOperator(
task_id="dont_create_model_dummy_branch_{}_task".format(model.replace(".","_")),
dag=dag
)
dont_create_model_dummy_op = DummyOperator(
task_id="dont_create_model_dummy_{}_task".format(model.replace(".","_")),
trigger_rule="all_done",
dag=dag
)
# Create version of model on ML-Engine
bash_ml_engine_versions_list_op = BashOperator(
task_id="bash_ml_engine_versions_list_{}_task".format(model.replace(".","_")),
xcom_push=True,
bash_command="gcloud ml-engine versions list --model {0} --filter='name:{1}'".format(MODEL_NAME + model.replace(".","_"), MODEL_VERSION),
dag=dag
)
def check_if_model_version_already_exists(templates_dict, **kwargs):
cur_model = templates_dict["model"].replace(".","_")
ml_engine_versions_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_versions_list_{}_task".format(cur_model))
logging.info("check_if_model_version_already_exists: {}: ml_engine_versions_list = \n{}".format(cur_model, ml_engine_versions_list))
create_version_task = "ml_engine_create_version_{}_task".format(cur_model)
create_other_version_task = "ml_engine_create_other_version_{}_task".format(cur_model)
if len(ml_engine_versions_list) == 0 or ml_engine_versions_list == "Listed 0 items.":
return create_version_task
return create_other_version_task
check_if_model_version_already_exists_op = BranchPythonOperator(
task_id="check_if_model_version_already_exists_{}_task".format(model.replace(".","_")),
templates_dict={"model": model.replace(".","_")},
python_callable=check_if_model_version_already_exists,
provide_context=True,
dag=dag
)
OTHER_VERSION_NAME = "v_{0}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S")[0:12])
ml_engine_create_version_op = MLEngineVersionOperator(
task_id="ml_engine_create_version_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
model_name=MODEL_NAME + model.replace(".","_"),
version_name=MODEL_VERSION,
version={
"name": MODEL_VERSION,
"deploymentUri": MODEL_LOCATION + model.replace(".","_"),
"runtimeVersion": "1.13",
"framework": "TENSORFLOW",
"pythonVersion": "3.5",
},
operation="create",
dag=dag
)
ml_engine_create_other_version_op = MLEngineVersionOperator(
task_id="ml_engine_create_other_version_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
model_name=MODEL_NAME + model.replace(".","_"),
version_name=OTHER_VERSION_NAME,
version={
"name": OTHER_VERSION_NAME,
"deploymentUri": MODEL_LOCATION + model.replace(".","_"),
"runtimeVersion": "1.13",
"framework": "TENSORFLOW",
"pythonVersion": "3.5",
},
operation="create",
dag=dag
)
ml_engine_set_default_version_op = MLEngineVersionOperator(
task_id="ml_engine_set_default_version_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
model_name=MODEL_NAME + model.replace(".","_"),
version_name=MODEL_VERSION,
version={"name": MODEL_VERSION},
operation="set_default",
dag=dag
)
ml_engine_set_default_other_version_op = MLEngineVersionOperator(
task_id="ml_engine_set_default_other_version_{}_task".format(model.replace(".","_")),
project_id=PROJECT_ID,
model_name=MODEL_NAME + model.replace(".","_"),
version_name=OTHER_VERSION_NAME,
version={"name": OTHER_VERSION_NAME},
operation="set_default",
dag=dag
)
# Build dependency graph, set_upstream dependencies for all tasks
bq_check_train_data_op.set_upstream(bq_train_data_op)
bq_check_eval_data_op.set_upstream(bq_eval_data_op)
bash_remove_old_data_op.set_upstream([bq_check_train_data_op, bq_check_eval_data_op])
bq_export_gcs_train_csv_op.set_upstream([bash_remove_old_data_op])
bq_export_gcs_eval_csv_op.set_upstream([bash_remove_old_data_op])
ml_engine_training_op.set_upstream([bq_export_gcs_train_csv_op, bq_export_gcs_eval_csv_op])
bash_remove_old_saved_model_op.set_upstream(ml_engine_training_op)
bash_copy_new_saved_model_op.set_upstream(bash_remove_old_saved_model_op)
bash_ml_engine_models_list_op.set_upstream(ml_engine_training_op)
check_if_model_already_exists_op.set_upstream(bash_ml_engine_models_list_op)
ml_engine_create_model_op.set_upstream(check_if_model_already_exists_op)
create_model_dummy_op.set_upstream(ml_engine_create_model_op)
dont_create_model_dummy_branch_op.set_upstream(check_if_model_already_exists_op)
dont_create_model_dummy_op.set_upstream(dont_create_model_dummy_branch_op)
bash_ml_engine_versions_list_op.set_upstream([dont_create_model_dummy_op, create_model_dummy_op])
check_if_model_version_already_exists_op.set_upstream(bash_ml_engine_versions_list_op)
ml_engine_create_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op])
ml_engine_create_other_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op])
ml_engine_set_default_version_op.set_upstream(ml_engine_create_version_op)
ml_engine_set_default_other_version_op.set_upstream(ml_engine_create_other_version_op)
|
[
"ryangillard@google.com"
] |
ryangillard@google.com
|
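Every task id in the DAG above embeds `model.replace(".", "_")`, repeated at dozens of call sites. A refactoring sketch of that naming pattern in isolation (not part of the original DAG):
# the task-id convention used throughout the DAG, factored into a helper
def task_id_for(prefix, model):
    return "{}_{}_task".format(prefix, model.replace(".", "_"))

for model in ("yellow.trips", "green.trips_2014", "green.trips_2015"):
    print(task_id_for("bq_train_data", model))
# bq_train_data_yellow_trips_task
# bq_train_data_green_trips_2014_task
# bq_train_data_green_trips_2015_task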
cdf83d46c866bb0fd896c3d3359f95a1100fee01
|
4018ede0bb90d621a1002073529304d942ba4322
|
/backend/vehicle/migrations/0001_initial.py
|
41e9f437055310feb82188c190808790ea8ccfd8
|
[] |
no_license
|
crowdbotics-apps/uber-19759
|
82ee6d2cd616c1fa699d426f85a964af40b4cb44
|
17331bfcdfc29a20c2d986e796df2db88a2b5ed1
|
refs/heads/master
| 2022-12-27T05:29:15.759136
| 2020-10-05T01:17:32
| 2020-10-05T01:17:32
| 289,784,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
# Generated by Django 2.2.15 on 2020-08-23 23:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='VehicleType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('icon', models.URLField()),
('base_rate', models.FloatField()),
],
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_description', models.CharField(max_length=255)),
('plate_number', models.CharField(max_length=10)),
('timestamp_registered', models.DateTimeField(auto_now_add=True)),
('is_on_duty', models.BooleanField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_driver', to='taxi_profile.DriverProfile')),
('vehicle_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_vehicle_type', to='vehicle.VehicleType')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9c15d394fd6b99aa3d6bc0d05671cbe053dda4a1
|
4c43fb0220bc0c12e8fa21f8cca2618d64b03425
|
/lab11/OrderRecordServicePS_RS/message_puller.py
|
5518536f8a9d4b8f8d9469421c1eac6611052f57
|
[] |
no_license
|
dayanach/IS
|
fddf0a8b95e6535ca9222ebfd535dc01f581d3bd
|
d5bab2729a5a6fd03280a62cc0132e7f9d72ba37
|
refs/heads/master
| 2022-05-26T09:46:56.543883
| 2020-05-02T16:18:18
| 2020-05-02T16:18:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
import json
from threading import Thread
import pika
import requests
def callback(ch, method, props, body):
    print(" [x] Received %r" % body)
    payload = json.loads(body.decode('utf-8'))
    msg = requests.post("http://127.0.0.1:5002/orders/", json=payload)
    # reply on a fresh connection, addressed to the caller's reply queue
    connection = pika.BlockingConnection(pika.ConnectionParameters('104.198.35.199'))
    channel = connection.channel()
    channel.queue_declare(queue=props.reply_to)
    channel.basic_publish(exchange='',
                          routing_key=props.reply_to,
                          properties=pika.BasicProperties(
                              correlation_id=props.correlation_id),
                          body=msg.content)
    # no manual ack needed: the consumer subscribes with auto_ack=True, and the
    # removed basic_ack targeted the wrong channel with a foreign delivery tag
    connection.close()
def pull_message():
connection = pika.BlockingConnection(pika.ConnectionParameters('104.198.35.199'))
channel = connection.channel()
channel.exchange_declare(exchange='order', exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='order', queue=queue_name, routing_key="order.create.*.*")
print(' [*] Waiting for messages. To exit press CTRL+C ' + queue_name)
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
channel.start_consuming()
class MessagePuller(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.start()
def run(self):
while True:
pull_message()
|
[
"ipkumarawd@yahoo.com"
] |
ipkumarawd@yahoo.com
|
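MessagePuller starts consuming from its constructor on a daemon thread, so a host process has to keep the main thread alive. A minimal entry-point sketch; the sleep loop is an assumption, not part of the original module:
# hypothetical service entry point for message_puller.py
import time
from message_puller import MessagePuller

if __name__ == "__main__":
    MessagePuller()  # __init__ calls self.start() on a daemon thread
    while True:      # keep the main thread (and thus the daemon) alive
        time.sleep(60)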
90f371715ee021773b7ca9da6fec0febc3eafcbc
|
36e3d735e06d0642f1e8c26bff57305a01cc627c
|
/apClient/net_data/migrations/0007_auto_20160428_0551.py
|
1ab204fe96c50f574cb4f7620bc8e1d1f6d23978
|
[] |
no_license
|
WilsonWangTHU/ipv6_server
|
5c768cdaeaf22ee508c5fff162b208481a42f95d
|
5088f58ab25061e65127699ed328ddaab24f9aac
|
refs/heads/master
| 2021-01-18T21:18:39.653994
| 2016-05-27T04:22:23
| 2016-05-27T04:22:23
| 55,656,523
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-28 05:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('net_data', '0006_auto_20160427_0806'),
]
operations = [
migrations.RemoveField(
model_name='long_term_dataset',
name='data_set',
),
migrations.RemoveField(
model_name='configuration',
name='long_term_sample_period',
),
migrations.RemoveField(
model_name='configuration',
name='long_term_volumn',
),
migrations.RemoveField(
model_name='cpu_data',
name='cpu_kernel',
),
migrations.AddField(
model_name='configuration',
name='heart_beat_sample_period',
field=models.IntegerField(default=600),
),
migrations.AlterField(
model_name='configuration',
name='short_term_sample_period',
field=models.IntegerField(default=60),
),
migrations.AlterField(
model_name='configuration',
name='short_term_volumn',
field=models.IntegerField(default=200),
),
migrations.DeleteModel(
name='long_term_dataset',
),
]
|
[
"wode406@hotmail.com"
] |
wode406@hotmail.com
|
cf36a196e55d8f95b64a061d2c6f6a26228ef58f
|
3f0b90fd8d81cbc544f6e80a7ed0c254ff71a199
|
/PyTorch/nlp/transformer/fairseq/fairseq/options.py
|
f31b209ab284887a6bb0b6ace4b8b538debc54ed
|
[
"MIT"
] |
permissive
|
omrialmog/Model-References
|
f25b7c432c36795dcffa6e54fb93bd07d6decffc
|
45922211087567baada46f6356dd8bd00f2faff8
|
refs/heads/master
| 2023-08-16T03:47:39.674059
| 2021-10-26T23:47:54
| 2021-10-26T23:47:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,491
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from typing import Callable, List, Optional, Union
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
            raise RuntimeError("unknown model architecture: {}".format(args.arch))
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
args.use_habana = getattr(args, "use_habana", False)
if args.bf16:
if not args.use_habana:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
group.add_argument("--dict-only", action='store_true',
help="if true, only builds a dictionary and then exits")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
def get_args(
data: Union[str, Path],
task: str = "translation",
arch: str = "transformer",
**overrides
):
parser = get_training_parser(task)
args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch])
for k, v in overrides.items():
setattr(args, k, v)
return args
|
[
"mpandit@habana.ai"
] |
mpandit@habana.ai
|
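get_args above wraps the whole parser pipeline for programmatic use: it synthesizes a command line from the data path, task and arch, then applies the overrides with setattr. A hedged example; the data directory is a placeholder:
# illustrative call: "data-bin/wmt14" is a placeholder data directory
args = get_args(
    "data-bin/wmt14",
    task="translation",
    arch="transformer",
    max_tokens=4096,  # applied after parsing via setattr(args, ...)
)
print(args.arch, args.max_tokens)  # transformer 4096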
e9b16c80258ae328972b1a66f590751c25508eb0
|
272aff93c6f399cd834835970891696e605a1e31
|
/dsp_ws/build/hector_navigation/hector_costmap/catkin_generated/pkg.installspace.context.pc.py
|
9203dd9dc86536536a7e49eceaa03f0ccbd62473
|
[] |
no_license
|
dingjianfeng/dsp_ding2
|
18c99958a022d2e2fae3aa5888fd07fa279568d6
|
a3327a1db4635865a07390023c5cc2932456b367
|
refs/heads/master
| 2020-05-02T12:41:26.516325
| 2019-03-27T10:10:26
| 2019-03-27T10:10:26
| 177,964,602
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/dsp/dsp_ws/install/include".split(';') if "/home/dsp/dsp_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_costmap"
PROJECT_SPACE_DIR = "/home/dsp/dsp_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"dingjianfeng"
] |
dingjianfeng
|
f21b83ad722ceb1b046532aaa66c353ff2d81b99
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Eltex/DSLAM/get_ifindexes.py
|
1daf655961d379978c1d13de48ccb37a501d5fc5
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
# ---------------------------------------------------------------------
# Generic.get_ifindexes
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetifindexes import IGetIfindexes
from noc.core.mib import mib
class Script(BaseScript):
name = "Eltex.DSLAM.get_ifindexes"
interface = IGetIfindexes
cache = True
def execute_snmp(self):
r = {}
if self.is_platform_MXA24:
o = "1.3.6.1.4.1.34300.1.6"
ooid = "%s.15.2.1.2" % o
aoid = "%s.10.2.1.2" % o
for oid, name in self.snmp.getnext(aoid, max_retries=8):
if oid.endswith(".0"):
ifindex = int(oid.split(".")[-2])
else:
ifindex = int(oid.split(".")[-1])
r[name] = ifindex
for oid, name in self.snmp.getnext(ooid, max_retries=8):
if " " in name:
name = name.split()[2]
if name.startswith("p"):
name = "s%s" % name
if oid.endswith(".0"):
ifindex = int(oid.split(".")[-2])
else:
ifindex = int(oid.split(".")[-1])
r[name] = ifindex
else:
if self.is_platform_MXA32:
o = "1.3.6.1.4.1.35265.1.28"
else:
o = "1.3.6.1.4.1.35265.1.33"
aoid = "%s.10.2.1.2" % o
for oid, name in self.snmp.getnext(mib["IF-MIB::ifDescr"], max_retries=8):
if name.startswith("p"):
name = "s%s" % name
ifindex = int(oid.split(".")[-1])
r[name] = ifindex
for oid, name in self.snmp.getnext(aoid, max_retries=8):
if oid.endswith(".0"):
ifindex = int(oid.split(".")[-2])
else:
ifindex = int(oid.split(".")[-1])
r[name] = ifindex
return r
|
[
"sysfar@gmail.com"
] |
sysfar@gmail.com
|
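The script above repeats one rule for extracting an ifindex from an OID: take the last sub-identifier, or the second-to-last when the OID ends in .0. The same rule in isolation (sample OIDs are made up):
# the oid-suffix rule used above, in isolation; OIDs below are invented
def ifindex_from_oid(oid):
    parts = oid.split(".")
    return int(parts[-2]) if oid.endswith(".0") else int(parts[-1])

assert ifindex_from_oid("1.3.6.1.4.1.34300.1.6.10.2.1.2.5") == 5
assert ifindex_from_oid("1.3.6.1.4.1.34300.1.6.10.2.1.2.7.0") == 7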
be6efdac7b8c19e02de4aae801e0423401d88808
|
bef93432b7745ba5492f11e709e47a5a372590f0
|
/modules/dxtbx/format/FormatCBFMiniPilatusXXX.py
|
10e642a2bb5e1094c4d7b1d60ad3896600e1a1e7
|
[
"BSD-3-Clause"
] |
permissive
|
BlenderCN-Org/dials-dev20190819
|
939378744d546692e3de33d106a1b5218a584c2a
|
1b719b88a1642c13a5a8d488addbb215d0fa290c
|
refs/heads/master
| 2020-07-19T17:00:06.944870
| 2019-08-19T21:36:25
| 2019-08-19T21:36:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
# Copyright (C) 2014 Diamond Light Source, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# An implementation of the CBF image reader for Pilatus images, from the Pilatus
# 6M SN 100 currently on Diamond I04.
from __future__ import absolute_import, division, print_function
from dxtbx.format.FormatCBFMiniPilatus import FormatCBFMiniPilatus
class FormatCBFMiniPilatusXXX(FormatCBFMiniPilatus):
"""A class for reading mini CBF format Pilatus images for 6M SN XXX."""
@staticmethod
def understand(image_file):
"""Check to see if this looks like an Pilatus mini CBF format image,
i.e. we can make sense of it."""
header = FormatCBFMiniPilatus.get_cbf_header(image_file)
for record in header.split("\n"):
if (
"# Detector" in record
and "PILATUS" in record
and "S/N XX-XXX" in header
):
return True
return False
def __init__(self, image_file, **kwargs):
"""Initialise the image structure from the given file, including a
proper model of the experiment."""
from dxtbx import IncorrectFormatError
if not self.understand(image_file):
raise IncorrectFormatError(self, image_file)
FormatCBFMiniPilatus.__init__(self, image_file, **kwargs)
def _goniometer(self):
"""Return a model for a simple single-axis goniometer. This should
probably be checked against the image header, though for miniCBF
there are limited options for this."""
return self._goniometer_factory.single_axis_reverse()
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
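understand() is the hook dxtbx's format registry uses to pick a Format class for an image file; it can also be probed directly. A sketch, assuming a miniCBF file exists at the placeholder path:
# illustrative only; "image_0001.cbf" is a placeholder path to a miniCBF file
from dxtbx.format.FormatCBFMiniPilatusXXX import FormatCBFMiniPilatusXXX

if FormatCBFMiniPilatusXXX.understand("image_0001.cbf"):
    fmt = FormatCBFMiniPilatusXXX("image_0001.cbf")
    print(fmt._goniometer())  # single-axis reverse goniometer model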
f75d9363d3f58cb2e0d6affc103f3d42ab2fe867
|
340df4cb3e8d07b15ac1d99ebd63cf2150c9a415
|
/zentral/contrib/inventory/utils.py
|
daf58bf7ae1bca65fcae0d8177729d8259c2fe72
|
[
"Apache-2.0"
] |
permissive
|
dekoder/zentral
|
8e7bd2b51932a42ba3106a1d0ead2c43bfc51227
|
54cb7be31f8d8731f3a99bf0bd1c0f167404c58e
|
refs/heads/master
| 2023-01-20T03:43:05.282387
| 2020-11-18T18:55:00
| 2020-11-18T18:55:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,008
|
py
|
from collections import OrderedDict
import csv
from datetime import datetime
from itertools import chain
import logging
import os
import re
import tempfile
import urllib.parse
import zipfile
from django import forms
from django.db import connection
from django.http import QueryDict
from django.utils.text import slugify
from prometheus_client import CollectorRegistry, Gauge
import xlsxwriter
from zentral.core.incidents.models import OPEN_STATUSES, SEVERITY_CHOICES
from zentral.utils.json import save_dead_letter
from .events import (post_enrollment_secret_verification_failure, post_enrollment_secret_verification_success,
post_inventory_events)
from .exceptions import EnrollmentSecretVerificationFailed
from .models import EnrollmentSecret, MachineSnapshotCommit, MetaMachine
logger = logging.getLogger("zentral.contrib.inventory.utils")
class BaseMSFilter:
none_value = "\u2400"
unknown_value = "UNKNOWN"
title = "Untitled"
optional = False
free_input = False
query_kwarg = None
many = False
non_grouping_expression = None
expression = None
grouping_set = None
def __init__(self, idx, query_dict, hidden_value=None):
self.idx = idx
self.query_dict = query_dict
if hidden_value:
self.value = hidden_value
self.hidden = True
else:
self.value = query_dict.get(self.get_query_kwarg())
self.hidden = False
self.grouping_alias = "fg{}".format(idx)
def get_query_kwarg(self):
return self.query_kwarg
def get_expressions(self, grouping=False):
if grouping:
if self.grouping_set:
yield "grouping({}) as {}".format(self.grouping_set[0], self.grouping_alias)
if self.expression:
yield self.expression
elif self.expression:
if not self.many:
yield self.expression
else:
if "as" in self.expression:
expression, alias = self.expression.split(" as ")
expression = "json_agg({})".format(expression)
else:
expression = self.expression
alias = None
yield " as ".join(e for e in (expression, alias) if e)
elif self.non_grouping_expression:
yield self.non_grouping_expression
def get_group_by(self):
if self.many:
return None
elif self.grouping_set:
return self.grouping_set[-1]
elif self.non_grouping_expression:
return self.non_grouping_expression
def joins(self):
return []
def wheres(self):
return []
def where_args(self):
return []
def serialize(self):
return self.get_query_kwarg()
# process grouping results
def filter_grouping_results(self, grouping_results):
for gr in grouping_results:
if gr.get(self.grouping_alias) == 0:
yield gr
def grouping_value_from_grouping_result(self, grouping_result):
if not self.grouping_set:
return
return grouping_result.get(self.grouping_set[-1].split(".")[-1])
def label_for_grouping_value(self, grouping_value):
return str(grouping_value) if grouping_value else self.none_value
def query_kwarg_value_from_grouping_value(self, grouping_value):
return grouping_value
def grouping_choices_from_grouping_results(self, grouping_results):
choices = []
for grouping_result in self.filter_grouping_results(grouping_results):
grouping_value = self.grouping_value_from_grouping_result(grouping_result)
# label
label = self.label_for_grouping_value(grouping_value)
# query_dict
query_dict = self.query_dict.copy()
query_dict.pop("page", None)
query_kwarg_value = self.query_kwarg_value_from_grouping_value(grouping_value)
if query_kwarg_value is None:
query_kwarg_value = self.none_value
else:
query_kwarg_value = str(query_kwarg_value)
query_kwarg = self.get_query_kwarg()
if query_dict.get(query_kwarg) == query_kwarg_value:
# already filtered
down_query_dict = None
up_query_dict = query_dict
up_query_dict.pop(query_kwarg, None)
else:
down_query_dict = query_dict
down_query_dict[query_kwarg] = query_kwarg_value
up_query_dict = None
# count
count = grouping_result["count"]
choices.append((label, count, down_query_dict, up_query_dict))
return choices
# process fetching results
def process_fetched_record(self, record, for_filtering):
return
class SourceFilter(BaseMSFilter):
title = "Sources"
query_kwarg = "src"
expression = ("jsonb_build_object("
"'id', src.id, 'module', src.module, "
"'name', src.name, 'config', src.config) as src_j")
grouping_set = ("src.id", "src_j")
def joins(self):
yield "join inventory_source as src on (ms.source_id = src.id)"
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "src.id = %s"
else:
yield "src.id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
return gv
@staticmethod
def display_name(source):
# TODO: better. see also zentral.inventory.models
dn = [source["name"]]
config = source.get("config")
if config:
host = config.get("host")
if host:
dn.append(host)
return "/".join(e for e in dn if e)
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
else:
return self.display_name(grouping_value)
def query_kwarg_value_from_grouping_value(self, grouping_value):
if not grouping_value:
return None
else:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
source = record.pop("src_j", None)
if source and source["id"]:
source["display_name"] = self.display_name(source)
record["source"] = source
if for_filtering:
source.pop("config", None)
elif for_filtering:
record["source"] = {"display_name": self.unknown_value.title(),
"name": self.unknown_value}
class OSVersionFilter(BaseMSFilter):
title = "OS"
optional = True
query_kwarg = "osv"
expression = ("jsonb_build_object("
"'id', osv.id, "
"'name', osv.name, "
"'major', osv.major, "
"'minor', osv.minor, "
"'patch', osv.patch, "
"'build', osv.build) as osv_j")
grouping_set = ("osv.id", "osv_j")
def joins(self):
yield "left join inventory_osversion as osv on (ms.os_version_id = osv.id)"
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "osv.id = %s"
else:
yield "osv.id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
return gv
@staticmethod
def version(os_version):
return ".".join(str(num) for num in
(os_version.get(attr) for attr in ("major", "minor", "patch"))
if num is not None)
def version_with_build(self, os_version):
version = self.version(os_version)
build = os_version.get("build")
if build:
version = "{} ({})".format(version, build)
return version.strip()
def display_name(self, os_version):
return " ".join(e for e in (os_version["name"], self.version_with_build(os_version)) if e)
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
else:
return self.display_name(grouping_value)
def query_kwarg_value_from_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
os_version = record.pop("osv_j", None)
if os_version and os_version["id"]:
os_version["version"] = self.version(os_version)
os_version["display_name"] = self.display_name(os_version)
record["os_version"] = os_version
elif for_filtering:
record["os_version"] = {"version": self.unknown_value,
"display_name": self.unknown_value.title()}
class MetaBusinessUnitFilter(BaseMSFilter):
title = "Meta business units"
optional = True
query_kwarg = "mbu"
expression = "jsonb_build_object('id', mbu.id, 'name', mbu.name) as mbu_j"
grouping_set = ("mbu.id", "mbu_j")
def joins(self):
return ["left join inventory_businessunit as bu on (ms.business_unit_id = bu.id)",
"left join inventory_metabusinessunit as mbu on (bu.meta_business_unit_id = mbu.id)"]
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "mbu.id = %s"
else:
yield "mbu.id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
return gv
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
else:
return grouping_value["name"] or "?"
def query_kwarg_value_from_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
meta_business_unit = record.pop("mbu_j", None)
if meta_business_unit and meta_business_unit["id"]:
record["meta_business_unit"] = meta_business_unit
elif for_filtering:
record["meta_business_unit"] = {"name": self.unknown_value}
class MachineGroupFilter(BaseMSFilter):
title = "Groups"
optional = True
many = True
query_kwarg = "g"
expression = "jsonb_build_object('id', mg.id, 'name', mg.name) as mg_j"
grouping_set = ("mg.id", "mg_j")
def joins(self):
return ["left join inventory_machinesnapshot_groups as msg on (ms.id = msg.machinesnapshot_id)",
"left join inventory_machinegroup as mg on (mg.id = msg.machinegroup_id)"]
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "mg.id = %s"
else:
yield "mg.id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
return gv
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
else:
return grouping_value["name"] or "?"
def query_kwarg_value_from_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
machine_groups = []
for machine_group in record.pop("mg_j", []):
if not machine_group["id"]:
continue
if machine_group not in machine_groups:
machine_groups.append(machine_group)
record["machine_groups"] = machine_groups
class TagFilter(BaseMSFilter):
title = "Tags"
optional = True
many = True
query_kwarg = "t"
expression = (
"jsonb_build_object("
"'id', t.id, "
"'name', t.name, "
"'color', t.color, "
"'meta_business_unit', "
"jsonb_build_object('id', tmbu.id, 'name', tmbu.name)"
") as tag_j"
)
grouping_set = ("t.id", "tag_j")
def joins(self):
return [("left join lateral ("
"select distinct * "
"from inventory_tag "
"where id in ("
"select mt.tag_id "
"from inventory_machinetag as mt "
"where mt.serial_number = ms.serial_number "
"union "
"select mbut.tag_id "
"from inventory_metabusinessunittag as mbut "
"join inventory_businessunit as bu on (bu.meta_business_unit_id = mbut.meta_business_unit_id) "
"where bu.id = ms.business_unit_id "
")"
") t on TRUE"),
"left join inventory_metabusinessunit as tmbu on (tmbu.id = t.meta_business_unit_id)"]
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "t.id = %s"
else:
yield "t.id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
elif gv["meta_business_unit"]["id"] is None:
gv["meta_business_unit"] = None
return gv
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
label = grouping_value["name"] or "?"
mbu = grouping_value.get("meta_business_unit")
if mbu:
mbu_name = mbu.get("name")
if mbu_name:
label = "{}/{}".format(mbu_name, label)
return label
def query_kwarg_value_from_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
tags = []
for tag in record.pop("tag_j", []):
if not tag["id"]:
continue
display_name = tag["name"]
if not tag["meta_business_unit"]["id"]:
tag["meta_business_unit"] = None
else:
display_name = "/".join(s for s in (tag["meta_business_unit"]["name"], display_name) if s)
if for_filtering:
tag = display_name
else:
tag["display_name"] = display_name
if tag not in tags:
tags.append(tag)
record["tags"] = tags
class OSXAppInstanceFilter(BaseMSFilter):
title = "macOS app instances"
optional = True
many = True
query_kwarg = "mosai"
def joins(self):
yield "left join inventory_machinesnapshot_osx_app_instances as mosai on (mosai.machinesnapshot_id = ms.id)"
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "mosai.osxappinstance_id = %s"
else:
yield "mosai.osxappinstance_id is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
class BundleFilter(BaseMSFilter):
optional = True
many = True
def __init__(self, *args, **kwargs):
self.bundle_id = kwargs.pop("bundle_id", None)
self.bundle_name = kwargs.pop("bundle_name", None)
if not self.bundle_id and not self.bundle_name:
raise ValueError("no bundle id and no bundle name")
self.title = self.bundle_name or self.bundle_id
super().__init__(*args, **kwargs)
self.expression = (
"jsonb_build_object("
"'id', a{idx}.id, "
"'bundle_id', a{idx}.bundle_id, "
"'bundle_name', a{idx}.bundle_name, "
"'bundle_version', a{idx}.bundle_version, "
"'bundle_version_str', a{idx}.bundle_version_str"
") as a{idx}_j"
).format(idx=self.idx)
self.grouping_set = (
"a{idx}.id".format(idx=self.idx),
"a{idx}_j".format(idx=self.idx)
)
def get_query_kwarg(self):
return "a{}".format(self.idx)
def joins(self):
if self.bundle_id:
arg = self.bundle_id
subquery_cond = "a.bundle_id = %s"
elif self.bundle_name:
arg = self.bundle_name
subquery_cond = "a.bundle_name = %s"
yield (("left join lateral ("
"select a.* from inventory_osxapp as a "
"join inventory_osxappinstance as oai on (oai.app_id = a.id) "
"join inventory_machinesnapshot_osx_app_instances as msoai on (msoai.osxappinstance_id = oai.id) "
"where msoai.machinesnapshot_id = ms.id and {subquery_cond}"
") a{idx} on TRUE").format(idx=self.idx, subquery_cond=subquery_cond),
[arg])
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "a{}.id = %s".format(self.idx)
else:
yield "a{}.id is null".format(self.idx)
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def serialize(self):
if self.bundle_name:
return "a.n.{}".format(self.bundle_name)
elif self.bundle_id:
return "a.i.{}".format(self.bundle_id)
@staticmethod
def display_name(osx_app):
return " ".join(e for e in (osx_app["bundle_name"], osx_app["bundle_version_str"]) if e)
def label_for_grouping_value(self, grouping_value):
if not grouping_value:
return self.none_value
if self.bundle_id:
# TODO hack. Try to set a better title.
bundle_name = grouping_value["bundle_name"]
if bundle_name:
self.title = bundle_name
return self.display_name(grouping_value)
def grouping_value_from_grouping_result(self, grouping_result):
gv = super().grouping_value_from_grouping_result(grouping_result)
if gv["id"] is None:
return None
return gv
def query_kwarg_value_from_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value["id"]
def process_fetched_record(self, record, for_filtering):
osx_apps = []
for osx_app in record.pop(self.grouping_set[-1], []):
if not osx_app["id"]:
continue
osx_app["display_name"] = self.display_name(osx_app)
if osx_app not in osx_apps:
osx_apps.append(osx_app)
osx_apps.sort(key=lambda app: (app.get("bundle_version"), app.get("bundle_version_str"), app.get("id")))
if not for_filtering:
# TODO: verify no conflict
record.setdefault("osx_apps", OrderedDict())[self.title] = osx_apps
else:
bundles_dict = record.setdefault("bundles", {})
bundle_idx = len(bundles_dict) # we do not use self.idx because we want to start from 0
bundle_dict = bundles_dict.setdefault(str(bundle_idx), {})
if self.bundle_name:
bundle_dict["name"] = self.bundle_name
elif self.bundle_id:
bundle_dict["id"] = self.bundle_id
if not osx_apps:
bundle_dict["version"] = {"min": self.unknown_value, "max": self.unknown_value}
else:
bundle_dict["version"] = {"min": (osx_apps[0].get("bundle_version_str")
or osx_apps[0].get("bundle_version")),
"max": (osx_apps[-1].get("bundle_version_str")
or osx_apps[-1].get("bundle_version"))}
class TypeFilter(BaseMSFilter):
title = "Types"
optional = True
query_kwarg = "tp"
expression = "ms.type"
grouping_set = ("ms.type",)
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "ms.type = %s"
else:
yield "ms.type is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def label_for_grouping_value(self, grouping_value):
if grouping_value:
return grouping_value.title()
else:
return self.none_value
def process_fetched_record(self, record, for_filtering):
if for_filtering and record.get("type") is None:
record["type"] = self.unknown_value
class PlaformFilter(BaseMSFilter):
title = "Platforms"
optional = True
query_kwarg = "pf"
expression = "ms.platform"
grouping_set = ("ms.platform",)
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "ms.platform = %s"
else:
yield "ms.platform is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def process_fetched_record(self, record, for_filtering):
if for_filtering and record.get("platform") is None:
record["platform"] = self.unknown_value
class SerialNumberFilter(BaseMSFilter):
query_kwarg = "sn"
free_input = True
non_grouping_expression = "ms.serial_number"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.value:
self.value = self.value.strip()
def wheres(self):
if self.value:
yield "UPPER(ms.serial_number) LIKE UPPER(%s)"
def where_args(self):
if self.value:
yield "%{}%".format(connection.ops.prep_for_like_query(self.value))
def process_fetched_record(self, record, for_filtering):
if not for_filtering:
record["urlsafe_serial_number"] = MetaMachine.make_urlsafe_serial_number(record["serial_number"])
class ComputerNameFilter(BaseMSFilter):
query_kwarg = "cn"
free_input = True
non_grouping_expression = "si.computer_name"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.value:
self.value = self.value.strip()
def joins(self):
yield "left join inventory_systeminfo as si on (ms.system_info_id = si.id)"
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "si.id is not null and si.computer_name ~* %s"
else:
yield "si.id is null or si.computer_name is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def process_fetched_record(self, record, for_filtering):
computer_name = record.pop("computer_name", None)
if computer_name:
record.setdefault("system_info", {})["computer_name"] = computer_name
elif for_filtering:
record.setdefault("system_info", {})["computer_name"] = self.unknown_value
class HardwareModelFilter(BaseMSFilter):
title = "Hardware models"
optional = True
query_kwarg = "hm"
expression = "si.hardware_model"
grouping_set = ("si.hardware_model",)
def joins(self):
yield "left join inventory_systeminfo as si on (ms.system_info_id = si.id)"
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "si.hardware_model = %s"
else:
yield "si.hardware_model is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def process_fetched_record(self, record, for_filtering):
hardware_model = record.pop("hardware_model", None)
if hardware_model:
record.setdefault("system_info", {})["hardware_model"] = hardware_model
elif for_filtering:
record.setdefault("system_info", {})["hardware_model"] = self.unknown_value
class DateTimeFilter(BaseMSFilter):
query_kwarg = "dt"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.value:
self.value = datetime.strptime(self.value, "%Y-%m-%d %H:%M:%S")
def joins(self):
if self.value:
yield "left join inventory_machinesnapshot as dtfms"
else:
yield "join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)"
def wheres(self):
if self.value:
return ["ms.serial_number = dtfms.serial_number",
"ms.source_id = dtfms.source_id",
"ms.mt_created_at > dtfms.mt_created_at",
"dtfms.id is null",
"ms.mt_created_at < %s"]
else:
return []
def where_args(self):
if self.value:
yield self.value
class IncidentSeverityFilter(BaseMSFilter):
title = "Incidents severities"
optional = True
query_kwarg = "mis"
expression = "mis.max_incident_severity as max_incident_severity"
grouping_set = ("mis.max_incident_severity",)
severities_dict = dict(SEVERITY_CHOICES)
def joins(self):
yield (
"left join ("
"select mi.serial_number as serial_number, max(i.severity) as max_incident_severity "
"from incidents_machineincident as mi "
"join incidents_incident as i on (i.id = mi.incident_id) "
"where i.status in ({}) "
"group by mi.serial_number"
") as mis on (mis.serial_number = ms.serial_number)"
).format(",".join("'{}'".format(s) for s in OPEN_STATUSES))
def wheres(self):
if self.value:
if self.value != self.none_value:
yield "mis.max_incident_severity = %s"
else:
yield "mis.max_incident_severity is null"
def where_args(self):
if self.value and self.value != self.none_value:
yield self.value
def label_for_grouping_value(self, grouping_value):
if grouping_value is None:
return self.none_value
else:
return self.severities_dict.get(grouping_value, str(grouping_value))
def process_fetched_record(self, record, for_filtering):
max_incident_severity = record.pop("max_incident_severity", None)
if max_incident_severity is not None:
record["max_incident_severity"] = {"value": max_incident_severity,
"keyword": str(self.severities_dict.get(max_incident_severity,
max_incident_severity))}
elif for_filtering:
record["max_incident_severity"] = {"value": 0,
"keyword": "No incidents"}
class MSQuery:
paginate_by = 50
itersize = 1000
default_filters = [
DateTimeFilter,
SourceFilter,
MetaBusinessUnitFilter,
TagFilter,
IncidentSeverityFilter,
TypeFilter,
PlaformFilter,
HardwareModelFilter,
OSVersionFilter,
SerialNumberFilter,
ComputerNameFilter,
]
def __init__(self, query_dict=None):
self.query_dict = query_dict or {}
try:
self.page = int(self.query_dict.get("page", 1))
except ValueError:
self.page = 1
self.filters = []
self._redirect = False
self._deserialize_filters(self.query_dict.get("sf"))
self._grouping_results = None
self._count = None
self._grouping_links = None
# filters configuration
def add_filter(self, filter_class, **filter_kwargs):
"""add a filter"""
self.filters.append(filter_class(len(self.filters), self.query_dict, **filter_kwargs))
def force_filter(self, filter_class, **filter_kwargs):
"""replace an existing filter from the same class or add it"""
found_f = None
for idx, f in enumerate(self.filters):
if isinstance(f, filter_class):
found_f = f
break
if not found_f:
self.add_filter(filter_class, **filter_kwargs)
else:
new_f = filter_class(found_f.idx, self.query_dict, **filter_kwargs)
self.filters = [f if f.idx != found_f.idx else new_f for f in self.filters]
def _deserialize_filters(self, serialized_filters):
try:
serialized_filters = serialized_filters.split("-")
default = False
except Exception:
serialized_filters = []
default = True
self._redirect = True
for filter_class in self.default_filters:
if default or not filter_class.optional or filter_class.query_kwarg in serialized_filters:
self.add_filter(filter_class)
for serialized_filter in serialized_filters:
if serialized_filter.startswith("a."):
attr, value = re.sub(r"^a\.", "", serialized_filter).split(".", 1)
if attr == "n":
self.add_filter(BundleFilter, bundle_name=value)
elif attr == "i":
self.add_filter(BundleFilter, bundle_id=value)
def serialize_filters(self, filter_to_add=None, filter_to_remove=None, include_hidden=False):
return "-".join(f.serialize() for f in chain(self.filters, [filter_to_add])
if f and f.optional and not f == filter_to_remove and (include_hidden or not f.hidden))
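    # Illustrative note (not in the original): serialize_filters() joins the
    # optional filters' serialized forms with "-", and _deserialize_filters()
    # above splits them back. Bundle filters are encoded as "a.n.<bundle name>"
    # or "a.i.<bundle id>", so a hypothetical sf="mbu-t-a.n.Safari" restores the
    # meta business unit and tag filters plus a BundleFilter(bundle_name="Safari").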
def get_url(self):
qd = self.query_dict.copy()
qd["sf"] = self.serialize_filters()
return "?{}".format(urllib.parse.urlencode(qd))
def redirect_url(self):
if self._redirect:
return self.get_url()
def get_canonical_query_dict(self):
# used to serialize the state of the msquery object
# even with forced hidden filter values
# see inventory export
qd = QueryDict(mutable=True)
qd["sf"] = self.serialize_filters(include_hidden=True)
for f in self.filters:
if f.value is not None:
qd[f.serialize()] = f.value
return qd
def get_urlencoded_canonical_query_dict(self):
return self.get_canonical_query_dict().urlencode()
def available_filters(self):
links = []
idx = len(self.filters)
for filter_class in self.default_filters:
for f in self.filters:
if isinstance(f, filter_class):
break
else:
available_filter = filter_class(idx, self.query_dict)
available_filter_qd = self.query_dict.copy()
available_filter_qd["sf"] = self.serialize_filters(filter_to_add=available_filter)
links.append((available_filter.title,
"?{}".format(urllib.parse.urlencode(available_filter_qd))))
idx += 1
return links
# common things for grouping and fetching
def _iter_unique_joins_with_args(self):
unique_joins = OrderedDict()
for f in self.filters:
for join in f.joins():
if isinstance(join, tuple):
join, join_args = join
elif isinstance(join, str):
join_args = []
else:
raise ValueError("invalid join")
if join not in unique_joins:
unique_joins[join] = join_args
elif unique_joins[join] != join_args:
raise ValueError("same join with different args exists")
yield from unique_joins.items()
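    # Illustrative note (not in the original): a filter's joins() may yield
    # either a plain SQL string or a (sql, args) tuple -- e.g. BundleFilter
    # yields ("left join lateral (...) a<idx> on TRUE", [bundle_id_or_name]) --
    # and identical joins are deduplicated above so a table is never joined twice.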
# grouping
def _build_grouping_query_with_args(self):
query = ["select"]
args = []
# expressions
query.append(", ".join(e for f in self.filters for e in f.get_expressions(grouping=True)))
query.append(", count(distinct ms.serial_number)")
# base table
query.append("from inventory_machinesnapshot as ms")
# joins
for join, join_args in self._iter_unique_joins_with_args():
query.append(join)
args.extend(join_args)
# wheres
wheres = []
for f in self.filters:
wheres.extend(f.wheres())
args.extend(f.where_args())
if wheres:
query.append("WHERE")
query.append(" AND ".join(wheres))
# group by sets
grouping_sets = ["({})".format(", ".join(gsi for gsi in f.grouping_set))
for f in self.filters
if f.grouping_set]
grouping_sets.append("()")
query.append("GROUP BY GROUPING SETS ({})".format(", ".join(grouping_sets)))
return "\n".join(query), args
def _make_grouping_query(self):
query, args = self._build_grouping_query_with_args()
cursor = connection.cursor()
cursor.execute(query, args)
columns = [col[0] for col in cursor.description]
results = []
for row in cursor.fetchall():
results.append(dict(zip(columns, row)))
return results
def _get_grouping_results(self):
if self._grouping_results is None:
self._grouping_results = self._make_grouping_query()
return self._grouping_results
def count(self):
if self._count is None:
all_grouping_aliases = [f.grouping_alias for f in self.filters]
for grouping_result in self._get_grouping_results():
if all(grouping_result.get(a, 1) == 1 for a in all_grouping_aliases):
self._count = grouping_result["count"]
break
else:
self._count = 0
return self._count
def grouping_choices(self):
grouping_results = self._get_grouping_results()
for f in self.filters:
if f.hidden:
continue
f_choices = f.grouping_choices_from_grouping_results(grouping_results)
if f_choices:
yield f, f_choices
def grouping_links(self):
if self._grouping_links is None:
self._grouping_links = []
count = self.count()
for f, f_choices in self.grouping_choices():
f_links = []
f_up_links = []
for label, f_count, down_query_dict, up_query_dict in f_choices:
if up_query_dict is not None:
up_link = "?{}".format(urllib.parse.urlencode(up_query_dict))
f_up_links.append(up_link)
down_link = None
else:
up_link = None
down_link = "?{}".format(urllib.parse.urlencode(down_query_dict))
if count > 0:
f_perc = f_count * 100 / count
else:
f_perc = 0
f_links.append((label, f_count, f_perc, down_link, up_link))
f_links.sort(key=lambda t: (t[0] == f.none_value, (t[0] or "").upper()))
if f.optional:
remove_filter_query_dict = self.query_dict.copy()
remove_filter_query_dict.pop("page", None)
remove_filter_query_dict.pop(f.get_query_kwarg(), None)
remove_filter_query_dict["sf"] = self.serialize_filters(filter_to_remove=f)
f_r_link = "?{}".format(urllib.parse.urlencode(remove_filter_query_dict))
else:
f_r_link = None
f_up_link = None
if len(f_up_links) == 1:
f_up_link = f_up_links[0]
elif len(f_up_links) > 1:
# should not happen
logger.warning("More than one uplink for filter %s - %s", f.get_query_kwarg(), self.query_dict)
self._grouping_links.append((f, f_links, f_r_link, f_up_link))
return self._grouping_links
# fetching
def _build_fetching_query_with_args(self, paginate=True):
query = ["select"]
args = []
# expressions
query.append(", ".join(e for f in self.filters for e in f.get_expressions()))
# base table
query.append("from inventory_machinesnapshot as ms")
# joins
for join, join_args in self._iter_unique_joins_with_args():
query.append(join)
args.extend(join_args)
# wheres
wheres = []
for f in self.filters:
wheres.extend(f.wheres())
args.extend(f.where_args())
if wheres:
query.append("WHERE")
query.append(" AND ".join(wheres))
# group bys
group_bys = [gb for gb in (f.get_group_by() for f in self.filters) if gb]
if group_bys:
query.append("GROUP BY {}".format(", ".join(group_bys)))
query = "\n".join(query)
# pagination
if paginate:
limit = max(self.paginate_by, 1)
args.append(limit)
offset = max((self.page - 1) * limit, 0)
args.append(offset)
limit_offset = " limit %s offset %s"
else:
limit_offset = ""
meta_query = (
"select ms.serial_number, json_agg(row_to_json(ms.*)) as machine_snapshots "
"from ({}) ms "
"group by ms.serial_number "
"order by min(ms.computer_name) asc, ms.serial_number asc{}"
).format(query, limit_offset)
return meta_query, args
def _make_fetching_query(self, paginate=True):
query, args = self._build_fetching_query_with_args(paginate)
cursor = connection.cursor()
cursor.execute(query, args)
columns = [col[0] for col in cursor.description]
for rows in iter(lambda: cursor.fetchmany(self.itersize), connection.features.empty_fetchmany_value):
for row in rows:
yield dict(zip(columns, row))
def fetch(self, paginate=True, for_filtering=False):
for record in self._make_fetching_query(paginate):
for machine_snapshot in record["machine_snapshots"]:
for f in self.filters:
f.process_fetched_record(machine_snapshot, for_filtering)
yield record["serial_number"], record["machine_snapshots"]
# export
def export_sheets_data(self):
title = "Machines"
headers = [
"Source ID", "Source",
"SN",
"Meta Business Unit ID",
"Meta Business Unit Name",
"Type", "Platform",
"Name",
"Hardware model",
"OS",
"tags"
]
row_idx = 0
rows = []
include_max_incident_severity = False
for serial_number, machine_snapshots in self.fetch(paginate=False):
for machine_snapshot in machine_snapshots:
if row_idx == 0:
if "max_incident_severity" in machine_snapshot:
include_max_incident_severity = True
headers.extend(["Max incident severity", "Max incident severity display"])
for app_title in machine_snapshot.get("osx_apps", {}):
for suffix in ("min", "max"):
headers.append("{} {}".format(app_title, suffix))
row_idx += 1
system_info = machine_snapshot.get("system_info", {})
meta_business_unit = machine_snapshot.get("meta_business_unit", {})
row = [
machine_snapshot["source"]["id"],
machine_snapshot["source"].get("display_name") or "",
serial_number,
meta_business_unit.get("id") or "",
meta_business_unit.get("name") or "",
machine_snapshot.get("type") or "",
machine_snapshot.get("platform") or "",
system_info.get("computer_name") or "",
system_info.get("hardware_model") or ""
]
os_version = machine_snapshot.get("os_version")
if os_version:
os_version_dn = os_version.get("display_name") or ""
else:
os_version_dn = ""
row.append(os_version_dn)
row.append(
"|".join(dn for dn in (t.get("display_name") for t in machine_snapshot.get("tags", [])) if dn)
)
if include_max_incident_severity:
mis = machine_snapshot.get("max_incident_severity", {})
row.extend([mis.get("value") or "",
mis.get("keyword") or ""])
for _, app_versions in machine_snapshot.get("osx_apps", {}).items():
if app_versions:
min_app_version = app_versions[0]["display_name"]
max_app_version = app_versions[-1]["display_name"]
else:
min_app_version = max_app_version = ""
row.extend([min_app_version, max_app_version])
rows.append(row)
yield title, headers, rows
# aggregations
for f, f_links, _, _ in self.grouping_links():
rows = []
for label, f_count, f_perc, _, _ in f_links:
if label == "\u2400":
label = "NULL"
elif not isinstance(label, str):
label = str(label)
rows.append([label, f_count, f_perc])
yield f.title, ["Value", "Count", "%"], rows
def export_xlsx(self, f_obj):
workbook = xlsxwriter.Workbook(f_obj)
# machines
for title, headers, rows in self.export_sheets_data():
ws = workbook.add_worksheet(title)
row_idx = col_idx = 0
for header in headers:
ws.write_string(row_idx, col_idx, header)
col_idx += 1
for row in rows:
row_idx += 1
col_idx = 0
for value in row:
if isinstance(value, (int, float)):
ws.write_number(row_idx, col_idx, value)
else:
if not isinstance(value, str):
value = str(value)
ws.write_string(row_idx, col_idx, value)
col_idx += 1
workbook.close()
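    # Usage sketch (not in the original; assumes f_obj is a writable binary
    # file object accepted by xlsxwriter, as export_xlsx() above requires):
    #
    #   with open("inventory_export.xlsx", "wb") as f:
    #       MSQuery(query_dict).export_xlsx(f)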
def export_zip(self, f_obj):
with zipfile.ZipFile(f_obj, mode='w', compression=zipfile.ZIP_DEFLATED) as zip_f:
for title, headers, rows in self.export_sheets_data():
tmp_file_fh, tmp_file = tempfile.mkstemp()
with os.fdopen(tmp_file_fh, mode='w', newline='') as csv_f:
w = csv.writer(csv_f)
w.writerow(headers)
for row in rows:
w.writerow(row)
zip_f.write(tmp_file, "{}.csv".format(slugify(title)))
os.unlink(tmp_file)
class BundleFilterForm(forms.Form):
bundle_id = forms.CharField(label="Bundle id", required=False,
widget=forms.TextInput(attrs={"class": "form-control",
"placeholder": "Bundle id"}))
bundle_name = forms.CharField(label="Bundle name", required=False,
widget=forms.TextInput(attrs={"class": "form-control",
"placeholder": "Bundle name"}))
def clean(self):
cleaned_data = super().clean()
bundle_name = cleaned_data.get("bundle_name")
bundle_id = cleaned_data.get("bundle_id")
if bundle_name and bundle_id:
raise forms.ValidationError("Bundle id and bundle name cannot be both specified.")
elif not bundle_name and not bundle_id:
raise forms.ValidationError("Choose a bundle id or a bundle name.")
def osx_app_count():
query = """
select a.bundle_name as name, a.bundle_version_str as version_str,
s.id as source_id, s.module as source_module, foo.count
from (
select ai.app_id, cms.source_id, count(*) as count
from inventory_osxappinstance as ai
join inventory_machinesnapshot_osx_app_instances as msai on (msai.osxappinstance_id = ai.id)
join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = msai.machinesnapshot_id)
group by ai.app_id, cms.source_id
) as foo
join inventory_osxapp as a on (foo.app_id = a.id)
join inventory_source as s on (foo.source_id = s.id)
"""
cursor = connection.cursor()
cursor.execute(query)
columns = [col[0] for col in cursor.description]
for row in cursor.fetchall():
d = dict(zip(columns, row))
d['source'] = '{}#{}'.format(d.pop('source_module'), d.pop('source_id'))
for k, v in d.items():
if k != 'count' and not v:
d[k] = '_'
yield d
def os_version_count():
query = """
select o.name, o.major, o.minor, o.patch, o.build, s.id as source_id, s.module as source_module,
count(*) as count
from inventory_osversion as o
join inventory_machinesnapshot as ms on (ms.os_version_id = o.id)
join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)
join inventory_source as s on (cms.source_id = s.id)
group by o.name, o.major, o.minor, o.patch, o.build, s.id, s.module
"""
cursor = connection.cursor()
cursor.execute(query)
columns = [col[0] for col in cursor.description]
for row in cursor.fetchall():
d = dict(zip(columns, row))
d['source'] = '{}#{}'.format(d.pop('source_module'), d.pop('source_id'))
for k, v in d.items():
if k != 'count' and not v:
d[k] = '_'
yield d
def get_prometheus_inventory_metrics():
registry = CollectorRegistry()
g = Gauge('zentral_inventory_osx_apps', 'Zentral inventory OSX apps',
['name', 'version_str', 'source'],
registry=registry)
for r in osx_app_count():
count = r.pop('count')
g.labels(**r).set(count)
g = Gauge('zentral_inventory_os_versions', 'Zentral inventory OS Versions',
['name', 'major', 'minor', 'patch', 'build', 'source'],
registry=registry)
for r in os_version_count():
count = r.pop('count')
g.labels(**r).set(count)
return registry
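# Usage sketch (not in the original; assumes the registry is rendered by a
# view elsewhere, e.g. with prometheus_client.generate_latest):
#
#   from prometheus_client import generate_latest
#   payload = generate_latest(get_prometheus_inventory_metrics())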
def inventory_events_from_machine_snapshot_commit(machine_snapshot_commit):
source = machine_snapshot_commit.source.serialize()
diff = machine_snapshot_commit.update_diff()
if diff is None:
yield ('inventory_machine_added',
None,
{'source': source,
'machine_snapshot': machine_snapshot_commit.machine_snapshot.serialize()})
yield ('inventory_heartbeat',
machine_snapshot_commit.last_seen,
{'source': source})
return
for m2m_attr, event_type in (('links', 'inventory_link_update'),
('network_interfaces', 'inventory_network_interface_update'),
('osx_app_instances', 'inventory_osx_app_instance_update'),
('deb_packages', 'inventory_deb_package_update'),
('certificates', 'inventory_certificate_update'),
('groups', 'inventory_group_update')):
m2m_diff = diff.get(m2m_attr, {})
for action in ['added', 'removed']:
for obj in m2m_diff.get(action, []):
obj['action'] = action
if 'source' not in obj:
obj['source'] = source
yield (event_type, None, obj)
for fk_attr in ('reference',
'machine',
'business_unit',
'os_version',
'system_info',
'teamviewer',
'puppet_node',
'principal_user'):
event_type = 'inventory_{}_update'.format(fk_attr)
fk_diff = diff.get(fk_attr, {})
for action in ['added', 'removed']:
obj = fk_diff.get(action, None)
if obj:
if isinstance(obj, dict):
event = obj
if 'source' not in obj:
event['source'] = source
else:
event = {'source': source,
fk_attr: obj}
event['action'] = action
yield (event_type, None, event)
added_last_seen = diff.get("last_seen", {}).get("added")
if added_last_seen:
yield ("inventory_heartbeat",
added_last_seen,
{'source': source})
def commit_machine_snapshot_and_trigger_events(tree):
try:
machine_snapshot_commit, machine_snapshot = MachineSnapshotCommit.objects.commit_machine_snapshot_tree(tree)
except Exception:
logger.exception("Could not commit machine snapshot")
save_dead_letter(tree, "machine snapshot commit error")
else:
if machine_snapshot_commit:
post_inventory_events(machine_snapshot_commit.serial_number,
inventory_events_from_machine_snapshot_commit(machine_snapshot_commit))
return machine_snapshot
def verify_enrollment_secret(model, secret,
user_agent, public_ip_address,
serial_number=None, udid=None,
meta_business_unit=None,
**kwargs):
try:
request = EnrollmentSecret.objects.verify(model, secret,
user_agent, public_ip_address,
serial_number, udid,
meta_business_unit,
**kwargs)
except EnrollmentSecretVerificationFailed as e:
post_enrollment_secret_verification_failure(model,
user_agent, public_ip_address, serial_number,
e.err_msg, e.enrollment_secret)
raise
else:
post_enrollment_secret_verification_success(request, model)
return request
|
[
"eric.falconnier@112hz.com"
] |
eric.falconnier@112hz.com
|
edd9d190611d86c93ad2a0c1bd2e9ba947c8e046
|
52c5b78f3afab4573926dd6d0a49e10ee1a77e26
|
/project_4/app1/migrations/0001_initial.py
|
31865ddbc05d99f2596e900e8cbe2b3c4fa2036f
|
[] |
no_license
|
zime-py/eight
|
d9eefc28a00a8411f3a58b0e931807492bc5bfc2
|
2138b2a8884dea299654ff7c41060c72f183486c
|
refs/heads/master
| 2023-01-11T23:03:53.062441
| 2020-11-14T14:43:04
| 2020-11-14T14:43:04
| 312,831,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Generated by Django 3.1.1 on 2020-09-05 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='cool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('roll', models.IntegerField()),
],
),
]
|
[
"mahmudhossain836@gmail.com"
] |
mahmudhossain836@gmail.com
|
3bfe8561268459da6f00fece955b503d03c776ef
|
8ae8c4ab4ec7d33d31b55d4678e5e40d555ee24e
|
/node.py
|
115960529e2c6188bb926873641d0f2588a03382
|
[] |
no_license
|
firefirer1983/python_programing
|
23229b2ae201310752dd919d3757717c96473662
|
dfe49b9cace0639e49a9e67295e3d76110103103
|
refs/heads/master
| 2020-05-30T17:33:59.099549
| 2019-06-04T12:40:44
| 2019-06-04T12:40:44
| 189,877,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
from collections import defaultdict
class Node:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def __str__(self):
return self._name
class Edge:
def __init__(self, src, dst):
self._src = src
self._dst = dst
@property
def source(self):
return self._src
@property
def destination(self):
return self._dst
def __str__(self):
return "%s -> %s" % (self.source, self.destination)
class WeightEdge(Edge):
def __init__(self, src, dst, weight):
super().__init__(src, dst)
self._weight = weight
@property
def weight(self):
return self._weight
def __str__(self):
return "%s -> %s [weight:%r]" % (self.source, self.destination, self.weight)
class Digraph:
def __init__(self):
self._nodes = list()
self._edges = defaultdict(lambda: list())
def add_node(self, node):
if node in self._nodes:
print("%s already in list" % node)
return
self._nodes.append(node)
def add_edge(self, edge):
if edge.source not in self._nodes and edge.destination not in self._nodes:
print("edge %s is invalid" % edge)
print("+ %s -> %s" % (edge.source, edge.destination))
self._edges[edge.source].append(edge.destination)
def children_of(self, node):
return self._edges[node]
def has_node(self, node):
return node in self._nodes
def __str__(self):
res = ""
for s in self._nodes:
for d in self._edges[s]:
res += "%s -> %s\n" % (s, d)
return res
def __iter__(self):
yield from self._nodes
class Graph(Digraph):
def add_edge(self, edge):
super().add_edge(edge)
super().add_edge(Edge(edge.destination, edge.source))
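# Minimal usage sketch (not part of the original file): build a small
# directed graph and list the children of a node.
if __name__ == "__main__":
    a, b, c = Node("a"), Node("b"), Node("c")
    g = Digraph()
    for n in (a, b, c):
        g.add_node(n)
    g.add_edge(Edge(a, b))
    g.add_edge(Edge(a, c))
    print(g)                                   # prints "a -> b" and "a -> c"
    print([str(n) for n in g.children_of(a)])  # ['b', 'c']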
|
[
"fyman.zhang@gmail.com"
] |
fyman.zhang@gmail.com
|
731c60ac11e13721e6a93743ada9af4811db31aa
|
3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a
|
/ceres/util/block_cache.py
|
54f514b4d7f2add3e596a58e06bcca9279fe65af
|
[
"Apache-2.0"
] |
permissive
|
signingup/ceres-combineharvester
|
a8874ab11145e7ba2223b85483b96dea01054ad0
|
aad918a03a4a522e0e2f3bac104d19d693d6bf79
|
refs/heads/main
| 2023-07-25T04:11:13.765471
| 2021-09-09T14:59:48
| 2021-09-09T14:59:48
| 404,918,382
| 1
| 0
|
Apache-2.0
| 2021-09-10T01:22:20
| 2021-09-10T01:22:20
| null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
import logging
from typing import Dict, List, Optional
from ceres.consensus.block_record import BlockRecord
from ceres.consensus.blockchain_interface import BlockchainInterface
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from ceres.types.header_block import HeaderBlock
from ceres.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from ceres.util.ints import uint32
class BlockCache(BlockchainInterface):
def __init__(
self,
blocks: Dict[bytes32, BlockRecord],
headers: Dict[bytes32, HeaderBlock] = None,
height_to_hash: Dict[uint32, bytes32] = None,
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = None,
):
if sub_epoch_summaries is None:
sub_epoch_summaries = {}
if height_to_hash is None:
height_to_hash = {}
if headers is None:
headers = {}
self._block_records = blocks
self._headers = headers
self._height_to_hash = height_to_hash
self._sub_epoch_summaries = sub_epoch_summaries
self._sub_epoch_segments: Dict[uint32, SubEpochSegments] = {}
self.log = logging.getLogger(__name__)
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self._block_records[header_hash]
def height_to_block_record(self, height: uint32, check_db: bool = False) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self._sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self._sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
if height not in self._height_to_hash:
self.log.warning(f"could not find height in cache {height}")
return None
return self._height_to_hash[height]
def contains_block(self, header_hash: bytes32) -> bool:
return header_hash in self._block_records
def contains_height(self, height: uint32) -> bool:
return height in self._height_to_hash
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return self._block_records
async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
block_records: List[BlockRecord] = []
for height in heights:
block_records.append(self.height_to_block_record(height))
return block_records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
return self._block_records[header_hash]
def remove_block_record(self, header_hash: bytes32):
del self._block_records[header_hash]
def add_block_record(self, block: BlockRecord):
self._block_records[block.header_hash] = block
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
return self._headers
async def persist_sub_epoch_challenge_segments(
self, sub_epoch_summary_height: uint32, segments: List[SubEpochChallengeSegment]
):
self._sub_epoch_segments[sub_epoch_summary_height] = SubEpochSegments(segments)
async def get_sub_epoch_challenge_segments(
self,
sub_epoch_summary_height: uint32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments = self._sub_epoch_segments.get(sub_epoch_summary_height)
if segments is None:
return None
return segments.challenge_segments
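# Minimal usage sketch (not in the original file; BlockRecord/HeaderBlock
# instances would come from elsewhere):
#
#   cache = BlockCache(blocks={})
#   cache.contains_height(uint32(0))  # False on an empty cache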
|
[
"hulatang_eric@163.com"
] |
hulatang_eric@163.com
|
2a26001d443cb6b58b8139b330be87998641c886
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/21. Merge Two Sorted Lists/solution2.py
|
c0c75749ec0249059072e52b316690eeb23bd917
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
if not l1 and not l2: return None
if not l1: return l2
if not l2: return l1
dummy = curr = ListNode()
while l1 and l2:
if l1.val > l2.val:
curr.next = ListNode(l2.val)
curr = curr.next
l2 = l2.next
else:
curr.next = ListNode(l1.val)
curr = curr.next
l1 = l1.next
while l1:
curr.next = ListNode(l1.val)
curr = curr.next
l1 = l1.next
while l2:
curr.next = ListNode(l2.val)
curr = curr.next
l2 = l2.next
return dummy.next
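# Usage sketch (not in the original; `build` is a hypothetical helper based on
# the commented-out ListNode definition above):
#
#   def build(values):
#       dummy = curr = ListNode()
#       for v in values:
#           curr.next = ListNode(v)
#           curr = curr.next
#       return dummy.next
#
#   merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
#   # merged: 1 -> 1 -> 2 -> 3 -> 4 -> 4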
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
0cbca552ac46d5ff810627de47fd32c725cb9f9b
|
7738e950c103fb23b48d5e004eddcf108ea71fa1
|
/Cursoemvideo/Mundo3/exercise106.py
|
ee09831fdf4985b1abe94c9121e775d495431ca2
|
[] |
no_license
|
pedrottoni/Studies-Python
|
c9bdbaf4b4aaa209bf32aa93d6ee4814a0a39c53
|
f195bcb4c6868689ec0cf05c34cd4d5a6c7b3ea1
|
refs/heads/master
| 2021-09-26T05:23:02.398552
| 2020-02-12T04:48:23
| 2020-02-12T04:48:23
| 203,452,221
| 0
| 0
| null | 2021-09-22T18:21:51
| 2019-08-20T20:48:07
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
"""
Build a mini-system that uses Python's Interactive Help. The user types a command and its manual is shown. When the user types the word 'FIM', the program ends. Important: use colors.
"""
colors = (
'\033[m', # Default
'\033[0;30;41m', # red
'\033[0;30;42m', # green
'\033[0;30;43m', # yellow
'\033[0;30;44m' # blue
)
def custom_help(py_command):
print(colors[2])
help(py_command)
print(colors[0])
def title(msg, color):
print(colors[color])
print(f'\n {msg} \n')
def interface():
py_command = input('Digite um comando: ').lower()
title('Sistema de ajuda', 4)
while py_command != 'fim':
custom_help(py_command)
py_command = input('Digite um comando: ').lower()
title('Fim do dia', 1)
print(colors[0])
interface()
|
[
"pedrottoni@outlook.com"
] |
pedrottoni@outlook.com
|
ec06410ce2adc7455ba1077d79d2470120d5230a
|
fb0e82ab4b4d15965cce2396fd9ae31ed2de1080
|
/file2.py
|
7b8e4b69230dec1d4849187abac9ca76554623e8
|
[] |
no_license
|
flerchy/My-1-PyProj
|
477a1ed212d2f4721b1048f43033f1803eda7302
|
e95973dd424d2bb3557d5475501dafdc3fa46317
|
refs/heads/master
| 2020-12-03T04:10:09.120252
| 2017-06-29T22:55:23
| 2017-06-29T22:55:23
| 95,823,795
| 0
| 2
| null | 2017-08-24T11:05:43
| 2017-06-29T22:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 847
|
py
|
#import math
class vect:
x = 0
y = 0
def Multiply(self, v):
v2 = vect(0, 0)
v2.x = v.x * self.x
v2.y = v.y * self.y
return v2
def __init__(self, x, y):
self.x = x
self.y = y
class dot:
x = 0
y = 0
def Add(self, d):
d2 = dot(0, 0)
d2.x = d.x + self.x
d2.y = d.y + self.y
return d2
def __init__(self, x, y):
self.x = x
self.y = y
def VectorOperations():
a = vect(2, 4)
b = vect(5, 2)
return a.Multiply(b)
def DotOperations():
a = dot(2, 4)
b = dot(5, 2)
return a.Add(b)
def Main():
res = VectorOperations()
print(res.x, res.y)
res = DotOperations()
print(res.x, res.y)
return 0
if __name__ == "__main__":
Main()
|
[
"flerchy@gmail.com"
] |
flerchy@gmail.com
|
45bca23a6ef9d24fca6aae8566d120036237ddfb
|
e754fd34d40b41cd56adc947309832574094e0b6
|
/jiajun_experiment/cifar10_experiment/cifar10.py
|
0cc9f2f34dea255f2751c7d028bd33e60afa1d25
|
[] |
no_license
|
yaliamit/Python
|
7d071fe76eba14c78540b5008d616080bca78ed9
|
d0e441212a9f86a91723a99f8bfc89d245992a2e
|
refs/heads/master
| 2021-01-11T05:01:07.293444
| 2020-05-28T01:27:20
| 2020-05-28T01:27:20
| 71,490,136
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,688
|
py
|
import lasagne
import numpy as np
import theano.tensor as T
import theano
import os, sys, gzip
from six.moves import urllib
import tarfile
import pickle
import cifar10_input
from lasagne.layers import LocalResponseNormalization2DLayer, DenseLayer, Conv2DLayer, MaxPool2DLayer, InputLayer, DimshuffleLayer, BatchNormLayer
from lasagne.regularization import regularize_layer_params_weighted, l2
#from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
#from lasagne.layers.dnn import MaxPool2DDNNLayer as MaxPool2DLayer
# Basic model parameters.
#tf.app.flags.DEFINE_integer('batch_size', 128,
# """Number of images to process in a batch.""")
#tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
# """Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def build_cnn(input_var=None):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
input_layer = InputLayer((None, 3, IMAGE_SIZE, IMAGE_SIZE), input_var=input_var)
norm0 = BatchNormLayer(input_layer)
# conv1
conv1 = Conv2DLayer(norm0, num_filters=64, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="conv1")
conv1a = Conv2DLayer(conv1, num_filters=64, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="conv1a")
pool1 = MaxPool2DLayer(conv1a, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm1 = LocalResponseNormalization2DLayer(pool1, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm1 = BatchNormLayer(pool1)
# pool1
# conv2
conv2 = Conv2DLayer(lasagne.layers.dropout(norm1, p = 0.5),
num_filters=128, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv2')
conv2a = Conv2DLayer(conv2,
num_filters=128, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv2a')
pool2 = MaxPool2DLayer(conv2a, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm2 = LocalResponseNormalization2DLayer(pool2, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm2 = BatchNormLayer(pool2)
# pool2
conv3 = Conv2DLayer(lasagne.layers.dropout(norm2, p = 0.5),
num_filters=256, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv3')
pool3 = MaxPool2DLayer(conv3, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm3 = LocalResponseNormalization2DLayer(pool3, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm3 = BatchNormLayer(pool3)
# fc1
fc1 = DenseLayer(lasagne.layers.dropout(norm3, p = 0.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.1),
name="fc1")
# fc3
softmax_layer = DenseLayer(lasagne.layers.dropout(fc1, p = 0.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax,
W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="softmax")
# Weight Decay
weight_decay_layers = {fc1: 0.0}
l2_penalty = regularize_layer_params_weighted(weight_decay_layers, l2)
return softmax_layer, l2_penalty
|
[
"shenjiajun90@gmail.com"
] |
shenjiajun90@gmail.com
|
165cab5c40f49ff674b4d822d6ce98cf4a91d964
|
ed8cdcce521b8cab33c66f716c0886e17f035d21
|
/.history/script/get_cpu_mem_info_20191222113543.py
|
a4ddcbbcc1043433a45c3289e4fc9c9a263e6935
|
[] |
no_license
|
deancsdfy/AndroidPerformanceTool_windows
|
8ac35729bc651c3af551f090d6788b6ee3f17eb5
|
c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f
|
refs/heads/master
| 2020-11-27T20:38:55.014228
| 2020-01-09T15:55:52
| 2020-01-09T15:55:52
| 229,593,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
#! python3
#coding=utf-8
import sys,os,re
print(sys.path)
sys.path.append('.')
from public import publicfunction as util
PATH = lambda p: os.path.abspath(p)
# Get the current app's package name
package_name = util.get_current_packagename()
# print('App under test: %s' % (package_name))
# Get mem/cpu usage
def top():
print('Starting get mem cpu information...')
pid=get_pid()
print(pid)
top_info = util.shell("top -n 1 | grep %d" %(int(pid))).stdout.readlines()
for x in top_info:
temp_list = x.split()
#print(temp_list[8])
cpu=float(temp_list[8])
#cpu.append(float(temp_list[8]))
#print(temp_list[9])
mem=float(temp_list[9])
#mem.append(float(temp_list[9]))
print(cpu)
print(mem)
return (cpu,mem)
def getCpuNums():
num_info = util.shell('cat /proc/cpuinfo|grep processor').stdout.readlines()
# print("cpu nums is %d" %(len(num_info)))
return len(num_info)
def getCpuInfo():
# print('Starting get mem cpu information...')
pid = get_pid()
print(pid)
cpunums=getCpuNums()
top_info = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
if(len(top_info)!=0):
for x in top_info:
temp_list = x.split()
# print(temp_list[8])
if(temp_list[8]!=" "):
cpu = round(float(temp_list[8])/cpunums,2)
# print(cpu)
else:
cpu = 0.0
return cpu
else:
return 0.0
def getMemInfo():
# print('start get mem information....')
pid=get_pid()
# print(pid)
mem_info = util.shell('dumpsys meminfo %d |grep TOTAL:' %(int(pid))).stdout.readlines()
for x in mem_info:
temp_list = x.split()
mem=round(float(temp_list[1])/1024,1)
# print(mem)
return mem
# Get the device model, used as the spreadsheet sheet name
def getDevicesName():
devicesName = str(util.shell('getprop ro.product.model').stdout.read())
return devicesName
# Get the pid
def get_pid():
# Regex-match the pid out of the package and activity info
pattern = re.compile(r"[a-zA-Z0-9\.]+=.[0-9\.]+")
package = util.shell('dumpsys activity top| grep ACTIVITY').stdout.read()
pid = pattern.findall(package.decode())[-1].split('=')[1]
# pid_info = util.shell('ps| grep %s' %(package_name)).stdout.readlines()
# print(pid_info)
# pid = pid_info[0].split()[1]
# print('pid: %s' % (pid))
return pid
# Get the uid
def get_uid():
cmd = 'cat /proc/'+ get_pid() + '/status'
uid_info = util.shell(cmd).stdout.readlines()
uid = uid_info[6].split()[1]
print('uid: %s' % (uid))
return str(uid)
# Upload traffic; not usable for now, need to find another way to get upstream traffic
def get_flow_send():
cmd = '"cat proc/net/xt_qtaguid/stats|grep '+'%s"'%get_uid()
print(cmd)
flow = util.shell(cmd).stdout.readlines()
print(flow)
if __name__ == "__main__":
print("Starting get top information...")
#get_flow_send()
#top()
getCpuInfo()
getMemInfo()
|
[
"denacsdfy@gmail.com"
] |
denacsdfy@gmail.com
|
850d09c0f348c17d5cf4d4f63e9c595609e66659
|
8364e4d23191ee535c163debffafa8418d705843
|
/test/test_v1beta1_cron_job_status.py
|
0240f917290f60dc3c62a8ed58c7e06548496912
|
[
"Apache-2.0"
] |
permissive
|
olitheolix/aiokubernetes
|
2bb6499030e2e6e9b7ca0db63c4441293d70a09b
|
266718b210dff2a9b2212183261ea89adf89115e
|
refs/heads/master
| 2020-03-21T23:02:30.484410
| 2018-10-20T19:33:01
| 2018-10-22T05:52:42
| 139,162,905
| 28
| 3
|
Apache-2.0
| 2018-10-22T05:52:51
| 2018-06-29T15:02:59
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import aiokubernetes
from aiokubernetes.models.v1beta1_cron_job_status import V1beta1CronJobStatus # noqa: E501
from aiokubernetes.rest import ApiException
class TestV1beta1CronJobStatus(unittest.TestCase):
"""V1beta1CronJobStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CronJobStatus(self):
"""Test V1beta1CronJobStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = aiokubernetes.models.v1beta1_cron_job_status.V1beta1CronJobStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"olitheolix@gmail.com"
] |
olitheolix@gmail.com
|
140c36e514ac1e06b410d5e548d03c864a0c432c
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/wicd/rev519-537/wicdMerge/wicd/backend.py
|
2cd969a4f2227e3f956a033686a6a1581ac2fd22
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,990
|
py
|
""" Backend manager for wicd.
Manages and loads the pluggable backends for wicd.
"""
import sys
import os
import wicd.wpath as wpath
from baseinterface import BaseInterface
class BackendManager(object):
def __init__(self):
""" Initialize the backend manager. """
self.backend_dir = "backends"
self.__loaded_backends = {}
def _valid_backend(self, be_dir):
""" Make sure the backend is valid. """
access = os.access(be_dir, os.F_OK)
isdir = os.path.isdir(be_dir)
starts_with_be = os.path.basename(be_dir).startswith('be-')
return access and isdir and starts_with_be
def get_loaded_backends(self):
if self.__loaded_backends:
return self.__loaded_backends
else:
return None
def get_backend_by_type(self, type):
return self.__loaded_backends[type]
def get_available_backend_modules(self):
""" Returns a list of all valid backends in the backend directory. """
be_list = []
for f in os.listdir(self.backend_dir):
if self._valid_backend(os.path.join(self.backend_dir, f)):
be_list.append(f[3:])
return be_list
def load_all_available_backends(self):
for backend in self.get_available_backend_modules():
print 'loading backend',backend
self.load_backend(backend)
def load_backend(self, backend_name):
""" Load and return a backend module.
Given a backend name be-foo, attempt to load a python module
in the backends directory called be-foo. The module must
include a certain set of classes and variables to be considered
valid.
"""
def fail(backend_name, reason):
print "failed to load backend %s: %s" % (backend_name, reason)
print 'trying to load backend %s' % backend_name
backend_path = os.path.join(self.backend_dir, 'be-' + backend_name)
if self._valid_backend(backend_path):
sys.path.insert(0, self.backend_dir)
backend = __import__('be-' + backend_name)
else:
fail(backend_name, 'invalid backend file.')
return None
new_backends = [ i for i in dir(backend.interface) if i.startswith('Backend') ]
for backend_class_name in new_backends:
backend_class = getattr(backend.interface, backend_class_name)
if issubclass(backend_class, BaseInterface):
self.__loaded_backends[backend_class.get_type()] = backend_class
print 'successfully loaded backend %s' % backend_class.__name__
else:
fail(backend_class, 'does not subclass BaseInterface')
if __name__ == "__main__":
print "main"
be = BackendManager()
print be.get_available_backend_modules()
be.load_all_available_backends()
print be.get_loaded_backends()
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
7b3a80399f925636af6425245743c95bcadcf42d
|
6a730375ce4b5af3d832f9b81a9164edd2cd2880
|
/igdb_match_names.py
|
2bc32d2990ffdbb6ecb7a6422fef92ee02bc1d57
|
[
"MIT"
] |
permissive
|
woctezuma/metacouncil-goty
|
b20fc76e8e8c4b15c159663b6181db0c31045682
|
bbbc718b2ae171d92bfb360527e0d6f41c332594
|
refs/heads/master
| 2023-09-04T09:50:35.309976
| 2023-02-05T18:48:57
| 2023-02-05T18:48:57
| 163,838,387
| 1
| 0
|
MIT
| 2023-09-14T17:51:15
| 2019-01-02T12:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 20,395
|
py
|
import copy
import time
from disqualify_vote import is_a_noisy_vote
from extend_igdb import extend_both_igdb_databases, extend_igdb_match_database
from igdb_databases import (
load_igdb_local_database,
load_igdb_match_database,
save_igdb_local_database,
save_igdb_match_database,
)
from igdb_look_up import look_up_game_name, wait_for_cooldown
from igdb_utils import get_pc_platform_no, get_pc_platform_range, get_steam_service_no
from load_ballots import load_ballots
def get_link_to_igdb_website(igdb_id, igdb_local_database, hide_dummy_app_id=True):
igdb_base_url = 'https://www.igdb.com/games/'
igdb_id_as_str = str(igdb_id)
igdb_data = igdb_local_database[igdb_id_as_str]
slug = igdb_data['slug']
if int(igdb_id) > 0:
link_to_store = (
'[URL=' + igdb_base_url + slug + '/]' + igdb_id_as_str + '[/URL]'
)
else:
if hide_dummy_app_id:
link_to_store = 'n/a'
else:
link_to_store = igdb_id_as_str
return link_to_store
def get_igdb_human_release_dates(igdb_id, igdb_local_database):
igdb_id_as_str = str(igdb_id)
igdb_data = igdb_local_database[igdb_id_as_str]
try:
human_release_dates = {
date['human']
for date in igdb_data['release_dates']
if 'human' in date and (date['platform'] in get_pc_platform_range())
}
except KeyError:
# Unknown release date
human_release_dates = [None]
if len(human_release_dates) > 0:
human_release_date_to_remember = max(human_release_dates)
else:
human_release_date_to_remember = None
return human_release_dates, human_release_date_to_remember
def get_igdb_release_years(igdb_data, target_release_year=None):
try:
release_years = {
date['y']
for date in igdb_data['release_dates']
if 'y' in date and (date['platform'] in get_pc_platform_range())
}
except KeyError:
# Unknown release date
release_years = [None]
year_to_remember = -1
if target_release_year is not None:
target_release_year_as_int = int(target_release_year)
if len(release_years) > 0:
if target_release_year_as_int in release_years:
year_to_remember = target_release_year_as_int
else:
the_most_recent_date = max(release_years)
year_to_remember = the_most_recent_date
if year_to_remember is None:
year_to_remember = -1
return release_years, year_to_remember
def format_game_name_for_igdb(raw_name, verbose=True):
formatted_game_name_for_igdb = raw_name
for character in ['®', '~', '\'', ': ', ' - ', '!', '™', ' / ']:
formatted_game_name_for_igdb = formatted_game_name_for_igdb.replace(
character,
' ',
)
formatted_game_name_for_igdb = formatted_game_name_for_igdb.strip()
if verbose:
print(
'Game name is reformatted from {} to {}'.format(
raw_name,
formatted_game_name_for_igdb,
),
)
return formatted_game_name_for_igdb
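# Illustrative example (title chosen for this sketch):
# 'Divinity: Original Sin 2 - Definitive Edition' is reformatted to
# 'Divinity Original Sin 2 Definitive Edition', since ': ' and ' - ' are each
# replaced with a single space.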
def match_names_with_igdb(
raw_votes,
release_year=None,
must_be_available_on_pc=True,
must_be_a_game=True,
goty_field='goty_preferences',
year_constraint='equality',
verbose=True,
):
seen_game_names = set()
igdb_match_database = {}
igdb_local_database = {}
num_requests = 0
start_time = time.time()
for voter in raw_votes:
for raw_name in raw_votes[voter][goty_field].values():
if raw_name not in seen_game_names:
seen_game_names.add(raw_name)
if not is_a_noisy_vote(raw_name):
formatted_game_name_for_igdb = format_game_name_for_igdb(raw_name)
igdb_matches = look_up_game_name(
game_name=formatted_game_name_for_igdb,
enforced_year=release_year,
must_be_available_on_pc=must_be_available_on_pc,
must_be_a_game=must_be_a_game,
year_constraint=year_constraint,
)
num_requests += 1
start_time = wait_for_cooldown(
num_requests=num_requests,
start_time=start_time,
)
try:
igdb_best_match = igdb_matches[0]
except IndexError:
print(f'Relaxing the year constraint for {raw_name}')
igdb_matches = look_up_game_name(
game_name=formatted_game_name_for_igdb,
enforced_year=None,
must_be_available_on_pc=must_be_available_on_pc,
must_be_a_game=must_be_a_game,
)
num_requests += 1
start_time = wait_for_cooldown(
num_requests=num_requests,
start_time=start_time,
)
try:
igdb_best_match = igdb_matches[0]
except IndexError:
print(
'Relaxing all of the constraints for {}'.format(
raw_name,
),
)
igdb_matches = look_up_game_name(
game_name=formatted_game_name_for_igdb,
enforced_year=None,
must_be_available_on_pc=False,
must_be_a_game=False,
)
num_requests += 1
start_time = wait_for_cooldown(
num_requests=num_requests,
start_time=start_time,
)
igdb_matched_ids = []
for element in igdb_matches:
igdb_id = element['id']
igdb_data = element
igdb_matched_ids.append(igdb_id)
                        # Use str keys so fresh results merge cleanly with databases loaded from JSON,
                        # whose keys are strings.
                        igdb_local_database[str(igdb_id)] = igdb_data
                    # Caveat: for now, the matches returned by match_names_with_igdb() do not have the same
                    # structure as the matches returned by precompute_matches(). cf. transform_structure_of_matches()
                    igdb_match_database[raw_name] = igdb_matched_ids
if verbose:
recently_matched_game_names = sorted(
[name for name in seen_game_names if not is_a_noisy_vote(name)],
)
if len(recently_matched_game_names) > 0:
s = [f'{i+1}) {name}' for i, name in enumerate(recently_matched_game_names)]
print('[Changelog]\n{}\n'.format('\n'.join(s)))
return igdb_match_database, igdb_local_database
def print_igdb_matches(
igdb_match_database,
igdb_local_database,
constrained_release_year=None,
year_constraint='equality',
):
sorted_input_names = sorted(igdb_match_database.keys())
for raw_name in sorted_input_names:
igdb_matched_ids = igdb_match_database[raw_name]
try:
igdb_best_matched_id = igdb_matched_ids[0]
except IndexError:
igdb_best_matched_id = None
if igdb_best_matched_id is not None:
igdb_data = igdb_local_database[str(igdb_best_matched_id)]
release_years, year_to_remember = get_igdb_release_years(
igdb_data,
target_release_year=constrained_release_year,
)
if len(release_years) > 1:
displayed_release_years = sorted(release_years)
print(f'[!]\tSeveral release years are found for {raw_name}.')
else:
try:
displayed_release_years = list(release_years)[0]
except IndexError:
displayed_release_years = None
if constrained_release_year is not None:
cleaned_release_years = [
int(year) for year in release_years if year is not None
]
if year_constraint == 'equality':
constraint_is_okay = any(
year == int(constrained_release_year)
for year in cleaned_release_years
)
elif year_constraint == 'minimum':
constraint_is_okay = any(
year >= int(constrained_release_year)
for year in cleaned_release_years
)
elif year_constraint == 'maximum':
constraint_is_okay = any(
year <= int(constrained_release_year)
for year in cleaned_release_years
)
else:
# There is an issue if a constrained release year is provided without a valid type of constraint.
constraint_is_okay = False
if not constraint_is_okay:
print(
'[!]\tRelease year(s) ({}) do not match the ballot year ({}, constraint:{}) for {}.'.format(
displayed_release_years,
constrained_release_year,
year_constraint,
raw_name,
),
)
print(
'\t{} ---> IGDB id: {}\t;\t{} ({})'.format(
raw_name,
igdb_data['id'],
igdb_data['name'],
displayed_release_years,
),
)
else:
print(f'[X]\t{raw_name}')
return
def merge_databases(new_database, previous_database):
merged_database = new_database
for element in previous_database:
if element not in merged_database:
merged_database[element] = previous_database[element]
return merged_database
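# NB: on key collisions, the entry from new_database wins, because
# previous_database only fills in the keys that are missing. Toy example:
#   merge_databases({'a': 1}, {'a': 0, 'b': 2}) == {'a': 1, 'b': 2}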
def download_igdb_local_databases(
ballots,
release_year=None,
apply_hard_coded_extension_and_fixes=True,
extend_previous_databases=True,
must_be_available_on_pc=True,
must_be_a_game=True,
goty_field='goty_preferences',
year_constraint='equality',
verbose=True,
):
igdb_match_database, igdb_local_database = match_names_with_igdb(
ballots,
release_year=release_year,
must_be_available_on_pc=must_be_available_on_pc,
must_be_a_game=must_be_a_game,
goty_field=goty_field,
year_constraint=year_constraint,
)
    # Merge with the previous databases, if any were previously saved to disk for this release year
if extend_previous_databases:
try:
previous_igdb_match_database = load_igdb_match_database(
release_year=release_year,
)
except FileNotFoundError:
previous_igdb_match_database = {}
try:
previous_igdb_local_database = load_igdb_local_database(
release_year=release_year,
)
except FileNotFoundError:
previous_igdb_local_database = {}
igdb_match_database = merge_databases(
igdb_match_database,
previous_database=previous_igdb_match_database,
)
igdb_local_database = merge_databases(
igdb_local_database,
previous_database=previous_igdb_local_database,
)
# Save data before applying any hard-coded change
num_queries = 0
for voter_name in ballots:
for _game_position, game_name in ballots[voter_name][goty_field].items():
if not is_a_noisy_vote(game_name):
num_queries += 1
    save_to_disk = num_queries > 0
if save_to_disk:
save_igdb_match_database(data=igdb_match_database, release_year=release_year)
save_igdb_local_database(data=igdb_local_database, release_year=release_year)
# Apply hard-coded changes: i) database extension and ii) fixes to name matching
if apply_hard_coded_extension_and_fixes:
igdb_match_database, igdb_local_database = extend_both_igdb_databases(
release_year=release_year,
igdb_match_database=igdb_match_database,
igdb_local_database=igdb_local_database,
verbose=verbose,
)
return igdb_match_database, igdb_local_database
def figure_out_ballots_with_missing_data(
ballots,
igdb_match_database=None,
release_year=None,
goty_field='goty_preferences',
verbose=False,
):
# The extended match database is loaded so that there is no IGDB query for games which are already manually matched.
# This means that we could work in offline mode once the manual matches cover all the empty results of IGDB queries.
#
    # If you want to try again to automatically match these games, back up and delete the manual fixes to the match database.
extended_igdb_match_database = extend_igdb_match_database(
release_year=release_year,
igdb_match_database=igdb_match_database,
verbose=verbose,
)
# Reference: https://stackoverflow.com/a/5105554
new_ballots = copy.deepcopy(ballots)
for voter_name in new_ballots:
for game_position, game_name in new_ballots[voter_name][goty_field].items():
if (
game_name in extended_igdb_match_database
and len(extended_igdb_match_database[game_name]) > 0
):
new_ballots[voter_name][goty_field][game_position] = ''
return new_ballots
def download_igdb_data_for_ballots_with_missing_data(
new_ballots,
release_year=None,
apply_hard_coded_extension_and_fixes=True,
must_be_available_on_pc=True,
must_be_a_game=True,
goty_field='goty_preferences',
year_constraint='equality',
verbose=False,
):
    # Caveat: it is mandatory to set 'extend_previous_databases' to True if you want to:
    # - first download data for the new ballots,
    # - then merge the result with the databases stored on disk for the previously seen ballots.
    # Otherwise, you would obtain incomplete databases (covering only the new ballots) and overwrite the stored
    # databases, likely losing progress in the process.
extend_previous_databases = True
igdb_match_database, igdb_local_database = download_igdb_local_databases(
new_ballots,
release_year=release_year,
apply_hard_coded_extension_and_fixes=apply_hard_coded_extension_and_fixes,
extend_previous_databases=extend_previous_databases,
must_be_available_on_pc=must_be_available_on_pc,
must_be_a_game=must_be_a_game,
goty_field=goty_field,
year_constraint=year_constraint,
verbose=verbose,
)
return igdb_match_database, igdb_local_database
def load_igdb_local_databases(
ballots,
release_year=None,
apply_hard_coded_extension_and_fixes=True,
must_be_available_on_pc=True,
must_be_a_game=True,
goty_field='goty_preferences',
year_constraint='equality',
verbose=False,
):
try:
igdb_match_database = load_igdb_match_database(release_year=release_year)
except FileNotFoundError:
igdb_match_database = {}
# Download missing data for some ballots
new_ballots = figure_out_ballots_with_missing_data(
ballots=ballots,
igdb_match_database=igdb_match_database,
release_year=release_year,
goty_field=goty_field,
verbose=verbose,
)
(
igdb_match_database,
igdb_local_database,
) = download_igdb_data_for_ballots_with_missing_data(
new_ballots=new_ballots,
release_year=release_year,
apply_hard_coded_extension_and_fixes=apply_hard_coded_extension_and_fixes,
must_be_available_on_pc=must_be_available_on_pc,
must_be_a_game=must_be_a_game,
goty_field=goty_field,
year_constraint=year_constraint,
verbose=verbose,
)
# Apply hard-coded changes: i) database extension and ii) fixes to name matching
if apply_hard_coded_extension_and_fixes:
igdb_match_database, igdb_local_database = extend_both_igdb_databases(
release_year=release_year,
igdb_match_database=igdb_match_database,
igdb_local_database=igdb_local_database,
verbose=verbose,
)
if verbose:
print_igdb_matches(
igdb_match_database,
igdb_local_database,
constrained_release_year=release_year,
year_constraint=year_constraint,
)
return igdb_match_database, igdb_local_database
def transform_structure_of_matches(igdb_match_database, igdb_local_database):
# Retro-compatibility with code written for SteamSpy
matches = {}
for raw_name in igdb_match_database:
igdb_matched_ids = [str(igdb_id) for igdb_id in igdb_match_database[raw_name]]
igdb_matched_pc_release_dates = []
for igdb_id_as_str in igdb_matched_ids:
try:
release_dates = igdb_local_database[igdb_id_as_str]['release_dates']
except KeyError:
continue
for element in release_dates:
if element['platform'] == get_pc_platform_no():
release_date = element['human']
igdb_matched_pc_release_dates.append(release_date)
steam_matched_ids = []
for igdb_id_as_str in igdb_matched_ids:
try:
external_games = igdb_local_database[igdb_id_as_str]['external_games']
except KeyError:
continue
for element in external_games:
if element['category'] == get_steam_service_no():
steam_app_id = element['uid']
steam_matched_ids.append(steam_app_id)
igdb_matched_slugs = [
igdb_local_database[igdb_id_as_str]['slug']
for igdb_id_as_str in igdb_matched_ids
]
igdb_matched_names = [
igdb_local_database[igdb_id_as_str]['name']
for igdb_id_as_str in igdb_matched_ids
]
dummy_distances = [None for _ in igdb_matched_ids]
        element = {}
        element['input_name'] = raw_name
        # For IGDB, this is the IGDB id. For SteamSpy, this would be the Steam appID.
        element['matched_appID'] = igdb_matched_ids
        element['matched_pc_release_date'] = igdb_matched_pc_release_dates
        # Steam urls use an appID, which is the game id on the store.
        element['matched_steam_appID'] = steam_matched_ids
        # IGDB urls rely on the slug, which is an url-friendly game name.
        element['matched_slug'] = igdb_matched_slugs
        element['matched_name'] = igdb_matched_names
        element['match_distance'] = dummy_distances
        matches[raw_name] = element
return matches
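# Sketch of one entry of the returned dict (all values illustrative):
#   matches['Some Game'] == {
#       'input_name': 'Some Game',
#       'matched_appID': ['1942'],
#       'matched_pc_release_date': ['Jan 26, 2016'],
#       'matched_steam_appID': ['210970'],
#       'matched_slug': ['the-witness'],
#       'matched_name': ['The Witness'],
#       'match_distance': [None],
#   }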
def main():
from load_ballots import get_ballot_file_name
ballot_year = '2018'
input_filename = get_ballot_file_name(ballot_year, is_anonymized=True)
ballots = load_ballots(input_filename)
release_year = ballot_year
# Before manual fixes
igdb_match_database, igdb_local_database = load_igdb_local_databases(
ballots,
release_year=release_year,
apply_hard_coded_extension_and_fixes=False,
)
print_igdb_matches(
igdb_match_database,
igdb_local_database,
constrained_release_year=release_year,
)
# After manual fixes
igdb_match_database, igdb_local_database = load_igdb_local_databases(
ballots,
release_year=release_year,
apply_hard_coded_extension_and_fixes=True,
)
print_igdb_matches(
igdb_match_database,
igdb_local_database,
constrained_release_year=release_year,
)
return True
if __name__ == '__main__':
main()
|
[
"woctezuma@users.noreply.github.com"
] |
woctezuma@users.noreply.github.com
|
fde44d5d006e5ec5248ff47a658973924c676d68
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L36/36-35_MD_NVT_rerun/set_1ns_equi_1.py
|
0ea1c77d5e2d79791410687f4e64ca8b29dd7c03
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/mcl1/L36/MD_NVT_rerun/ti_one-step/36_35/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../36-35_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
b0c36703950d6c12151ca5149dff452c3190ec04
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/138/usersdata/201/53117/submittedfiles/volumeTV.py
|
8edb280ea7ea90acf2183f107078000ec0dd653e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# -*- coding: utf-8 -*-
V = int(input('Initial volume: '))
T = int(input('Number of volume changes: '))
soma = V
for i in range(1, T + 1):
    n = int(input('Volume change: '))
    soma = soma + n
    if soma > 100:
        soma = 100
    elif soma < 0:
        soma = 0
    print(soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4e7f60a0275bdcfe3703e3ac63914ff611e793df
|
1c9abb4b27517d99d579a4284035f39e092033e5
|
/0x11-python-network_1/2-post_email.py
|
d0dc27e1da547b11f12ea8805e2802503a9fc511
|
[] |
no_license
|
95ktsmith/holbertonschool-higher_level_programming
|
15160e6e76f7f6f7e4ddfd2266cf9bf60fddbcb5
|
c845402a9b4c7ad9d1c1b1a983f9fb7a4727209d
|
refs/heads/master
| 2022-12-19T05:56:00.288537
| 2020-09-24T23:32:20
| 2020-09-24T23:32:20
| 259,328,593
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
#!/usr/bin/python3
""" POST an email """
if __name__ == "__main__":
from urllib import request
from urllib import parse
from sys import argv
data = parse.urlencode({'email': argv[2]}).encode('ascii')
req = request.Request(argv[1], data)
with request.urlopen(req) as response:
print(response.read().decode('utf-8'))
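# Usage sketch (URL and address are placeholders):
#   ./2-post_email.py http://0.0.0.0:5000/post_email hello@example.com
# argv[1] is the target URL and argv[2] is the email value sent as POST data.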
|
[
"95ktsmith@gmail.com"
] |
95ktsmith@gmail.com
|
c65f1abb5a53c4cd127b36179a9397dbb9797578
|
93f200a88e6084be9dad4422195f5e7af6eecb68
|
/src/pymor/analyticalproblems/text.py
|
d1bb4d4fe6c8f6bfa6f6d941c81d83651fca99de
|
[
"BSD-2-Clause"
] |
permissive
|
mahgadalla/pymor
|
dfc163b396c15dec05ea519ee0e9b3277ba5c84f
|
ee2806b4c93748e716294c42454d611415da7b5e
|
refs/heads/master
| 2020-03-21T13:08:00.819939
| 2018-06-15T12:19:00
| 2018-06-18T08:08:07
| 138,589,646
| 1
| 0
| null | 2018-06-25T12:05:39
| 2018-06-25T12:05:39
| null |
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.core.defaults import defaults
from pymor.domaindescriptions.basic import RectDomain
from pymor.functions.basic import ConstantFunction, LincombFunction
from pymor.functions.bitmap import BitmapFunction
from pymor.parameters.functionals import ProjectionParameterFunctional
from pymor.parameters.spaces import CubicParameterSpace
@defaults('font_name')
def text_problem(text='pyMOR', font_name=None):
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tempfile import NamedTemporaryFile
font_list = [font_name] if font_name else ['DejaVuSansMono.ttf', 'VeraMono.ttf', 'UbuntuMono-R.ttf', 'Arial.ttf']
font = None
for filename in font_list:
try:
font = ImageFont.truetype(filename, 64) # load some font from file of given size
except (OSError, IOError):
pass
if font is None:
raise ValueError('Could not load TrueType font')
size = font.getsize(text) # compute width and height of rendered text
size = (size[0] + 20, size[1] + 20) # add a border of 10 pixels around the text
    def make_bitmap_function(char_num):  # we need to generate a BitmapFunction for each character
img = Image.new('L', size) # create new Image object of given dimensions
d = ImageDraw.Draw(img) # create ImageDraw object for the given Image
# in order to position the character correctly, we first draw all characters from the first
# up to the wanted character
d.text((10, 10), text[:char_num + 1], font=font, fill=255)
        # next we erase all the previous characters by drawing a black rectangle
if char_num > 0:
d.rectangle(((0, 0), (font.getsize(text[:char_num])[0] + 10, size[1])), fill=0, outline=0)
# open a new temporary file
with NamedTemporaryFile(suffix='.png') as f: # after leaving this 'with' block, the temporary
# file is automatically deleted
img.save(f, format='png')
return BitmapFunction(f.name, bounding_box=[(0, 0), size], range=[0., 1.])
# create BitmapFunctions for each character
dfs = [make_bitmap_function(n) for n in range(len(text))]
# create an indicator function for the background
background = ConstantFunction(1., 2) - LincombFunction(dfs, np.ones(len(dfs)))
# form the linear combination
dfs = [background] + dfs
coefficients = [1] + [ProjectionParameterFunctional('diffusion', (len(text),), (i,)) for i in range(len(text))]
diffusion = LincombFunction(dfs, coefficients)
return StationaryProblem(
domain=RectDomain(dfs[1].bounding_box, bottom='neumann'),
neumann_data=ConstantFunction(-1., 2),
diffusion=diffusion,
parameter_space=CubicParameterSpace(diffusion.parameter_type, 0.1, 1.)
)
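# Usage sketch (sampling API assumed from CubicParameterSpace):
#   problem = text_problem('pyMOR')
#   mu = problem.parameter_space.sample_randomly(1)[0]  # one random diffusion parameter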
|
[
"stephanrave@uni-muenster.de"
] |
stephanrave@uni-muenster.de
|
48bd9280efa0de89650a0336d76d194c09989518
|
dd949f215d968f2ee69bf85571fd63e4f085a869
|
/systems/css-2011-teams/blue/subarchitectures/planner.sa/src/base_planners/downward/plan.py
|
b98bbe87d41a3acc5beeb19f1f35a0306dfed8db
|
[] |
no_license
|
marc-hanheide/cogx
|
a3fd395805f1b0ad7d713a05b9256312757b37a9
|
cb9a9c9cdfeba02afac6a83d03b7c6bb778edb95
|
refs/heads/master
| 2022-03-16T23:36:21.951317
| 2013-12-10T23:49:07
| 2013-12-10T23:49:07
| 219,460,352
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
#! /usr/bin/env python2.5
import os
import subprocess
import sys
import shutil
path = os.path.abspath(os.path.dirname(__file__)) # where this file resides
def main():
def run(*args, **kwargs):
input = kwargs.pop("input", None)
output = kwargs.pop("output", None)
assert not kwargs
redirections = {}
if input:
redirections["stdin"] = open(input)
if output:
redirections["stdout"] = open(output, "w")
# print args, redirections
subprocess.check_call(args, **redirections)
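    # Example (file names illustrative): run("./solver", input="in.txt", output="out.txt")
    # executes ./solver with stdin redirected from in.txt and stdout to out.txt.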
if len(sys.argv) == 3:
domain, problem = sys.argv[1:]
# run translator
run(os.path.join(path, "translate/translate.py"), domain, problem)
else:
domain, problem, mutex = sys.argv[1:]
# run translator
run(os.path.join(path, "translate/translate.py"), domain, problem, "-m", mutex)
# run preprocessing
run(os.path.join(path, "preprocess/preprocess"), input="output.sas")
# run search
run(os.path.join(path, "search/search"), "yY", input="output")
# epsilonize plan
# shutil.move("%s.1" % result_name, result_name)
# run("search/epsilonize_plan.py", input=result_name, output="%s_eps" % result_name)
# shutil.move("%s_eps" % result_name, result_name)
if __name__ == "__main__":
main()
|
[
"marc@hanheide.net"
] |
marc@hanheide.net
|
4ea429fb0979407907d6336d069e7fbbe0fd2e87
|
8cde806e824208949fd9e34806445d05114860cc
|
/detools/compression/heatshrink.py
|
c7f535d58b3d9ff41dbd3c60cd647beae73eb8f7
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
tips367/detools
|
80003facc744147c39f339cfe20b2d3eb8dccd70
|
21092202cdefc3358f450801be0e1855ea06a18d
|
refs/heads/master
| 2022-12-29T14:33:08.882316
| 2020-10-16T15:20:08
| 2020-10-16T15:20:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
"""Heatshrink wrapper.
"""
import bitstruct
from heatshrink2.core import Writer
from heatshrink2.core import Reader
from heatshrink2.core import Encoder
def pack_header(window_sz2, lookahead_sz2):
return bitstruct.pack('u4u4', window_sz2 - 4, lookahead_sz2 - 3)
def unpack_header(data):
window_sz2, lookahead_sz2 = bitstruct.unpack('u4u4', data)
return window_sz2 + 4, lookahead_sz2 + 3
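# Round-trip sketch, using the default sizes chosen below:
#   pack_header(8, 7) == b'\x44' and unpack_header(b'\x44') == (8, 7)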
class HeatshrinkCompressor(object):
def __init__(self):
window_sz2 = 8
lookahead_sz2 = 7
self._data = pack_header(window_sz2, lookahead_sz2)
self._encoder = Encoder(Writer(window_sz2=window_sz2,
lookahead_sz2=lookahead_sz2))
def compress(self, data):
compressed = self._encoder.fill(data)
if self._data:
compressed = self._data + compressed
self._data = b''
return compressed
def flush(self):
return self._data + self._encoder.finish()
class HeatshrinkDecompressor(object):
def __init__(self, number_of_bytes):
self._number_of_bytes_left = number_of_bytes
self._data = b''
self._encoder = None
def decompress(self, data, size):
if self._encoder is None:
if not data:
return b''
window_sz2, lookahead_sz2 = unpack_header(data[:1])
self._encoder = Encoder(Reader(window_sz2=window_sz2,
lookahead_sz2=lookahead_sz2))
data = data[1:]
self._number_of_bytes_left -= 1
if self._number_of_bytes_left > 0:
self._data += self._encoder.fill(data)
self._number_of_bytes_left -= len(data)
if self._number_of_bytes_left == 0:
self._data += self._encoder.finish()
self._number_of_bytes_left = -1
decompressed = self._data[:size]
self._data = self._data[size:]
return decompressed
@property
def needs_input(self):
return self._data == b'' and not self.eof
@property
def eof(self):
return self._number_of_bytes_left == -1 and self._data == b''
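# Usage sketch (payload is made up): compress in one shot, then decompress,
# passing the total compressed size so the decompressor knows when to finish.
#   compressor = HeatshrinkCompressor()
#   blob = compressor.compress(b'hello') + compressor.flush()
#   decompressor = HeatshrinkDecompressor(len(blob))
#   assert decompressor.decompress(blob, 5) == b'hello'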
|
[
"erik.moqvist@gmail.com"
] |
erik.moqvist@gmail.com
|
fa1b8dd2c5c5d52eb24c24a8d293b35c86edfe04
|
13c111d2c405fef3b074fd7f7ed7cd06cc05084a
|
/graphql_start/migrations/0003_auto_20181101_1259.py
|
516bc9625590708dfdd411fe9a0b44ec7caf73ea
|
[] |
no_license
|
Dimas4/GraphQL-Django
|
7da5f73cb0427f4e7bbf5a48e1c4bd45fc35cfb6
|
3fb6919cd2c30848e08e251279e0445dab6f8247
|
refs/heads/master
| 2020-04-04T09:48:38.987618
| 2018-11-09T09:33:15
| 2018-11-09T09:33:15
| 155,831,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
# Generated by Django 2.1.2 on 2018-11-01 12:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('graphql_start', '0002_auto_20181101_1259'),
]
operations = [
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='graphql_start.Category'),
preserve_default=False,
),
migrations.AlterField(
model_name='article',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"ivanshatukho@yandex.ru"
] |
ivanshatukho@yandex.ru
|
87d539feff1c1a1de2c0a589f30bbf9f8563d755
|
055581f9d6c81eda2f73ea05b90b7a2256da1219
|
/parts/zodiac/pyramid/tests/pkgs/forbiddenapp/__init__.py
|
62e9ac8ce0495f112404a61b8044172505e83eb9
|
[] |
no_license
|
Tosti770/zodiac
|
488a91c3e872a62d09a3ebb22a951dadcbd1c2df
|
af0380e20eb90699a84e3b7c6cb2085a1fb81667
|
refs/heads/master
| 2020-04-13T06:54:26.333228
| 2014-03-03T20:10:11
| 2014-03-03T20:10:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
/home/ruben/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/tests/pkgs/forbiddenapp/__init__.py
|
[
"ruben_tc@hotmail.es"
] |
ruben_tc@hotmail.es
|
77aa658e02bb132300c8e65d1c3916d6b8025cbd
|
0956319ecf55da86b05237e2a26a0ebae41fe884
|
/scrape-circuit-patch-share.py
|
d8b01153c1d2393e0658f7d14b0b738e04634d4a
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
SpotlightKid/personal-scripts
|
65492ea919ec3634daa40eac6803067e390ffa79
|
8e93005e740987adc5a8403ab80e0049998bfbbe
|
refs/heads/master
| 2023-08-20T16:27:25.717092
| 2023-08-19T11:48:42
| 2023-08-19T12:37:26
| 167,387,617
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
#!/usr/bin/env python2
"""Scrape Novation Circuit SysEx patch data from HTML saved from Circuit Patch Share site."""
import argparse
import logging
import os
import re
import sys
from os.path import exists, join
from base64 import b64decode
log = logging.getLogger('scrape-circuit-patch-share')
def safe_name(name):
return "".join(c if re.match(r'\w', c) else '_' for c in name)
def unescape(match):
return chr(int(match.group(1)))
def scrape_patches(html):
    html = re.sub(r'&#(\d+);', unescape, html)
    patches = re.findall(r"sendPatchToCircuit\('(.*?)',\s*atob\('(.*?)'\),\s*(\d+)\)", html)
    result = {}
    for name, patch, synth in patches:
        name = name.strip()
        # keys are (name, synth) tuples, so check for the full key here
        if (name, int(synth)) in result:
            continue
        result[(name, int(synth))] = bytearray([int(c) for c in b64decode(patch).split(b',')])
    return result
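# For reference, the scraper targets inline JS of this shape (illustrative):
#   sendPatchToCircuit('Patch Name', atob('...'), 0)
# where the base64 payload decodes to a comma-separated list of byte values.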
def main(args=None):
ap = argparse.ArgumentParser(description=__doc__.splitlines()[0])
ap.add_argument('-v', '--verbose', action="store_true",
help="Be verbose")
ap.add_argument('-o', '--output-dir', metavar='DIR', default=os.getcwd(),
help="Output directory (default: current directory)")
ap.add_argument('html', help="HTML input file")
args = ap.parse_args(args)
logging.basicConfig(format="%(levelname)s: %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO)
with open(args.html) as fp:
html = fp.read()
patches = scrape_patches(html)
log.info("Found %i patches.", len(patches))
for i, ((name, synth), data) in enumerate(sorted(patches.items())):
outdir = join(args.output_dir, "Synth %i" % (synth + 1,))
if not exists(outdir):
os.makedirs(outdir)
outfn = join(outdir, "%s.syx" % safe_name(name))
log.info("Writing patch '%s' to '%s'...", name, outfn)
data[7] = synth
with open(outfn, 'wb') as fp:
fp.write(patches[(name, synth)])
log.info("%i patch files written.", i + 1)
return 0
if __name__ == '__main__':
sys.exit(main() or 0)
|
[
"chris@chrisarndt.de"
] |
chris@chrisarndt.de
|
5226b1f41f04ac77a049d211af76f2e57c43105c
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pandas/tests/dtypes/test_concat.py
|
02daa185b1cdb687219a6be79e0400731acdfc72
|
[
"MIT"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684
| 2023-06-26T00:53:53
| 2023-06-26T00:53:53
| 94,152,032
| 14
| 0
|
MIT
| 2023-09-04T02:53:29
| 2017-06-13T00:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
import pytest
import pandas.core.dtypes.concat as _concat
from pandas import DatetimeIndex, Period, PeriodIndex, Series, TimedeltaIndex
@pytest.mark.parametrize(
"to_concat, expected",
[
# int/float/str
([["a"], [1, 2]], ["i", "object"]),
([[3, 4], [1, 2]], ["i"]),
([[3, 4], [1, 2.1]], ["i", "f"]),
# datetimelike
([DatetimeIndex(["2011-01-01"]), DatetimeIndex(["2011-01-02"])], ["datetime"]),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 days"])], ["timedelta"]),
# datetimelike object
(
[
DatetimeIndex(["2011-01-01"]),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime", "datetime64[ns, US/Eastern]"],
),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime64[ns, Asia/Tokyo]", "datetime64[ns, US/Eastern]"],
),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 hours"])], ["timedelta"]),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
TimedeltaIndex(["1 days"]),
],
["datetime64[ns, Asia/Tokyo]", "timedelta"],
),
],
)
def test_get_dtype_kinds(index_or_series, to_concat, expected):
to_concat_klass = [index_or_series(c) for c in to_concat]
result = _concat.get_dtype_kinds(to_concat_klass)
assert result == set(expected)
@pytest.mark.parametrize(
"to_concat, expected",
[
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="M")],
["period[M]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="M")]),
],
["period[M]"],
),
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="D")],
["period[M]", "period[D]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="D")]),
],
["period[M]", "period[D]"],
),
],
)
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat.get_dtype_kinds(to_concat)
assert result == set(expected)
|
[
"ulcamilo@gmail.com"
] |
ulcamilo@gmail.com
|
986576fea2470a7c79037f9f2e1ec6d1f08251f2
|
b83a23fa50e8f1ca6ce1fb3b550e6ceb1b513261
|
/aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddCasterComponentRequest.py
|
c939357ca0b2390c92cbbcb114802eddb670e807
|
[
"Apache-2.0"
] |
permissive
|
sunfuze/aliyun-openapi-python-sdk
|
c9f8143cf1ceac1bdd09f36d5f4493a510f48a0b
|
09910c57081f207da294d6d2fe981f7f913bc501
|
refs/heads/master
| 2021-03-24T12:01:12.107284
| 2018-01-18T06:32:06
| 2018-01-18T06:32:06
| 118,209,677
| 1
| 0
| null | 2018-01-20T04:41:58
| 2018-01-20T04:41:58
| null |
UTF-8
|
Python
| false
| false
| 2,951
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddCasterComponentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCasterComponent')
def get_ImageLayerContent(self):
return self.get_query_params().get('ImageLayerContent')
def set_ImageLayerContent(self,ImageLayerContent):
self.add_query_param('ImageLayerContent',ImageLayerContent)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_ComponentLayer(self):
return self.get_query_params().get('ComponentLayer')
def set_ComponentLayer(self,ComponentLayer):
self.add_query_param('ComponentLayer',ComponentLayer)
def get_ComponentName(self):
return self.get_query_params().get('ComponentName')
def set_ComponentName(self,ComponentName):
self.add_query_param('ComponentName',ComponentName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
def get_ComponentType(self):
return self.get_query_params().get('ComponentType')
def set_ComponentType(self,ComponentType):
self.add_query_param('ComponentType',ComponentType)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_LocationId(self):
return self.get_query_params().get('LocationId')
def set_LocationId(self,LocationId):
self.add_query_param('LocationId',LocationId)
def get_Effect(self):
return self.get_query_params().get('Effect')
def set_Effect(self,Effect):
self.add_query_param('Effect',Effect)
def get_TextLayerContent(self):
return self.get_query_params().get('TextLayerContent')
def set_TextLayerContent(self,TextLayerContent):
self.add_query_param('TextLayerContent',TextLayerContent)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
872f46b1d5265e8af1c408c23647f1e3647142a3
|
29e91d422f0fcad92f0e25b3dbb9efd39dc01162
|
/electronic-station/weak-point.py
|
63745fe8a2295a4af4e98785b732a6fed26e2473
|
[] |
no_license
|
cielavenir/checkio
|
c206410b7d8d368e80ad0f66f6314097bd900bcd
|
e2dfcdef75cd68ca3cced159225b5433570bd85b
|
refs/heads/master
| 2021-01-22T20:34:29.899146
| 2018-02-22T15:16:21
| 2018-02-22T15:16:21
| 85,328,995
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# w(m) returns the index of the row of m with the smallest sum; applying it to
# the transposed matrix yields the weakest column.
w = lambda m: min(range(len(m)), key=lambda i: sum(m[i][j] for j in range(len(m))))
weak_point = lambda m: [w(m), w(list(zip(*m)))]
if __name__ == '__main__':
assert isinstance(weak_point([[1]]), (list, tuple)), "The result should be a list or a tuple"
assert list(weak_point([[7, 2, 7, 2, 8],
[2, 9, 4, 1, 7],
[3, 8, 6, 2, 4],
[2, 5, 2, 9, 1],
[6, 6, 5, 4, 5]])) == [3, 3], "Example"
assert list(weak_point([[7, 2, 4, 2, 8],
[2, 8, 1, 1, 7],
[3, 8, 6, 2, 4],
[2, 5, 2, 9, 1],
[6, 6, 5, 4, 5]])) == [1, 2], "Two weak point"
assert list(weak_point([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])) == [0, 0], "Top left"
|
[
"cielartisan@gmail.com"
] |
cielartisan@gmail.com
|
b548121d1bdde5836f7a4a846c7841648d2592bc
|
e29f8c29a993156b7de7b0451d63ad8cca51c9a6
|
/zajecia10/czas_godzina1.py
|
cd07774d76e7b37bd5bcebda52c33b00869cbffd
|
[] |
no_license
|
remekwilk/python_basic
|
d898ad26aba809eb14ebed9d94bd93db69154ffa
|
af145a9711dabca232dc5f5be8fe4c407a5fda54
|
refs/heads/master
| 2020-05-03T14:42:19.523070
| 2020-04-11T20:42:03
| 2020-04-11T20:42:03
| 176,701,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
from datetime import time
samo_poludnie = time(12)
print(samo_poludnie)
kwadrans_po_osmej = time(8, 15)
print(kwadrans_po_osmej)
usain_bolt_na_100m = time(0, 0, 9, 580000)  # the last argument is in microseconds
usain_bolt_na_100m = time(second=9, microsecond=580000)  # the same value, built with keyword arguments
print(usain_bolt_na_100m)
# NOTE: this object does not provide a way to obtain the current time
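# A minimal sketch (not part of the original script): to get the current
# wall-clock time, reach for datetime instead, e.g.:
#   from datetime import datetime
#   teraz = datetime.now().time()  # a datetime.time for the current moment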
|
[
"remekwilk@gmail.com"
] |
remekwilk@gmail.com
|
f2355712075a733730ccd7d93e61854573372a8e
|
769f6d88fd777459eb60eb1bbb0fba17cb20d963
|
/Chapter05/05_01_Robots.py
|
cbb36dfa52b7f3c567c4f06bfda9bf6e6ebc3cb9
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Web-Scraping-Cookbook
|
141379d09abe2c7d8f408858a2eb44ff0fe3ef26
|
030eb974ba1437b2590b59d38f19fb697bbf9d4c
|
refs/heads/master
| 2023-02-16T04:29:49.942243
| 2023-01-30T04:19:03
| 2023-01-30T04:19:03
| 120,744,571
| 115
| 105
|
MIT
| 2019-10-03T17:38:37
| 2018-02-08T10:08:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
from reppy.robots import Robots
url = "http://www.amazon.com"
robots = Robots.fetch(url + "/robots.txt")
paths = [
'/',
'/gp/dmusic/',
'/gp/dmusic/promotions/PrimeMusic/',
'/gp/registry/wishlist/'
]
for path in paths:
print("{0}: {1}".format(robots.allowed(path, '*'), url + path))
|
[
"packt.danishs@gmail.com"
] |
packt.danishs@gmail.com
|