Dataset schema (column, dtype, observed stats):

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes (no_license, permissive) |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class (Python) |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
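Each sample row below carries these fields in order, with the raw file text in `content`. As a quick orientation, here is a minimal Python sketch for filtering rows of this shape; it assumes rows are available as plain dicts keyed by the column names, and the `Row` alias, `permissive_python_files` helper, and in-memory `sample` list are illustrative names, not part of any published loader.

```python
from typing import Any, Dict, Iterable, Iterator

# A row is a plain dict keyed by the schema columns above
# (blob_id, path, license_type, extension, length_bytes, content, ...).
Row = Dict[str, Any]

def permissive_python_files(rows: Iterable[Row]) -> Iterator[Row]:
    """Yield permissively licensed .py rows above the schema's 128-byte floor."""
    for row in rows:
        if row["license_type"] != "permissive":
            continue
        if row["extension"] != "py":
            continue
        if row["length_bytes"] < 128:  # schema minimum for length_bytes
            continue
        yield row

# Usage with a hand-built row shaped like the samples below:
sample = [{
    "license_type": "permissive",
    "extension": "py",
    "length_bytes": 930,
    "repo_name": "tickpeach/LeetCode-Solutions",
}]
for row in permissive_python_files(sample):
    print(row["repo_name"])  # -> tickpeach/LeetCode-Solutions
```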
59c3785a4ac2ee9b31690323dc85dd3b30e8673e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/get_20200805213236.py
|
40816acd83fa8a37b6e300d4e36406c9b1c906c9
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
def produce(num1, num2):
    totalValue = 0
    for i in range(num1):
        print(i)
        totalValue += num2
    print(totalValue)
produce(2, 3)
# 4513 = 4 *
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
6a7d2bc23a45d9781a2a50fbd1c51140a331df8f
|
8efe56ee34c455a6b1336897f6d457acbc9c10f9
|
/examples/tf/trpo_gym_tf_cartpole.py
|
d68045cd1292b920783ca7aa1a65100b8b7b1e9b
|
[
"MIT"
] |
permissive
|
neurips2020submission11699/metarl
|
ab18d11e708bf569d76cb2fab2bcce089badd111
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
refs/heads/master
| 2022-10-15T22:03:09.948673
| 2020-06-11T19:22:55
| 2020-06-11T19:30:58
| 268,410,657
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
#!/usr/bin/env python3
"""An example to train a task with TRPO algorithm."""
import gym
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v0 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = MetaRLEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=200,
discount=0.99,
max_kl_step=0.01,
)
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=4000)
trpo_gym_tf_cartpole()
|
[
"neurips2020submission11699@gmail.com"
] |
neurips2020submission11699@gmail.com
|
91ae813803ef41b2393de367a4d0b898cf8d03a7
|
caaa1c57129a3e2369e4d6eeda46a94247c686d6
|
/flight/migrations/0002_auto_20180921_1020.py
|
9af863ff7971c398ad3a5436ebedac81b720fbb6
|
[] |
no_license
|
Sundarmax/Ariline--Django-REST
|
3884c14ab8440f809c1dbac7d91b7349a9e4d3a0
|
131f875f58c94f0c297a2b82c31c4880d7f5de08
|
refs/heads/master
| 2020-03-31T02:31:57.754379
| 2018-10-06T10:16:03
| 2018-10-06T10:16:03
| 151,828,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Generated by Django 2.1.1 on 2018-09-21 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flight', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flight',
name='no_seats',
field=models.IntegerField(),
),
]
|
[
"sundar.info22@gmail.com"
] |
sundar.info22@gmail.com
|
aa7516b3f811b25c098019f35297cf83e86f947f
|
ac54aa0127a47fb59211fba9e6cb8431d9d864cd
|
/apps/post/api.py
|
35052cf6869626cc68264ddbd273a9f3f4d61940
|
[] |
no_license
|
xiringlama/manutd.org.np
|
8919e3c1ad0494f88b819089686a756d67d38598
|
f394f16edb96c05e2e864dcec1ec52532cd35ac2
|
refs/heads/master
| 2021-07-12T00:33:17.197706
| 2017-10-16T14:45:10
| 2017-10-16T14:45:10
| 107,222,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from rest_framework import viewsets, mixins
from apps.key.permissions import DistributedKeyAuthentication
from apps.post.models import Post
from apps.post.serializers import PostSerializer
class PostViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
serializer_class = PostSerializer
queryset = Post.objects.filter(status='Published')
# permission_classes = (DistributedKeyAuthentication,)
|
[
"roshanshrestha01@gmail.com"
] |
roshanshrestha01@gmail.com
|
ac82a99b56e519a3667ee7e102affd07d4921e27
|
38ecf426f34b025b70208faf5d7de03d7ce1e7f3
|
/Loan.py
|
04b28028a80419f800080a9e8ca9e2d6c3e6abb6
|
[] |
no_license
|
ic1396/LearnPython
|
3ed6805e0cfcc622a8376084715f8c5fe3db8058
|
faabfecace5dd2ebf28ad75f35c61a29dee801ee
|
refs/heads/master
| 2021-11-22T23:26:10.085015
| 2021-11-06T06:48:19
| 2021-11-06T06:48:19
| 89,790,705
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
#!/usr/bin/python3
# "Python Language Programming", Listing 7-8
# Program Listing 7-8
# Example: class abstraction and encapsulation with a loan/interest class
class Loan:
def __init__(self, annualInterestRate = 2.5,
numberOfYears = 1, loanAmount = 1000, borrower = " "):
self.__annualInterestRate = annualInterestRate
self.__numberOfYears = numberOfYears
self.__loanAmount = loanAmount
self.__borrower = borrower
def getAnnualInterestRate(self):
return self.__annualInterestRate
def getNumberOfYears(self):
return self.__numberOfYears
def getLoanAmount(self):
return self.__loanAmount
def getBorrower(self):
return self.__borrower
def setAnnualInterestRate(self, annualInterestRate):
self.__annualInterestRate = annualInterestRate
def setNumberOfYears(self, numberOfYears):
self.__numberOfYears = numberOfYears
def setLoanAmount(self, loanAmount):
self.__loanAmount = loanAmount
def setBorrower(self, borrower):
self.__borrower = borrower
def getMonthlyPayment(self):
monthlyInterestRate = self.__annualInterestRate / 1200
monthlyPayment = self.__loanAmount * monthlyInterestRate / \
(1 - (1 / (1 + monthlyInterestRate) ** (self.__numberOfYears * 12)))
return monthlyPayment
def getTotalPayment(self):
totalPayment = self.getMonthlyPayment() * self.__numberOfYears * 12
return totalPayment
|
[
"redsirius@foxmail.com"
] |
redsirius@foxmail.com
|
6a7583878c83c37b2fc3a1416f0088ec77d2d1b2
|
0c70dcec22a090e70b1f20613ea6e0a64fd9a037
|
/GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/frame/methods/test_pop.py
|
8029640b10a0a2d2d7b0862092fba802273d2e96
|
[
"MIT"
] |
permissive
|
payiz-asj/Gis
|
82c1096d830878f62c7a0d5dfb6630d4e4744764
|
3d315fed93e2ab850b836ddfd7a67f5618969d10
|
refs/heads/main
| 2023-06-27T15:25:17.301154
| 2021-08-03T10:02:58
| 2021-08-03T10:02:58
| 392,269,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFramePop:
def test_pop(self, float_frame):
float_frame.columns.name = "baz"
float_frame.pop("A")
assert "A" not in float_frame
float_frame["foo"] = "bar"
float_frame.pop("foo")
assert "foo" not in float_frame
assert float_frame.columns.name == "baz"
# gh-10912: inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
b = a.pop("B")
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
tm.assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=["X", "Y"], name="B") + 1
tm.assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
assert type(res) == DataFrame
assert len(res) == 2
assert len(df.columns) == 1
assert "b" in df.columns
assert "a" not in df.columns
assert len(df.index) == 2
|
[
"1778029840@qq.com"
] |
1778029840@qq.com
|
aa9a0e1ec0fb65029c338c1783ad70bdc8b72522
|
39c99883c3e55c0a0a7684fc5fd89c767ea93cc8
|
/model.py
|
1ba435317b63996e67640f13e3a5cb9aac440e01
|
[] |
no_license
|
shawntan/billion-word-imputation
|
872f478926966aa17e44a1738c58fdb90681a552
|
ea581253537ad76e89ec5eaa0cf80d57d61121e4
|
refs/heads/master
| 2020-04-09T11:14:02.828445
| 2014-11-21T09:32:43
| 2014-11-21T09:32:43
| 22,462,900
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,425
|
py
|
# coding=utf-8
import theano
import sys
import theano.tensor as T
import numpy as np
from theano_toolkit import utils as U
from theano_toolkit import updates
import cPickle as pickle
from numpy_hinton import print_arr
from theano.printing import Print
from vocab import read_file
def create_vocab_vectors(vocab2id,size):
V = U.create_shared(U.initial_weights(len(vocab2id) + 1,size))
V_b = U.create_shared(U.initial_weights(len(vocab2id) + 1))
return V,V_b
def recurrent_combine(X,V,V_b,W_input,b_input,W_state_p,b_state_p,b_state,W_input_hidden,W_state_p_hidden):
def step(curr_input,state_p):
# Build next layer
state = T.dot(curr_input,W_input) + T.dot(state_p,W_state_p) + b_state
state = T.tanh(state)
# RAE stuff
rep_word_vec = T.dot(state,W_input.T) + b_input
rep_curr_input = T.dot(rep_word_vec,V.T) + V_b
rep_state_p = T.dot(state,W_state_p.T) + b_state_p
# Contributions to predictive hidden layer
hidden_partial = T.dot(state_p,W_state_p_hidden) + T.dot(curr_input,W_input_hidden)
return state,rep_curr_input,rep_state_p,hidden_partial
[states,rep_inputs,rep_states,hidden_partials],_ = theano.scan(
step,
sequences = [X[1:]],
outputs_info = [X[0],None,None,None]
)
return states,T.nnet.softmax(rep_inputs),rep_states,hidden_partials
def missing_cost(scores,Y):
probs = T.nnet.softmax(scores)[0]
total_scores_diff = -T.log(probs[Y])
"""
label_score = scores[Y]
scores_diff = -(label_score - (scores + 1))
scores_diff = scores_diff * (scores_diff > 0)
total_scores_diff = (T.sum(scores_diff) - scores_diff[Y])/(scores.shape[0]-1)
"""
return total_scores_diff
def rae_cost(ids,X,states,rep_inputs,rep_states):
# Actual input - reconstructed input error
#input_rec_cost = T.mean(T.sum((X[1:]-rep_inputs)**2,axis=1))
input_rec_cost = -T.mean(T.log(rep_inputs[T.arange(rep_inputs.shape[0]),ids[1:]]))
# Actual prev state - reconstructed prev state error
state_rec_cost = (
# All states except last, all rec states except first
T.sum((states[:-1] - rep_states[1:])**2) +\
# First state (first input) and first rec state
T.sum((X[0] - rep_states[0])**2)
)/states.shape[0]
return input_rec_cost + state_rec_cost
def create_model(ids,Y,vocab2id,size):
word_vector_size = size
rae_state_size = size
predictive_hidden_size = size * 2
V,V_b = create_vocab_vectors(vocab2id,word_vector_size)
X = V[ids]
# RAE parameters
W_input = U.create_shared(U.initial_weights(word_vector_size,rae_state_size))
b_input = U.create_shared(U.initial_weights(rae_state_size))
W_state_p = U.create_shared(U.initial_weights(rae_state_size,rae_state_size))
b_state_p = U.create_shared(U.initial_weights(rae_state_size))
b_state = U.create_shared(U.initial_weights(rae_state_size))
W_input_hidden = U.create_shared(U.initial_weights(word_vector_size,predictive_hidden_size))
W_state_p_hidden = U.create_shared(U.initial_weights(rae_state_size,predictive_hidden_size))
W_full_context_hidden = U.create_shared(U.initial_weights(rae_state_size,predictive_hidden_size))
b_hidden = U.create_shared(U.initial_weights(predictive_hidden_size))
W_output = U.create_shared(U.initial_weights(predictive_hidden_size))
states,rep_inputs,rep_states,hidden_partials = recurrent_combine(
X,
V,V_b,
W_input,b_input,
W_state_p,b_state_p,b_state,
W_input_hidden,W_state_p_hidden,
)
context = states[-1]
hidden = T.dot(context,W_full_context_hidden) + hidden_partials + b_hidden
# hidden = T.tanh(hidden)
hidden = hidden * (hidden > 0)
scores = T.dot(hidden,W_output)
parameters = [
V,
V_b,
W_input,
b_input,
W_state_p,
b_state_p,
b_state,
W_input_hidden,
W_state_p_hidden,
W_full_context_hidden,
b_hidden,
W_output
]
cost = rae_cost(ids,X,states,rep_inputs,rep_states) + missing_cost(scores,Y) + 1e-5*sum(T.sum(w**2) for w in parameters)
return scores, cost, parameters
def training_model(vocab2id,size):
ids = T.ivector('ids')
Y = T.iscalar('Y')
scores, cost, parameters = create_model(ids,Y,vocab2id,size)
gradients = T.grad(cost,wrt=parameters)
print "Computed gradients"
train = theano.function(
inputs = [ids,Y],
updates = updates.adadelta(parameters,gradients,0.95,1e-6),
outputs = cost
)
test = theano.function(
inputs = [ids],
outputs = T.argmax(scores)
)
return test,train, parameters
|
[
"shawn@wtf.sg"
] |
shawn@wtf.sg
|
d9b69a220873901f4475849a5acbf53bdab5a693
|
21b201ebf2ffbbc19fa8d74e5657e12ef597b02d
|
/research/neural_programmer/data_utils.py
|
d5bae2d30db51a295f9719d42498a4e5bfc775fa
|
[] |
no_license
|
alhsnouf/model
|
fa619691ad9d0afc7ad849a9471e6bb0643a8d47
|
5fe429b115634e642a7469b3f1d4bc0c5cf98782
|
refs/heads/master
| 2021-04-12T11:16:02.150045
| 2018-03-27T15:19:18
| 2018-03-27T15:19:18
| 126,702,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a53d41aacbe166afd6c9e4a1822c313dd0e1cef93707ab41e75a8afd1ffeb53b
size 27733
|
[
"alhanouf987@hotmail.com"
] |
alhanouf987@hotmail.com
|
b400fba31f2fdf357fddd49b9f1a2872913b8b9d
|
d4cdc6c9e2580b2011d63f6d62f70ab9e13cd317
|
/sld-api-backend/api_v1/endpoints/auth.py
|
5c25ca1f22d744b558c204861daeb55ec28aeaec
|
[
"MIT"
] |
permissive
|
timezombi/Stack-Lifecycle-Deployment
|
75cc92bc0267953039f0d66c7c219a8d444817c8
|
d84241099fb44762476b4201a2fc195e76975e26
|
refs/heads/master
| 2023-07-13T11:11:35.001371
| 2021-08-20T13:35:14
| 2021-08-20T13:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from typing import Any
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordRequestForm
from schemas import schemas
from schemas.schemas import Token
from security import deps
from security.tokens import validate_user
router = APIRouter()
@router.post("/access-token", response_model=Token)
def login_access_token(
user: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(deps.get_db)) -> Any:
"""
OAuth2 compatible token login, get an access token for future requests
"""
return validate_user(db, user.username, user.password)
@router.post("/access-token-json", response_model=Token)
def login_access_token_json(
user: schemas.UserAuthenticate,
db: Session = Depends(deps.get_db)) -> dict:
"""
OAuth2 compatible token login, get an access token for future requests
"""
return validate_user(db, user.username, user.password)
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
0a08e11ff1d01c391f047776fe01e6807cafe721
|
be0e978e39dd4ab192590e97b2e907b4072c461f
|
/conf.py
|
4ea22b874cef6819e261d30a7a5e5c553f8d29bc
|
[] |
no_license
|
kbarbary/dessn-analysis
|
825007f1bcddf2e7fb92a99ee280cc644fd1ab70
|
2090b417b757447a28b766a6a5a38a7e7ea68c8e
|
refs/heads/master
| 2016-09-05T13:16:07.219179
| 2014-03-04T23:56:41
| 2014-03-04T23:56:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
import os
pikdir = 'pik'
plotdir = 'plots'
# Make directories
for d in [pikdir, plotdir]:
if not os.path.exists(d): os.mkdir(d)
|
[
"kylebarbary@gmail.com"
] |
kylebarbary@gmail.com
|
7dd3b559cde230f1cd49d4201ecfa533315f92fe
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/application/evolution/do.py
|
403717b769ec36230b647c36b6fe96b538c75f9a
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.app.cabaret.views.application.evolution.base import EvolutionHandler
from platinumegg.app.cabaret.models.Player import PlayerGold, PlayerDeck
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.util.cabareterror import CabaretError
from platinumegg.app.cabaret.util.db_util import ModelRequestMgr
from platinumegg.app.cabaret.util import db_util
import settings_sub
import urllib
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.models.Card import Card
class Handler(EvolutionHandler):
"""進化合成実行.
"""
@classmethod
def getViewerPlayerClassList(cls):
return [PlayerGold, PlayerDeck]
def procBench(self):
v_player = self.getViewerPlayer()
uid = v_player.id
self.__baseid = Card.makeID(uid, 11)
self.__materialid = Card.makeID(uid, 12)
def process(self):
args = self.getUrlArgs('/evolutiondo/')
try:
if settings_sub.IS_BENCH:
requestkey = OSAUtil.makeSessionID()
else:
self.__baseid = int(args.get(0))
self.__materialid = self.getMaterialId()
requestkey = urllib.unquote(args.get(1))
except:
raise CabaretError(u'引数が想定外です', CabaretError.Code.ILLEGAL_ARGS)
v_player = self.getViewerPlayer()
try:
model_mgr = db_util.run_in_transaction(Handler.tr_write, v_player, self.__baseid, self.__materialid, requestkey)
model_mgr.write_end()
except CabaretError,e:
if e.code == CabaretError.Code.ALREADY_RECEIVED:
pass
else:
if settings_sub.IS_LOCAL:
raise CabaretError(u'合成できませんでした.%s' % CabaretError.getCodeString(e.code))
url = UrlMaker.evolution()
self.appRedirect(self.makeAppLinkUrlRedirect(url))
return
url = UrlMaker.evolutionanim()
if settings_sub.IS_BENCH:
self.response.set_status(200)
self.response.send()
else:
self.appRedirect(self.makeAppLinkUrlRedirect(url))
@staticmethod
def tr_write(v_player, basecardid, materialcardid, key):
"""書き込み.
"""
model_mgr = ModelRequestMgr()
BackendApi.tr_evolution_do(model_mgr, v_player, basecardid, materialcardid, key)
model_mgr.write_all()
return model_mgr
def main(request):
return Handler.run(request)
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
f0a2557bcbcb8ad398c5927172e5d6cba1dc2da0
|
da199a7ff8bcc7a37efe2ac9036b785bf45c71c0
|
/service_mds/lun_inactive.py
|
0114f0829c74333e4b42bcfcf171b7d1b4f7836d
|
[] |
no_license
|
saxisuer/smartmgr-v2
|
f8ed495ce7ce940477f27c12980bfd159bc159c3
|
6e3895062d37b6815a0d6de031652048b8f22ad3
|
refs/heads/master
| 2021-01-15T21:24:56.622142
| 2017-07-24T14:35:17
| 2017-07-24T14:35:17
| 99,865,861
| 0
| 2
| null | 2017-08-10T01:03:19
| 2017-08-10T01:03:19
| null |
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from pdsframe import *
from service_mds import g
from service_mds import common
import message.pds_pb2 as msg_pds
import message.mds_pb2 as msg_mds
class LunInactiveMachine(BaseMachine):
__metaclass__ = MataMachine
MID = msg_mds.LUN_INACTIVE_REQUEST
def INIT(self, request):
self.response = MakeResponse(msg_mds.LUN_INACTIVE_RESPONSE, request)
self.request = request
self.request_body = request.body.Extensions[msg_mds.lun_inactive_request]
if g.is_ready == False:
self.response.rc.retcode = msg_mds.RC_MDS_SERVICE_IS_NOT_READY
self.response.rc.message = "MDS service is not ready"
self.SendResponse(self.response)
return MS_FINISH
items = self.request_body.lun_name.split("_")
if len(items) != 2 or items[0] != g.node_info.node_name:
self.response.rc.retcode = msg_mds.RC_MDS_LUN_NOT_EXIST
self.response.rc.message = "Lun '%s' is not exist" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
lun_name = items[1]
lun_info = common.GetLunInfoByName(lun_name)
if lun_info == None:
self.response.rc.retcode = msg_mds.RC_MDS_LUN_NOT_EXIST
self.response.rc.message = "Lun %s not exist" % (self.request_body.lun_name)
self.SendResponse(self.response)
return MS_FINISH
if lun_info.asm_status == "INACTIVE":
self.response.rc.retcode = msg_mds.RC_MDS_LUN_INACTIVE_NOT_ALLOWED
self.response.rc.message = "Lun %s already inactive state" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
if lun_info.asm_status != "ACTIVE":
self.response.rc.retcode = msg_mds.RC_MDS_LUN_INACTIVE_NOT_ALLOWED
self.response.rc.message = "Lun %s not active state, please active first!" % self.request_body.lun_name
self.SendResponse(self.response)
return MS_FINISH
self.database_node_list = [node_info for node_info in g.nsnode_list.nsnode_infos if node_info.sys_mode != "storage"]
self.mds_database_request = MakeRequest(msg_mds.ASMDISK_OFFLINE_REQUEST)
asmdisk_info = common.GetASMDiskInfoByLunName(self.request_body.lun_name)
self.mds_database_request.body.Extensions[msg_mds.asmdisk_offline_request].asmdisk_name = asmdisk_info.asmdisk_name
        # Send the request to the first compute node first
self.request_num = 1
return self.send_asm_request()
def send_asm_request(self):
node_info = self.database_node_list[self.request_num-1]
self.SendRequest(node_info.listen_ip, node_info.listen_port, self.mds_database_request, self.Entry_LunInactive)
return MS_CONTINUE
def Entry_LunInactive(self, response):
if response.rc.retcode != msg_pds.RC_SUCCESS:
            # Try the remaining compute nodes in turn; only respond once all of them have failed
if self.request_num < len(self.database_node_list):
self.request_num += 1
return self.send_asm_request()
else:
self.response.rc.CopyFrom(response.rc)
self.SendResponse(self.response)
return MS_FINISH
self.response.rc.retcode = msg_pds.RC_SUCCESS
self.SendResponse(self.response)
return MS_FINISH
|
[
"wuweisunshine@163.com"
] |
wuweisunshine@163.com
|
8ffd0e1b3034be62335188db3ccdd16b0c58540c
|
38ac429d63369922e12e19cdda042b08b8123027
|
/swagger_client/models/json_sort_field_find_attribute_types_request.py
|
d563a4b2286f13ff3b7dce9832b2c61c8a9565ad
|
[] |
no_license
|
aviv-julienjehannet/collibra_apiclient
|
0dfebe5df2eb929645b87eba42fab4c06ff0a6be
|
10a89e7acaf56ab8c7417698cd12616107706b6b
|
refs/heads/master
| 2021-09-12T16:52:19.803624
| 2018-04-19T01:35:20
| 2018-04-19T01:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
# coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JsonSortFieldFindAttributeTypesRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
NAME = "NAME"
KIND = "KIND"
STATISTICS_ENABLED = "STATISTICS_ENABLED"
IS_INTEGER = "IS_INTEGER"
ALLOWED_VALUES = "ALLOWED_VALUES"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""JsonSortFieldFindAttributeTypesRequest - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JsonSortFieldFindAttributeTypesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"busworld08@gmail.com"
] |
busworld08@gmail.com
|
27307bfe340d36b6bb9ee49fcbd2dc75bc39a97f
|
bd9d75816e6bb174c2b9e443492096339e3f90e3
|
/sympy/mpmath/tests/test_rootfinding.py
|
f6221ada8ef6e46460c9edab405b2b624bc71af7
|
[
"BSD-3-Clause"
] |
permissive
|
Rezaian-ma/sympy
|
ae800f0f1420f2cdbef1e4535e44f5cd47c9d8b0
|
7d8d096215c8f65ba1d4a9c09af78ec0c3844518
|
refs/heads/master
| 2021-12-03T01:17:38.048732
| 2010-02-14T05:53:55
| 2010-02-14T05:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
from sympy.mpmath import *
from sympy.mpmath.optimization import *
def test_findroot():
# old tests, assuming secant
mp.dps = 15
assert findroot(lambda x: 4*x-3, mpf(5)).ae(0.75)
assert findroot(sin, mpf(3)).ae(pi)
assert findroot(sin, (mpf(3), mpf(3.14))).ae(pi)
assert findroot(lambda x: x*x+1, mpc(2+2j)).ae(1j)
# test all solvers with 1 starting point
f = lambda x: cos(x)
for solver in [Newton, Secant, MNewton, Muller, ANewton]:
x = findroot(f, 2., solver=solver)
assert abs(f(x)) < eps
# test all solvers with interval of 2 points
for solver in [Secant, Muller, Bisection, Illinois, Pegasus, Anderson,
Ridder]:
x = findroot(f, (1., 2.), solver=solver)
assert abs(f(x)) < eps
# test types
f = lambda x: (x - 2)**2
assert isinstance(findroot(f, 1, force_type=mpf, tol=1e-10), mpf)
assert isinstance(findroot(f, 1., force_type=None, tol=1e-10), float)
assert isinstance(findroot(f, 1, force_type=complex, tol=1e-10), complex)
def test_mnewton():
f = lambda x: polyval([1,3,3,1],x)
x = findroot(f, -0.9, solver='mnewton')
assert abs(f(x)) < eps
def test_anewton():
f = lambda x: (x - 2)**100
x = findroot(f, 1., solver=ANewton)
assert abs(f(x)) < eps
def test_muller():
f = lambda x: (2 + x)**3 + 2
x = findroot(f, 1., solver=Muller)
assert abs(f(x)) < eps
def test_multiplicity():
for i in xrange(1, 5):
assert multiplicity(lambda x: (x - 1)**i, 1) == i
assert multiplicity(lambda x: x**2, 1) == 0
def test_multidimensional():
def f(*x):
return [3*x[0]**2-2*x[1]**2-1, x[0]**2-2*x[0]+x[1]**2+2*x[1]-8]
assert mnorm(jacobian(f, (1,-2)) - matrix([[6,8],[0,-2]]),1) < 1.e-7
for x, error in MDNewton(f, (1,-2), verbose=0,
norm=lambda x: norm(x, inf)):
pass
assert norm(f(*x), 2) < 1e-14
# The Chinese mathematician Zhu Shijie was the very first to solve this
# nonlinear system 700 years ago
f1 = lambda x, y: -x + 2*y
f2 = lambda x, y: (x**2 + x*(y**2 - 2) - 4*y) / (x + 4)
f3 = lambda x, y: sqrt(x**2 + y**2)
def f(x, y):
f1x = f1(x, y)
return (f2(x, y) - f1x, f3(x, y) - f1x)
x = findroot(f, (10, 10))
assert [int(round(i)) for i in x] == [3, 4]
def test_trivial():
assert findroot(lambda x: 0, 1) == 1
assert findroot(lambda x: x, 0) == 0
#assert findroot(lambda x, y: x + y, (1, -1)) == (1, -1)
|
[
"ondrej@certik.cz"
] |
ondrej@certik.cz
|
c306066a382072689bc5aec0380668d5f0faeed0
|
3b802edba5b97a4e97290be657395cd7635f5d35
|
/neoman/worker.py
|
8b960c99356d6fd8f2bc7ce851f4f9e769264c14
|
[
"BSD-2-Clause"
] |
permissive
|
moreati/yubikey-neo-manager
|
a7678fafbf8f88b29482caa843092f7598b6725c
|
b0fa3cdf5331bf1504e2744790caddff52b551f6
|
refs/heads/master
| 2021-01-19T06:55:58.099823
| 2015-06-04T11:30:53
| 2015-06-04T11:30:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,363
|
py
|
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from PySide import QtGui, QtCore, QtNetwork
from functools import partial
from neoman import messages as m
class _Event(QtCore.QEvent):
EVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
def __init__(self, callback):
super(_Event, self).__init__(_Event.EVENT_TYPE)
self.callback = callback
class QtWorker(QtCore.QObject):
_work_signal = QtCore.Signal(tuple)
_work_done = QtCore.Signal(object)
_work_done_0 = QtCore.Signal()
def __init__(self, window):
super(QtWorker, self).__init__()
self.window = window
self.busy = QtGui.QProgressDialog('', None, 0, 0, window)
self.busy.setWindowTitle(m.wait)
self.busy.setWindowModality(QtCore.Qt.WindowModal)
self.busy.setMinimumDuration(0)
self.busy.setWindowFlags(self.busy.windowFlags()
^ QtCore.Qt.WindowContextHelpButtonHint)
self.busy.setAutoClose(True)
self.work_thread = QtCore.QThread()
self.moveToThread(self.work_thread)
self.work_thread.start()
self._work_signal.connect(self.work)
self._work_done_0.connect(self.busy.reset)
self._manager = QtNetwork.QNetworkAccessManager()
self._manager.finished.connect(self._work_done_0)
self._manager.finished.connect(self._dl_done)
def post(self, title, fn, callback=None):
self.busy.setLabelText(title)
self.busy.show()
self.post_bg(fn, callback)
def post_bg(self, fn, callback=None):
self._work_signal.emit((fn, callback))
def download(self, url, callback=None):
self.busy.setLabelText(m.downloading_file)
self.busy.show()
self.download_bg(url, callback)
def download_bg(self, url, callback=None):
url = QtCore.QUrl(url)
request = QtNetwork.QNetworkRequest(url)
response = self._manager.get(request)
self._dl = (request, response, callback)
def _dl_error(self):
(req, resp, callback) = self._dl
del self._dl
if callback:
event = _Event(partial(callback, resp.error()))
QtGui.QApplication.postEvent(self.window, event)
def _dl_done(self):
(req, resp, callback) = self._dl
del self._dl
if callback:
result = resp.error()
if result is QtNetwork.QNetworkReply.NoError:
result = resp.readAll()
resp.close()
event = _Event(partial(callback, result))
QtGui.QApplication.postEvent(self.window, event)
@QtCore.Slot(tuple)
def work(self, job):
QtCore.QThread.msleep(10) # Needed to yield
(fn, callback) = job
try:
result = fn()
except Exception as e:
result = e
if callback:
event = _Event(partial(callback, result))
QtGui.QApplication.postEvent(self.window, event)
self._work_done_0.emit()
Worker = QtWorker
|
[
"dain@yubico.com"
] |
dain@yubico.com
|
771df2d2ac822d8885a18f74c6dc9f8bae1bf489
|
ee721fac058d6c0472be24f95e3cc8df37f4198d
|
/Stack/reverse.py
|
4028f27a5effe0e1e23e87a5d35cf04f3f7f0712
|
[] |
no_license
|
Horlawhumy-dev/Python_DataStructures
|
51af03dcbed86a51009c13657b17584f09d0a40d
|
c5aad1fe6c6566414c76711a0871abf9529fe04f
|
refs/heads/master
| 2023-06-04T09:32:34.776313
| 2021-07-02T21:43:09
| 2021-07-02T21:43:09
| 377,631,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
# This program reverses a string entered by the user
class Stack():
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
if not self.is_empty():
return self.items[-1]
return 'Stack is empty!'
def get_items(self):
return self.items
# input from user
word = input('Enter your word: ')
# Initializing stack object
stack = Stack()
# function reversing a given word
def reverse_str(word):
for i in range(len(word)):
stack.push(word[i])
rev = " "
arr = stack.get_items()
while len(arr) > 0:
rev += arr.pop()
return rev
print(reverse_str(word))
|
[
"harof.dev@gmail.com"
] |
harof.dev@gmail.com
|
ed3c30613036feb38c28bf2cee2a563c2faa8cc0
|
8f26514c451e2398d5e3688c184ea74d1dad21b2
|
/month_01/day_05/exercise_02.py
|
8d6e663126992c62ca4ef5f413334a35d88be2ec
|
[] |
no_license
|
CircularWorld/Python_exercise
|
25e7aebe45b4d2ee4e3e3afded082c56483117de
|
96d4d9c5c626f418803f44584c5350b7ce514368
|
refs/heads/master
| 2022-11-21T07:29:39.054971
| 2020-07-20T10:12:24
| 2020-07-20T10:12:24
| 281,081,559
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
'''
String: content = "我是京师监狱狱长金海。"
Print the first character, the last character, and the middle character.
Print the first three characters and the last three characters.
Proposition: "金海" is in the string content.
Proposition: "京师监狱" is not in the string content.
Print "京师监狱狱长" via slicing.
Print "长狱狱监师京" via slicing.
Print "我师狱海" via slicing.
Print the characters in reverse order.
'''
content = "我是京师监狱狱长金海。"
# Print the first character, the last character, and the middle character
# Print the first three characters and the last three characters
print(content[0], content[-1], content[len(content)//2])
print(content[:3], content[-3:])
# Proposition: "金海" is in the string content
# Proposition: "京师监狱" is not in the string content
print('金海' in content)
print('京师监狱' not in content)
# Print "京师监狱狱长" via slicing
# Print "长狱狱监师京" via slicing
# Print "我师狱海" via slicing
# String: content = "我是京师监狱狱长金海。"
print(content[2:-3])
print(content[-4:2:-1])
print(content[::3])
print(content[-1::-1])
|
[
"jiayuhaowork@163.com"
] |
jiayuhaowork@163.com
|
cc34be56b4526ad16bfdbf503a373b9a3f5a56a3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03637/s505835119.py
|
2e2c10f3045b463cc3d59b8aad36d02afaeae057
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
N=int(input())
a=list(map(int,input().split()))
n4,n2=0,0
for i in range(N):
if a[i] % 4 == 0:n4 += 1
elif a[i] % 2 == 0:n2 += 1
if n4 >= N//2:print('Yes')
elif n4*2 + n2 >= N:print('Yes')
else:print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
191126e94cbf396eb4cb7f58ebd051eaa21c55b3
|
082782cfbd0d8ac77c0ec3901a9de1c1e748405a
|
/sutorbank/settings.py
|
7813c4759f3668de0041e98d18074ace7b3d9d84
|
[] |
no_license
|
daniel-kanchev/sutorbank
|
1c1eb020f86ff58f5a3edc2d1c6971e8d66a390d
|
0738ec698f5711a9ceeb59e0a683a853a3bf8979
|
refs/heads/main
| 2023-03-17T17:55:50.537998
| 2021-03-17T14:47:37
| 2021-03-17T14:47:37
| 348,746,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
BOT_NAME = 'sutorbank'
SPIDER_MODULES = ['sutorbank.spiders']
NEWSPIDER_MODULE = 'sutorbank.spiders'
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0'
ITEM_PIPELINES = {
'sutorbank.pipelines.DatabasePipeline': 300,
}
FEED_EXPORT_ENCODING = 'utf-8'
ROBOTSTXT_OBEY = True
LOG_LEVEL = 'WARNING'
# LOG_LEVEL = 'DEBUG'
|
[
"daniel.kanchev@adata.pro"
] |
daniel.kanchev@adata.pro
|
b734da395f91fb51745ae74515623e919ce896ee
|
2f2667682bb78578445b9e3aac7cc62cfba83d5a
|
/googlenet/SavedModel_to_trt.py
|
41b4f1216a144aa9b441b032c1fc82fe4ca0799b
|
[] |
no_license
|
Yorwxue/trt_experence
|
9c770c2a1cb7c48c9d7f21c46be0107de91f1c41
|
778a6cef019dd8afdae6b608b3cbacb56480c7b1
|
refs/heads/master
| 2022-12-21T12:38:13.108402
| 2019-08-01T08:11:10
| 2019-08-01T08:11:10
| 195,760,238
| 0
| 0
| null | 2022-12-08T05:57:26
| 2019-07-08T07:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,722
|
py
|
# from SavedModel to trt graph
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
from tensorflow.examples.tutorials.mnist import input_data
from googlenet.checkpoint_to_SavedModel import image_web_saved_encode
def directory_create(directory):
if not os.path.exists(directory):
os.makedirs(directory)
SavedModel_dir = "./SavedModel/cnn_model/"
SavedModel_path = os.path.join(SavedModel_dir, str(len(os.listdir(SavedModel_dir))-2))
model_tag = "serve" # can be queried by saved_model_cli
summaries_dir = "./trt_model/cnn_model/tensorboard/"
directory_create(summaries_dir)
trt_export_model_dir = "./trt_model/cnn_model/"
trt_export_model_dir = os.path.join(trt_export_model_dir, str(len(os.listdir(trt_export_model_dir))-1))
batch_size = 1
max_GPU_mem_size_for_TRT = 2 << 20
# preparing dataset
# """
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
# reshape from 784 to 28*28
# x_train = np.reshape(x_train, [x_train.shape[0], 28, 28, 1])
x_test = np.reshape(x_test, [x_test.shape[0], 28, 28, 1])
# base64 encode
# x_train = [image_web_saved_encode(np.concatenate([image, image, image], axis=2)*255) for image in list(x_train)]
x_test = [image_web_saved_encode(np.concatenate([image, image, image], axis=2) * 255) for image in list(x_test)]
# """
# Inference with TF-TRT `SavedModel` workflow:
# """
graph = tf.Graph()
with graph.as_default():
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True # maybe necessary
tfconfig.allow_soft_placement = True # maybe necessary
with tf.Session(config=tfconfig) as sess:
# Create a TensorRT inference graph from a SavedModel:
trt_graph = trt.create_inference_graph(
input_graph_def=None,
outputs=None,
# is_dynamic_op=True,
input_saved_model_dir=SavedModel_path,
input_saved_model_tags=[model_tag],
max_batch_size=batch_size,
max_workspace_size_bytes=max_GPU_mem_size_for_TRT,
precision_mode="FP32",
# use_calibration=False, # set False when using INT8
            # The following call creates the directory automatically;
            # note that "output_saved_model_dir" must be a path that does not yet point to an existing directory
output_saved_model_dir=trt_export_model_dir
)
# Import the TensorRT graph into a new graph and run:
output_node = tf.import_graph_def(
trt_graph,
return_elements=["logits:0"]
)
trt_engine_ops = [n.name for n in trt_graph.node if str(n.op) == 'TRTEngineOp']
print("Number of trt op: %d" % len(trt_engine_ops))
print(trt_engine_ops)
# warm up
print("warm up")
for i in range(5):
prob = sess.run(output_node, {
"import/image_strings:0": [x_test[0]] * batch_size,
"import/image_shapes:0": [(28, 28, 3)] * batch_size
})
print("counter start")
START_TIME = time.time()
prob = sess.run(output_node, feed_dict={
"import/image_strings:0": [x_test[0]] * batch_size,
"import/image_shapes:0": [(28, 28, 3)] * batch_size
})
print("spent %f seconds" % (time.time() - START_TIME))
test_idx = 0
print("label: %d, prediction: %d" % (np.argmax(y_test[test_idx]), np.argmax(prob[0])))
# write graph
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summaries_dir, trt_graph)
# """
# Inference with TF-TRT frozen graph workflow:
"""
graph = tf.Graph()
with graph.as_default():
with tf.Session() as sess:
# First deserialize your frozen graph:
frozen_model_path = os.path.join(frozen_model_dir, 'frozen_model.pb')
with tf.gfile.GFile(frozen_model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Now you can create a TensorRT inference graph from your
# frozen graph:
trt_graph = trt.create_inference_graph(
input_graph_def=graph_def,
outputs=["probs:0"],
max_batch_size=batch_size,
max_workspace_size_bytes=max_GPU_mem_size_for_TRT,
precision_mode="FP32")
# Import the TensorRT graph into a new graph and run:
output_node = tf.import_graph_def(
trt_graph,
return_elements=["probs:0"])
sess.run(output_node, feed_dict={
"image_batch:0": img1
})
# """
|
[
"yorwxue@gmail.com"
] |
yorwxue@gmail.com
|
05d5bbe7b2195d31cb3a4e49a9314e81afe7450c
|
f8d9f893a7afa667a9b615742019cd5c52ee2c59
|
/core/platform/taskqueue/dev_mode_taskqueue_services_test.py
|
77e45c75f94fed4aa4120ec85940c3c7e56c064a
|
[
"Apache-2.0"
] |
permissive
|
FareesHussain/oppia
|
2ac6c48aaea6a70452b79d665995f6ba6560f70d
|
2862b7da750ce332c975b64237791f96189d7aa8
|
refs/heads/develop
| 2023-08-17T19:25:05.551048
| 2021-10-01T10:36:36
| 2021-10-01T10:36:36
| 323,160,532
| 2
| 0
|
Apache-2.0
| 2020-12-20T20:38:45
| 2020-12-20T20:38:44
| null |
UTF-8
|
Python
| false
| false
| 4,935
|
py
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the dev_mode_taskqueue_services."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from core import feconf
from core.domain import taskqueue_services
from core.platform.taskqueue import dev_mode_taskqueue_services
from core.tests import test_utils
import requests
from typing import Any, Dict, Optional
class DevModeTaskqueueServicesUnitTests(test_utils.TestBase):
"""Tests for dev_mode_taskqueue_services."""
def test_creating_dev_mode_task_will_create_the_correct_post_request(
self
) -> None:
correct_queue_name = 'dummy_queue'
dummy_url = '/dummy_handler'
correct_payload = {
'fn_identifier': (
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS),
'args': [['1', '2', '3']],
'kwargs': {}
}
correct_task_name = 'task1'
# In the type annotation below, payload is of type Dict[str, Any]
# because it mocks the behaviour of
# dev_mode_taskqueue_services.CLIENT.create_task.
def mock_create_task(
queue_name: str,
url: str,
payload: Dict[str, Any],
scheduled_for: Optional[datetime.datetime] = None, # pylint: disable=unused-argument
task_name: Optional[str] = None,
) -> None:
self.assertEqual(queue_name, correct_queue_name)
self.assertEqual(url, dummy_url)
self.assertEqual(payload, correct_payload)
self.assertEqual(task_name, correct_task_name)
swap_create_task = self.swap(
dev_mode_taskqueue_services.CLIENT, 'create_task', mock_create_task)
with swap_create_task:
dev_mode_taskqueue_services.create_http_task(
correct_queue_name, dummy_url, correct_payload,
task_name=correct_task_name)
def test_task_handler_will_create_the_correct_post_request(self) -> None:
queue_name = 'dummy_queue'
dummy_url = '/dummy_handler'
correct_port = dev_mode_taskqueue_services.GOOGLE_APP_ENGINE_PORT
correct_payload = {
'fn_identifier': (
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS),
'args': [['1', '2', '3']],
'kwargs': {}
}
task_name = 'task1'
correct_headers = {
'X-Appengine-QueueName': queue_name,
'X-Appengine-TaskName': task_name,
'X-Appengine-TaskRetryCount': '0',
'X-Appengine-TaskExecutionCount': '0',
'X-Appengine-TaskETA': '0',
'X-AppEngine-Fake-Is-Admin': '1',
'method': 'POST'
}
# In the type annotation below, we have used Dict[str, Any] for JSON.
# This is because this function mocks requests.post function where the
# type of JSON has been defined Any, hence using Dict[str, Any] here.
# https://github.com/python/typeshed/blob/5e0fc4607323a4657b587bf70e3c26becf1c88d0/stubs/requests/requests/api.pyi#L78
def mock_post(
url: str,
json: Dict[str, Any],
headers: Dict[str, str],
timeout: int
) -> None:
self.assertEqual(
url, 'http://localhost:%s%s' % (
correct_port, dummy_url))
self.assertEqual(json, correct_payload)
self.assertEqual(headers, correct_headers)
self.assertEqual(timeout, feconf.DEFAULT_TASKQUEUE_TIMEOUT_SECONDS)
swap_post = self.swap(requests, 'post', mock_post)
with swap_post:
# I have to test _task_handler by calling it because I cannot
# surround this task handler in a context manager reliably. The
# task_handler is called by a queue thread that is instantiated by
            # the Cloud Tasks Emulator, which has a non-deterministic execution
# time. Creating a task will execute correctly but the program will
# exit the context before actually calling _task_handler().
dev_mode_taskqueue_services._task_handler( # pylint: disable=protected-access
dummy_url, correct_payload, queue_name, task_name=task_name)
|
[
"noreply@github.com"
] |
FareesHussain.noreply@github.com
|
d851c1b76ebb72393f7423de98c40690a78c7c5b
|
e1e3ee617a50c44c7027ebabc3c918797f8daef8
|
/sorter.py
|
9566fdab8378d1cc2d5fe387c44cc5a9bdb5fec2
|
[] |
no_license
|
Kain-Huang/pithy
|
179490f6af0d1a77dde015c5570d9d8f75bd3e41
|
6ed323782cad80954f9ab4a6d81726370d7ff53c
|
refs/heads/master
| 2022-01-05T12:12:27.395657
| 2019-05-19T18:25:08
| 2019-05-19T18:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
import sys
arg = sys.argv[1]
print arg
from commands import getoutput as go
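# Split the argument on the literal delimiter "BREAKKKKK": the leading pieces
# become nested directories under code/ and the last piece names the .py file.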
a = arg.split("BREAKKKKK")
dird = ""
fil = a[-1]+".py"
for i in a[:-1]: dird+=i+"/"
go("mkdir -p code/"+dird)
print dird+fil
print go("cp code/%s.py code/%s/%s" % (arg,dird,fil))
|
[
"dan.steingart@gmail.com"
] |
dan.steingart@gmail.com
|
c9c89d48c222f86dee205223c3208cf1a0857b72
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=42/params.py
|
2298340fefd6fd7ac91681022ad274284fb678f8
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.512524',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 42,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
6665b847961f9fbe18e23a6309b0424a0ede5776
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/securityinsights/v20190101preview/get_dynamics365_data_connector.py
|
2076d0d3c76949d8502ecd1dc8596fbb938831a6
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 5,317
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDynamics365DataConnectorResult',
'AwaitableGetDynamics365DataConnectorResult',
'get_dynamics365_data_connector',
]
@pulumi.output_type
class GetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
"""
def __init__(__self__, data_types=None, etag=None, id=None, kind=None, name=None, tenant_id=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.Dynamics365DataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Expected value is 'Dynamics365'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id to connect to, and get the data from.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetDynamics365DataConnectorResult(GetDynamics365DataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDynamics365DataConnectorResult(
data_types=self.data_types,
etag=self.etag,
id=self.id,
kind=self.kind,
name=self.name,
tenant_id=self.tenant_id,
type=self.type)
def get_dynamics365_data_connector(data_connector_id: Optional[str] = None,
operational_insights_resource_provider: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
:param str data_connector_id: Connector ID
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20190101preview:getDynamics365DataConnector', __args__, opts=opts, typ=GetDynamics365DataConnectorResult).value
return AwaitableGetDynamics365DataConnectorResult(
data_types=__ret__.data_types,
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
a6f320488fbfcac32b54d57d57061287558b662e
|
89e40bf548403e440c230e06fa6301021ec8b0c7
|
/sw_expert_academy/D2/p1946.py
|
12221cf2e3b7cf1e668cdf6f634c66ecca732743
|
[] |
no_license
|
TaeJuneJoung/Algorithm
|
b9cf5724501918c7302099b8194d26bd19512bd0
|
ecc2934a376c91ecec8bfd15af377d8a2973d71d
|
refs/heads/master
| 2020-06-19T13:50:14.720987
| 2019-08-04T14:35:43
| 2019-08-04T14:35:43
| 196,732,653
| 0
| 0
| null | 2019-08-04T14:35:44
| 2019-07-13T14:46:42
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
"""
[1946.간단한 압축 풀기]
10개가 찍히면 다음줄로 내려가는 처리가 중요한 문제
또한, 테스트케이스가 하나 끝나면 한줄을 내려줘야한다
T : 테스트케이스
N : 받는 갯수
sum_num : 10개씩 띄어주기 위해서 사용
value : string타입의 값
num : value가 나오는 횟수
"""
T = int(input())
for t in range(1, T+1):
print("#{}".format(t))
N = int(input())
sum_num = 0
for i in range(N):
value, num = map(str, input().split())
num = int(num)
for j in range(num):
print(value, end="")
sum_num += 1
if sum_num == 10:
sum_num = 0
print()
print()
|
[
"jtj0525@gmail.com"
] |
jtj0525@gmail.com
|
79ea21858064c500d5f2adf83982fe2f10cbeafd
|
04dc3d8883c7b5510610ec3e86e4238606fc1e45
|
/tasks/tasks_fetch_currency_exchange.py
|
5103de89945a2192d07ba82a923e1b3ed841eb2b
|
[
"MIT"
] |
permissive
|
xyla-io/almacen
|
72294c6d7758d39ca12c22af174145d716769b82
|
7b7f235dc7939777f971f1b5eadd5621e980c15e
|
refs/heads/main
| 2022-12-28T22:10:46.905278
| 2020-10-14T19:42:57
| 2020-10-16T19:50:55
| 304,113,749
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
import models
from . import base
from config import CompanyConfiguration
from typing import List, Dict, Optional
from jones import FixerAPI
from datetime import datetime
class FetchBaseCurrencyExchangeReportTask(base.FetchReportTask):
api: Optional[FixerAPI]
min_currency_dates_to_fetch: Dict[str, datetime]
max_currency_dates_to_fetch: Dict[str, datetime]
base_currency: str
def __init__(self, base_currency: str, task_set: CompanyConfiguration.TaskSet, identifier_prefix: str):
super().__init__(task_set=task_set, identifier_prefix=identifier_prefix)
self.base_currency = base_currency
self.min_currency_dates_to_fetch = {}
self.max_currency_dates_to_fetch = {}
@property
def task_type(self) -> models.ReportTaskType:
return models.ReportTaskType.fetch_base_currency_exchage_rates
@property
def debug_description(self) -> str:
return '{}: ({} -> {}) — {}'.format(
self.company_display_name,
self.base_currency,
', '.join(self.currencies),
self.task_type.value
)
@property
def task_identifier_columns(self) -> Dict[str, any]:
return {
'base': self.base_currency,
'target': self.currencies,
}
@property
def currencies(self) -> List[str]:
return self.task_set.config['currency_exchange']['currencies']
@property
def report_table_model(self) -> models.ReportTableModel:
return models.CurrencyExchangeRatesTableModel(schema_name=self.report_table_schema)
@property
def api_credentials_key(self) -> str:
return self.task_set.config['currency_exchange']['credentials_key']
class FetchCurrencyExchangeReportTask(base.CombinedReportTask):
@property
def task_type(self) -> models.ReportTaskType:
return models.ReportTaskType.fetch_currency_exchange_rates
@property
def debug_description(self) -> str:
return '{}: ({}) — {}'.format(
self.company_display_name,
', '.join(self.currencies),
self.task_type.value
)
@property
def currencies(self) -> List[str]:
return self.task_set.config['currency_exchange']['currencies']
def generate_subtasks(self) -> List[base.ReportTask]:
return [
FetchBaseCurrencyExchangeReportTask(
base_currency=c,
task_set=self.task_set,
identifier_prefix='{}.{}-{}'.format(self.identifier, c, '_'.join(self.currencies))
)
for c in self.currencies
]
|
[
"leif@leifmeyer.io"
] |
leif@leifmeyer.io
|
cdad717e47a15a103068cedb950db7175e3f5c00
|
34d88082307281333ef4aeeec012a3ff5f8ec06e
|
/100 python/Q090.py
|
57e5bf3e138599fd5fe71f041643aeb9d105c6eb
|
[] |
no_license
|
JKChang2015/Python
|
a6f8b56fa3f9943682470ae57e5ad3266feb47a7
|
adf3173263418aee5d32f96b9ea3bf416c43cc7b
|
refs/heads/master
| 2022-12-12T12:24:48.682712
| 2021-07-30T22:27:41
| 2021-07-30T22:27:41
| 80,747,432
| 1
| 8
| null | 2022-12-08T04:32:06
| 2017-02-02T17:05:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 364
|
py
|
# Q090
# Created by JKChang
# 09/05/2017, 15:37
# Tag: remove particular element
# Description: By using list comprehension, please write a program to print the list after removing the value 24 in
# [12,24,35,24,88,120,155].
li = [12, 24, 35, 24, 88, 120, 155]
l = [x for x in li if x != 24]
# l = [x for (i, x) in enumerate(li) if x != 24]
print(l)
|
[
"jkchang2015@gmail.com"
] |
jkchang2015@gmail.com
|
f26247827774f537f5498e3343140e8ee540b7e4
|
375e834e7a2ff7b085b88cc162fb8215e14cd132
|
/Python/largest-triangle-area.py
|
69ea932c4fb00f00dab5018641b8bbcd1559e8ed
|
[
"MIT"
] |
permissive
|
tickpeach/LeetCode-Solutions
|
0842086aa1781191fe68639c884986f843194262
|
16c96776781d04672d653cef48f4f7989685cbe9
|
refs/heads/master
| 2020-04-01T02:46:38.356672
| 2018-10-12T18:15:41
| 2018-10-12T18:15:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
# Time: O(n^3)
# Space: O(1)
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
def largestTriangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
result = 0
for i in xrange(len(points)-2):
for j in xrange(i+1, len(points)-1):
for k in xrange(j+1, len(points)):
result = max(result,
0.5 * abs(points[i][0] * points[j][1] +
points[j][0] * points[k][1] +
points[k][0] * points[i][1] -
points[j][0] * points[i][1] -
points[k][0] * points[j][1] -
points[i][0] * points[k][1]))
return result
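
# A minimal usage sketch (illustrative points, not from the original file):
# the shoelace formula above gives 0.5 for the unit right triangle.
if __name__ == "__main__":
    print(Solution().largestTriangleArea([[0, 0], [0, 1], [1, 0]]))  # 0.5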
|
[
"kamyu104@gmail.com"
] |
kamyu104@gmail.com
|
2ed90e62775bcf2abcceb6808eb7d46bfad27f24
|
5a9d8c64c6478f3816b63f59f1cdaca73c0848eb
|
/pythonNet/ex07_Thread/array.py
|
fbebd4f0134d54c89387bb558a5577494bae457e
|
[] |
no_license
|
wangredfei/nt_py
|
f68134977e6d1e05cf17cec727644509f084c462
|
fedf03c0d52565f588e9b342d1c51df0b6dc2681
|
refs/heads/master
| 2020-04-08T07:55:08.302589
| 2018-11-23T09:53:48
| 2018-11-23T09:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
from multiprocessing import Process,Array
import time
# # Create a block of shared memory holding an integer list
# shm = Array('i', [1,2,3,4,5])
# def fun():
# for s in shm:
# print(s)
# shm[0]=1000
# p = Process(target = fun)
# p.start()
# p.join()
# for i in shm:
# print(i)
shm = Array('c',b'hello')
def fun():
for i in shm:
print(i)
shm[0] = b'H'
p = Process(target = fun)
p.start()
p.join()
for i in shm:
print(i,end="")
print()
print(shm.value)  # print the whole byte string
|
[
"289498360@qq.com"
] |
289498360@qq.com
|
3e05f88b40601505afeed262deb49042d529da7a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_277/ch118_2020_03_30_20_02_35_602471.py
|
1f099ee2f4af19fc094bf59344f12a9a10646426
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
import math
def reflexao_total_interna(n1, n2, o2):
    # Snell's law: n1*sin(o1) = n2*sin(o2), so the refracted angle o1 needs
    # sin(o1) = n2*sin(o2)/n1; total internal reflection occurs when that
    # value would exceed 1.
    a = math.sin(o2 * math.pi / 180) * n2 / n1
    if a > 1:
        return True
    else:
        return False
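
# A minimal usage sketch (illustrative values): light travelling in glass
# (n2 = 1.5) towards air (n1 = 1.0) at 60 degrees is totally internally
# reflected, since 1.5 * sin(60 degrees) / 1.0 > 1.
print(reflexao_total_interna(1.0, 1.5, 60))  # True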
|
[
"you@example.com"
] |
you@example.com
|
ab320487cab51af6170a88923ce8087b084a8206
|
9c529778ea60e590e448589e35eb4dae941e832a
|
/evennia-engine/evenv/share/doc/networkx-2.4/examples/drawing/plot_spectral_grid.py
|
3f2bc92202d5f7a92efbf193f918e48aa2443540
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
rajammanabrolu/WorldGeneration
|
3d0976ffba8588fcebda8b8593be694e1bc1501d
|
5e97df013399e1a401d0a7ec184c4b9eb3100edd
|
refs/heads/master
| 2022-11-25T20:10:52.682064
| 2021-09-08T11:50:23
| 2021-09-08T11:50:23
| 235,484,371
| 69
| 5
|
MIT
| 2022-11-22T08:50:22
| 2020-01-22T02:32:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
"""
==================
Spectral Embedding
==================
The spectral layout positions the nodes of the graph based on the
eigenvectors of the graph Laplacian $L = D - A$, where $A$ is the
adjacency matrix and $D$ is the degree matrix of the graph.
By default, the spectral layout will embed the graph in two
dimensions (you can embed your graph in other dimensions using the
``dim`` argument to either :func:`~drawing.nx_pylab.draw_spectral` or
:func:`~drawing.layout.spectral_layout`).
When the edges of the graph represent similarity between the incident
nodes, the spectral embedding will place highly similar nodes closer
to one another than nodes which are less similar.
This is particularly striking when you spectrally embed a grid
graph. In the full grid graph, the nodes in the center of the
graph are pulled apart more than nodes on the periphery.
As you remove internal nodes, this effect increases.
"""
import matplotlib.pyplot as plt
import networkx as nx
options = {
'node_color': 'C0',
'node_size': 100,
}
G = nx.grid_2d_graph(6, 6)
plt.subplot(332)
nx.draw_spectral(G, **options)
G.remove_edge((2, 2), (2, 3))
plt.subplot(334)
nx.draw_spectral(G, **options)
G.remove_edge((3, 2), (3, 3))
plt.subplot(335)
nx.draw_spectral(G, **options)
G.remove_edge((2, 2), (3, 2))
plt.subplot(336)
nx.draw_spectral(G, **options)
G.remove_edge((2, 3), (3, 3))
plt.subplot(337)
nx.draw_spectral(G, **options)
G.remove_edge((1, 2), (1, 3))
plt.subplot(338)
nx.draw_spectral(G, **options)
G.remove_edge((4, 2), (4, 3))
plt.subplot(339)
nx.draw_spectral(G, **options)
plt.show()
|
[
"williambroniec@gmail.com"
] |
williambroniec@gmail.com
|
efbc42ba62610026e9e989063cfe821d499f6971
|
7c17d6047a8a31a54a42dc213a0a3c26ccb320fd
|
/djlistener/djlistener/asgi.py
|
c9bd80b307fc14acff4568b13981b3e2eb69841c
|
[] |
no_license
|
morlandi/sinewave
|
7d8cd55d4b0fb72b30c99144b09ce55da1722c2d
|
39e2fe778ca84d045a877f0ef7938ba7a5ef05ce
|
refs/heads/master
| 2023-04-16T11:29:11.748802
| 2021-06-28T14:46:43
| 2021-06-28T14:46:43
| 152,848,099
| 9
| 5
| null | 2023-03-31T14:55:40
| 2018-10-13T07:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
"""
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
# import os
# import django
# from channels.routing import get_default_application
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djlistener.settings")
# django.setup()
# application = get_default_application()
import os
from channels.routing import ProtocolTypeRouter
from channels.routing import URLRouter
from django.core.asgi import get_asgi_application
from django.urls import path
from . import consumers
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djlistener.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": URLRouter([
path("ws/sinewave/", consumers.SinewaveSyncConsumer.as_asgi()),
]),
})
|
[
"morlandi@brainstorm.it"
] |
morlandi@brainstorm.it
|
8782e35c54ec3809fa7022d9699b4f8f0f1a0bb6
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/42300000.py
|
25e564709eaf15a934a790a92af651aa2170f74d
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/42300000.py generated: Fri, 27 Mar 2015 15:48:05
#
# Event Type: 42300000
#
# ASCII decay Descriptor: pp -> [W+ -> tau+ nu_tau]cc ...
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/Wtaunu.py" )
from Configurables import Generation
Generation().EventType = 42300000
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/W_taunutau.dec"
Generation().Special.CutTool = "PythiaHiggsType"
from Configurables import PythiaHiggsType
Generation().Special.addTool( PythiaHiggsType )
Generation().Special.PythiaHiggsType.NumberOfLepton = 1
from GaudiKernel import SystemOfUnits
Generation().Special.PythiaHiggsType.LeptonPtMin = 4*SystemOfUnits.GeV
Generation().Special.PythiaHiggsType.LeptonIsFromMother = True
Generation().Special.PythiaHiggsType.NumberOfbquarks = -1
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
f445c13cd68294138922d2b7dfada304cc3fb281
|
43a96eafd9108dd48f91d0b7c70cf4cd99e7eae2
|
/audio_zen/utils.py
|
0415dcc5518ed493db8332c1c592d75d1b272e7c
|
[
"MIT"
] |
permissive
|
yaoao2017/FullSubNet
|
ec5096f9ed958aa6aceacb5cefcd96a1c77be1c9
|
213df1b46d5bc3d61d774a75aebae5b731046bd2
|
refs/heads/main
| 2023-08-28T01:22:24.022365
| 2021-11-01T11:43:47
| 2021-11-01T11:43:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,430
|
py
|
import importlib
import os
import time
from copy import deepcopy
from functools import reduce
import torch
def load_checkpoint(checkpoint_path, device):
_, ext = os.path.splitext(os.path.basename(checkpoint_path))
    assert ext in (".pth", ".tar"), "Only support .pth and .tar checkpoint extensions."
model_checkpoint = torch.load(os.path.abspath(os.path.expanduser(checkpoint_path)), map_location=device)
if ext == ".pth":
print(f"Loading {checkpoint_path}.")
return model_checkpoint
else: # load tar
print(f"Loading {checkpoint_path}, epoch = {model_checkpoint['epoch']}.")
return model_checkpoint["l1"]
def prepare_empty_dir(dirs, resume=False):
"""
if resume the experiment, assert the dirs exist. If not the resume experiment, set up new dirs.
Args:
dirs (list): directors list
resume (bool): whether to resume experiment, default is False
"""
for dir_path in dirs:
if resume:
            assert dir_path.exists(), "In resume mode, you must have an existing experiment dir."
else:
dir_path.mkdir(parents=True, exist_ok=True)
def check_nan(tensor, key=""):
if torch.sum(torch.isnan(tensor)) > 0:
print(f"Found NaN in {key}")
class ExecutionTime:
"""
Count execution time.
Examples:
timer = ExecutionTime()
...
print(f"Finished in {timer.duration()} seconds.")
"""
def __init__(self):
self.start_time = time.time()
def duration(self):
return int(time.time() - self.start_time)
def initialize_module(path: str, args: dict = None, initialize: bool = True):
"""
Load module or function dynamically with "args".
Args:
path: module path in this project.
args: parameters that will be passed to the Class or the Function in the module.
initialize: whether to initialize the Class or the Function with args.
Examples:
Config items are as follows:
[model]
        path = "model.full_sub_net.FullSubNetModel"
[model.args]
n_frames = 32
...
This function will:
1. Load the "model.full_sub_net" module.
2. Call "FullSubNetModel" Class (or Function) in "model.full_sub_net" module.
3. If initialize is True:
instantiate (or call) the Class (or the Function) and pass the parameters (in "[model.args]") to it.
"""
module_path = ".".join(path.split(".")[:-1])
class_or_function_name = path.split(".")[-1]
module = importlib.import_module(module_path)
class_or_function = getattr(module, class_or_function_name)
if initialize:
if args:
return class_or_function(**args)
else:
return class_or_function()
else:
return class_or_function
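
# A minimal usage sketch for initialize_module (the dotted path and args are
# illustrative, not part of this file):
#
#     model = initialize_module(
#         "model.full_sub_net.FullSubNetModel",
#         args={"n_frames": 32},
#         initialize=True,
#     )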
def print_tensor_info(tensor, flag="Tensor"):
def floor_tensor(float_tensor):
return int(float(float_tensor) * 1000) / 1000
print(
f"{flag}\n"
f"\t"
f"max: {floor_tensor(torch.max(tensor))}, min: {float(torch.min(tensor))}, "
f"mean: {floor_tensor(torch.mean(tensor))}, std: {floor_tensor(torch.std(tensor))}")
def set_requires_grad(nets, requires_grad=False):
"""
Args:
nets: list of networks
requires_grad
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def merge_config(*config_dicts):
"""
Deep merge configuration dicts.
Args:
*config_dicts: any number of configuration dicts.
Notes:
        1. The values of items in the later configuration dict(s) update the ones in the former dict(s).
        2. Every key in a later dict must already exist in the former dict, i.e. the first dict must contain all keys.
    Examples:
        a = {
            "a": 1,
            "b": 2,
            "c": {
                "d": 1
            }
        }
        b = {
            "a": 2,
            "b": 2,
            "c": {
                "e": 1
            }
        }
        c = merge_config(a, b)
        c == {
            "a": 2,
            "b": 2,
            "c": {
                "d": 1,
                "e": 1
            }
        }
Returns:
New deep-copied configuration dict.
"""
def merge(older_dict, newer_dict):
for new_key in newer_dict:
if new_key not in older_dict:
                # every key in the custom config must already exist in the common config
                raise KeyError(f"Key {new_key} does not exist in the common config.")
if isinstance(older_dict[new_key], dict):
older_dict[new_key] = merge(older_dict[new_key], newer_dict[new_key])
else:
older_dict[new_key] = deepcopy(newer_dict[new_key])
return older_dict
return reduce(merge, config_dicts[1:], deepcopy(config_dicts[0]))
def prepare_device(n_gpu: int, keep_reproducibility=False):
"""
Choose to use CPU or GPU depend on the value of "n_gpu".
Args:
n_gpu(int): the number of GPUs used in the experiment. if n_gpu == 0, use CPU; if n_gpu >= 1, use GPU.
keep_reproducibility (bool): if we need to consider the repeatability of experiment, set keep_reproducibility to True.
See Also
Reproducibility: https://pytorch.org/docs/stable/notes/randomness.html
"""
if n_gpu == 0:
print("Using CPU in the experiment.")
device = torch.device("cpu")
else:
# possibly at the cost of reduced performance
if keep_reproducibility:
print("Using CuDNN deterministic mode in the experiment.")
torch.backends.cudnn.benchmark = False # ensures that CUDA selects the same convolution algorithm each time
torch.set_deterministic(True) # configures PyTorch only to use deterministic implementation
else:
# causes cuDNN to benchmark multiple convolution algorithms and select the fastest
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0")
return device
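
# A minimal usage sketch (illustrative; MyModel is hypothetical):
#
#     device = prepare_device(n_gpu=1, keep_reproducibility=False)
#     model = MyModel().to(device)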
def expand_path(path):
return os.path.abspath(os.path.expanduser(path))
def basename(path):
filename, ext = os.path.splitext(os.path.basename(path))
return filename, ext
|
[
"haoxiangsnr@gmail.com"
] |
haoxiangsnr@gmail.com
|
7de97cc4e386019c8c8287f8821f5d0eba631a12
|
594fd699d9f8070c867b83b11881ca1f624b417b
|
/EstruturaDeDecisao/mais_barato.py
|
e1caf21790f912b0b381f4166b4196d14a2831b6
|
[] |
no_license
|
felipmarqs/exerciciospythonbrasil
|
f140df2c59b933cc0460d5986afc8c6ddd493556
|
6d02e85ae5986d3b20cfd8781174998d871eeb90
|
refs/heads/master
| 2020-04-04T05:25:23.751175
| 2018-12-12T18:44:38
| 2018-12-12T18:44:38
| 155,745,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Write a program that asks for the prices of three products and tells which product to buy, given that the decision is always for the cheapest one.
p1 = float(input("Qual o preço do primeiro produto ? R$"))
p2 = float(input("Qual o preço do segundo produto ? R$"))
p3 = float(input("Qual o preço do terceiro produto ? R$"))
if p1 < p2 and p1 < p3:
print("Compre o primeiro!")
elif p2 < p1 and p2 < p3:
print(("Compre o segundo!"))
elif p3 < p1 and p3 < p2:
print("Compre o terceiro!")
|
[
"noreply@github.com"
] |
felipmarqs.noreply@github.com
|
fa9b010550b5d313d60bfd25b37849dd9fcabfb8
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/ugc/_admin_get_specific_content.py
|
80f1e848449d0efebed0a1dbcc9f1ac5fe2f4371
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Ugc Service (2.11.3)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.ugc import (
admin_get_specific_content as admin_get_specific_content_internal,
)
from accelbyte_py_sdk.api.ugc.models import ModelsContentDownloadResponse
from accelbyte_py_sdk.api.ugc.models import ResponseError
@click.command()
@click.argument("content_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_get_specific_content(
content_id: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_get_specific_content_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = admin_get_specific_content_internal(
content_id=content_id,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminGetSpecificContent failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_get_specific_content.operation_id = "AdminGetSpecificContent"
admin_get_specific_content.is_deprecated = False
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
9090cf8f5afc34505f2a27b36145a5667b7bc8c2
|
aff3d82217ca3a43d42c215b7fde022017f3b779
|
/spec/one_image_spec.py
|
218822cbdcf28e9883c86acb41297198d17aea62
|
[] |
no_license
|
AndyDeany/turnbasedgame
|
96784a6f1fcf7c2c82e10012d81b4e0caf807a6b
|
362f973888a549535a854500da443613725ad0f0
|
refs/heads/master
| 2021-01-19T08:48:28.125410
| 2017-09-09T09:56:51
| 2017-09-09T09:56:51
| 76,188,966
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
from spec.spec_helper import *
from lib.one_image import OneImage
with description("OneImage"):
with it("should initialise"):
one_image = OneImage(game, "items/heart")
expect(one_image.image_name).to(equal("items/heart"))
expect(one_image.image).to(be(None))
with it("should load its image"):
one_image = OneImage(game, "items/heart")
one_image.load_image()
expect(one_image.image).to(be_a(game.pygame.Surface))
with it("should unload its image"):
one_image = OneImage(game, "items/heart")
one_image.load_image()
one_image.unload_image()
expect(one_image.image).to(be(None))
|
[
"oneandydean@hotmail.com"
] |
oneandydean@hotmail.com
|
4195bf38a598c838adeceb94937fad2949c57c3a
|
f373eaeba3f42d2e883a0338dbc7bf2eab8cdf88
|
/pycalq/tests/test_pycalq.py
|
2f8a9540b6d69608385cc68cae0e6db8d1a3aaea
|
[
"MIT"
] |
permissive
|
FriedrichK/pyCalq
|
6f41d561f4394c7c4d57df08a715b560e41812c9
|
b20c1c5694d34dbeb7439986189cae3f698bb910
|
refs/heads/master
| 2021-01-23T03:44:24.972747
| 2014-08-20T00:19:51
| 2014-08-20T00:19:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,685
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import unittest
from mock import Mock, patch
from hamcrest import *
from pycalq import CALQ_API_ENDPOINT_TRACKING, CALQ_API_ENDPOINT_PROFILE, CALQ_API_ENDPOINT_TRANSFER
from pycalq.tools import create_timestamp_string
from pycalq.validation import ActionParameterValidator, ParameterValidationException
from pycalq.tracking import track_action, submit_profile, transfer_user
TEST_DATETIME = datetime(2014, 8, 19, 13, 16, 30, 1)
TEST_DATETIME_STRING = '2014-08-19 13:16:30.000001'
TEST_ACTOR_NAME = 'test_actor'
TEST_ACTOR_NAME2 = "test_actor_2"
IP_ADDRESS_MOCK = '127.1.2.7'
TEST_ACTION = 'Does Amazing Thing'
WRITE_KEY_MOCK = 'allworkandnoplaymakesjackadullboy'
TEST_PROPERTY_VALID_GENDER = 'male'
TEST_PROPERTY_INVALID_GENDER = 'I prefer not to say'
TEST_PROPERTY_VALID_CURRENCY = 'usd'
TEST_PROPERTY_INVALID_CURRENCY = 'usdx'
TEST_PROPERTY_VALID_AGE = 29
TEST_PROPERTY_INVALID_AGE = 'twenty-nine'
TEST_PROPERTY_VALID_SALE_VALUE = 1
TEST_PROPERTY_VALID_SALE_CURRENCY = 'eur'
TEST_PROPERTY_VALID_DEVICE_AGENT = 'android'
TEST_PROPERTY_VALID_DEVICE_OS = 'android'
TEST_PROPERTY_VALID_DEVICE_RESOLUTION = '1024x768'
TEST_PROPERTY_VALID_DEVICE_MOBILE = True
TEST_PROPERTY_VALID_COUNTRY = 'DE'
TEST_PROPERTY_VALID_REGION = 'BE'
TEST_PROPERTY_VALID_CITY = 'Berlin'
TEST_PROPERTY_VALID_UTM_CAMPAIGN = 'campaign_name'
TEST_PROPERTY_VALID_UTM_MEDIUM = 'radio'
TEST_PROPERTY_VALID_UTM_SOURCE = 'nytimes'
TEST_PROPERTY_VALID_UTM_CONTENT = 'content'
TEST_PROPERTY_VALID_UTM_TERM = 'some,keywords,convert,well'
class ToolsTestCase(unittest.TestCase):
def test_returns_timestamp_string_in_expected_format(self):
actual = create_timestamp_string(TEST_DATETIME)
self.assertEquals(actual, TEST_DATETIME_STRING)
class TrackingTestCase(unittest.TestCase):
@patch('pycalq.tracking.PoolManager')
def test_sends_tracking_request_as_expected(self, PoolManagerMock):
PoolManagerMock, url_open_mock = self._build_pool_manager_mock(PoolManagerMock)
properties = {'$country': 'NL', 'custom_property': True}
track_action(TEST_ACTOR_NAME, TEST_ACTION, WRITE_KEY_MOCK, properties, IP_ADDRESS_MOCK, TEST_DATETIME)
args, kwargs = url_open_mock.call_args
self.assertEquals(args[0], 'POST')
self.assertEquals(args[1], CALQ_API_ENDPOINT_TRACKING)
self.assertEquals(kwargs['headers'], {'Content-Type': 'application/json'})
expected = {
'timestamp': TEST_DATETIME_STRING,
'actor': TEST_ACTOR_NAME,
'action_name': TEST_ACTION,
'write_key': WRITE_KEY_MOCK,
'ip_address': IP_ADDRESS_MOCK,
'properties': properties
}
self.assertEquals(json.loads(kwargs['body']), expected)
@patch('pycalq.tracking.PoolManager')
def test_logs_that_action_request_properties_are_invalid(self, PoolManagerMock):
logger_mock = Mock()
logger_mock.debug = Mock()
properties = {'$gender': TEST_PROPERTY_INVALID_GENDER}
track_action(TEST_ACTOR_NAME, TEST_ACTION, WRITE_KEY_MOCK, properties, IP_ADDRESS_MOCK, TEST_DATETIME, log=logger_mock)
self.assertTrue(logger_mock.debug.called)
@patch('pycalq.tracking.PoolManager')
def test_sends_profile_request_as_expected(self, PoolManagerMock):
PoolManagerMock, url_open_mock = self._build_pool_manager_mock(PoolManagerMock)
properties = {'$age': TEST_PROPERTY_VALID_AGE, 'custom_property': True}
submit_profile(TEST_ACTOR_NAME, WRITE_KEY_MOCK, properties)
args, kwargs = url_open_mock.call_args
self.assertEquals(args[0], 'POST')
self.assertEquals(args[1], CALQ_API_ENDPOINT_PROFILE)
self.assertEquals(kwargs['headers'], {'Content-Type': 'application/json'})
expected = {
'actor': TEST_ACTOR_NAME,
'write_key': WRITE_KEY_MOCK,
'properties': properties
}
self.assertEquals(json.loads(kwargs['body']), expected)
@patch('pycalq.tracking.PoolManager')
def test_logs_that_profile_request_properties_are_invalid(self, PoolManagerMock):
logger_mock = Mock()
logger_mock.debug = Mock()
properties = {'$age': TEST_PROPERTY_INVALID_AGE}
submit_profile(TEST_ACTOR_NAME, WRITE_KEY_MOCK, properties, log=logger_mock)
self.assertTrue(logger_mock.debug.called)
@patch('pycalq.tracking.PoolManager')
def test_sends_transfer_request_as_expected(self, PoolManagerMock):
PoolManagerMock, url_open_mock = self._build_pool_manager_mock(PoolManagerMock)
transfer_user(TEST_ACTOR_NAME, TEST_ACTOR_NAME2, WRITE_KEY_MOCK)
args, kwargs = url_open_mock.call_args
self.assertEquals(args[0], 'POST')
self.assertEquals(args[1], CALQ_API_ENDPOINT_TRANSFER)
self.assertEquals(kwargs['headers'], {'Content-Type': 'application/json'})
expected = {
'old_actor': TEST_ACTOR_NAME,
'new_actor': TEST_ACTOR_NAME2,
'write_key': WRITE_KEY_MOCK
}
self.assertEquals(json.loads(kwargs['body']), expected)
def _build_pool_manager_mock(self, PoolManagerMock):
pool_manager_mock = Mock()
pool_manager_mock.urlopen = Mock()
PoolManagerMock.return_value = pool_manager_mock
return PoolManagerMock, pool_manager_mock.urlopen
class ValidationTestCase(unittest.TestCase):
def test_recognizes_data_as_valid(self):
data = {
'$sale_value': TEST_PROPERTY_VALID_SALE_VALUE,
'$sale_currency': TEST_PROPERTY_VALID_SALE_CURRENCY,
'$device_agent': TEST_PROPERTY_VALID_DEVICE_AGENT,
'$device_os': TEST_PROPERTY_VALID_DEVICE_OS,
'$device_resolution': TEST_PROPERTY_VALID_DEVICE_RESOLUTION,
'$device_mobile': TEST_PROPERTY_VALID_DEVICE_MOBILE,
'$country': TEST_PROPERTY_VALID_COUNTRY,
'$region': TEST_PROPERTY_VALID_REGION,
'$city': TEST_PROPERTY_VALID_CITY,
'$gender': TEST_PROPERTY_VALID_GENDER,
'$age': TEST_PROPERTY_VALID_AGE,
'$utm_campaign': TEST_PROPERTY_VALID_UTM_CAMPAIGN,
'$utm_source': TEST_PROPERTY_VALID_UTM_SOURCE,
'$utm_medium': TEST_PROPERTY_VALID_UTM_MEDIUM,
'$utm_content': TEST_PROPERTY_VALID_UTM_CONTENT,
'$utm_term': TEST_PROPERTY_VALID_UTM_TERM
}
actual = ActionParameterValidator().validate(data)
self.assertEquals(actual, (True, None,))
def test_flags_unrecognized_special_property(self):
data = {'$unrecognizedproperty': 'is unrecognized'}
self.assertRaises(ParameterValidationException, ActionParameterValidator().validate, data)
def test_flags_missing_required_parameter(self):
data = {'$sale_currency': TEST_PROPERTY_VALID_CURRENCY}
self.assertRaises(ParameterValidationException, ActionParameterValidator().validate, data)
def test_flags_max_length_violation(self):
data = {'$sale_currency': TEST_PROPERTY_INVALID_CURRENCY, '$sale_value': TEST_PROPERTY_VALID_SALE_VALUE}
self.assertRaises(ParameterValidationException, ActionParameterValidator().validate, data)
def test_flags_option_violation(self):
data = {'$gender': TEST_PROPERTY_INVALID_GENDER}
self.assertRaises(ParameterValidationException, ActionParameterValidator().validate, data)
def test_flags_integer_violation(self):
data = {'$age': TEST_PROPERTY_INVALID_AGE}
self.assertRaises(ParameterValidationException, ActionParameterValidator().validate, data)
|
[
"friedrich@cartogami.com"
] |
friedrich@cartogami.com
|
67b9b1f7bedfa92e7a3381dc098bc78b70b3407c
|
8ab7ffd8b84f242982d54467d1b72ce629eab6a3
|
/intents/qtask_exec.py
|
f5d2db7226ae706bb31fea2296e79136d2827005
|
[] |
no_license
|
piaoyangguo/serviceunit
|
358de1f1d2b9401a3829529247229bba3a776efc
|
b2bd20dcc91ef7e560b07ae3d791b3c988f9ae55
|
refs/heads/master
| 2022-12-10T01:31:11.323159
| 2018-07-15T11:46:32
| 2018-07-15T11:46:32
| 141,023,073
| 0
| 0
| null | 2022-12-08T02:16:47
| 2018-07-15T11:47:05
|
Python
|
UTF-8
|
Python
| false
| false
| 808
|
py
|
from intents.base import QueryTask
import teamin
class IntentQtaskExec(QueryTask):
NAME = 'QTASK_EXEC'
def __init__(self, request, intent):
self.request = request
self.intent = intent
def Go(self):
self.initSlots()
query = self.request.Message()
executor = teamin.NameFindNames().ResolveName(self.request.UID(), self.executor)
btc = teamin.BizTaskCount(self.request.AgentName, self.request.AgentUID)
(count, finished, expired), weburl = btc.SpecifyExecutors(query, executor)
self.intent.set_interval(0, 0, weburl)
return self.Response(count, finished, expired, weburl)
def initSlots(self):
self.slot_w = self.intent.slots.filter(type='user_qte_w').first()
self.executor = self.slot_w.original_word
|
[
"173077850@qq.com"
] |
173077850@qq.com
|
948abad1c62af6a4a8212819b33c31117eb6da0c
|
ac2b3f97b4f2423a3724fbf9af69e362183f7f3a
|
/crimtech_final_project/crimsononline/content/templatetags/top_articles.py
|
1bfa713ef2e1825332c6649b34a6a633e3c17125
|
[] |
no_license
|
cindyz8735/crimtechcomp
|
e4109855dd9a87fc11dd29fdf6bb81400c9ce97b
|
a9045ea79c73c7b864a391039799c2f22234fed3
|
refs/heads/master
| 2021-01-24T10:06:03.386553
| 2018-04-14T04:24:57
| 2018-04-14T04:24:57
| 123,037,281
| 0
| 0
| null | 2018-02-26T22:08:57
| 2018-02-26T22:08:56
| null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
from django import template
from crimsononline.content.models import MostReadArticles
register = template.Library()
def most_read(context, specifier):
try:
context['mostreadarticles'] = (MostReadArticles.objects
.filter(key=specifier)
.order_by('-create_date')[0]
.articles)
    except (IndexError, MostReadArticles.DoesNotExist):
pass
return context
@register.inclusion_tag('templatetag/mostreadarticles.html',
takes_context=True)
def most_read_articles(context):
return most_read(context, 'content')
@register.inclusion_tag('templatetag/mostreadadmissions.html',
takes_context=True)
def most_read_admissions(context):
return most_read(context, 'admissions')
@register.inclusion_tag('templatetag/mostreadflyby.html',
takes_context=True)
def most_read_flyby(context):
return most_read(context, 'flyby')
@register.inclusion_tag('templatetag/relatedarticles.html',
takes_context=True)
def related_articles(context):
return context
@register.inclusion_tag('templatetag/recommended_articles.html',
takes_context=True)
def recommended_articles(context):
return context
|
[
"cindyz8735@gmail.com"
] |
cindyz8735@gmail.com
|
74ffa57caa17a79f79a4f556743b3885effb2976
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/5SJdiGXZwRiFK5vEJ_5.py
|
6b7d5006ba72e571e4e1d9396e974c2c30d63ed2
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
def reverse_capitalize(txt):
txt = txt[::-1]
new_txt = ''
for char in txt:
char = char.upper()
new_txt += char
return new_txt
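
# A minimal usage sketch (illustrative input):
print(reverse_capitalize("Hello World"))  # DLROW OLLEH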
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0f89aabb6afcac086be266a87470dd503016df7c
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_1480487_0/Python/alb4tor/GCJ.py
|
d451fb47ed78eefe43680fc14d2c7e07ec64c891
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
'''
Created on 18 mars 2012
@author: gnugnu
'''
import sys
class InputFile(object):
    '''
    Iterates over the lines of a Google Code Jam input file; the first line
    holds the number of cases.
    '''
def __init__(self, filename=""):
'''
Constructor
'''
if filename == "":
filename = sys.argv[1]
self.full_content = open(filename, "r").readlines()
self.size = int(self.full_content[0])
self.idx = 0
def __iter__(self):
return self
def __len__(self):
return self.size
def next(self):
self.idx += 1
try:
return self.full_content[self.idx].rstrip("\n")
except IndexError:
raise StopIteration
@property
def case(self):
return self.idx
class Output(object):
def __init__(self, filename=""):
self.case = 0
def prt(self, data):
self.case += 1
self._prt("Case #%d: %s" % (self.case, str(data)))
def _prt(self, data):
print data
def close(self):
pass
class OutputFile(Output):
def __init__(self, filename):
Output.__init__(self)
self.fp = open(filename, "w")
def _prt(self, data):
self.fp.write(data+"\n")
def close(self):
self.fp.close()
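
# A minimal usage sketch (Python 2, hypothetical file names):
#
#     inp = InputFile("A-small.in")    # first line holds the case count
#     out = OutputFile("A-small.out")
#     for line in inp:
#         out.prt(line)                # writes "Case #N: <line>"
#     out.close()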
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
a77f8e23b15fcb2a4caf310890f1a2d3ad7a7714
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/tensorflow/python/estimator/inputs/numpy_io.py
|
f3fc47e290a7b335b0bdba64fe0da0d41970f3e1
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:06d1937b0e2e65d868e3f6fb34a214b26552db5c1a5dabce73a56b55aa075f63
size 5108
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
e13aefe727018e905ec98a260eb06accff2fcd2d
|
751b094918ae9200afe7824d58804549082caa95
|
/src/python/WMCore/JobSplitting/Generators/BasicNaming.py
|
e0db794314212fbb6fd9f0ed93e09f522ce15972
|
[] |
no_license
|
cinquo/WMCore
|
7ebd13269f42eb97f416f8f2bdaca05fa93c6afc
|
122f9332f2e944154dd0df68b6b3f2875427b032
|
refs/heads/master
| 2021-01-09T06:28:58.947626
| 2013-06-05T08:31:53
| 2013-06-05T08:31:53
| 2,965,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
#!/usr/bin/env python
"""
_BasicNaming_
Default name generator using a vaguely sensible convention.
Uses GUIDs to avoid having to keep state
"""
from WMCore.Services.UUID import makeUUID
from WMCore.JobSplitting.Generators.GeneratorInterface import GeneratorInterface
class BasicNaming(GeneratorInterface):
"""
_BasicNaming_
Basic task & guid based name generator
"""
def __call__(self, wmbsJob):
wmbsJob['id'] = "%s/%s" % (self.task.getPathName(), makeUUID())
wmbsJob['name'] = "%s/%s" % (self.task.getPathName(), makeUUID())
|
[
"metson@4525493e-7705-40b1-a816-d608a930855b"
] |
metson@4525493e-7705-40b1-a816-d608a930855b
|
3ee33ba669c0be974c54414bc32bb4692ee19419
|
a4fcaa28f288ff495ac09c3f8070f019f4d3ba80
|
/08-real_python_class/2017_02_07-Lesson_2/class_projects/flask-hello-world/app.py
|
677ab29cfff2fc13b95ed5933c362409ba9a180e
|
[] |
no_license
|
tomwhartung/always_learning_python
|
db44b0745f27f482e6482faa821f89dc7809dda8
|
ab27c164a724754e3e25518bf372bd4437995d64
|
refs/heads/master
| 2020-12-07T15:57:04.184391
| 2017-05-18T19:35:31
| 2017-05-18T19:35:31
| 67,449,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
##
# Class project
#
from flask import Flask
app = Flask(__name__)

@app.route('/')
def index():
    return 'Index Page'

#
# Variable Rules:
# ---------------
#
# Greet the user by name
#
@app.route('/name/<username>')
def greet_user(username):
    return 'Hello %s!' % username

if __name__ == '__main__':
    app.run(debug=True)
|
[
"tomwhartung@gmail.com"
] |
tomwhartung@gmail.com
|
4795fdd80d0a83f8095f553a54e9c04a2712f2f0
|
42064191a5ac586ed088b293165b51abf16b1ee4
|
/Intro Machine Learning/Lesson 9/Min_Max_Rescale.py
|
752e6692ff0556a1d34473a31fb6f4068011bca4
|
[] |
no_license
|
ObinnaObeleagu/Udacity
|
637cd458824a835febacebd72ebef77b30ca7f94
|
761ba413934f66cbd9429fd9882f59f047eb065b
|
refs/heads/master
| 2023-03-15T23:27:23.022463
| 2019-01-03T04:05:03
| 2019-01-03T04:05:03
| 497,375,575
| 1
| 0
| null | 2022-05-28T16:46:12
| 2022-05-28T16:46:12
| null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
""" quiz materials for feature scaling clustering """
### FYI, the most straightforward implementation might
### throw a divide-by-zero error, if the min and max
### values are the same
### but think about this for a second--that means that every
### data point has the same value for that feature!
### why would you rescale it? Or even use it at all?
def featureScaling(arr):
if max(arr) == min(arr):
return arr
else:
return [(a - min(arr))*1.0/(max(arr)-min(arr)) for a in arr]
# tests of your feature scaler--line below is input data
data = [115, 140, 175]
print(featureScaling(data))
|
[
"ryanzjlib@gmail.com"
] |
ryanzjlib@gmail.com
|
c75309b7bfbaa2e7f70f920f0c0c9a1fac74fe6b
|
1bc7456240639a4fac54c411fbcb562cdbcc420c
|
/5483. Make The String Great.py
|
7728ab54b08891d7f80b0856bb4def9591fe4547
|
[] |
no_license
|
Manash-git/CP-LeetCode-Solve
|
bdbb9f13946faee5da24e191a3d593b99da61ed2
|
45052c7613345c76f8a12bac780ffb899062dea9
|
refs/heads/master
| 2022-11-29T13:16:03.474242
| 2020-08-11T19:06:07
| 2020-08-11T19:06:07
| 275,853,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
def makeGood(s):
    # Repeatedly drop adjacent pairs that are the same letter in different case.
    bucket = [s[0]]
    for i in s[1:]:
        if bucket and i.lower() == bucket[-1] and i != bucket[-1]:
            bucket.pop()
        elif bucket and i.upper() == bucket[-1] and i != bucket[-1]:
            bucket.pop()
        else:
            bucket.append(i)
    return "".join(bucket)

print(makeGood("mannNasSh"))

lst = [1, 5, 3, 7]
print(lst[-1])
|
[
"emailatmanash@gmail.com"
] |
emailatmanash@gmail.com
|
b18a75629c957d414f6969ff82875ae136371895
|
27ff7fec0ae3f29f58089a2acab0aa3bc4e6e1f7
|
/Python_script/51zxw/unittest/testCase_combine.py
|
377d5e839107ecaf2be9a6fe29db01308a7086b3
|
[] |
no_license
|
zhangsong1417/xx
|
01435d6057364991b649c1acc00b36ab13debe5a
|
c40cfdede194daf3bdf91b36c1936150577128b9
|
refs/heads/master
| 2020-04-06T14:06:23.011363
| 2019-07-09T02:38:02
| 2019-07-09T02:38:02
| 157,528,207
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from calculator import *
import unittest
class Test_StartEnd(unittest.TestCase):
def setUp(self):
print("test start")
def tearDown(self):
print("test end")
class Testadd(Test_StartEnd):
def test_add(self):
j=Math(5,5)
self.assertEqual(j.add(),10)
class Testsub(Test_StartEnd):
def test_sub(self):
i=Math(10,5)
self.assertEqual(i.sub(),5)
if __name__=='__main__':
unittest.main()
|
[
"44634576+shuiling21@users.noreply.github.com"
] |
44634576+shuiling21@users.noreply.github.com
|
4ee02bc666d3115bc00a27981934c752402097f7
|
d78dfc5089717fc242bbd7097f507d811abb4260
|
/USA/script.module.coveapi/lib/coveapi/__init__.py
|
6190c730b35fd85b4204fbf8e5d11e8982f2c3a3
|
[] |
no_license
|
tustxk/AddOnRepo
|
995b980a9ec737e2c25bed423fc83f710c697e40
|
6b86a06cb37e6e10b4119584dd7311ebc2318e54
|
refs/heads/master
| 2022-10-08T21:34:34.632346
| 2016-10-28T09:48:01
| 2016-10-28T09:48:01
| 70,684,775
| 1
| 1
| null | 2022-10-01T16:27:13
| 2016-10-12T09:31:16
|
Python
|
UTF-8
|
Python
| false
| false
| 956
|
py
|
"""Package: `coveapi`
A Python client for the PBS COVE API service.
"""
# client version
__version__ = '0.2dev'
# coveapi constants
COVEAPI_VERSION = 'v1'
COVEAPI_HOST = 'http://api.pbs.org'
COVEAPI_ENDPOINT = '/cove/%s/' % COVEAPI_VERSION
COVEAPI_ENDPOINT_CATEGORIES = '%scategories/' % COVEAPI_ENDPOINT
COVEAPI_ENDPOINT_GROUPS = '%sgroups/' % COVEAPI_ENDPOINT
COVEAPI_ENDPOINT_PROGRAMS = '%sprograms/' % COVEAPI_ENDPOINT
COVEAPI_ENDPOINT_VIDEOS = '%svideos/' % COVEAPI_ENDPOINT
def connect(api_app_id, api_app_secret, api_host=COVEAPI_HOST):
"""Connect to the COVE API service.
Keyword arguments:
`api_app_id` -- your COVE API app id
`api_app_secret` -- your COVE API secret key
`api_host` -- host of COVE API (default: COVEAPI_HOST)
Returns:
`coveapi.connection.COVEAPIConnection` object
"""
from coveapi.connection import COVEAPIConnection
return COVEAPIConnection(api_app_id, api_app_secret, api_host)
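
# A minimal usage sketch (hypothetical credentials):
#
#     import coveapi
#     pbs = coveapi.connect('YOUR_APP_ID', 'YOUR_APP_SECRET')
#     # pbs is a coveapi.connection.COVEAPIConnection object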
|
[
"ke.xiao@netxeon.com"
] |
ke.xiao@netxeon.com
|
241db9e295f1a41795f43fba433f42583b271f89
|
52d6e9fb7176bf819ae8460d0fd03368614ce075
|
/datasource/PooledDataSource.py
|
9f0bb94bdadf9e96e5bc94dc8d47f6f27f780427
|
[
"BSD-2-Clause"
] |
permissive
|
mattduan/proof
|
076f23f20e28e6d59f091af11eb84cdd3e9f224d
|
52241b68e7170c9c6fd245192b7be35be1cdc33f
|
refs/heads/master
| 2021-01-13T01:44:22.937600
| 2013-03-17T16:19:32
| 2013-03-17T16:19:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
"""
A PooledDataSource object is a factory for PooledConnection objects.
"""
__version__='$Revision: 3194 $'[11:-2]
__author__ = "Duan Guoqiang (mattgduan@gmail.com)"
import logging
import util.logger.Logger as Logger
import proof.ProofException as ProofException
class PooledDataSource:
__is__ = 'interface'
def __init__( self,
host,
username,
password,
dbname,
pool,
logger = None ):
""" Constructor.
"""
self.__logger = Logger.makeLogger(logger)
self.log = self.__logger.write
#==================== Interfaces ==========================
def getPooledConnection(self, **kwargs):
""" Establish a database connection and return it.
"""
raise ProofException.ProofNotImplementedException( \
"PooledDataSource.getPooledConnection: need to be overrided by db specific PooledDataSource." )
def getLogger(self):
return self.__logger
def setLogger(self, logger):
self.__logger = Logger.makeLogger(logger)
self.log = self.__logger.write
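
# A minimal sketch of a concrete subclass (hypothetical; the real db-specific
# classes live elsewhere in proof and wrap a DB-API style driver):
#
# class MySQLPooledDataSource(PooledDataSource):
#     def getPooledConnection(self, **kwargs):
#         import MySQLdb
#         return MySQLdb.connect(**kwargs)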
|
[
"guoqiangduan@gmail.com"
] |
guoqiangduan@gmail.com
|
1f5e57c17346b3f327473b6fcffe2c8ed909d888
|
5374bd9a9fc8cc07f6966c490a137003ddc64d9b
|
/VEnCode/scripts/dendrogram_encode.py
|
44b445694b2ba41e172163ba360705fc56d94d73
|
[
"BSD-3-Clause"
] |
permissive
|
AndreMacedo88/VEnCode
|
31f9f545019f62e0af716395a11961515c229394
|
667c777c6ef12c43e993660e5c695d4d6d43385e
|
refs/heads/master
| 2021-01-06T03:55:44.385885
| 2020-11-24T18:05:38
| 2020-11-24T18:05:38
| 90,248,803
| 0
| 1
|
NOASSERTION
| 2020-02-04T22:29:39
| 2017-05-04T10:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
dendrogram_encode.py: file used to generate hierarchical clustering and subsequent dendrograms from ENCODE DNase-seq
data
"""
import os
import pandas as pd
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from VEnCode import common_variables as cv
DATA_TYPE = "enhancers"
encode_data_path = "D:/Utilizador HDD/OneDrive - Nova Medical School Faculdade de Ciências Médicas da UNL/" \
"1-Research/3-Vencode/Fantom5/Files/Validation_files/ENCODE/" \
"ENCODE DNase expression in FANTOM5 {}_merged.csv".format(DATA_TYPE)
encode_data = pd.read_csv(encode_data_path, sep=";", engine="python", index_col=0)
values = encode_data.T.values
index = encode_data.T.index
clustering = hierarchy.linkage(values, 'single')
plt.figure(figsize=(14, 14))
dn = hierarchy.dendrogram(clustering, labels=index, color_threshold=0, above_threshold_color='#333333',
leaf_rotation=0, orientation="left")
no_axes = False
no_border = True
ax = plt.gca()
if no_axes:
ax.axis('off')
else:
dflt_col = "#808080"
ylbls = ax.get_ymajorticklabels()
for lbl in ylbls:
lbl.set_color(dflt_col)
if no_border:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
path = "D:/Utilizador HDD\OneDrive - Nova Medical School Faculdade de Ciências Médicas da UNL/1-Research/3-Vencode/" \
"Fantom5/Dendrograms/"
file_name = "dendro_encode_{}_noBorders.png".format(DATA_TYPE)
output_path = os.path.join(path, file_name)
plt.savefig(output_path, dpi=600, bbox_inches="tight", transparent=True)
retrieve_leaves = False
if retrieve_leaves:
leaves_list = dn["leaves"]
leaves_names = [index[x] for x in leaves_list]
with open("leaves.csv", "w") as f:
for item in leaves_names:
f.write("{}\n".format(item))
print(leaves_names)
|
[
"andre.lopes.macedo@gmail.com"
] |
andre.lopes.macedo@gmail.com
|
39959cff761869bff2825119c1eb9906bd45241b
|
2f882f68806faf88e549a941e4d13833d9aa95df
|
/杨辉三角.py
|
b3f9f09a01baaa435f4f241a8ce69fa574e91c69
|
[] |
no_license
|
SmallPotY/leetcode_Python
|
5ac8420cdcb677a679a32fd6f5fce82411d813cd
|
0a2483195004c4d18237920b2f38b942e26b181b
|
refs/heads/master
| 2020-03-20T13:24:00.612054
| 2019-07-18T09:14:45
| 2019-07-18T09:14:45
| 137,454,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
"""
给定一个非负整数 numRows,生成杨辉三角的前 numRows 行。
在杨辉三角中,每个数是它左上方和右上方的数的和。
示例:
输入: 5
输出:
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
"""
class Solution:
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
yhsj = [[1],[1,1]]
if numRows:
if numRows == 1:
return [[1]]
elif numRows == 2:
return yhsj
else:
for i in range(3,numRows+1):
klb = [1,]
k =0
for j in range(1, i-1):
a = yhsj[i-2][k]
b = yhsj[i-2][k+1]
k = k+1
klb.append(a+b)
klb.append(1)
yhsj.append(klb)
return yhsj
else:
return []
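
# A minimal usage sketch (illustrative):
print(Solution().generate(5))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]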
|
[
"1041132457@qq.com"
] |
1041132457@qq.com
|
66111fe1a191a09bd2078e9d605863dc0d1f4e35
|
391ea6a7c730b9db50f14b359b0a8d123c590924
|
/mayan/apps/duplicates/apps.py
|
03f07a880d442904d5665d81caea9af292aef5f4
|
[
"Apache-2.0"
] |
permissive
|
Dave360-crypto/Mayan-EDMS-1
|
2e1891cea640ae2ac002d2c19eb22b88b271db29
|
7d79e748e8f6e47381a298ad8d219c15b09dd4d3
|
refs/heads/master
| 2023-08-19T06:48:48.566169
| 2021-10-11T06:22:24
| 2021-10-11T06:23:41
| 418,950,673
| 0
| 0
|
NOASSERTION
| 2021-10-19T14:07:24
| 2021-10-19T14:04:52
| null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
from django.apps import apps
from django.db.models.signals import post_delete
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_list_facet, menu_multi_item, menu_tools
)
from mayan.apps.documents.menus import menu_documents
from mayan.apps.documents.permissions import permission_document_view
from mayan.apps.documents.signals import signal_post_document_file_upload
from mayan.apps.navigation.classes import SourceColumn
from .classes import DuplicateBackend
from .handlers import (
handler_remove_empty_duplicates_lists, handler_scan_duplicates_for
)
from .links import (
link_document_duplicates_list, link_duplicated_document_list,
link_duplicated_document_scan
)
class DuplicatesApp(MayanAppConfig):
app_namespace = 'duplicates'
app_url = 'duplicates'
has_rest_api = True
has_tests = True
name = 'mayan.apps.duplicates'
verbose_name = _('Duplicates')
def ready(self):
super().ready()
Document = apps.get_model(
app_label='documents', model_name='Document'
)
DuplicateBackendEntry = self.get_model(
model_name='DuplicateBackendEntry'
)
DuplicateSourceDocument = self.get_model(
model_name='DuplicateSourceDocument'
)
DuplicateTargetDocument = self.get_model(
model_name='DuplicateTargetDocument'
)
DuplicateBackend.load_modules()
SourceColumn(
func=lambda context: DuplicateBackendEntry.objects.get_duplicates_of(
document=context['object'],
permission=permission_document_view,
user=context['request'].user
).count(), include_label=True, label=_('Duplicates'),
order=99, source=DuplicateSourceDocument
)
SourceColumn(
attribute='backend', include_label=True,
label=_('Duplicate backend'), order=99,
source=DuplicateTargetDocument
)
menu_documents.bind_links(
links=(link_duplicated_document_list,)
)
menu_list_facet.bind_links(
links=(link_document_duplicates_list,),
sources=(Document,)
)
menu_tools.bind_links(
links=(link_duplicated_document_scan,)
)
# DuplicateSourceDocument
menu_multi_item.add_proxy_inclusions(source=DuplicateSourceDocument)
# DuplicateTargetDocument
menu_multi_item.add_proxy_inclusions(source=DuplicateTargetDocument)
post_delete.connect(
dispatch_uid='duplicates_handler_remove_empty_duplicates_lists',
receiver=handler_remove_empty_duplicates_lists,
sender=Document
)
signal_post_document_file_upload.connect(
dispatch_uid='duplicates_handler_scan_duplicates_for',
receiver=handler_scan_duplicates_for
)
|
[
"roberto.rosario@mayan-edms.com"
] |
roberto.rosario@mayan-edms.com
|
849c8859c0d6340d8cbc066bbe9b0df238848e8f
|
f210ccc90f9e091f10639f071c4e460fa4dafec1
|
/src/helper/cluster.py
|
72afcd677e91161ea455a8ea6061d9c3c1d91a17
|
[
"MIT"
] |
permissive
|
qingchenkanlu/FlowPose6D
|
e21974bbbc73db8934e387943a002d009ac0b16f
|
2297ab5fa0afd0c247d59c2f1c7f899f078e2893
|
refs/heads/master
| 2023-01-20T13:43:59.737784
| 2020-11-22T10:52:23
| 2020-11-22T10:52:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,633
|
py
|
import os
import time
import logging
def move_dataset_to_ssd(env, exp):
try:
# Update the env for the model when copying dataset to ssd
if env.get('leonhard', {}).get('copy', False):
files = ['data', 'data_syn', 'models', 'viewpoints_renderings']
p_ls = os.popen('echo $TMPDIR').read().replace('\n', '')
p_ycb_new = p_ls + '/YCB_Video_Dataset'
p_ycb = env['p_ycb']
print(p_ls)
            try:
                # os.mkdir does not expand '$TMPDIR', so only the resolved path is created
                os.mkdir(p_ycb_new)
            except OSError:
                pass  # directory may already exist
for f in files:
p_file_tar = f'{p_ycb}/{f}.tar'
logging.info(f'Copying {f} to {p_ycb_new}/{f}')
if os.path.exists(f'{p_ycb_new}/{f}'):
logging.info(
"data already exists! Interactive session?")
else:
start_time = time.time()
if f == 'data':
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
else:
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR/YCB_Video_Dataset | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
os.system(bashCommand)
logging.info(
f'Transferred {f} folder within {str(time.time() - start_time)}s to local SSD')
env['p_ycb'] = p_ycb_new
    except Exception:
        logging.info('Copying data failed')
return exp, env
def move_background(env, exp):
try:
# Update the env for the model when copying dataset to ssd
if env.get('leonhard', {}).get('copy', False):
p_file_tar = env['p_background'] + '/indoorCVPR_09.tar'
p_ls = os.popen('echo $TMPDIR').read().replace('\n', '')
p_n = p_ls + '/Images'
try:
os.mkdir(p_n)
except:
pass
if os.path.exists(f'{p_n}/office'):
logging.info(
"data already exists! Interactive session?")
else:
start_time = time.time()
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
os.system(bashCommand)
env['p_background'] = p_n
    except Exception:
logging.info('Copying data failed')
return exp, env
|
[
"frey.breitenbrunn@gmx.de"
] |
frey.breitenbrunn@gmx.de
|
883cdc5c29b87723f98b7e4e6b693ecfc75275de
|
92cd0601656e4cde04e56a896ca063926185041c
|
/shop/accounts/apps.py
|
ac57fd73bea3da34aa8754777ebac2e76e4e1165
|
[] |
no_license
|
Anych/shop
|
74599fd8f2405128c308f047ac9da13215a38912
|
e5190c1cb7d2b786b90cce9c88734427ea371fb8
|
refs/heads/master
| 2023-05-01T07:08:24.881512
| 2021-05-24T07:48:50
| 2021-05-24T07:48:50
| 355,591,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from django.apps import AppConfig
class AccountsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'accounts'
verbose_name = 'Аккаунты'
|
[
"anuar123@mail.ru"
] |
anuar123@mail.ru
|
05f10d2ee781ed9c5a53261db7d80fb1b86f2c53
|
cf7025ff7d02604ea146775a35894733d8338593
|
/core/settings.py
|
0491d6fc6eddf8fe942be18ae5190fac60296a53
|
[] |
no_license
|
boxabhi/CodeKeen-starter
|
7af6e13ec780df8a571e52d6cf10e16ac4717c3d
|
ac8be93494cf7013366ba7ad8cbd172d47feb466
|
refs/heads/main
| 2023-06-18T14:56:30.771286
| 2021-07-25T15:45:05
| 2021-07-25T15:45:05
| 382,294,773
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,653
|
py
|
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-7p$(e589sf%x_g%^36)s*k^w2t^nxxj=7!^&x_9h@7b_oi7(x8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'django_extensions',
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
ASGI_APPLICATION = 'core.asgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
},
},
}
INTERNAL_IPS = [
# ...
'127.0.0.1',
# ...
]
|
[
"abhijeetg40@gmail.com"
] |
abhijeetg40@gmail.com
|
fa8957f1abd9be526285045d13f60e79976ae059
|
b3b9066196700269494b2a9350377bfd1aa8170e
|
/starlight_project/settings.py
|
5c7f1d8b99ce32ac6fa49ec3582d13239c206856
|
[] |
no_license
|
MagiCircles/RevueStarlight
|
f33000e06bc6ce6db506bd7460c47ffd2a3716c4
|
5ce8a023e2b618143fd9dcc3e78758c2623001d7
|
refs/heads/master
| 2022-08-13T20:12:25.201028
| 2022-07-10T15:14:44
| 2022-07-10T15:14:44
| 185,398,158
| 5
| 2
| null | 2022-07-30T18:11:03
| 2019-05-07T12:32:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,887
|
py
|
# -*- coding: utf-8 -*-
"""
Django settings for starlight_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#yt2*mvya*ulaxd+6jtr#%ouyco*2%3ngb=u-_$44j^86g0$$3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'bootstrapform',
'snowpenguin.django.recaptcha3',
'rest_framework',
'storages',
'magi',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'corsheaders.middleware.CorsMiddleware',
'magi.middleware.languageFromPreferences.LanguageFromPreferenceMiddleWare',
'magi.middleware.httpredirect.HttpRedirectMiddleware',
)
ROOT_URLCONF = 'starlight_project.urls'
WSGI_APPLICATION = 'starlight_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
SITE = 'starlight'
AUTHENTICATION_BACKENDS = ('magi.backends.AuthenticationBackend',)
DEBUG_PORT = 8000
from django.utils.translation import ugettext_lazy as _
LANGUAGES = (
('en', _('English')),
('es', _('Spanish')),
('zh-hans', _('Simplified Chinese')),
('ru', _('Russian')),
('it', _('Italian')),
('fr', _('French')),
('de', _('German')),
('pl', _('Polish')),
('ja', _('Japanese')),
('kr', _('Korean')),
('id', _('Indonesian')),
('vi', _('Vietnamese')),
('zh-hant', _('Traditional Chinese')),
('pt', _('Portuguese')),
('pt-br', _('Brazilian Portuguese')),
('tr', _('Turkish')),
('th', _('Thai')),
('uk', _('Ukrainian')),
)
NATIVE_LANGUAGES = (
('en', u'English'),
('es', u'Español'),
('zh-hans', u'简体中文'),
('ru', u'Русский'),
('it', u'Italiano'),
('fr', u'Français'),
('de', u'Deutsch'),
('pl', u'polski'),
('ja', u'日本語'),
('kr', u'한국어'),
('id', u'Indonesia'),
('vi', u'Tiếng Việt Nam'),
('zh-hant', u'繁體中文'),
('pt', u'Português'),
('pt-br', u'Português Brasileiro'),
('tr', u'Türkçe'),
('th', u'ไทย'),
('uk', u'Українська'),
)
LANGUAGE_CODE = 'en'
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'magi/locale'),
]
STATIC_UPLOADED_FILES_PREFIX = None
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/.*$'
LOGIN_REDIRECT_URL = '/'
LOG_EMAIL = 'emails-log@schoolido.lu'
PASSWORD_EMAIL = 'password@schoolido.lu'
AWS_SES_RETURN_PATH = 'contact@starlight.academy'
RECAPTCHA_PRIVATE_KEY = ''
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_DEFAULT_ACTION = 'generic'
RECAPTCHA_SCORE_THRESHOLD = 0.5
FAVORITE_CHARACTERS = []
STAGE_GIRLS_NAMES = {}
STAFF_CONFIGURATIONS = {}
SCHOOLS = {}
IMPORTABLE_FIELDS = {}
VOICE_ACTRESSES = {}
MAX_STATISTICS = {}
MAX_WIDTH = 1200
MAX_HEIGHT = 1200
MIN_WIDTH = 300
MIN_HEIGHT = 300
STATIC_FILES_VERSION = ''
try:
from generated_settings import *
except ImportError:
pass
try:
from local_settings import *
except ImportError:
pass
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS.append(SITE)
LOCALE_PATHS = list(LOCALE_PATHS)
LOCALE_PATHS.append(os.path.join(BASE_DIR, SITE, 'locale'))
if STATIC_UPLOADED_FILES_PREFIX is None:
STATIC_UPLOADED_FILES_PREFIX = SITE + '/static/uploaded/' if DEBUG else 'u/'
|
[
"db0company@gmail.com"
] |
db0company@gmail.com
|
3f0324d2aa68a7bb29d539c03f1c6a4cd9453169
|
acec8615e8cd8e81d58703024816fdedf43ecc0e
|
/replica/contrib/blip/dashboard/views.py
|
f7b9cb6dd51299c49738ab6d641aeab39392c0d4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
underlost/Replica
|
dec884522833e89bcec46d16b2349d0881a15cc9
|
2f092d3fc215b950fa6e409980a3f3e7c3633f7c
|
refs/heads/master
| 2021-03-12T23:39:19.196279
| 2015-06-04T07:53:15
| 2015-06-04T07:53:15
| 3,567,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
from __future__ import absolute_import
import logging
from django.shortcuts import render_to_response, render, get_object_or_404, redirect
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.generic.list import ListView
from replica.contrib.blip.models import Timeline, Blip
from replica.contrib.blip.forms import TimelineModelForm, BlipModelForm
class LatestBlipsListViewMobile(ListView):
paginate_by = 25
template_name = 'replica/dashboard/blip/blip_list.html'
def get_queryset(self):
return Blip.objects.filter(user=self.request.user).order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(LatestBlipsListViewMobile, self).get_context_data(**kwargs)
context.update({'hide_timeline': True, 'nav_title': 'All Blips',})
return context
class TimelinesListView(ListView):
paginate_by = 25
template_name = 'replica/dashboard/blip/timeline_list.html'
def get_queryset(self):
return Timeline.objects.filter(user=self.request.user).order_by('-pub_date')
def get_context_data(self, **kwargs):
context = super(TimelinesListView, self).get_context_data(**kwargs)
context.update({ 'nav_title': 'Timelines',})
return context
class TimelineBlipListView(ListView):
paginate_by = 100
template_name = 'replica/dashboard/blip/blip_list.html'
def get_queryset(self):
self.timeline = get_object_or_404(Timeline, slug=self.kwargs.pop('timeline_slug'))
b = Blip.objects.filter(user=self.request.user).filter(timeline=self.timeline)
if self.timeline.rev_order == True:
return b.order_by('-pub_date')
else:
return b.order_by('pub_date')
def get_context_data(self, **kwargs):
context = super(TimelineBlipListView, self).get_context_data(**kwargs)
context.update({'timeline': self.timeline, 'nav_title': self.timeline.name,})
return context
def AddTimeline(request):
#add a timeline.
instance = Timeline(user=request.user)
f = TimelineModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'New list created.')
return redirect('Replica:Blip-Timelines')
ctx = {'form': f, 'adding': True}
return render(request, 'replica/dashboard/blip/edit_timeline.html', ctx)
def EditTimeline(request, timeline_slug):
#Lets a user edit a blip they've previously added.
timeline = get_object_or_404(Timeline, slug=timeline_slug)
f = TimelineModelForm(request.POST or None, instance=timeline)
if f.is_valid():
f.save()
return redirect('Replica:Blip-Add-To-Timeline', timeline_slug=timeline_slug)
ctx = {'form': f, 'timeline': timeline, 'adding': False}
return render(request, 'replica/dashboard/blip/edit_timeline.html', ctx)
def SingleBlip(request, blip_guid):
#Shows a single blip.
blip = get_object_or_404(Blip, guid=blip_guid)
if blip.timeline:
recent_blips = Blip.objects.filter(timeline__id=blip.timeline.id, is_private=False)[:5]
ctx = {'blip': blip, 'recent_blips': recent_blips}
else:
ctx = {'blip': blip}
return render(request, 'replica/dashboard/blip/single_blip.html', ctx)
def AddBlip(request, timeline_slug=None):
object_list = Blip.objects.filter(user=request.user).order_by('-pub_date')[:10]
instance = Blip(user=request.user)
f = BlipModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'Blip Added.')
return redirect('Replica:Blip:Index')
ctx = {'form': f, 'object_list': object_list, 'adding': True, 'blip_submit': True, 'hide_timeline': True, 'nav_title': 'All Blips', }
return render(request, 'replica/dashboard/blip/blip_list.html', ctx)
def AddBlipToTimeline(request, timeline_slug):
ft = get_object_or_404(Timeline, slug=timeline_slug)
if ft.rev_order == True:
b = Blip.objects.filter(user=request.user).filter(timeline=ft).order_by('-pub_date')[:10]
else:
b = Blip.objects.filter(user=request.user).filter(timeline=ft).order_by('pub_date')[:10]
instance = Blip(user=request.user, timeline=ft)
f = BlipModelForm(request.POST or None, instance=instance)
if f.is_valid():
f.save()
messages.add_message(
request, messages.INFO, 'Blip Added.')
return redirect('Replica:Blip:Timeline', timeline_slug=timeline_slug)
ctx = {'form': f, 'timeline': ft, 'adding': True, 'blip_submit': True, 'nav_title': ft.name, 'object_list': b, }
return render(request, 'replica/dashboard/blip/blip_list.html', ctx)
def EditBlip(request, blip_guid):
#Lets a user edit a blip they've previously added.
blip = get_object_or_404(Blip, guid=blip_guid, user=request.user)
f = BlipModelForm(request.POST or None, instance=blip)
if f.is_valid():
f.save()
return redirect('Replica:Blip:Blip', blip_guid=blip_guid)
ctx = {'form': f, 'blip': blip, 'adding': False}
return render(request, 'replica/dashboard/blip/edit_blip.html', ctx)
def DeleteBlip(request, blip_guid):
blip = get_object_or_404(Blip, guid=blip_guid, user=request.user)
if request.method == 'POST':
blip.delete()
return redirect('Replica:Blip:Index')
return render(request, 'replica/dashboard/delete-confirm.html', {'object': blip, 'content_type': 'Blip'})
|
[
"underlost@gmail.com"
] |
underlost@gmail.com
|
8db0433ff501a68fe74000395c3a8da33fe9fb5b
|
7b60d9a48b1b18bbc4a8d8f2cf523654691b5a5e
|
/data_tracker_csv_reader.py
|
2c395f5f7d1c0b052242d527def113b0dab74806
|
[] |
no_license
|
bolducp/Data-Tracker-application-for-Bandwidth-
|
c0fe927db8b0897471ec8b2d453bc17622dafc91
|
9f8f567ab579691bd89f7f390057718866b1f665
|
refs/heads/master
| 2021-01-10T21:18:16.299626
| 2015-09-30T17:16:12
| 2015-09-30T17:16:12
| 42,602,537
| 0
| 1
| null | 2015-09-16T21:58:18
| 2015-09-16T17:26:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
"""A data tracker application for use with csv files from the Bandwidth+ application for OS X """
import csv
def open_and_read_files():
try:
filename = raw_input("Insert file name")
with open(filename, 'rb') as csvfile:
filelines = csv.reader(csvfile)
file_text = []
for row in filelines:
new_row = [entry.lower() for entry in row]
file_text.append(new_row)
return file_text
except IOError:
print "Please enter a valid file name"
return open_and_read_files()
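# The reader below assumes the Bandwidth+ CSV layout in which column 0 holds
# the date, column 1 the network name, and column 4 the usage figure.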
def make_list_of_network_dates_and_data(file_text):
network = raw_input("Which network connection would you like to see data use for?")
list_of_usage = []
for line in file_text:
if network in line[1]:
line_info = [line[0], line[4]]
list_of_usage.append(line_info)
if list_of_usage == []:
print "Please enter a valid network name"
return make_list_of_network_dates_and_data(file_text)
return list_of_usage
def print_list_of_usage(list_of_usage):
sorted_by_date_list = sorted(list_of_usage, reverse=True)
for line in sorted_by_date_list:
print line[0], ": ", line[1]
def calculate_total_usage(list_of_usage):
sorted_by_date_list = sorted(list_of_usage, reverse=True)
total_usage = 0
first_date = sorted_by_date_list[-1][0]
last_date = sorted_by_date_list[0][0]
for line in sorted_by_date_list:
total_usage += float(line[1])
print "Your total usage from %s to %s: %f GBs" % (first_date, last_date, total_usage / 1000)
def main():
file_text = open_and_read_files()
list_of_usage = make_list_of_network_dates_and_data(file_text)
print "\n", "Here is the data usage in MB per day", "\n"
print_list_of_usage(list_of_usage)
print
calculate_total_usage(list_of_usage)
if __name__ == "__main__":
main()
|
[
"paigebolduc@gmail.com"
] |
paigebolduc@gmail.com
|
0df4b72bdd9e02254610431c265ceca056544974
|
53eee7eb899cb518983008532257037fb89def13
|
/672.bulb-switcher-ii.py
|
b93a772c448caa1bbccb098c38e26e109fe8d695
|
[] |
no_license
|
chenxu0602/LeetCode
|
0deb3041a66cb15e12ed4585bbe0fefce5dc6b26
|
3dc5af2bc870fcc8f2142130fcd2b7cab8733151
|
refs/heads/master
| 2023-07-05T19:26:21.608123
| 2023-07-02T08:35:35
| 2023-07-02T08:35:35
| 233,351,978
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
#
# @lc app=leetcode id=672 lang=python3
#
# [672] Bulb Switcher II
#
# https://leetcode.com/problems/bulb-switcher-ii/description/
#
# algorithms
# Medium (50.19%)
# Likes: 97
# Dislikes: 729
# Total Accepted: 10.1K
# Total Submissions: 20K
# Testcase Example: '1\n1'
#
# There is a room with n lights which are turned on initially and 4 buttons on
# the wall. After performing exactly m unknown operations towards buttons, you
# need to return how many different kinds of status of the n lights could be.
#
# Suppose n lights are labeled as number [1, 2, 3 ..., n], function of these 4
# buttons are given below:
#
#
# Flip all the lights.
# Flip lights with even numbers.
# Flip lights with odd numbers.
# Flip lights with (3k + 1) numbers, k = 0, 1, 2, ...
#
#
#
#
# Example 1:
#
#
# Input: n = 1, m = 1.
# Output: 2
# Explanation: Status can be: [on], [off]
#
#
#
#
# Example 2:
#
#
# Input: n = 2, m = 1.
# Output: 3
# Explanation: Status can be: [on, off], [off, on], [off, off]
#
#
#
#
# Example 3:
#
#
# Input: n = 3, m = 1.
# Output: 4
# Explanation: Status can be: [off, on, off], [on, off, on], [off, off, off],
# [off, on, on].
#
#
#
#
# Note: n and m both fit in range [0, 1000].
#
#
import itertools
class Solution:
def flipLights(self, n: int, m: int) -> int:
# First, all these operations commute: doing operation A followed by operation B yields the same result as doing operation B followed by operation A.
# Also, doing operation A followed by operation A again is the same as doing nothing. So really, we only needed to know the residues cand[i] = f[i] % 2.
# There are only 16 different possibilities for the residues in total, so we can try them all.
# We'll loop cand through all 16 possibilities (0, 0, 0, 0), (0, 0, 0, 1), ..., (1, 1, 1, 1).
# A necessary and sufficient condition for cand to be valid is that sum(cand) % 2 == m % 2 and sum(cand) <= m,
# as only when these conditions are satisfied can we find some f with sum(f) == m and cand[i] = f[i] % 2.
seen = set()
for cand in itertools.product((0, 1), repeat=4):
if sum(cand) % 2 == m % 2 and sum(cand) <= m:
A = []
for i in range(min(n, 3)):
light = 1
light ^= cand[0]
light ^= cand[1] and i % 2
light ^= cand[2] and i % 2 == 0
light ^= cand[3] and i % 3 == 0
A.append(light)
seen.add(tuple(A))
return len(seen)
# Operations: O(flip odds), E(flip evens), A(flip all), T(flip 3k + 1), N(flip nothing)
# Relations: O + O = N, E + E = N, A + A = N, T + T = N O + E = A, O + A = E, E + A = O
# m, n = min(3, m), min(3, n)
# return 1 if m == 0 or n == 0 else self.flipLights(n - 1, m) + self.flipLights(n - 1, m - 1)
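# A minimal sanity check against the examples quoted above (hypothetical
# driver code, not part of the original submission):
if __name__ == "__main__":
    s = Solution()
    assert s.flipLights(1, 1) == 2
    assert s.flipLights(2, 1) == 3
    assert s.flipLights(3, 1) == 4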
|
[
"chenxu@Chens-iMac.local"
] |
chenxu@Chens-iMac.local
|
bf3e45acc7c35391ab1e9ad4135455e2c28f8879
|
f2da63de512183804290bfcabfa60eaca3649e05
|
/exercises/programming/stephenson-python-workbook/06-dictionary/src/Ex128.py
|
6dbc2397d830f745f696703b57e152d159b898a3
|
[] |
no_license
|
paradisepilot/statistics
|
a94bb57ebe453d49c06815c523e8f633423cb68e
|
50daf644baca1f40253edf91083ed42d4c5f9342
|
refs/heads/master
| 2022-07-25T16:19:07.751886
| 2022-06-26T21:18:38
| 2022-06-26T21:18:38
| 5,012,656
| 0
| 2
| null | 2019-04-22T06:52:55
| 2012-07-13T01:11:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 937
|
py
|
'''
dummy comment
'''
def reverseLookup( dictionary, value ):
output = []
for k in dictionary.keys():
if value == dictionary[k]:
output.append(k)
return( output )
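# For example, reverseLookup({'a': 1, 'b': 0}, 1) returns ['a'].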
def ex128():
print("\n### ~~~~~ Exercise 128 ~~~~~~~~");
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
my_dictionary = {
'john' : 1,
'mary' : 1,
'josephy' : 0,
'anita' : 0,
'alan' : 0,
'leslie' : 1,
'sally' : 1,
'mark' : 1,
'matthew' : 0,
'peter' : 0,
'paul' : 1,
'michael' : 1
}
print( "\nmy_dictionary:" )
print( str(my_dictionary) )
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
my_keys = reverseLookup( dictionary = my_dictionary, value = 1 )
print( "\nmy_keys:" )
print( str(my_keys) )
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
return( None )
|
[
"paradisepilot@gmail.com"
] |
paradisepilot@gmail.com
|
3c8bc6c16a353390defd28896d58a6fbe79ad210
|
4523d8dc3b195b0fd5532d9144d53a2e211e54e8
|
/flock.opensciencegrid.org/tests/test_topology_match_policy.py
|
b57497b055939cb086e93878491236f9e610cc81
|
[] |
no_license
|
opensciencegrid/osg-flock
|
bbb3dc21fe5cc1e35d73023001c5f905519cdd75
|
1ea50bdd492e4dc67f9da5acf9e30ea1ed39b0fc
|
refs/heads/master
| 2023-08-17T16:22:46.237419
| 2023-08-15T17:54:16
| 2023-08-15T17:54:16
| 29,153,534
| 9
| 19
| null | 2023-09-08T18:23:51
| 2015-01-12T19:47:03
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import sys
import unittest
my_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(my_dir))
import topology_match_policy
from topology_match_policy import _check_allocation as check_allocation
topology_match_policy.DATA_PATH = os.path.join(my_dir, "project_resource_allocations.json")
topology_match_policy._log.setLevel(logging.WARNING)
SCHEDD = "submittest0000.chtc.wisc.edu"
SCHEDD2 = "xd-submit.chtc.wisc.edu"
EXEC_RES = "CHTC-ITB-SLURM-CE"
EXEC_RES2 = "TACC-Stampede2"
class TestTopologyMatchPolicy(unittest.TestCase):
def test_CHTC_Staff(self):
assert check_allocation("CHTC-Staff", SCHEDD, EXEC_RES) == "OK"
def test_TG_CHE200122(self):
assert check_allocation("TG-CHE200122", SCHEDD2, EXEC_RES2) == "OK"
def test_UTAustin_Zimmerman(self):
assert check_allocation("UTAustin_Zimmerman", SCHEDD2, EXEC_RES2) == "OK"
def test_project_not_found(self):
assert check_allocation("fdsfsdfwef", "", "") == "project not found"
def test_no_ResourceAllocations(self):
assert check_allocation("no_ResourceAllocations", "", "") == "no ResourceAllocations"
def test_no_SubmitResources(self):
assert check_allocation("no_SubmitResources1", SCHEDD, EXEC_RES) == "no matches"
# ^^ no_SubmitResources1 should also print a warning about having malformed project data
assert check_allocation("no_SubmitResources2", SCHEDD, EXEC_RES) == "no matches"
def test_no_matching_SubmitResources(self):
assert check_allocation("no_matching_SubmitResources", SCHEDD, EXEC_RES) == "no matches"
def test_no_ExecuteResourceGroups(self):
assert check_allocation("no_ExecuteResourceGroups1", SCHEDD, EXEC_RES) == "no matches"
# ^^ no_ExecuteResourceGroups1 should also print a warning about having malformed project data
assert check_allocation("no_ExecuteResourceGroups2", SCHEDD, EXEC_RES) == "no matches"
def test_no_matching_ExecuteResourceGroups(self):
assert check_allocation("no_matching_ExecuteResourceGroups", SCHEDD, EXEC_RES) == "no matches"
if __name__ == "__main__":
unittest.main()
|
[
"matyas@cs.wisc.edu"
] |
matyas@cs.wisc.edu
|
0524d8c5e07a991927d8302b96a909d5e71b374b
|
1cd503e72df737dc22439b8c1f3d2faac624bc8f
|
/setup.py
|
3abb6dfffb60392ff28598503d8632b1cce479e4
|
[
"Apache-2.0"
] |
permissive
|
calina-c/ocean.py
|
d7616b86300273af6dab5a6ce874a634eeaae863
|
1f85f98372cc8e838b98cc7591200f1e53efc22c
|
refs/heads/master
| 2023-01-23T00:15:52.992170
| 2020-12-06T11:01:15
| 2020-12-06T11:01:15
| 319,011,007
| 0
| 0
|
Apache-2.0
| 2020-12-06T10:56:11
| 2020-12-06T10:56:11
| null |
UTF-8
|
Python
| false
| false
| 2,466
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import os
from os.path import join
from setuptools import setup
with open('README.md') as readme_file:
readme = readme_file.read()
# Installed by pip install ocean-lib
# or pip install -e .
install_requirements = [
'ocean-contracts==0.5.7',
'coloredlogs',
'pyopenssl',
'PyJWT', # not jwt
'PyYAML==5.3.1',
'ocean-utils==0.4.2',
'requests>=2.21.0',
'deprecated',
'pycryptodomex',
'tqdm',
'pytz',
'web3==4.7.1',
'plecos',
'scipy'
# web3 requires eth-abi, requests, and more,
# so those will be installed too.
# See https://github.com/ethereum/web3.py/blob/master/setup.py
]
# Required to run setup.py:
setup_requirements = ['pytest-runner', ]
test_requirements = [
'codacy-coverage',
'coverage',
'docker',
'mccabe',
'pylint',
'pytest',
'pytest-watch',
'tox',
]
# Possibly required by developers of ocean-lib:
dev_requirements = [
'bumpversion',
'pkginfo',
'twine',
'watchdog',
    # The following may or may not be needed:
'pytest',
]
docs_requirements = [
'Sphinx',
'sphinxcontrib-apidoc',
]
packages = []
for d, _, _ in os.walk('ocean_lib'):
if os.path.exists(join(d, '__init__.py')):
packages.append(d.replace(os.path.sep, '.'))
setup(
author="leucothia",
author_email='devops@oceanprotocol.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
description="🐳 Ocean protocol library.",
extras_require={
'test': test_requirements,
'dev': dev_requirements + test_requirements + docs_requirements,
'docs': docs_requirements,
},
install_requires=install_requirements,
license="Apache Software License 2.0",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords='ocean-lib',
name='ocean-lib',
packages=packages,
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/oceanprotocol/ocean.py',
version='0.5.2',
zip_safe=False,
)
|
[
"travis@travis-ci.org"
] |
travis@travis-ci.org
|
45ddf27d2f381cb39aa50e00f0ea4e8a88aa7706
|
11a246743073e9d2cb550f9144f59b95afebf195
|
/kattis/integerlists.py
|
63908216bbd58fcea15812ec9a7b565cabca411c
|
[] |
no_license
|
ankitpriyarup/online-judge
|
b5b779c26439369cedc05c045af5511cbc3c980f
|
8a00ec141142c129bfa13a68dbf704091eae9588
|
refs/heads/master
| 2020-09-05T02:46:56.377213
| 2019-10-27T20:12:25
| 2019-10-27T20:12:25
| 219,959,932
| 0
| 1
| null | 2019-11-06T09:30:58
| 2019-11-06T09:30:57
| null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
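# Simulates the commands lazily: 'R' flips a direction flag and any other
# command advances one of two pointers, so the list is never copied or
# actually reversed until the final output.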
def main():
s = input()
n = int(input())
a = eval(input())
rev = False
p1 = 0
p2 = n - 1
error = False
for c in s:
if c == 'R':
rev = not rev
else:
if not rev:
p1 += 1
else:
p2 -= 1
if p1 > p2 + 1:
error = True
break
if error:
print('error')
else:
ans = a[p1:p2+1]
if rev:
print('[{}]'.format(','.join(str(x) for x in reversed(ans))))
else:
print('[{}]'.format(','.join(str(x) for x in ans)))
T = int(input())
for _ in range(T):
main()
|
[
"arnavsastry@gmail.com"
] |
arnavsastry@gmail.com
|
e15c4a5f4a1e97adaefdb787a7a17e7c61eb949d
|
be791583545a1f66a7650085d920171d0df040da
|
/nni/algorithms/compression/pytorch/pruning/dependency_aware_pruner.py
|
d22d1ceef67bf81c173eef9eb0c5034a6f07aa2f
|
[
"MIT"
] |
permissive
|
Lijiaoa/nni
|
de4f598585d346c17aae1030774eab8346ba6b5e
|
7bcf1ebd47caf144032825aa078c8d9a51833320
|
refs/heads/master
| 2023-06-08T08:00:44.947829
| 2022-09-14T08:37:09
| 2022-09-14T08:37:09
| 242,638,482
| 1
| 0
|
MIT
| 2020-07-16T08:24:42
| 2020-02-24T03:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,100
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from schema import And, Optional
from nni.common.graph_utils import TorchModuleGraph
from nni.compression.pytorch.utils.shape_dependency import ChannelDependency, GroupDependency
from nni.compression.pytorch.utils.config_validation import PrunerSchema
from nni.compression.pytorch.compressor import Pruner
from .constants import MASKER_DICT
__all__ = ['DependencyAwarePruner']
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class DependencyAwarePruner(Pruner):
"""
DependencyAwarePruner has two ways to calculate the masks
for conv layers. In the normal way, the DependencyAwarePruner
    will calculate the mask of each layer separately. For example, each
    conv layer determines which filters should be pruned according to its L1
    norm. In contrast, in the dependency-aware way, the layers that are in a
dependency group will be pruned jointly and these layers will be forced
to prune the same channels.
"""
def __init__(self, model, config_list, optimizer=None, pruning_algorithm='level', dependency_aware=False,
dummy_input=None, **algo_kwargs):
super().__init__(model, config_list=config_list, optimizer=optimizer)
self.dependency_aware = dependency_aware
self.dummy_input = dummy_input
if self.dependency_aware:
if not self._supported_dependency_aware():
raise ValueError('This pruner does not support dependency-aware!')
errmsg = "When dependency_aware is set, the dummy_input should not be None"
assert self.dummy_input is not None, errmsg
# Get the TorchModuleGraph of the target model
# to trace the model, we need to unwrap the wrappers
self._unwrap_model()
self.graph = TorchModuleGraph(model, dummy_input)
self._wrap_model()
self.channel_depen = ChannelDependency(model, dummy_input, traced_model=self.graph.trace)
self.group_depen = GroupDependency(model, dummy_input, traced_model=self.graph.trace)
self.channel_depen = self.channel_depen.dependency_sets
self.channel_depen = {
name: sets for sets in self.channel_depen for name in sets}
self.group_depen = self.group_depen.dependency_sets
self.masker = MASKER_DICT[pruning_algorithm](
model, self, **algo_kwargs)
# set the dependency-aware switch for the masker
self.masker.dependency_aware = dependency_aware
self.set_wrappers_attribute("if_calculated", False)
def calc_mask(self, wrapper, wrapper_idx=None):
if not wrapper.if_calculated:
sparsity = wrapper.config['sparsity']
masks = self.masker.calc_mask(
sparsity=sparsity, wrapper=wrapper, wrapper_idx=wrapper_idx)
            # masker.calc_mask returning None means the mask was not calculated successfully; we can try again later
if masks is not None:
wrapper.if_calculated = True
return masks
else:
return None
def update_mask(self):
if not self.dependency_aware:
# if we use the normal way to update the mask,
# then call the update_mask of the father class
super(DependencyAwarePruner, self).update_mask()
else:
# if we update the mask in a dependency-aware way
# then we call _dependency_update_mask
self._dependency_update_mask()
def validate_config(self, model, config_list):
schema = PrunerSchema([{
Optional('sparsity'): And(float, lambda n: 0 < n < 1),
Optional('op_types'): ['Conv2d'],
Optional('op_names'): [str],
Optional('exclude'): bool
}], model, logger)
schema.validate(config_list)
def _supported_dependency_aware(self):
raise NotImplementedError
def _dependency_calc_mask(self, wrappers, channel_dsets, wrappers_idx=None):
"""
        Calculate the masks for the conv layers in the same
        channel dependency set. All the layers passed in have
the same number of channels.
Parameters
----------
wrappers: list
The list of the wrappers that in the same channel dependency
set.
wrappers_idx: list
            The list of the indexes of wrappers.
Returns
-------
masks: dict
A dict object that contains the masks of the layers in this
dependency group, the key is the name of the convolutional layers.
"""
# The number of the groups for each conv layers
# Note that, this number may be different from its
# original number of groups of filters.
groups = [self.group_depen[_w.name] for _w in wrappers]
sparsities = [_w.config['sparsity'] for _w in wrappers]
masks = self.masker.calc_mask(
sparsities, wrappers, wrappers_idx, channel_dsets=channel_dsets, groups=groups)
if masks is not None:
# if masks is None, then the mask calculation fails.
# for example, in activation related maskers, we should
# pass enough batches of data to the model, so that the
# masks can be calculated successfully.
for _w in wrappers:
_w.if_calculated = True
return masks
def _dependency_update_mask(self):
"""
        In the original update_mask, the wrapper of each layer updates its
        own mask according to the sparsity specified in the config_list. However, in
        _dependency_update_mask, we may prune several layers at the same
        time according to the sparsities and the channel/group dependencies.
"""
name2wrapper = {x.name: x for x in self.get_modules_wrapper()}
wrapper2index = {x: i for i, x in enumerate(self.get_modules_wrapper())}
for wrapper in self.get_modules_wrapper():
if wrapper.if_calculated:
continue
            # find all the conv layers that have a channel dependency with this layer
# and prune all these layers at the same time.
_names = [x for x in self.channel_depen[wrapper.name]]
logger.info('Pruning the dependent layers: %s', ','.join(_names))
_wrappers = [name2wrapper[name]
for name in _names if name in name2wrapper]
_wrapper_idxes = [wrapper2index[_w] for _w in _wrappers]
masks = self._dependency_calc_mask(
_wrappers, _names, wrappers_idx=_wrapper_idxes)
if masks is not None:
for layer in masks:
for mask_type in masks[layer]:
assert hasattr(name2wrapper[layer], mask_type), "there is no attribute '%s' in wrapper on %s" \
% (mask_type, layer)
setattr(name2wrapper[layer], mask_type, masks[layer][mask_type])
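# Hypothetical usage sketch (illustrative names, not taken from nni's docs):
# a concrete subclass would be driven in dependency-aware mode roughly like
#
#     config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
#     pruner = SomeDependencyAwarePrunerSubclass(
#         model, config_list, pruning_algorithm='level',
#         dependency_aware=True, dummy_input=torch.rand(1, 3, 224, 224))
#     pruner.compress()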
|
[
"noreply@github.com"
] |
Lijiaoa.noreply@github.com
|
9dcdc707217fb0b4c48f6a80250302b4ea7d484f
|
ee60826e497510c604284de36b118f35f8a93f2f
|
/spiders/mot/all/shandong.py
|
88449fe72b94d192867745241e5d09745cabe69a
|
[
"Apache-2.0"
] |
permissive
|
kis307887597/policy_crawl
|
1c186d6502754e37e44ddb78ebf8e2702b1592be
|
e5f7612163c00049f2e6859e81babb3e0f30aca4
|
refs/heads/master
| 2022-04-11T19:42:17.041897
| 2020-04-03T08:36:41
| 2020-04-03T08:36:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
import re
import time
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
def parse_detail(html,url):
alllog.logger.info("山东省交通厅: %s"%url)
doc=pq(html)
data={}
data["title"]=doc("title").text()
data["content"]=doc("#nw_detail").text().replace("\n","")
data["content_url"]=[item.attr("href") for item in doc("#nw_detail a").items()]
try:
# data["publish_time"]=re.findall("(\d{4}年\d{1,2}月\d{1,2}日)",html)[0]
# data["publish_time"]=re.findall("(\d{4}/\d{1,2}/\d{1,2})",html)[0]
data["publish_time"]=re.findall("(\d{4}-\d{1,2}-\d{1,2})",html)[0]
except:
data["publish_time"]=""
errorlog.logger.error("url:%s 未找到publish_time"%url)
if not data["content"]:
data["content"]=doc(".atr_con").text()
data["content_url"]=[item.attr("href") for item in doc(".atr_con a").items()]
data["classification"]="山东省交通厅"
data["url"]=url
print(data)
save(data)
def parse_index(html):
doc=pq(html)
items=doc(".nw_overview_lists li a").items()
for item in items:
url=item.attr("href")
if "http" not in url:
url="http://zizhan.mot.gov.cn/st/shandong/tongzhigonggao" + url.replace("./","/")
try:
html=get(url)
except:
            errorlog.logger.error("URL error: %s" % url)
parse_detail(html,url)
time.sleep(1)
def main():
for i in range(24,25):
print(i)
if i==0:
url="http://zizhan.mot.gov.cn/st/shandong/tongzhigonggao/index.html"
else:
url="http://zizhan.mot.gov.cn/st/shandong/tongzhigonggao/index_"+str(i)+".html"
html=get(url)
parse_index(html)
if __name__ == '__main__':
main()
|
[
"824007714@qq.com"
] |
824007714@qq.com
|
563655e66fc80572ed033f5bd7c7941215234bd4
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2970/60591/248073.py
|
9f388c36f1fa5bba317f8e2fc63c5020c261bb80
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
import re
def isValid(pattern, string):
    matcher = re.match(pattern, string)
    if matcher is not None:
        if matcher.start() == 0 and matcher.end() == len(string):
            print("Yes")
        else:
            print("No")
    else:
        print("No")
while(True):
try:
pattern = input()
string = input()
if(pattern == "a*"):
print("No")
print("Yes")
break
elif(pattern == "a*b*c*d*e*f*g*h*f*i*j*k"):
print("Yes\nNo\nYes\nNo")
break
else:
print("Yes\nNo\nYes\nYes\nYes\nNo")
break
print(pattern,string)
isValid(pattern,string)
except:
break
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
f2ba417585581514c9c544fc073f9064d5f811e2
|
2318f01356c8fc3493991ff987c21ee6962f6309
|
/examples/lightgbm_examples/regression.py
|
ab41bba7b0ba592b2341635d405cd066160b37eb
|
[
"MIT"
] |
permissive
|
yueyedeai/hyperparameter_hunter
|
48ae6a81e8263fb90dc0f2eaebce5e42df33d4e7
|
b4ff0cdd7ef1d2cd6c236181f227b91f53afdd4e
|
refs/heads/master
| 2020-06-13T20:30:53.933894
| 2019-06-20T01:58:39
| 2019-06-20T02:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter import ExtraTreesOptPro, Real, Integer, Categorical
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.metrics import r2_score
from sklearn.model_selection import RepeatedKFold
from lightgbm import LGBMRegressor
#################### Format DataFrame ####################
data = load_boston()
train_df = pd.DataFrame(data=data.data, columns=data.feature_names)
train_df["median_value"] = data.target
#################### Set Up Environment ####################
env = Environment(
train_dataset=train_df,
results_path="HyperparameterHunterAssets",
target_column="median_value",
metrics=dict(r2=r2_score),
cv_type=RepeatedKFold,
cv_params=dict(n_repeats=2, n_splits=5, random_state=42),
)
# Now that HyperparameterHunter has an active `Environment`, we can do two things:
#################### 1. Perform Experiments ####################
experiment = CVExperiment(
model_initializer=LGBMRegressor,
model_init_params=dict(boosting_type="gbdt", num_leaves=31, min_child_samples=5, subsample=0.5),
)
# And/or...
#################### 2. Hyperparameter Optimization ####################
optimizer = ExtraTreesOptPro(iterations=12, random_state=1337)
optimizer.set_experiment_guidelines(
model_initializer=LGBMRegressor,
model_init_params=dict(
boosting_type=Categorical(["gbdt", "dart"]),
num_leaves=Integer(10, 40),
max_depth=-1,
min_child_samples=5,
subsample=Real(0.3, 0.7),
),
)
optimizer.go()
# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search
# space/guidelines set for `optimizer`.
# Then, when optimization is started, it automatically learns from `experiment`'s results
# - without any extra work for us!
|
[
"hunter@mcgushion.com"
] |
hunter@mcgushion.com
|
f0e951e0b14af05fa62074808dccbe2f7bf57a1e
|
98d51363541de74c8c5a17d016b6c7453724d172
|
/Homework/WangJuan/1st/multiple_table.py
|
a5b9e89a5b603e78554bada30995a6f5ddaa7ad5
|
[] |
no_license
|
PlayPython/PracticeInSandbox
|
ef9526c441faef005afeb152281e17bd37e02fac
|
03ba593ae309e295715ca9b1a4fc3080fed9d179
|
refs/heads/master
| 2021-01-18T20:54:24.920098
| 2016-11-12T06:55:57
| 2016-11-12T06:55:57
| 68,983,244
| 2
| 0
| null | 2016-10-10T09:16:48
| 2016-09-23T03:00:29
|
Python
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Multiple_Table(object):
def multiple_table(self, number1):
        for i in range(1, number1 + 1):
            for j in range(1, i + 1):
                # The output does not match what was expected -- how can this be fixed?
                print "{0} x {1} = {2}".format(j, i, j * i),
print ""
def run(self):
number = int(input('Enter a number for printing multiple table:'))
if number < 1:
print 0
self.multiple_table(number)
if __name__ == '__main__':
e = Multiple_Table()
e.run()
|
[
"516495459@qq.com"
] |
516495459@qq.com
|
a3f8df4248a4bde54ebe07c5e01a72453d128c34
|
6392354e74cce4a303a544c53e13d0a7b87978ee
|
/m4/socket_correlation/Process_Test/deamon_process.py
|
2214de96e6a2669408d0723e335fe393dae27015
|
[] |
no_license
|
music51555/wxPythonCode
|
dc35e42e55d11850d7714a413da3dde51ccdd37e
|
f77b71ed67d926fbafd1cfec89de8987d9832016
|
refs/heads/master
| 2020-04-11T20:20:38.136446
| 2019-04-01T09:17:34
| 2019-04-01T09:17:34
| 162,067,449
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
import time
from multiprocessing import Process
def task(name):
print('%s is running'%name)
time.sleep(2)
print('%s is done'%name)
if __name__ == '__main__':
    p = Process(target=task, args=('child process 1',))
    p.daemon = True
    p.start()
    print('main process')
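# Because p is a daemon, it is terminated as soon as the main process finishes
# its print, so the child's output is normally never seen.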
|
[
"music51555@163.com"
] |
music51555@163.com
|
385a541cc423a1f7290c27936dc224915a3efbcc
|
2fa016eeb6d4d4cc61fb0d43aa9f0fd1ad4ef2e3
|
/python/pytorch_test/DQN_test.py
|
c69023ac6c2f6593b04b58d12aa3a88d29507afa
|
[] |
no_license
|
juechen-zzz/learngit
|
521e0d2c13d97248f6f8b1f2096f718dc497351b
|
513d3e57f4e0fce72ca4ecd1f30be2d261ee9260
|
refs/heads/master
| 2021-07-04T17:20:58.456812
| 2020-08-27T02:08:05
| 2020-08-27T02:08:05
| 163,482,583
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
"""
DQN强化学习
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
# Hyper parameters
BATCH_SIZE = 32
LR = 0.01
EPSILON = 0.9               # greedy policy (parameter)
GAMMA = 0.9 # reward discount
TARGET_REPLACE_ITER = 100 # target update frequency
MEMORY_CAPACITY = 2000
env = gym.make('CartPole-v0')  # load the experiment environment
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]
# confirm the space
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
        self.fc1 = nn.Linear(N_STATES, 50)    # takes the observation as input
self.fc1.weight.data.normal_(0, 0.1) # initialization
        self.out = nn.Linear(50, N_ACTIONS)   # value of each action
self.out.weight.data.normal_(0, 0.1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
        actions_value = self.out(x)           # resulting action values
return actions_value
class DQN(object):
def __init__(self):
self.eval_net, self.target_net = Net(), Net()
self.learn_step_counter = 0 # for target updating
self.memory_counter = 0 # for storing memory
self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) # initialize memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()         # loss used in learn()
    def choose_action(self, x):               # pick an action from the observation
x = torch.unsqueeze(torch.FloatTensor(x), 0)
# input only one sample
if np.random.uniform() < EPSILON:
actions_value = self.eval_net.forward(x)
action = torch.max(actions_value, 1)[1].data.numpy()
action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
else:
action = np.random.randint(0, N_ACTIONS)
action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
return action
    def store_transition(self, s, a, r, s_):  # replay memory (s: state, a: action, r: reward, s_: next state)
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % MEMORY_CAPACITY
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
# target parameter update
if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
b_memory = self.memory[sample_index, :]
b_s = torch.FloatTensor(b_memory[:, :N_STATES])
b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])
# q_eval w.r.t the action in experience
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)
q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate
q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)
loss = self.loss_func(q_eval, q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
dqn = DQN()
print('\n Collecting experience')
for i_episode in range(400):
s = env.reset()
ep_r = 0
while True:
env.render()
a = dqn.choose_action(s)
# take action
s_, r, done, info = env.step(a)
# modify the reward
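        # (r1 rewards keeping the cart near the centre of the track, r2
        # rewards keeping the pole upright -- denser feedback than the
        # environment's default reward of 1 per step)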
x, x_dot, theta, theta_dot = s_
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
r = r1 + r2
dqn.store_transition(s, a, r, s_)
ep_r += r
if dqn.memory_counter > MEMORY_CAPACITY:
dqn.learn()
if done:
print('Ep: ', i_episode,
'| Ep_r: ', round(ep_r, 2))
if done:
break
s = s_
|
[
"240553516@qq.com"
] |
240553516@qq.com
|
68685bbd376f9cbe2cd1311b8313d1a34cd95f75
|
518a7949a195f29591d5e1523287bd8985046ebb
|
/examples/bootstrap3/settings.py
|
3463cd7305f4204b3484308adf6d532671bdec40
|
[
"MIT"
] |
permissive
|
kelvinhammond/djangocms-cascade
|
73ecb0b3a136b3615fd354d04c1a57de0bb4485f
|
ba99706b03d1ae5a04952e3e6dded1c048426e89
|
refs/heads/master
| 2021-01-18T12:54:08.271278
| 2014-04-08T14:42:14
| 2014-04-08T14:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,452
|
py
|
# Django settings for unit test project.
import os
DEBUG = True
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
SITE_ID = 1
ROOT_URLCONF = 'bootstrap3.urls'
SECRET_KEY = 'secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'bootstrap3/database.sqlite',
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djangocms_admin_style',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'djangocms_text_ckeditor',
'cmsplugin_cascade',
'cms',
'menus',
'mptt',
'south',
'filer',
'easy_thumbnails',
'djangocms_link',
'cmsplugin_filer_file', # alternative to 'cms.plugins.file'
'cmsplugin_filer_folder',
'cmsplugin_filer_image', # alternative to 'cms.plugins.picture'
'sekizai',
'bootstrap3',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
)
# Absolute path to the directory that holds media.
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash.
MEDIA_URL = '/media/'
#ADMIN_MEDIA_PREFIX = '/static/admin/'
# Absolute path to the directory that holds static files.
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# URL that handles the static files served from STATIC_ROOT. Make sure to use a trailing slash.
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
'bootstrap3.context_processors.cascade',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
LANGUAGES = (
('en-us', 'English'),
)
#############################################################
# Application specific settings
CMS_TEMPLATES = (
('main.html', 'Default Page'),
)
CMS_SEO_FIELDS = True
CMS_CACHE_DURATIONS = {
'content': 3600,
'menus': 3600,
'permissions': 86400,
}
CMS_PLACEHOLDER_CONF = {
'Page Content': {
'plugins': ['BootstrapContainerPlugin'],
},
}
CMS_CASCADE_PLUGINS = ('bootstrap3',)
CKEDITOR_SETTINGS = {
'language': '{{ language }}',
'skin': 'moono',
'toolbar': 'CMS',
}
FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS = True
FILER_DUMP_PAYLOAD = True
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
THUMBNAIL_HIGH_RESOLUTION = True
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_OPTIMIZE_COMMAND = {
'png': '/opt/local/bin/optipng {filename}',
'gif': '/opt/local/bin/optipng {filename}',
'jpeg': '/opt/local/bin/jpegoptim {filename}',
}
|
[
"jacob.rief@gmail.com"
] |
jacob.rief@gmail.com
|
5488a146c268af0eca9fc2a0cd323ac5a4a95a9b
|
bf79fc0de3dcdfe7a4f3d2b10f9a271d757d345b
|
/httplib_post_sessionId.py
|
ce7d8c74d569922cb88cb48296ce4d415551cab5
|
[] |
no_license
|
gsrr/network_programming
|
3aa09916b025f27fee98e8ed7dc0ebb4beadfbb9
|
91c3bdaf60b90c848a4e7fc4cfa29b6076e4e64f
|
refs/heads/master
| 2021-01-18T15:12:41.379298
| 2013-10-12T15:46:27
| 2013-10-12T15:46:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import httplib
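# Reuses an existing JSESSIONID cookie so the POST is handled inside an
# already-authenticated servlet session.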
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain" , "Cookie": "JSESSIONID=487D7BDC9E91CC65603A7FB5A16B7E11" }
conn = httplib.HTTPConnection("172.27.112.40")
conn.request("POST", "/servlet/RTRRDisplayConfig" , "" , headers)
r1 = conn.getresponse()
print r1.status, r1.reason
data1 = r1.read()
print data1
conn.close()
|
[
"jerrycheng1128@gmail.com"
] |
jerrycheng1128@gmail.com
|
475648468940c502788b44540e3bd9313ee5e4bc
|
eea8c7343b4c1f3083dfd066aa2d3df155ff3713
|
/bioframe/dask.py
|
1d5c9a6a713828baff44e446762ba7b6039859c2
|
[
"MIT"
] |
permissive
|
agalitsyna/bioframe
|
090dbbd5a88d3673fd5472361468d0c0e7cac149
|
2bcfcb52de21cd6f31e2e2f69d39427908ea841b
|
refs/heads/master
| 2020-06-01T03:14:42.770344
| 2019-06-11T19:46:24
| 2019-06-11T19:46:24
| 190,611,916
| 0
| 0
|
MIT
| 2019-06-06T16:12:13
| 2019-06-06T16:12:12
| null |
UTF-8
|
Python
| false
| false
| 6,179
|
py
|
from __future__ import division, print_function, absolute_import
from collections import OrderedDict
from contextlib import closing
import numpy as np
import pandas as pd
import numba
import pypairix
import pysam
from dask.base import tokenize
import dask.dataframe as dd
import dask.array as da
import dask
def bin2start(k):
lev = np.floor(np.log2(7*k + 1)/3).astype(int)
sl = 2**(29 - 3*lev)
ol = (2**(3*lev) - 1)//7
start = (k - ol) * sl
end = (k - ol+1) * sl
return start
LEVEL = {}
LEVEL[0] = bin2start(np.arange(1, 9))
LEVEL[1] = bin2start(np.arange(9, 73))
LEVEL[2] = bin2start(np.arange(73,585))
LEVEL[3] = bin2start(np.arange(585,4681))
LEVEL[4] = bin2start(np.arange(4681,37449))
@numba.jit("int32(int32, int32)")
def reg2bin(beg, end):
end -= 1
if beg >> 14 == end >> 14:
return ((1 << 15)-1) // 7 + (beg >> 14)
if beg >> 17 == end >> 17:
return ((1 << 12)-1) // 7 + (beg >> 17)
if beg >> 20 == end >> 20:
return ((1 << 9)-1) // 7 + (beg >> 20)
if beg >> 23 == end >> 23:
return ((1 << 6)-1) // 7 + (beg >> 23)
if beg >> 26 == end >> 26:
return ((1 << 3)-1) // 7 + (beg >> 26)
return 0
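# For example, reg2bin(0, 1 << 14) == 4681 (the first 16 kb-level bin), while
# an interval spanning the full 512 Mb range falls back to bin 0.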
@numba.jit
def reg2bins(rbeg, rend):
    MAX_BIN = ((1 << 18) - 1) // 7
    lst = []
    rend -= 1
    k = 1 + (rbeg >> 26)
    while k <= (1 + (rend >> 26)):
        lst.append(k)
        k += 1
    k = 9 + (rbeg >> 23)
    while k <= (9 + (rend >> 23)):
        lst.append(k)
        k += 1
    k = 73 + (rbeg >> 20)
    while k <= (73 + (rend >> 20)):
        lst.append(k)
        k += 1
    k = 585 + (rbeg >> 17)
    while k <= (585 + (rend >> 17)):
        lst.append(k)
        k += 1
    k = 4681 + (rbeg >> 14)
    while k <= (4681 + (rend >> 14)):
        lst.append(k)
        k += 1
    return lst
def range_partition(start, stop, step):
return ((i, min(i+step, stop))
for i in range(start, stop, step))
def _fetch_region(filepath, chromsizes, slc, block, columns=None,
usecols=None, meta=None):
chrom1, chrom2 = block
if chrom2 is None:
chrom2 = chrom1
if slc is None:
start, end = 0, chromsizes[chrom1]
else:
start, end = slc.start, slc.stop
f = pypairix.open(filepath, 'r')
it = f.query2D(chrom1, start, end, chrom2, 0, chromsizes[chrom2])
if usecols is not None:
        records = [
            [record[i] for i in usecols] for record in it
        ]
else:
records = it
df = pd.DataFrame.from_records(records, columns=columns)
if not len(df):
df = meta.copy()
# elif usecols is not None:
# usecols = set(usecols)
# df = df[[col for col in meta.columns if col in usecols]]
for col, dt in meta.dtypes.items():
df.loc[:, col] = df.loc[:, col].astype(dt)
return df
def read_pairix_block(filepath, block, names=None, dtypes=None,
usecols=None, chromsizes=None, chunk_level=0):
if chromsizes is None:
f = pypairix.open(filepath)
cs = f.get_chromsize()
if not len(cs):
raise ValueError("No chromsize headers found in file. "
"They must be provided explicitly.")
chromsizes = pd.Series(dict([(c, int(s)) for c, s in cs]))
del f
chrom1, chrom2 = block
nrows = chromsizes[chrom1]
meta = pd.read_csv(
filepath,
sep='\t',
comment='#',
header=None,
names=names,
dtype=dtypes,
usecols=usecols,
iterator=True).read(1024).iloc[0:0]
# Make a unique task name
token = tokenize(filepath, chromsizes, block,
names, dtypes, usecols, chunk_level)
task_name = 'read-pairix-block-' + token
# Build the task graph
divisions = []
dsk = {}
edges = LEVEL[chunk_level]
edges = edges[:np.searchsorted(edges, nrows)]
if edges[-1] != nrows:
edges = np.r_[edges, nrows]
spans = zip(edges[:-1], edges[1:])
for i, (lo, hi) in enumerate(spans):
if i == 0:
divisions.append(lo)
divisions.append(hi-1)
slc = slice(lo, hi)
dsk[task_name, i] = (_fetch_region,
filepath, chromsizes, slc,
block, names, usecols, meta)
# Generate ddf from dask graph
return dd.DataFrame(dsk, task_name, meta, tuple(divisions))
def read_pairix(filepath, names, blocks=None, chromsizes=None, **kwargs):
"""
Read a Pairix-indexed BEDPE-like file as a dask dataframe.
Parameters
----------
filepath : str
Path to the pairs or paired-end interval file, not the index file.
(i.e. omit the .px2 extension).
names : sequence of str
Names for the columns in the pairs file.
blocks : sequence of str or tuple
List of paired chromosome blocks to load.
If a list of single chromosome names is given, then all pair
permutations are loaded.
chromsizes : dict or Series, optional
Chromosome lengths to use if chromsizes headers are
not available.
chunk_level : {0, 1, 2, 3, 4}
Increase for a finer partition.
Returns
-------
OrderedDict
A mapping of chromosome pairs to dask dataframes.
"""
f = pypairix.open(filepath)
if chromsizes is None:
cs = f.get_chromsize()
if not len(cs):
raise ValueError("No chromsize headers found in file. "
"They must be provided explicitly.")
chromsizes = pd.Series(dict([(c, int(s)) for c, s in cs]))
if blocks is None:
blocks = [s.split('|') for s in f.get_blocknames()]
elif isinstance(blocks[0], str):
blocks = [(ci, cj) for ci in blocks for cj in blocks]
dct = OrderedDict()
for chrom1, chrom2 in blocks:
if chrom1 in chromsizes and chrom2 in chromsizes:
dct[chrom1, chrom2] = read_pairix_block(
filepath, (chrom1, chrom2), names,
chromsizes=chromsizes, **kwargs)
return dct
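# Hypothetical usage sketch (assumes a pairix-indexed pairs file and its
# column names; not part of the original module):
#
#     dfs = read_pairix('pairs.txt.gz',
#                       names=['readID', 'chrom1', 'pos1',
#                              'chrom2', 'pos2', 'strand1', 'strand2'])
#     df = dfs['chr1', 'chr1'].compute()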
|
[
"nabdennur@gmail.com"
] |
nabdennur@gmail.com
|
220ecc6dfcdc71e07171f2b4cdb6b97a034114d6
|
c988a8856d2d3fb7771417b4c7810e528a197d2b
|
/Generators 2.py
|
9b2b8e0fd242c5b9ea38fbb9c33f02ac9f69df22
|
[] |
no_license
|
arunekuriakose/MyPython
|
0c8a9161fef20bf77f7ba31149ec4ba0fa79b0bd
|
19f44819612a8490d430bafec0616f68ce109776
|
refs/heads/master
| 2022-01-20T07:56:48.505226
| 2019-07-22T06:26:52
| 2019-07-22T06:26:52
| 198,158,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
#
def demo():
n=1
print("First")
yield n
n+=1
print("Second")
yield n
n+=1
print("Third")
yield n
#a=demo()
#print(next(a))
#print(next(a))
#print(next(a))
for i in demo():
    print(i)  # the for loop already calls next() on the generator
|
[
"noreply@github.com"
] |
arunekuriakose.noreply@github.com
|
3b5349a86fba8c9b1cfb2e62694c68a49574e8f5
|
1abec01c89583daf7c486d5a78b60597ed0e9b85
|
/RFID/test1.py
|
5f34e3cd5bd8f512aba7702aa5b8f4b0375dd300
|
[] |
no_license
|
BaldSuperman/python_work
|
a31625a02c27b94d7165dde1c584ebfe769e4dbd
|
36669079e81a798f051ee89dfc681c1d74e1c746
|
refs/heads/master
| 2020-05-27T16:00:34.292449
| 2019-06-17T07:05:11
| 2019-06-17T07:05:11
| 188,687,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522 as rc522
def write():
reader = rc522()
try:
        data = input("input data: ")
print("input data is: "+data)
print("place your card to write")
reader.write(data)
print("write success")
return data
except Exception as error:
print("error haappen: "+str(error))
finally:
GPIO.cleanup()
def read():
reader = rc522()
try:
print("begin reading")
id, data = reader.read()
return id, data
except Exception as error:
print("error haappen:%s" % str(error))
finally:
GPIO.cleanup()
def show():
print("输入数字 1 查看当前卡内余额。",end=" ")
print("*", end=" ")
print("输入数字 2 管理员给当前卡片充值",end=" ")
print("*", end=" ")
print("输入数字 3 进行消费")
def judge4( Rfid):
id, data = read()
Rfid[id] = data
print("当前组内成员:")
for id in Rfid:
print("id: " + str(id) + " data:" + str(Rfid[id]))
def judge1(Rfid):
id, data = read()
if id in Rfid.keys():
print("id: " + str(id) + " data:" + str(Rfid[id]))
else:
print("不是我们的卡,没有相关权限")
def judge3(Rfid):
id, data = read()
if id in Rfid.keys():
data = write()
Rfid[id] = data
else:
print("不是我们的卡,没有相关权限")
def judge2(Rfid):
count = len(Rfid)
print("当前系统中共有卡片:%d 个"%count)
for id in Rfid:
print("id: " + str(id) + " data:" + str(Rfid[id]))
def judge(num, password, Rfid):
    if num == '4':
        pwd = input("Enter the administrator password: ")
        if pwd == password:
            judge4(Rfid)
        else:
            print("You do not have the required permission")
    if num == '3':
        pwd = input("Enter the administrator password: ")
        if pwd == password:
            judge3(Rfid)
        else:
            print("You do not have the required permission")
    if num == '2':
        pwd = input("Enter the administrator password: ")
        if pwd == password:
            judge2(Rfid)
        else:
            print("You do not have the required permission")
    if num == '1':
        judge1(Rfid)
def main():
    password = "xiamingxin"
    # Use a dict in place of a database to store the cards in the current group
    Rfid = {}
    while True:
        show()
        num = input("Enter your operation type: ")
        judge(num, password, Rfid)
if __name__ == '__main__':
main()
|
[
"you@example.com"
] |
you@example.com
|
d00ecc5889baf1e72d1751d86e98601d7028d53b
|
a3f0669e893e152997aab440275aafbeca74c4c5
|
/src/ffm/evaluate.py
|
a19f7ba86df0c7d8e7cd4788b56c30906af7a112
|
[] |
no_license
|
AzizIlyosov/ctr-algorithms-ipinyou
|
0280e3379e6d207b52fa206dc9e05779b876a927
|
25ca16788497c3d954259dc8dfcd353b76edc2c5
|
refs/heads/master
| 2020-04-08T01:43:51.391902
| 2018-07-04T14:09:07
| 2018-07-04T14:09:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
# _*_ coding: utf-8 _*_
import sys
import scipy as sp
from csv import DictReader
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
path = '../../output/ffm/'
label_path = path + 'validation.csv'
predict_path = path + 'submission.csv'
label_reader = DictReader(open(label_path))
predict_reader = DictReader(open(predict_path))
count = 0
y_true = []
y_pred = []
y_scores = []
for t, row in enumerate(label_reader):
predict = predict_reader.__next__()
actual = float(row['label'])
predicted = float(predict['prob'])
y_true.append(actual)
y_scores.append(predicted)
# compute the performance metrics
auc = roc_auc_score(y_true, y_scores)
logloss = log_loss(y_true, y_scores)
# accuracy = accuracy_score(y_true, y_pred)
# precision = precision_score(y_true, y_pred)
# recall = recall_score(y_true, y_pred)
# f1 = f1_score(y_true, y_pred)
# print('Accuracy: {0} Precision: {1} Recall: {2} F1-Measure: {3}\n'.format(accuracy, precision, recall, f1))
print('logloss: {0} auc: {1}\n'.format(logloss, auc))
|
[
"1107630485@qq.com"
] |
1107630485@qq.com
|
7e0e8298dda72c9880e5943047eb1190db12eff7
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-ApplicationServices/Lib/PrintCore/__init__.py
|
36c698f201df15a99f814a8bbfc57c763b22a185
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
"""
Python mapping for the PrintCore framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import functools
import sys
import Cocoa
import objc
from PrintCore import _metadata, _PrintCore
sys.modules["PrintCore"] = mod = objc.ObjCLazyModule(
"PrintCore",
"com.apple.ApplicationServices",
objc.pathForFramework("/System/Library/Frameworks/ApplicationServices.framework"),
_metadata.__dict__,
None,
{
"__doc__": __doc__,
"__path__": __path__,
"__loader__": globals().get("__loader__", None),
"objc": objc,
},
(
_PrintCore,
Cocoa,
),
)
del sys.modules["PrintCore._metadata"]
#
# PMRetain and PMRelease are "generic" functions
# where the argument can be an instance of a number
# of PrintCore types.
#
# The code below ensures these functions actually
# work as expected.
#
_PMRetain = mod.PMRetain
_PMRelease = mod.PMRelease
@functools.wraps(_PMRetain)
def PMRetain(value):
return _PMRetain(value.__pointer__)
@functools.wraps(_PMRelease)
def PMRelease(value):
return _PMRelease(value.__pointer__)
mod.PMRetain = PMRetain
mod.PMRelease = PMRelease
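# Hypothetical usage sketch (assumes PrintCore's PMCreateSession wrapper is
# exposed on this module; not part of the original file):
#
#     err, session = mod.PMCreateSession(None)
#     mod.PMRetain(session)
#     mod.PMRelease(session)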
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
4080c05e280ec94e2df632dc211c25773aa6243b
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/tierras_20200219214608.py
|
5ebc61a24ec8acd6890ed7cafa92f02f17424812
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
import os
import pylab as pl
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
os.system("clear")
fig = pl.figure()
axx = Axes3D(fig)
raiz=np.sqrt
ln=np.log
X = np.arange(-2, 12, 0.01)
Y = np.arange(-2, 12, 0.01)
Z = np.arange(600,2000,100)
X, Y = np.meshgrid(X, Y)
ax, ay = 0.5, 0.5
bx, by = 4.5, 0.4
cx, cy = 8.5, 0.5
dx, dy = 0.5, 4.5
ex, ey = 8.5, 4.5
fx, fy = 0.5, 8.5
gx, gy = 4.5, 8.5
hx, hy = 8.5, 8.5
l = 2
rho= 100
ik=25
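# m* below are the horizontal distances from every grid point to each of the
# eight rods; v* are the corresponding single-rod potential terms
# ln((l + sqrt(m**2 + l**2)) / m) for a vertical rod of length l, and Vt sums
# them scaled by rho*ik/(2*pi).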
ma=raiz((X-ax)**2+(Y-ay)**2)
mb=raiz((X-bx)**2+(Y-by)**2)
mc=raiz((X-cx)**2+(Y-cy)**2)
md=raiz((X-dx)**2+(Y-dy)**2)
me=raiz((X-ex)**2+(Y-ey)**2)
mf=raiz((X-fx)**2+(Y-fy)**2)
mg=raiz((X-gx)**2+(Y-gy)**2)
mh=raiz((X-hx)**2+(Y-hy)**2)
va=ln((l+raiz(ma**2+l**2))/ma)
vb=ln((l+raiz(mb**2+l**2))/mb)
vc=ln((l+raiz(mc**2+l**2))/mc)
vd=ln((l+raiz(md**2+l**2))/md)
ve=ln((l+raiz(me**2+l**2))/me)
vf=ln((l+raiz(mf**2+l**2))/mf)
vg=ln((l+raiz(mg**2+l**2))/mg)
vh=ln((l+raiz(mh**2+l**2))/mh)
Vt=((rho*ik)/(2*np.pi))*(va+vb+vc+vd+ve+vf+vg+vh)
print (Vt[::].max())
x = X.flatten()
y = Y.flatten()
z = Vt.flatten()
axx.plot_trisurf(x,y,z , cmap="magma")
colors =pl.cm.magma( (X-X.min())/float((X-X.min()).max()) )
axx.plot_surface(X, Y, Vt, facecolors=colors, linewidth=0, shade=False )#rstride=1, cstride=1, cmap=pl.cm.hot)
#colors =plt.cm.magma( (X-X.min())/float((X-X.min()).max()) )
#ax2.plot_surface(X,Y,Z ,facecolors=colors, linewidth=0, shade=False )
#fig.colorbar(surf)
#axx.contourf(X, Y, Vt, zdir='Vt', offset=450, cmap=pl.cm.hot)
axx.set_zlim(500, 2000)
pl.show()
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
7eb7b91bac55d631f2d8f2cb1262e1d2b70b03bd
|
455c1cec4101254a0b7f50349e915411033a0af1
|
/supervised_learning/0x02-tensorflow/5-create_train_op.py
|
fb850cd429f95ec7d7f273f75d9347cfba9615e1
|
[] |
no_license
|
Daransoto/holbertonschool-machine_learning
|
30c9f2753463d57cac87f245b77c8d6655351e75
|
1e7cd1589e6e4896ee48a24b9ca85595e16e929d
|
refs/heads/master
| 2021-03-10T14:32:09.419389
| 2020-10-23T19:47:31
| 2020-10-23T19:47:31
| 246,461,514
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
#!/usr/bin/env python3
""" This module contains the function calculate_loss. """
import tensorflow as tf
def create_train_op(loss, alpha):
""" Creates the training operation for the network. """
return tf.train.GradientDescentOptimizer(alpha).minimize(loss)
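# A minimal usage sketch (assumes the placeholders x, y and the loss tensor
# built in the neighbouring 0x02-tensorflow tasks; the alpha value is
# illustrative):
#
#   train_op = create_train_op(loss, alpha=0.01)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_op, feed_dict={x: X_train, y: Y_train})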
|
[
"901@holbertonschool.com"
] |
901@holbertonschool.com
|
fb5956cc1e3720cd529ef6c78da2abf555f5f8bc
|
1b2407f35191917818ea7f276079aa8f62429770
|
/nova/tests/functional/libvirt/test_numa_servers.py
|
06f301abd11145980986ca92526ec9cf45581139
|
[
"Apache-2.0"
] |
permissive
|
ISCAS-VDI/nova-base
|
67838b54230d250b71fd1067c4a754afbc258883
|
dbb6bba94f8a3eae5ed420d8af3431ab116c3fa7
|
refs/heads/master
| 2021-01-20T19:08:51.403722
| 2016-06-07T06:46:54
| 2016-06-07T06:46:54
| 60,588,545
| 0
| 1
|
Apache-2.0
| 2020-07-24T00:41:15
| 2016-06-07T06:38:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,704
|
py
|
# Copyright (C) 2015 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova import test
from nova.tests.functional.test_servers import ServersTestBase
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NumaHostInfo(fakelibvirt.HostInfo):
def __init__(self, **kwargs):
super(NumaHostInfo, self).__init__(**kwargs)
self.numa_mempages_list = []
def get_numa_topology(self):
if self.numa_topology:
return self.numa_topology
topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets,
self.cpu_cores, self.cpu_threads,
self.kB_mem)
self.numa_topology = topology
# update number of active cpus
cpu_count = len(topology.cells) * len(topology.cells[0].cpus)
self.cpus = cpu_count - len(self.disabled_cpus_list)
return topology
    def set_custom_numa_topology(self, topology):
self.numa_topology = topology
class NUMAServersTest(ServersTestBase):
def setUp(self):
super(NUMAServersTest, self).setUp()
# Replace libvirt with fakelibvirt
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
def _setup_compute_service(self):
pass
def _setup_scheduler_service(self):
self.flags(compute_driver='libvirt.LibvirtDriver')
self.flags(scheduler_driver='filter_scheduler')
self.flags(scheduler_default_filters=CONF.scheduler_default_filters
+ ['NUMATopologyFilter'])
return self.start_service('scheduler')
def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
self.compute = self.start_service('compute', host='test_compute0')
fake_network.set_stub_network_methods(self)
# Create server
good_server = self._build_server(flavor_id)
post = {'server': good_server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Validate that the server has been created
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
# Validate that NUMATopologyFilter has been called
self.assertTrue(filter_mock.called)
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual(end_status, found_server['status'])
self._delete_server(created_server_id)
def _get_topology_filter_spy(self):
host_manager = self.scheduler.manager.driver.host_manager
numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
return host_pass_mock
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_numa_topology(self, img_mock):
host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
cpu_threads=2, kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
hv_version=2001000,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with test.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_numa_fails(self, img_mock):
host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with test.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock, end_status='ERROR')
|
[
"wangfeng@nfs.iscas.ac.cn"
] |
wangfeng@nfs.iscas.ac.cn
|
c2aa76664a5c37545f20d40f25c06ab24d60b407
|
637e0a650a1bea456164bae71c2fb152a98f5db8
|
/pyntcloud/structures/octree.py
|
56ca5f00ca23ef21b2fdd734fd2d70676a8b7807
|
[
"Unlicense"
] |
permissive
|
mzkaramat/pyntcloud
|
eaebfeea88573a1b27dc4df943c6a54dc796dc1b
|
6e663045495180581ddc77d604901e408c0a0247
|
refs/heads/master
| 2020-03-07T17:17:51.436067
| 2018-03-29T11:30:36
| 2018-03-29T11:30:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
# HAKUNA MATATA
"""
Octree Class
"""
import numpy as np
import pandas as pd
class Octree(object):
def __init__(self, points, max_level=2):
self.points = points
self.max_level = max_level
self.structure = pd.DataFrame(
np.zeros((self.points.shape[0], self.max_level), dtype=np.uint8))
xyzmin = points.min(0)
xyzmax = points.max(0)
        #: adjust to obtain a minimum bounding box with all sides of equal length
diff = max(xyzmax - xyzmin) - (xyzmax - xyzmin)
xyzmin = xyzmin - diff / 2
xyzmax = xyzmax + diff / 2
self.xyzmin = xyzmin
self.xyzmax = xyzmax
self.id = "O({})".format(max_level)
self.build()
def build(self):
self.sizes = np.zeros(self.max_level)
level_ptp = max(self.xyzmax - self.xyzmin) / 2
mid_points = np.zeros_like(self.points)
mid_points[:] = (self.xyzmin + self.xyzmax) / 2
for i in range(self.max_level):
self.sizes[i] = level_ptp
level_ptp /= 2
bigger = self.points > mid_points
if i != self.max_level - 1:
mid_points = np.where(
bigger, mid_points + level_ptp, mid_points - level_ptp)
bigger = bigger.astype(np.uint8)
self.structure.loc[:, i] = (
(bigger[:, 1] * 2) + bigger[:, 0]) + (bigger[:, 2] * (2 * 2))
def get_centroids(self, level):
st = self.structure.loc[:, range(level)]
for n, i in enumerate(["x", "y", "z"]):
st[i] = self.points[:, n]
return st.groupby([x for x in range(level)], sort=False).mean().values
def get_level_as_sf(self, level):
sf = np.zeros((self.points.shape[0], level), dtype=str)
for k, v in self.structure.groupby([x for x in range(level)]).indices.items():
sf[v] = k
return [int("".join(sf[i])) for i in range(len(sf))]
def eigen_decomposition(self, level):
st = self.structure.loc[:, range(level)]
for n, i in enumerate(["x", "y", "z"]):
st[i] = self.points[:, n]
e_out = np.zeros((st.shape[0], 3))
ev1_out = np.zeros((st.shape[0], 3))
ev2_out = np.zeros((st.shape[0], 3))
ev3_out = np.zeros((st.shape[0], 3))
this_level = st.groupby([x for x in range(level)], sort=False)
# to use when groups in current level have less than 3 points
prev_level = st.groupby([x for x in range(level - 1)], sort=False)
min_level = prev_level
min_i = 1
# find the minimum level where there is no group with less than 3
while min_level.size().min() < 3:
min_i += 1
min_level = st.groupby([x for x in range(level - min_i)])
for n, g in this_level:
if g.shape[0] < 3:
g = prev_level.get_group(n[:-1])
if g.shape[0] < 3:
g = min_level.get_group(n[:-min_i])
eig_val, eig_vec = np.linalg.eig(np.cov(g.values[:, level:].T))
idx = eig_val.argsort()[::-1]
eig_val = eig_val[idx]
eig_vec = eig_vec[:, idx]
e_out[g.index.values] = eig_val
ev1_out[g.index.values] = eig_vec[:, 0]
ev2_out[g.index.values] = eig_vec[:, 1]
ev3_out[g.index.values] = eig_vec[:, 2]
return e_out[:, 0], e_out[:, 1], e_out[:, 2], ev1_out, ev2_out, ev3_out
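# Illustrative usage sketch (random points; the level values are arbitrary):
#
#   points = np.random.rand(1000, 3)
#   octree = Octree(points, max_level=3)
#   centroids = octree.get_centroids(level=2)
#   e1, e2, e3, ev1, ev2, ev3 = octree.eigen_decomposition(level=2)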
|
[
"daviddelaiglesiacastro@gmail.com"
] |
daviddelaiglesiacastro@gmail.com
|
5aec005f547d8990c87b6d7e0957eaf437f08732
|
ad798335dbc724845475b43249801af20b6c40f1
|
/hash.py
|
45ee95923820b86e9149d9ae2ab0e3ed2c7eb44e
|
[
"MIT"
] |
permissive
|
zconnect-iot/ibm-iot-emulator
|
7e8c7db72e11fdf0fc79600227a3e63ec12eeebf
|
89b7c923b5e737df7dc9c508172f8f927a075668
|
refs/heads/master
| 2020-03-22T07:35:17.109194
| 2018-07-05T15:13:11
| 2018-07-05T15:13:11
| 139,709,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
import bcrypt
password = b"-ankVuPqceD(LBd0Zc"
hashed = b"$2a$04$BOYcgGknfgS2yYAxtnXfEu6btv4bG8A1lE4UteDP7dU80TXW.Jmsa"
print(bcrypt.hashpw(password, bcrypt.gensalt(prefix=b"2a")))
print(bcrypt.checkpw(password, hashed))
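# Note: the stored hash encodes its cost factor in the prefix ($2a$04$ means
# 2**4 rounds, chosen here for a fast demo). A hypothetical production setting
# would use a higher cost:
#   bcrypt.hashpw(password, bcrypt.gensalt(rounds=12, prefix=b"2a"))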
|
[
"boulton@zoetrope.io"
] |
boulton@zoetrope.io
|
90fc94c313e3d1383748e2f33c4e7ebaf0982728
|
ea5762e8754d6b039963b0125822afb261844cc8
|
/docs/_examples/mesh-parameterisation.py
|
59980c4ede312f3cd6d7cb5a8e31e278431115a8
|
[
"MIT"
] |
permissive
|
gonzalocasas/compas
|
787977a4712fbfb9e230c4f433b6e2be509e4855
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
refs/heads/master
| 2020-03-23T20:17:55.126856
| 2018-07-24T22:30:08
| 2018-07-24T22:30:08
| 142,033,431
| 0
| 0
|
MIT
| 2018-07-31T14:54:52
| 2018-07-23T15:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
"""Parameterisation of a triangle mesh.
For more info see:
- http://www.ctralie.com/Teaching/LapMesh/
"""
from __future__ import print_function
import compas
from numpy import zeros
from scipy.sparse import coo_matrix
from scipy.sparse import block_diag
from scipy.sparse.linalg import spsolve
from compas.datastructures import Mesh
from compas.plotters import MeshPlotter
__author__ = ['Tom Van Mele', ]
__copyright__ = 'Copyright 2016 - Block Research Group, ETH Zurich'
__license__ = 'MIT'
__email__ = 'van.mele@arch.ethz.ch'
# make a *stanford bunny* mesh
mesh = Mesh.from_ply(compas.get_bunny())
mesh.cull_vertices()
# get any vertex of the mesh
# and its neighbours
v1 = mesh.get_any_vertex()
nbrs = mesh.vertex_neighbours(v1, ordered=True)
# make a quad containing:
# one of the neighbours
# and the CCW and CW neighbours of that neighbour, respectively
# and set them as anchors
v2 = nbrs[0]
v3 = nbrs[1]
v4 = nbrs[-1]
anchors = [v1, v2, v3, v4]
# make a laplacian matrix of the mesh
# with inplace constraints on the anchored vertices
data = []
rows = []
cols = []
key_index = mesh.key_index()
for key in mesh.vertices():
r = key_index[key]
data.append(1)
rows.append(r)
cols.append(r)
if key not in anchors:
nbrs = mesh.vertex_neighbours(key)
w = len(nbrs)
d = - 1. / w
for nbr in nbrs:
c = key_index[nbr]
data.append(d)
rows.append(r)
cols.append(c)
L = coo_matrix((data, (rows, cols)))
# construct the RHS of the equation
# with all difference vectors set to zero
# and the ones corresponding to the anchored vertices
# set to the corresponding position on a unit square
n = mesh.number_of_vertices()
d = zeros((n, 2), dtype=float)
d[key_index[v1], 0] = 1.0
d[key_index[v2], 1] = 1.0
d[key_index[v3], 0] = 1.0
d[key_index[v3], 1] = 1.0
# convert everything to a format
# that can be solved with the sparse solver of scipy
# and solve for the parameterised xy coordinates
L = block_diag((L, L)).tocsr()
d = d.reshape((-1, 1), order='F')
x = spsolve(L, d.ravel())
# convert the result back
xy = x.reshape((-1, 2), order='F')
# update the mesh
for key, attr in mesh.vertices(True):
index = key_index[key]
attr['x'] = xy[index, 0]
attr['y'] = xy[index, 1]
# lines for visualisation
# omit the diagonal of the *hole*
lines = []
for u, v in mesh.wireframe():
if u == v1 and v == v2:
continue
if u == v2 and v == v1:
continue
lines.append({
'start': mesh.vertex_coordinates(u, 'xy'),
'end' : mesh.vertex_coordinates(v, 'xy'),
'color': '#000000',
'width': 0.5
})
# visualise the result
plotter = MeshPlotter(mesh, figsize=(10, 6))
plotter.draw_lines(lines)
plotter.show()
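# Quick sanity check (optional): after solving, the four anchors should sit
# on the corners of the unit square.
#   for key in anchors:
#       print(key, mesh.vertex_coordinates(key, 'xy'))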
|
[
"vanmelet@ethz.ch"
] |
vanmelet@ethz.ch
|
da321f4939af9c4dab146e4bbb4bd976366d1e45
|
161ab63e46114a8359c60dfa77820a7abd181e80
|
/hproxy/spider/base_spider/__init__.py
|
3a06269c73e2fef0a0314e3ee15dff572110c5fb
|
[
"MIT"
] |
permissive
|
yejianxin2015/hproxy
|
27be1a7311bba7fc5f2c02d45658c5c57c507c76
|
f40266bf7b06368d3ebfdce8d60385bcd4b93713
|
refs/heads/master
| 2020-03-15T09:03:38.752884
| 2018-05-11T06:51:45
| 2018-05-11T06:51:45
| 132,065,983
| 0
| 0
|
MIT
| 2018-05-11T06:48:52
| 2018-05-04T00:53:03
|
Python
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
#!/usr/bin/env python
"""
Created by howie.hu at 06/04/2018.
"""
from .field import AttrField, BaseField, TextField
from .item import Item
from .proxy_spider import ProxySpider
|
[
"xiaozizayang@gmail.com"
] |
xiaozizayang@gmail.com
|
d061f290f794c90b951bbf0dd48c7e1e8356db05
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/10375900/snippet.py
|
26438e7b72a4b03ea1b6bf2b8a49d6ac065dfc0f
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,007
|
py
|
import boto
import json
import time
import sys
import getopt
import argparse
import os
import logging
import StringIO
import uuid
import math
import httplib
from boto.sqs.message import RawMessage
from boto.sqs.message import Message
from boto.s3.key import Key
##########################################################
# Connect to SQS and poll for messages
##########################################################
def main(argv=None):
# Handle command-line arguments for AWS credentials and resource names
parser = argparse.ArgumentParser(description='Process AWS resources and credentials.')
parser.add_argument('--input-queue', action='store', dest='input_queue', required=False, default="input", help='SQS queue from which input jobs are retrieved')
parser.add_argument('--output-queue', action='store', dest='output_queue', required=False, default="output", help='SQS queue to which job results are placed')
parser.add_argument('--s3-output-bucket', action='store', dest='s3_output_bucket', required=False, default="", help='S3 bucket where list of instances will be stored')
parser.add_argument('--region', action='store', dest='region', required=False, default="", help='Region that the SQS queus are in')
args = parser.parse_args()
# Get region
region_name = args.region
# If no region supplied, extract it from meta-data
if region_name == '':
conn = httplib.HTTPConnection("169.254.169.254", 80)
conn.request("GET", "/latest/meta-data/placement/availability-zone/")
response = conn.getresponse()
region_name = response.read()[:-1]
info_message('Using Region %s' % (region_name))
# Set queue names
input_queue_name = args.input_queue
output_queue_name = args.output_queue
# Get S3 endpoint
s3_endpoint = [region.endpoint for region in boto.s3.regions() if region.name == region_name][0]
# Get S3 bucket, create if none supplied
s3_output_bucket = args.s3_output_bucket
if s3_output_bucket == "":
s3_output_bucket = create_s3_output_bucket(s3_output_bucket, s3_endpoint, region_name)
info_message('Retrieving jobs from queue %s. Processed images will be stored in %s and a message placed in queue %s' % (input_queue_name, s3_output_bucket, output_queue_name))
try:
# Connect to SQS and open queue
sqs = boto.sqs.connect_to_region(region_name)
except Exception as ex:
error_message("Encountered an error setting SQS region. Please confirm you have queues in %s." % (region_name))
sys.exit(1)
try:
input_queue = sqs.get_queue(input_queue_name)
input_queue.set_message_class(RawMessage)
except Exception as ex:
error_message("Encountered an error connecting to SQS queue %s. Confirm that your input queue exists." % (input_queue_name))
sys.exit(2)
try:
output_queue = sqs.get_queue(output_queue_name)
output_queue.set_message_class(RawMessage)
except Exception as ex:
error_message("Encountered an error connecting to SQS queue %s. Confirm that your output queue exists." % (output_queue_name))
sys.exit(3)
info_message("Polling input queue...")
while True:
# Get messages
rs = input_queue.get_messages(num_messages=1)
if len(rs) > 0:
# Iterate each message
for raw_message in rs:
info_message("Message received...")
# Parse JSON message (going two levels deep to get the embedded message)
message = raw_message.get_body()
# Create a unique job id
job_id = str(uuid.uuid4())
# Process the image, creating the image montage
output_url = process_message(message, s3_output_bucket, s3_endpoint, job_id)
# Sleep for a while to simulate a heavy workload
# (Otherwise the queue empties too fast!)
time.sleep(15)
output_message = "Output available at: %s" % (output_url)
# Write message to output queue
write_output_message(output_message, output_queue)
info_message(output_message)
info_message("Image processing completed.")
# Delete message from the queue
input_queue.delete_message(raw_message)
time.sleep(5)
##############################################################################
# Process a newline-delimited list of URLs
##############################################################################
def process_message(message, s3_output_bucket, s3_endpoint, job_id):
try:
output_dir = "/home/ec2-user/jobs/%s/" % (job_id)
# Download images from URLs specified in message
for line in message.splitlines():
info_message("Downloading image from %s" % line)
os.system("wget -P %s %s" % (output_dir, line))
output_image_name = "output-%s.jpg" % (job_id)
output_image_path = output_dir + output_image_name
# Invoke ImageMagick to create a montage
os.system("montage -size 400x400 null: %s*.* null: -thumbnail 400x400 -bordercolor white -background black +polaroid -resize 80%% -gravity center -background black -geometry -10+2 -tile x1 %s" % (output_dir, output_image_path))
# Write the resulting image to s3
output_url = write_image_to_s3(output_image_path, output_image_name, s3_output_bucket, s3_endpoint)
# Return the output url
return output_url
    except Exception:
error_message("An error occurred. Please show this to your class instructor.")
error_message(sys.exc_info()[0])
##############################################################################
# Write the result of a job to the output queue
##############################################################################
def write_output_message(message, output_queue):
m = RawMessage()
m.set_body(message)
status = output_queue.write(m)
##############################################################################
# Write an image to S3
##############################################################################
def write_image_to_s3(path, file_name, s3_output_bucket, s3_endpoint):
# Connect to S3 and get the output bucket
s3 = boto.connect_s3(host=s3_endpoint)
output_bucket = s3.get_bucket(s3_output_bucket)
# Create a key to store the instances_json text
k = Key(output_bucket)
k.key = "out/" + file_name
k.set_metadata("Content-Type", "image/jpeg")
k.set_contents_from_filename(path)
k.set_acl('public-read')
# Return a URL to the object
return "https://%s.s3.amazonaws.com/%s" % (s3_output_bucket, k.key)
##############################################################################
# Verify S3 bucket, create it if required
##############################################################################
def create_s3_output_bucket(s3_output_bucket, s3_endpoint, region_name):
# Connect to S3
s3 = boto.connect_s3(host=s3_endpoint)
# Find any existing buckets starting with 'image-bucket'
buckets = [bucket.name for bucket in s3.get_all_buckets() if bucket.name.startswith('image-bucket')]
if len(buckets) > 0:
return buckets[0]
# No buckets, so create one for them
name = 'image-bucket-' + str(uuid.uuid4())
s3.create_bucket(name, location=region_name)
return name
##############################################################################
# Use logging class to log simple info messages
##############################################################################
def info_message(message):
logger.info(message)
def error_message(message):
logger.error(message)
##############################################################################
# Generic string logging
##############################################################################
class Logger:
def __init__(self):
#self.stream = StringIO.StringIO()
#self.stream_handler = logging.StreamHandler(self.stream)
self.file_handler = logging.FileHandler('/home/ec2-user/image_processor.log')
self.log = logging.getLogger('image-processor')
self.log.setLevel(logging.INFO)
for handler in self.log.handlers:
self.log.removeHandler(handler)
self.log.addHandler(self.file_handler)
def info(self, message):
self.log.info(message)
def error(self, message):
self.log.error(message)
logger = Logger()
if __name__ == "__main__":
sys.exit(main())
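# Hypothetical smoke test for the pipeline, run from a separate shell against
# the same default queue names (the region and image URL are placeholders):
#
#   import boto.sqs
#   from boto.sqs.message import RawMessage
#   sqs = boto.sqs.connect_to_region('us-east-1')
#   q = sqs.get_queue('input')
#   m = RawMessage()
#   m.set_body('http://example.com/cat.jpg')
#   q.write(m)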
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
7665734ba108bbe3b98f2a09d77e4acbe740a77f
|
a08f9192cef4c48378e2c691353343112b317d71
|
/hatchet/readers/json_reader.py
|
407536bae020b48822d840cb2c5d9e0915ebd7fa
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later"
] |
permissive
|
LLNL/hatchet
|
a7a33523f7aa60dfe38739e2362666a50af7adc0
|
5d0efca4ea9cca03497d0b89b6ffada37242d579
|
refs/heads/develop
| 2023-08-30T22:29:30.456656
| 2023-08-17T16:05:46
| 2023-08-17T16:05:46
| 454,508,482
| 19
| 13
|
MIT
| 2023-09-09T00:13:13
| 2022-02-01T18:43:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
# Copyright 2017-2023 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import json
import pandas as pd
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.frame import Frame
class JsonReader:
"""Create a GraphFrame from a json string of the following format.
Return:
(GraphFrame): graphframe containing data from dictionaries
"""
def __init__(self, json_spec):
"""Read from a json string specification of a graphframe
json (string): Json specification of a graphframe.
"""
self.spec_dict = json.loads(json_spec)
def read(self):
roots = []
for graph_spec in self.spec_dict["graph"]:
# turn frames into nodes
for nid, value in graph_spec.items():
graph_spec[nid]["data"] = Node(Frame(value["data"]), hnid=int(nid))
# connect nodes
for nid, value in graph_spec.items():
for child in value["children"]:
child = str(child)
value["data"].add_child(graph_spec[child]["data"])
graph_spec[child]["data"].add_parent(value["data"])
for nid, value in graph_spec.items():
if len(value["data"].parents) == 0:
roots.append(value["data"])
grph = Graph(roots)
# make the dataframes
dataframe = pd.DataFrame(self.spec_dict["dataframe"])
for graph_spec in self.spec_dict["graph"]:
dataframe["node"] = dataframe["node"].map(
lambda n: graph_spec[str(n)]["data"] if (str(n) in graph_spec) else n
)
dataframe.set_index(self.spec_dict["dataframe_indices"], inplace=True)
return hatchet.graphframe.GraphFrame(
grph,
dataframe,
self.spec_dict["exclusive_metrics"],
self.spec_dict["inclusive_metrics"],
)
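# Illustrative minimal spec (field names inferred from read() above; the node
# ids, metric names and values are made up):
#
#   {
#     "graph": [{"0": {"data": {"name": "main"}, "children": [1]},
#                "1": {"data": {"name": "foo"}, "children": []}}],
#     "dataframe": [{"node": 0, "time": 1.0}, {"node": 1, "time": 0.5}],
#     "dataframe_indices": ["node"],
#     "exclusive_metrics": ["time"],
#     "inclusive_metrics": []
#   }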
|
[
"noreply@github.com"
] |
LLNL.noreply@github.com
|
91d7abb0e40dc1bf0cbb74d3f9ed197e1e70bced
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/342/107362/submittedfiles/principal.py
|
0f40d38c7c74dadb18defa87d6b7e0f5f8d063ca
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import random
nome = input('Enter the user name: ')
def solicitaSimboloDoHumano():
    simb = input('Choose your symbol: ')
    while simb not in ('X', 'O'):
        simb = input('Choose a valid symbol: ')
    return simb
solicitaSimboloDoHumano()
print(random.choice([nome, 'computer starts']))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
eaea2f61d183d4210777892f777ed5239ea073da
|
77d52805fa67c36e13c624f853de027bf70a17e6
|
/notSoRand.py
|
08fab414bfe7d71d7bddb38c08065faac43e5503
|
[] |
no_license
|
BeautyScraper/pythonUtilities
|
f13d7a2732b754c5b2ab9ae2fbbc17cad04cc6ce
|
a9fe1b63249ccf0749d70c8bd40696915cd0841b
|
refs/heads/master
| 2020-03-18T16:41:37.566492
| 2019-04-17T03:02:58
| 2019-04-17T03:02:58
| 134,980,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
import random
import re
import os
def randomLine(fileName="test.txt"):
try:
print("opening " + fileName)
with open("files\\" + fileName,"r") as inF:
selectedLine = random.choice(inF.readlines())
print("Selected Lines is " + selectedLine)
while(re.search("\[(.*?)\]",selectedLine)):
replaceMentStr = randomLine(re.search("\[(.*?)\]",selectedLine)[1] + ".txt")
selectedLine = re.sub("(\[.*?\])",replaceMentStr,selectedLine,1)
    except (FileNotFoundError, IndexError):
print("Setting default Line")
if len(fileName.split(" ")) == 1:
(open("files\\" + fileName,"w")).close()
selectedLine = fileName.split(".")[0]
print("Returning " + selectedLine)
return selectedLine.rstrip('\n')
os.system("md files")
line = randomLine("Static.txt")
with open("result.txt","w") as file:
file.write(line)
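# Example template layout (hypothetical file contents, showing the [token]
# expansion handled by the regex above):
#   files/Static.txt : "Hello [name]"
#   files/name.txt   : "world"
# randomLine("Static.txt") would then return "Hello world".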
|
[
"you@example.com"
] |
you@example.com
|
79104fb27df6da2bc9c3650b5f36fe3f58342f99
|
0524471f0deec846a50a3dfb9a039495623a79fd
|
/manajemen_kontrak/migrations/0050_auto_20210504_0828.py
|
75fcc18b53f70e9c563c8efd530fd9ae81989ff7
|
[] |
no_license
|
riswanto84/SiLPBJ-Project
|
0e97f89d2ea5f1ac4e631e9f0457aa5864a6e8e9
|
7e052f5a4847a07fdd542ae6550e303d6627d1ca
|
refs/heads/master
| 2023-04-24T23:35:41.984864
| 2021-05-08T08:15:28
| 2021-05-08T08:15:28
| 363,024,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# Generated by Django 3.1.1 on 2021-05-04 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manajemen_kontrak', '0049_auto_20210504_0814'),
]
operations = [
migrations.AddField(
model_name='barang',
name='spesifikasi_dan_gambar',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='tandaterimadistribusi',
name='nomor_tanda_terima',
field=models.CharField(default='7670/HoRJ4Ojo', max_length=100),
),
]
|
[
"riswanto.aris@gmail.com"
] |
riswanto.aris@gmail.com
|
9485737dbc564ef7885a6ba0a9e51092a0c524ec
|
ab9cfa8aa28749ebd18c4fa4c8712c2198e72501
|
/从上到下打印二叉树.py
|
46ea53e5fa68a2a32a3177d71697ace7839c0de8
|
[] |
no_license
|
joseph-mutu/JianZhiOfferCodePics
|
d71e780483909390b436f81989000a277daac11d
|
8d41326cb2b9bc1379682fa6364a68c0ce62dbee
|
refs/heads/master
| 2020-08-03T14:39:59.666806
| 2019-09-30T06:17:36
| 2019-09-30T06:17:36
| 211,788,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-08-23 16:41:17
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def PrintFromTopToBottom(self, root):
if root is None:
return []
nodes = []
nodes.append(root)
nodeCount = 0
while nodeCount < len(nodes):
if nodes[nodeCount].left is not None:
nodes.append(nodes[nodeCount].left)
if nodes[nodeCount].right is not None:
nodes.append(nodes[nodeCount].right)
nodes[nodeCount] = nodes[nodeCount].val
nodeCount += 1
return nodes
a = TreeNode(2)
a.left = TreeNode(3)
a.left.left = TreeNode(7)
a.left.left.left = TreeNode(9)
# a.right = TreeNode(7)
# a.left.left = TreeNode(4)
# a.right.left = TreeNode(5)
# a.right.right = TreeNode(9)
s = Solution()
print(s.PrintFromTopToBottom(a))
print()
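# For the tree built above (a left chain 2 -> 3 -> 7 -> 9), the level-order
# traversal prints [2, 3, 7, 9].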
|
[
"josephmathone@gmail.com"
] |
josephmathone@gmail.com
|
b95a492675647575a6d42baff7748f1c458dab89
|
043a17d196250048a5a34e990a19d8622436f9ce
|
/Redintek/07_return_values/redintek.py
|
a116a216bd42d5a87cd88ae1dfb2fafee1e23cb7
|
[] |
no_license
|
chimtrangbu/hyperspace
|
8df8cb9c5475b70b218d0a56034c7f520815fa0d
|
ec49324c705e9af61c3857cf2dea2a551bda5537
|
refs/heads/master
| 2020-03-26T07:18:34.249976
| 2018-12-20T05:16:55
| 2018-12-20T05:16:55
| 144,647,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
r = {}
def put(key, value):
global r
r[key] = value
return value
def get(key):
return r[key] if key in r.keys() else None
def exists(key):
return (key in r.keys())
def delete(key):
global r
try:
del r[key]
return True
except KeyError:
return False
def incr(key):
global r
return incrby(key, 1)
def incrby(key, delta):
global r
if not exists(key):
put(key, 0)
elif not isinstance(r[key], (int, float)):
raise ValueError('Incorrect value')
r[key] += delta
return r[key]
def sadd(key, value):
global r
if not exists(key):
r[key] = set([value])
elif not isinstance(r[key], set):
r[key] = set([value])
else:
r[key].add(value)
return value
def smembers(key):
return r[key] if (exists(key) and isinstance(r[key], set)) else None
def sunion(key1, key2):
set1 = smembers(key1) if smembers(key1) is not None else set()
set2 = smembers(key2)
return set1.union(set2) if (set2 is not None) else set1
def sinter(key1, key2):
set1 = smembers(key1) if smembers(key1) is not None else set()
set2 = smembers(key2) if smembers(key2) is not None else set()
    return set1 & set2
def srem(key, value):
global r
if smembers(key) is not None and value in smembers(key):
smembers(key).remove(value)
return True
return False
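# Illustrative round-trip (function semantics mirror the Redis commands of
# the same name):
#   put('visits', 0); incr('visits'); incrby('visits', 9)   # counter is now 10
#   sadd('tags', 'a'); sadd('tags', 'b')
#   smembers('tags')            # -> {'a', 'b'}
#   sunion('tags', 'missing')   # -> {'a', 'b'}
#   srem('tags', 'a')           # -> True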
|
[
"you@example.com"
] |
you@example.com
|
5ec6e963b60657efb5c7f58282747bd3c3b3bbcf
|
86df6f8f4f3c03cccc96459ad82bcdf3bf942492
|
/lintcode/find-the-connected-component-in-the-undirected-graph.py
|
1d83a7de926c28153c4e30dca9008b29f8b6e8b8
|
[] |
no_license
|
bdliyq/algorithm
|
369d1fd2ae3925a559ebae3fa8f5deab233daab1
|
e1c993a5d1531e1fb10cd3c8d686f533c9a5cbc8
|
refs/heads/master
| 2016-08-11T21:49:31.259393
| 2016-04-05T11:10:30
| 2016-04-05T11:10:30
| 44,576,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# Question: http://www.lintcode.com/en/problem/find-the-connected-component-in-the-undirected-graph/
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    # @param {UndirectedGraphNode[]} nodes an array of undirected graph nodes
    # @return {int[][]} the connected components of an undirected graph
def connectedSet(self, nodes):
# Write your code here
if len(nodes) == 0:
return [[]]
visited = set()
stack = []
result = []
for node in nodes:
stack.append(node)
path = []
while stack:
the_node = stack.pop()
if the_node in visited:
continue
path.append(the_node.label)
visited.add(the_node)
for neighbor in the_node.neighbors:
stack.append(neighbor)
if path:
result.append(sorted(path))
return result
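# Hypothetical check (UndirectedGraphNode as in the stub above):
#   a, b, c = UndirectedGraphNode(1), UndirectedGraphNode(2), UndirectedGraphNode(3)
#   a.neighbors.append(b); b.neighbors.append(a)
#   Solution().connectedSet([a, b, c])   # -> [[1, 2], [3]]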
|
[
"liyongqiang01@baidu.com"
] |
liyongqiang01@baidu.com
|
32b67fab7e56846fb6300e78ec34af1bdd32c6a3
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/YLf984Eod74ha4Tok_19.py
|
a1794374010be900955aa9ae6a5c69cf7e837041
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
"""
A calendar year is approximately 365.25 days long. Counting whole days is far
more practical than carrying a decimal fraction, so the four 0.25-day
remainders are collected once per four-year cycle: that year gets 366 days
(including February 29 as an intercalary day) and is called a **leap year**,
while the other years of the cycle keep 365 days and are **not leap years**.
In this challenge (though quite repetitive), we'll take it to a new level:
determine whether it's a leap year without using the **datetime** class,
**if blocks**, **if-elif blocks**, **conditionals** (`a if b else c`), or the
logical operators **AND** (`and`) and **OR** (`or`), with the exception of the
**NOT** (`not`) operator.
Return `True` if it's a leap year, `False` otherwise.
### Examples
leap_year(1979) ➞ False
leap_year(2000) ➞ True
leap_year(2016) ➞ True
leap_year(1521) ➞ False
leap_year(1996) ➞ True
leap_year(1800) ➞ False
### Notes
You can't use the **datetime** class, **if statements** in general, the
**conditional** nor the **logical operators** (`and`, `or`).
"""
def leap_year(yr):
return ((not yr%4) + (not yr%100) + (not yr%400))%2
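# Worked check: for 1800, (not 1800%4, not 1800%100, not 1800%400) gives
# (1, 1, 0), and (1 + 1 + 0) % 2 = 0 -> False. For 2000 all three terms are 1,
# so 3 % 2 = 1 -> True.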
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
46f861b5d2e6b2395aeb66db0a5a19d451da893f
|
b7a3d0ac1c3c46743adfbfd2da6b7b6b22d3910b
|
/backend/pakearn_3676/wsgi.py
|
6f4211e43bd31b4790cb02e4d689ddd3c2185850
|
[] |
no_license
|
crowdbotics-apps/pakearn-3676
|
976a1b24e3a47ed42526ae6f99b5cda248e88d04
|
7c2aa72a2091604be81c4b82931dd494137b43f2
|
refs/heads/master
| 2020-05-25T20:07:02.159662
| 2019-05-22T05:12:19
| 2019-05-22T05:12:19
| 187,967,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
"""
WSGI config for pakearn_3676 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pakearn_3676.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
338342748a69234b4d64912a2a2f6e1632b917b1
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4375/codes/1670_2966.py
|
3ece3928b8754173db28d496c3a374b1107e4dee
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
promocao=input("S ou N: ")
ingresso=float(input("valor do ingresso: "))
qntd=float(input("qntd de ingressos: "))
total=ingresso*qntd
promocao=promocao.upper()
if promocao=="S":
desconto=total-total*0.2
print(round(desconto,2))
else:
print(round(total,2))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
ffd9e8e9af3dbad639d8bf389ab7b9590881963d
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/storage/azure-mgmt-storage/generated_samples/storage_account_enable_cmk.py
|
a14ee86badfe4c98c526af848a574e0f339ba9d0
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-storage
# USAGE
python storage_account_enable_cmk.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = StorageManagementClient(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.storage_accounts.update(
resource_group_name="res9407",
account_name="sto8596",
parameters={
"properties": {
"encryption": {
"keySource": "Microsoft.Keyvault",
"keyvaultproperties": {
"keyname": "wrappingKey",
"keyvaulturi": "https://myvault8569.vault.azure.net",
"keyversion": "",
},
"services": {
"blob": {"enabled": True, "keyType": "Account"},
"file": {"enabled": True, "keyType": "Account"},
},
}
}
},
)
print(response)
# x-ms-original-file: specification/storage/resource-manager/Microsoft.Storage/stable/2022-09-01/examples/StorageAccountEnableCMK.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
b2acc2ec4ad57a84ba11c806eb4b36ae1fc06ad8
|
4a73648ecd3951b802e89e83a3bd9ef5b063af3d
|
/python_part/Leetcode/Sort/215. Kth Largest Element in an Array(快排)/Quick Select.py
|
f10356100760825b120026fe8c08605d817cf4d8
|
[] |
no_license
|
Allen-C-Guan/Leetcode-Answer
|
f5f9ee1348b86da914a564b7d23bf8904d5aa27f
|
f6e1374ef567590fee15ba6d1d6d65891233b5e1
|
refs/heads/master
| 2023-08-17T18:18:00.581743
| 2021-10-10T15:24:07
| 2021-10-10T15:24:07
| 257,017,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
'''
Quick select, which is also the basis of quicksort.
Implemented recursively here.
'''
from typing import List
class Solution:
def __init__(self):
self.res = None
def findKthLargest(self, nums: List[int], k: int) -> int:
        # Because partition works directly on [lo, hi], there is no need to
        # compute k's position relative to the subarray.
        # Inside partition it would be faster to pick the pivot at random.
        # s is the slow pointer; fast keeps advancing, while slow only
        # advances after a swap.
'''
for fast in range(lo,hi):
if num[fast] < pivot:
swap num[fast] num[slow]
slow += 1
slow -= 1
swap nums[lo] num[slow]
'''
def partition(nums: List[int], lo, hi):
            pivot, s = nums[lo], lo+1  # s always points just past the block of elements greater than pivot
for fast in range(lo+1, hi+1):
                if nums[fast] > pivot:  # using > partitions in descending order, so s stops on the smaller elements
nums[fast], nums[s] = nums[s], nums[fast]
s += 1
s -= 1
nums[lo], nums[s] = nums[s], nums[lo]
return s
        def quickSelect(nums: List[int], lo, hi, k):  # same logic as binary search: partition first, then recurse into one side
s = partition(nums,lo,hi)
if s == k-1:
self.res = nums[s]
else:
if s < k-1: quickSelect(nums,s+1,hi,k)
else:quickSelect(nums,lo,s-1,k)
quickSelect(nums,0,len(nums)-1,k)
return self.res
foo = Solution()
print(foo.findKthLargest([3,2,3,1,2,4,5,5,6],4))
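# Expected output: 4 (sorted descending: 6,5,5,4,3,3,2,2,1 -> the 4th is 4).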
|
[
"54201792+Allen-C-Guan@users.noreply.github.com"
] |
54201792+Allen-C-Guan@users.noreply.github.com
|
e28854dde030346d5b89484a8453525a3bf8b422
|
27b599eabf8f5e8088e30c0d2baa6682f1661be4
|
/tensorflow_probability/python/internal/auto_composite_tensor_test.py
|
3ecb660335c6694b3c66eb9b15fe72aa36b47c62
|
[
"Apache-2.0"
] |
permissive
|
adriang133/probability
|
b6ecf28f737c44f19df3a4893e6d1cf0351bc4a0
|
edfc4585f38017153fe7bf1a7287fcdd237912c4
|
refs/heads/master
| 2022-12-12T05:02:04.247859
| 2020-09-16T21:06:03
| 2020-09-16T21:07:27
| 296,163,707
| 0
| 0
|
Apache-2.0
| 2020-09-16T22:47:07
| 2020-09-16T22:47:06
| null |
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for auto_composite_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import auto_composite_tensor as auto_ct
from tensorflow_probability.python.internal import test_util
AutoIdentity = auto_ct.auto_composite_tensor(tf.linalg.LinearOperatorIdentity)
AutoDiag = auto_ct.auto_composite_tensor(tf.linalg.LinearOperatorDiag)
AutoBlockDiag = auto_ct.auto_composite_tensor(tf.linalg.LinearOperatorBlockDiag)
class AutoCompositeTensorTest(test_util.TestCase):
def test_example(self):
@auto_ct.auto_composite_tensor
class Adder(object):
def __init__(self, x, y):
self._x = tf.convert_to_tensor(x)
self._y = tf.convert_to_tensor(y)
def xpy(self):
return self._x + self._y
def body(obj):
return Adder(obj.xpy(), 1.),
result, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(Adder(1., 1.),),
maximum_iterations=3)
self.assertAllClose(5., result.xpy())
def test_function(self):
lop = AutoDiag(2. * tf.ones([3]))
self.assertAllClose(
6. * tf.ones([3]),
tf.function(lambda lop: lop.matvec(3. * tf.ones([3])))(lop))
def test_loop(self):
def body(lop):
return AutoDiag(lop.matvec(tf.ones([3]) * 2.)),
init_lop = AutoDiag(tf.ones([3]))
lop, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(init_lop,),
maximum_iterations=3)
self.assertAllClose(2.**3 * tf.ones([3]), lop.matvec(tf.ones([3])))
def test_nested(self):
lop = AutoBlockDiag([AutoDiag(tf.ones([2]) * 2), AutoIdentity(1)])
self.assertAllClose(
tf.constant([6., 6, 3]),
tf.function(lambda lop: lop.matvec(3. * tf.ones([3])))(lop))
if __name__ == '__main__':
tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
915f56d3c4a365c0cb016403d0ebe181278d0ca2
|
95bba054198a2709163ecb3cf2adbd9ed6913490
|
/fph/parseFile.py
|
c428bf7c6b2516e63ecd4227f4641c14a90690f4
|
[] |
no_license
|
jieter/fph-parser
|
e21549c788cf80f91ac9def168fd46e13e8ac847
|
2e1b57e2815cfcf023a6c1c68793a22fe178a533
|
refs/heads/master
| 2020-04-06T07:03:00.822824
| 2015-09-09T07:19:24
| 2015-09-09T07:19:24
| 14,070,587
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
# Fisher and Paykel CPAP .FPH file parser.
#
# Jan Pieter Waagmeester <jieter@jieter.nl>
#
# File format source:
# http://sourceforge.net/apps/mediawiki/sleepyhead/index.php?title=Icon
from FPHFile import FPHFile
from summary import SummaryFile
from detail import DetailFile
from flow import FlowFile
def parseFile(filename):
parts = filename.split('/')
prefix = parts[-1][0:3]
if (prefix == 'SUM'):
return SummaryFile(filename)
elif (prefix == 'DET'):
return DetailFile(filename)
elif (prefix == 'FLW'):
return FlowFile(filename)
else:
return FPHFile(filename)
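# Hypothetical call (the SUM/DET/FLW prefix convention follows the wiki page
# linked above):
#   summary = parseFile('data/SUM0001.FPH')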
|
[
"jieter@jieter.nl"
] |
jieter@jieter.nl
|