Dataset schema (column name, type, and observed range or number of distinct values):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
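A minimal sketch of how a dump with this schema could be consumed via the `datasets` library; the dataset path `org/code-dataset` is a hypothetical placeholder, since this dump does not name its source dataset:

```python
# Minimal sketch, assuming the `datasets` library is installed.
# "org/code-dataset" is a hypothetical placeholder path, not the real name.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Stream permissively licensed Python files and print basic metadata.
for record in ds:
    if record["license_type"] == "permissive":
        print(record["repo_name"], record["path"], record["length_bytes"])
```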
---

- blob_id: fbe8c840dba4c6a96ac98a3b1fa2ad50500dd26c | directory_id: 763841bf8447c5490ebc3bf74523fd5470944a80 | content_id: eb7db2f2490088671fcd6c907f89330646e2a20b
- path: /forms.py | repo_name: sumy7/little-bolg | branch_name: refs/heads/app_hxn_1
- snapshot_id: bad7e4ad2755cd1e77edb44a3b1a52780714059f | revision_id: 13d825ad4fb1ad6fc97f41259ff094664a4664bf
- detected_licenses: [] | license_type: no_license
- visit_date: 2021-01-10T23:21:22.062356 | revision_date: 2016-10-08T14:40:36 | committer_date: 2016-10-08T14:40:36
- github_id: 70,610,888 | star_events_count: 1 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: 2016-10-11T16:01:40 | gha_created_at: 2016-10-11T16:01:40 | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 780 | extension: py
- content:

```python
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, IntegerField
from wtforms.validators import DataRequired


class ArticleForm(FlaskForm):
    title = StringField('title', validators=[DataRequired()])
    content = TextField('content', validators=[DataRequired()])


class UserForm(FlaskForm):
    username = StringField('username', validators=[DataRequired()])
    userpass = StringField('password', validators=[DataRequired()])


class SignUpForm(FlaskForm):
    username = StringField('username', validators=[DataRequired()])
    userpass = StringField('password', validators=[DataRequired()])
    email = StringField('email', validators=[DataRequired()])


class ReplyForm(FlaskForm):
    content = TextField('content', validators=[DataRequired()])
```

- authors: ["root@localhost.localdomain"] | author_id: root@localhost.localdomain
---

- blob_id: 11d131e67824ef499030da4c17db0ece1ce20b21 | directory_id: 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | content_id: a1a29e05be6d20512499012515acee2bcd1e343a
- path: /python/python_21956.py | repo_name: AK-1121/code_extraction | branch_name: refs/heads/master
- snapshot_id: cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | revision_id: 5297a4a3aab3bb37efa24a89636935da04a1f8b6
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-05-23T08:04:11.789141 | revision_date: 2015-10-22T19:19:40 | committer_date: 2015-10-22T19:19:40
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 128 | extension: py
- content:

```python
# Why can't my setup.py-generated script find my also-installed module?
>>> from mkdocs.mkdocs import main_entry_point
```

- authors: ["ubuntu@ip-172-31-7-228.us-west-2.compute.internal"] | author_id: ubuntu@ip-172-31-7-228.us-west-2.compute.internal
---

- blob_id: d980b7457b1a48b0e8331322105ae6eda61bfad7 | directory_id: 55c250525bd7198ac905b1f2f86d16a44f73e03a | content_id: 2518ccf20431928bb5df6b6532f2ee53c8234e95
- path: /Python/Projects/twilio/twilio/rest/events/v1/subscription/__init__.py | repo_name: NateWeiler/Resources | branch_name: refs/heads/master
- snapshot_id: 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | revision_id: bd4a8a82a3e83a381c97d19e5df42cbababfc66c
- detected_licenses: ["LicenseRef-scancode-other-permissive"] | license_type: permissive
- visit_date: 2023-09-03T17:50:31.937137 | revision_date: 2023-08-28T23:50:57 | committer_date: 2023-08-28T23:50:57
- github_id: 267,368,545 | star_events_count: 2 | fork_events_count: 1
- gha_license_id: null | gha_event_created_at: 2022-09-08T15:20:18 | gha_created_at: 2020-05-27T16:18:17 | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 130 | extension: py
- content (a Git LFS pointer rather than actual Python source):

```text
version https://git-lfs.github.com/spec/v1
oid sha256:0255d8652c98e97ffafdedd4dd83a7f814b0062c7bb3a6ea44f2491fa9a04f72
size 15857
```

- authors: ["nateweiler84@gmail.com"] | author_id: nateweiler84@gmail.com
---

- blob_id: 77757d4c47da531054934f91c32859169ad5780d | directory_id: 29fa274ae2bf847df8d6f0b03bc28a78f52119aa | content_id: 9c3dff4f39ba88c1cb4422dbc1f2a7e49bd38744
- path: /dndsearch/web.py | repo_name: pranav/dndsearch | branch_name: refs/heads/master
- snapshot_id: a6b42d354ea0a3d59dc9dc47f6ec511a820399d7 | revision_id: 4f63c6b86fb2d53021b409f6ff6527ca8d286799
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-12-24T14:45:41.684076 | revision_date: 2014-12-21T07:31:19 | committer_date: 2014-12-21T07:31:19
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 516 | extension: py
- content:

```python
import os
import json

from flask import Flask, render_template

from sql import Page

app = Flask(__name__)


@app.route('/query/<query>', methods=['GET'])
def simple_query(query):
    return json.dumps([{'book': p[0], 'page': p[1]} for p in Page.search(query)])


@app.route('/')
def render_home():
    return render_template('home.html')

if __name__ == '__main__':
    app.debug = False
    try:
        PORT = int(os.getenv('PORT'))
    except (TypeError, ValueError):  # int(None) raises TypeError, not IndexError as originally caught
        PORT = 80
    app.run(host='0.0.0.0', port=PORT)
```

- authors: ["pgandhi@hubspot.com"] | author_id: pgandhi@hubspot.com
---

- blob_id: e5ba59f2b9321764767fe0a59be34fd36dd50f78 | directory_id: d6d0a751d2093b86f733648f9ba0af28e757662b | content_id: 7e710ed20c5f36054f8faf6c86b19401adc1399c
- path: /serializer/mydev/myapp/models.py | repo_name: nivyashri05/Django_REST_API | branch_name: refs/heads/master
- snapshot_id: 2d2460896d8f61eacfd873dd5d657a46a2f13eeb | revision_id: 884037e18d11d13921b15f652833f2da10581beb
- detected_licenses: [] | license_type: no_license
- visit_date: 2022-12-30T21:41:43.707944 | revision_date: 2020-10-26T11:33:19 | committer_date: 2020-10-26T11:33:19
- github_id: 307,348,306 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 235 | extension: py
- content:

```python
from django.db import models


class Collegetb(models.Model):
    deptid = models.IntegerField()
    deptname = models.CharField(max_length=256)
    depthod = models.CharField(max_length=256)
    location = models.CharField(max_length=256)
```

- authors: ["nivyainventateq@gmail.com"] | author_id: nivyainventateq@gmail.com
---

- blob_id: 6770180f243b6a6f2e41b662fcd4326032622ecb | directory_id: fde950cc136ac38f9bd7e3e3c4a2e469df6c320a | content_id: 4e655f9f3dbbac70f726f768cc6cda2fe954e119
- path: /tests/core/test_commands.py | repo_name: Ilgrim/cwmud | branch_name: refs/heads/master
- snapshot_id: 47180185f7462a1bb9fa3e338c167ffa87c87f63 | revision_id: bee8b126a5e70edd0593dae9753a6be8d52357cf
- detected_licenses: ["MIT"] | license_type: permissive
- visit_date: 2022-03-12T02:39:35.550565 | revision_date: 2019-10-19T21:51:56 | committer_date: 2019-10-19T21:51:56
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,758 | extension: py
- content:

```python
# -*- coding: utf-8 -*-
"""Test for command management and processing."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)

import pytest

from cwmud.core.commands import AlreadyExists, Command, CommandManager


class TestCommands:

    """A collection of tests for command management."""

    commands = None
    command_class = None
    command = None

    class _FakeSession:
        pass

    session = _FakeSession()

    def test_command_manager_create(self):
        """Test that we can create a command manager.

        This is currently redundant, importing the commands package already
        creates one, but we can keep it for symmetry and in case that
        isn't always so.

        """
        type(self).commands = CommandManager()
        assert self.commands

    def test_command_manager_get_name(self):
        """Test that we can figure out the name for an argument."""
        assert self.commands._get_name(Command) == "Command"
        assert self.commands._get_name("TestCommand") == "TestCommand"

    def test_command_manager_register(self):
        """Test that we can register new commands through a command manager."""
        @self.commands.register
        class TestCommand(Command):

            """A test command."""

            def __init__(self, session, args):
                super().__init__(session, args)
                self.called = False

            def _action(self):
                self.called = True

        type(self).command_class = TestCommand
        assert "TestCommand" in self.commands

    def test_command_manager_register_by_argument(self):
        """Test that we can register a new command by argument."""
        self.commands.register(command=Command)
        assert "Command" in self.commands

    def test_command_manager_register_not_command(self):
        """Test that trying to register a non-command fails."""
        with pytest.raises(TypeError):
            self.commands.register(command=object())

    def test_command_manager_register_already_exists(self):
        """Test that trying to register an existing command name fails."""
        with pytest.raises(AlreadyExists):
            self.commands.register(command=self.command_class)

    def test_command_manager_contains(self):
        """Test that we can see if a command manager contains a command."""
        assert "TestCommand" in self.commands
        assert Command in self.commands
        assert "some_nonexistent_command" not in self.commands
        assert CommandManager not in self.commands

    def test_command_manager_get_command(self):
        """Test that we can get a command from a command manager."""
        assert self.commands["TestCommand"] is self.command_class
        with pytest.raises(KeyError):
            self.commands["some_nonexistent_command"].process()

    def test_command_instance(self):
        """Test that we can create a command instance."""
        type(self).command = self.command_class(None, ())
        assert self.command

    def test_command_execute_no_session(self):
        """Test that a command instance without a session won't execute."""
        self.command.execute()
        assert not self.command.called

    def test_command_session_property(self):
        """Test that we can get and set the session property of a command."""
        assert self.command.session is None
        self.command.session = self.session
        assert self.command.session is self.session

    def test_command_execute(self):
        """Test that we can execute a command."""
        self.command.execute()
        assert self.command.called
```

- authors: ["will.hutcheson@gmail.com"] | author_id: will.hutcheson@gmail.com
---

- blob_id: 0f21947ee7c2b9045e2bafdf343d409245ab9b40 | directory_id: fa3e527114cd5799dddb0a25067da4923eae354e | content_id: 65596edf23a6fa591fe64774ab00af09397a15d7
- path: /DataPrepare/FastSim/GAN/BES/Dedx/makeDataSet.py | repo_name: wenxingfang/FastSim_ML | branch_name: refs/heads/master
- snapshot_id: e64c6b56ce2afd703d1ddda0ada2de6f65fde049 | revision_id: d2f1abbb2f6879313d5f4f137b64c4d8bf10fe83
- detected_licenses: [] | license_type: no_license
- visit_date: 2022-11-28T01:35:39.727895 | revision_date: 2020-08-03T15:47:37 | committer_date: 2020-08-03T15:47:37
- github_id: 284,734,310 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,755 | extension: py
- content:

```python
import ROOT as rt
import numpy as np
import h5py
import sys
import gc
import math
import argparse
rt.gROOT.SetBatch(rt.kTRUE)
from sklearn.utils import shuffle

#######################################
# use digi step data and use B field ##
# use cell ID for ECAL               ##
# add HCAL
# add HoE cut
#######################################


def get_parser():
    parser = argparse.ArgumentParser(
        description='root to hdf5',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--input', action='store', type=str,
                        help='input root file')
    parser.add_argument('--output', action='store', type=str,
                        help='output root file')
    parser.add_argument('--tag', action='store', type=str,
                        help='tag name for plots')
    parser.add_argument('--str_particle', action='store', type=str,
                        help='e^{-}')
    return parser


def plot_gr(gr, out_name, title):
    canvas = rt.TCanvas("%s" % (out_name), "", 800, 800)
    canvas.cd()
    canvas.SetTopMargin(0.13)
    canvas.SetBottomMargin(0.1)
    canvas.SetLeftMargin(0.13)
    canvas.SetRightMargin(0.15)
    if 'logy' in out_name:
        canvas.SetLogy()
    #gr.GetXaxis().SetTitle("#phi(AU, 0 #rightarrow 2#pi)")
    #gr.GetYaxis().SetTitle("Z(AU) (-19.5 #rightarrow 19.5 m)")
    #gr.SetTitle(title)
    #gr.Draw("pcol")
    gr.Draw("hist")
    canvas.SaveAs("%s/%s.png" % (plot_path, out_name))
    del canvas
    gc.collect()


def plot_hist(hist, out_name, title):
    canvas = rt.TCanvas("%s" % (out_name), "", 800, 800)
    canvas.cd()
    canvas.SetTopMargin(0.13)
    canvas.SetBottomMargin(0.1)
    canvas.SetLeftMargin(0.13)
    canvas.SetRightMargin(0.15)
    canvas.SetGridy()
    canvas.SetGridx()
    #h_corr.Draw("COLZ")
    #h_corr.LabelsDeflate("X")
    #h_corr.LabelsDeflate("Y")
    #h_corr.LabelsOption("v")
    hist.SetStats(rt.kFALSE)
    #hist.GetXaxis().SetTitle("#Delta Z (mm)")
    if 'x_z' in out_name:
        #hist.GetYaxis().SetTitle("X (mm)")
        hist.GetYaxis().SetTitle("cell X")
        hist.GetXaxis().SetTitle("cell Z")
    elif 'y_z' in out_name:
        #hist.GetYaxis().SetTitle("#Delta Y (mm)")
        hist.GetYaxis().SetTitle("cell Y")
        hist.GetXaxis().SetTitle("cell Z")
    elif 'z_r' in out_name:
        hist.GetYaxis().SetTitle("bin R")
        hist.GetXaxis().SetTitle("bin Z")
    elif 'z_phi' in out_name:
        hist.GetYaxis().SetTitle("bin #phi")
        hist.GetXaxis().SetTitle("bin Z")
    hist.SetTitle(title)
    #hist.SetTitleSize(0.1)
    #hist.Draw("COLZ TEXT")
    hist.Draw("COLZ")
    canvas.SaveAs("%s/%s.png" % (plot_path, out_name))
    del canvas
    gc.collect()


if __name__ == '__main__':
    test_percent = 0.5
    for_em = True
    for_ep = False
    plot_path = './plots/'
    f_in = rt.TFile("/besfs/groups/cal/dedx/zhuk/calib/663/26577-27090/Simulation/hadron_track/electron/electron.root", "READ")
    tree = f_in.Get('n103')
    print('entries=', tree.GetEntries())
    h_pt = rt.TH1F('H_pt', '', 220, 0, 2.2)
    h_pt0 = rt.TH1F('H_pt0', '', 220, 0, 2.2)
    h_charge = rt.TH1F('H_charge', '', 20, -2, 2)
    h_costheta = rt.TH1F('H_costheta', '', 20, -2, 2)
    h_theta = rt.TH1F('H_theta', '', 200, 0, 200)
    h_dEdx_meas = rt.TH1F('H_dEdx_meas', '', 901, -1, 900)
    h_dedx_theta = rt.TH2F('H_dedx_theta', '', 900, 0, 900, 200, 0, 200)
    h_dedx_pt = rt.TH2F('H_dedx_pt', '', 900, 0, 900, 220, 0, 2.2)
    maxEvent = tree.GetEntries()
    Data = np.full((maxEvent, 3), 0, dtype=np.float32)  # init
    for i in range(maxEvent):
        tree.GetEntry(i)
        ptrk = getattr(tree, 'ptrk')
        charge = getattr(tree, 'charge')
        costheta = getattr(tree, 'costheta')
        dEdx_meas = getattr(tree, 'dEdx_meas')
        if for_em and charge != -1: continue
        if for_ep and charge != 1: continue
        Data[i, 0] = ptrk / 2.0
        #Data[i, 1] = charge
        Data[i, 1] = costheta
        Data[i, 2] = (dEdx_meas - 546) / (3 * 32)
        h_pt.Fill(ptrk)
        h_pt0.Fill(math.sqrt(ptrk))
        h_charge.Fill(charge)
        h_costheta.Fill(costheta)
        tmp_theta = math.acos(costheta) * 180 / math.pi
        h_theta.Fill(tmp_theta)
        h_dEdx_meas.Fill(dEdx_meas)
        h_dedx_theta.Fill(dEdx_meas, tmp_theta)
        h_dedx_pt.Fill(dEdx_meas, ptrk)
    if True:
        dele_list = []
        for i in range(Data.shape[0]):
            if Data[i, 0] == 0:
                dele_list.append(i)  # remove the empty event
        Data = np.delete(Data, dele_list, axis=0)
    print('final size=', Data.shape[0])
    plot_gr(h_pt, "h_pt_track", "")
    plot_gr(h_pt0, "h_pt_track0", "")
    plot_gr(h_charge, "h_charge", "")
    plot_gr(h_costheta, "h_costheta", "")
    plot_gr(h_theta, "h_theta", "")
    plot_gr(h_dEdx_meas, "h_dEdx_meas", "")
    plot_gr(h_dEdx_meas, "h_dEdx_meas_logy", "")
    plot_hist(h_dedx_theta, "h_dedx_theta", "")
    plot_hist(h_dedx_pt, "h_dedx_pt", "")
    Data = shuffle(Data)
    all_evt = Data.shape[0]
    training_data = Data[0:int((1 - test_percent) * all_evt), :]
    test_data = Data[int((1 - test_percent) * all_evt):all_evt, :]
    theta_range0 = 35
    theta_range1 = 145
    training_data_barrel = training_data[np.logical_and(np.arccos(training_data[:, 1]) * 180 / math.pi < 145, np.arccos(training_data[:, 1]) * 180 / math.pi > 35), :]
    test_data_barrel = test_data[np.logical_and(np.arccos(test_data[:, 1]) * 180 / math.pi < 145, np.arccos(test_data[:, 1]) * 180 / math.pi > 35), :]
    training_data_endcap = training_data[np.logical_or(np.arccos(training_data[:, 1]) * 180 / math.pi > 145, np.arccos(training_data[:, 1]) * 180 / math.pi < 35), :]
    test_data_endcap = test_data[np.logical_or(np.arccos(test_data[:, 1]) * 180 / math.pi > 145, np.arccos(test_data[:, 1]) * 180 / math.pi < 35), :]
    hf = h5py.File('electron_train_barrel.h5', 'w')
    hf.create_dataset('dataset', data=training_data_barrel)
    print('training_data_barrel shape=', training_data_barrel.shape)
    hf.close()
    hf = h5py.File('electron_train_endcap.h5', 'w')
    hf.create_dataset('dataset', data=training_data_endcap)
    print('training_data_endcap shape=', training_data_endcap.shape)
    hf.close()
    hf = h5py.File('electron_test_barrel.h5', 'w')
    hf.create_dataset('dataset', data=test_data_barrel)
    print('test_data_barrel shape=', test_data_barrel.shape)
    hf.close()
    hf = h5py.File('electron_test_endcap.h5', 'w')
    hf.create_dataset('dataset', data=test_data_endcap)
    print('test_data_endcap shape=', test_data_endcap.shape)
    hf.close()
    print('Done')
```

- authors: ["1473717798@qq.com"] | author_id: 1473717798@qq.com
---

- blob_id: 0d830f5647c98236a85d3ab95cba2544e777ab52 | directory_id: ba916d93dfb8074241b0ea1f39997cb028509240 | content_id: 43dde065bba3be50479abf9804d66f154e3f7138
- path: /problems/min_cost_buy_candies.py | repo_name: satojkovic/algorithms | branch_name: refs/heads/master
- snapshot_id: ecc1589898c61d2eef562093d3d2a9a2d127faa8 | revision_id: f666b215bc9bbdab2d2257c83ff1ee2c31c6ff8e
- detected_licenses: [] | license_type: no_license
- visit_date: 2023-09-06T08:17:08.712555 | revision_date: 2023-08-31T14:19:01 | committer_date: 2023-08-31T14:19:01
- github_id: 169,414,662 | star_events_count: 2 | fork_events_count: 3
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 335 | extension: py
- content:

```python
def min_cost_buy_candies(cost):
    # Sort descending; under "buy two, get one free" every third candy is free.
    cost = sorted(cost, reverse=True)
    return sum(cost) - sum(cost[2::3])


def test_min_cost_buy_candies():
    assert min_cost_buy_candies([1]) == 1
    assert min_cost_buy_candies([1, 2]) == 3
    assert min_cost_buy_candies([3, 4, 10]) == 14
    assert min_cost_buy_candies([1, 1, 1, 1, 1]) == 4
```

- authors: ["satojkovic@gmail.com"] | author_id: satojkovic@gmail.com
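The greedy above assumes the usual buy-two-get-one-free discount rule (LeetCode 2144-style, where the free candy may not cost more than either paid one); sorting descending and skipping every third element is then optimal. A brute-force cross-check sketch for small inputs, assuming `min_cost_buy_candies` from the file above is in scope and that the stated discount rule applies:

```python
# Cross-check sketch: exhaustive search over purchase orders, where every
# third candy in a group is free and may not cost more than either paid one.
from itertools import permutations

def brute_force(cost):
    best = float('inf')
    for order in permutations(cost):
        paid, ok = 0, True
        for i in range(0, len(order), 3):
            group = order[i:i + 3]
            paid += sum(group[:2])
            if len(group) == 3 and group[2] > min(group[:2]):
                ok = False  # invalid: free candy more expensive than a paid one
                break
        if ok:
            best = min(best, paid)
    return best

for case in ([1], [1, 2], [3, 4, 10], [1, 1, 1, 1, 1], [5, 2, 9, 7]):
    assert min_cost_buy_candies(case) == brute_force(case)
```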
---

- blob_id: 5f8cb45c7040fd7e59a958d419df1cbcb228ccc9 | directory_id: 8a4bc47685427204365b1668b3d7b5a6fd7546f1 | content_id: 19665e4adcd19fef2aa761bcb6b45bee6664d2dc
- path: /packages/example/common/environment.py | repo_name: myronww/hello-service | branch_name: refs/heads/master
- snapshot_id: 3b1705ad8c25a6763d5a9673086b01d388b7817a | revision_id: 8b59054dd4cb09fb5f1697e14a050d8251b3ada8
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-04-07T03:43:08.728638 | revision_date: 2019-04-10T17:12:24 | committer_date: 2019-04-10T17:12:24
- github_id: 158,027,190 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 289 | extension: py
- content:

```python
import os

DEBUG = False
if "DEBUG" in os.environ:
    if os.environ["DEBUG"].lower() in ["1", "true", "on"]:
        DEBUG = True

DEVELOPER_MODE = False
if "DEVELOPER_MODE" in os.environ:
    if os.environ["DEVELOPER_MODE"].lower() in ["1", "true", "on"]:
        DEVELOPER_MODE = True
```

- authors: ["myron.walker@gmail.com"] | author_id: myron.walker@gmail.com
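The two flag checks in `environment.py` differ only in the variable name; a hypothetical `env_flag` helper (a sketch, not part of the original module) factors out the pattern:

```python
# Sketch of a generalized flag reader; `env_flag` is a hypothetical helper,
# not part of the original module.
import os

def env_flag(name: str) -> bool:
    """Return True when an environment variable looks like an 'on' switch."""
    return os.environ.get(name, "").lower() in ("1", "true", "on")

DEBUG = env_flag("DEBUG")
DEVELOPER_MODE = env_flag("DEVELOPER_MODE")
```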
---

- blob_id: 6c98862cef6bebdb92b27e051d1e0386d84cef74 | directory_id: c8296b9479cbefd26bb3ebaaf6ab55bd090f9735 | content_id: fa86e862af1f5372fd34923fcbb84eca9d020404
- path: /mitest/views/module.py | repo_name: WangYongjun1990/test-core | branch_name: refs/heads/master
- snapshot_id: 81db49f54e291bd09d329c7c2c560adcbb3b3466 | revision_id: 0ec13174b58a41f35fce2bd2b895b8ac441dfd37
- detected_licenses: [] | license_type: no_license
- visit_date: 2022-12-12T11:45:48.502241 | revision_date: 2018-08-17T08:38:55 | committer_date: 2018-08-17T08:40:32
- github_id: 138,014,671 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: 2022-12-08T02:10:28 | gha_created_at: 2018-06-20T10:05:28 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,878 | extension: py
- content:

```python
# -*- coding:utf-8 -*-
"""
File Name: `module`.py
Version:
Description:
Author: wangyongjun
Date: 2018/6/21 13:44
"""
import json

from flask import Blueprint
from flask_restful import Resource

from mitest.api.comm_log import logger
from mitest.views.wrappers import timer
from mitest.utils.common import get_request_json, make_response
from mitest.api.mysql_manager import ModuleInfoManager, TestsuiteInfoManager

module = Blueprint('module_interface', __name__)

import mitest.config.sit
from flask import Flask
Flask(__name__).config.from_object(mitest.config.sit)

# Translations of the Chinese response strings used below:
#   "入参校验失败"               = "request parameter validation failed"
#   "这个名称的测试模块已经存在"   = "a test module with this name already exists"
#   "{}模块添加成功"             = "module {} added successfully"
#   "您修改的模块名称已存在"       = "the new module name already exists"
#   "操作成功"                   = "operation succeeded"
#   "url错误,不存在的接口动作<{action}>" = "URL error: interface action <{action}> does not exist"


class Module(Resource):
    def __init__(self):
        pass

    @timer
    def post(self, action):
        data = get_request_json()
        mim = ModuleInfoManager()
        if action == 'add':
            try:
                system_id = data["systemId"]
                module_name = data["moduleName"]
            except KeyError:
                return make_response({"code": "100", "desc": "入参校验失败"})
            module_name_list = mim.query_module(system_id, module_name)
            if len(module_name_list) != 0:
                return make_response({"code": "201", "desc": "这个名称的测试模块已经存在"})
            mim.insert_module(module_name=module_name, system_id=system_id)
            return make_response({"code": "000", "desc": "{}模块添加成功".format(module_name)})
        elif action == 'edit':
            try:
                id = data["id"]
                module_name = data["moduleName"]
            except KeyError:
                return make_response({"code": "100", "desc": "入参校验失败"})
            res_module_name = mim.query_module_id(id=id)[0].module_name
            if res_module_name == module_name:
                return make_response({"code": "201", "desc": "您修改的模块名称已存在"})
            mim.update_module(id_=id, module_name=module_name)
            return make_response({"code": "000", "desc": "操作成功"})
        elif action == 'delete':
            try:
                id = data["id"]
            except KeyError:
                return make_response({"code": "100", "desc": "入参校验失败"})
            mim.delete_module(id_=id)
            return make_response({"code": "000", "desc": "操作成功"})
        elif action == 'detail':
            pass
        elif action == 'list':
            try:
                system_id = data["systemId"]
            except KeyError:
                return make_response({"code": "100", "desc": "入参校验失败"})
            module_list = mim.query_all_module(system_id=system_id)
            res = list()
            id = 0
            for i in module_list:
                module_dict = dict()
                if module_list:
                    id += 1
                    module_dict["id"] = id
                    module_dict["moduleId"] = i.id
                    module_dict["label"] = i.module_name
                    testsuite_list = TestsuiteInfoManager.query_all_testsuite(module_id=i.id)
                    testsuite = list()
                    for j in testsuite_list:
                        testsuite_dict = dict()
                        if testsuite_list:
                            id += 1
                            testsuite_dict["id"] = id
                            testsuite_dict["testsuiteId"] = j.id
                            testsuite_dict["label"] = j.testsuite_name
                            testsuite.append(testsuite_dict)
                    module_dict["children"] = testsuite
                res.append(module_dict)
            return make_response({"code": "000", "desc": res})
        elif action == 'queryBySystemId':
            """Query all modules under a system by its systemId.
            url: /module/queryBySystemId
            input:
                {"systemId": "9"}
            output:
                {
                    "code": "000",
                    "data": [
                        {
                            "systemId": 4,
                            "systemName": "申请"
                        }
                    ]
                }
            """
            try:
                system_id = data.pop('systemId')
            except KeyError:
                return make_response({"code": "100", "desc": "入参校验失败"})
            obj = ModuleInfoManager.query_all_module(system_id)
            module_list = []
            for m in obj:
                module_info_dic = {
                    "systemName": m.module_name,
                    "systemId": m.id,
                }
                module_list.append(module_info_dic)
            return make_response({"code": "000", "data": module_list})
        else:
            return make_response({"code": "100", "desc": "url错误,不存在的接口动作<{action}>".format(action=action)})


if __name__ == '__main__':
    Module = Module()
    res = Module.post("list")
    print(res)
```

- authors: ["yongjun.wang@mi-me.com"] | author_id: yongjun.wang@mi-me.com
---

- blob_id: 16343f081959d18ba076d1ee9ba2fc91db0493bf | directory_id: a50bd0cbf51b0578fd8249785a35796dfbcb5728 | content_id: c664b83878667a62275dcf8f5dd34587c667d063
- path: /poem/Poem/api/urls_internal.py | repo_name: ARGOeu/poem | branch_name: refs/heads/master
- snapshot_id: d24cc8126abec7dbae687ca0f854cabb54f982a4 | revision_id: 40c17484c6184fe3cf6547401d258b95644fa18f
- detected_licenses: ["Apache-2.0"] | license_type: permissive
- visit_date: 2020-12-25T17:24:36.601759 | revision_date: 2019-04-05T11:51:34 | committer_date: 2019-04-05T11:51:34
- github_id: 26,312,136 | star_events_count: 0 | fork_events_count: 2
- gha_license_id: NOASSERTION | gha_event_created_at: 2020-09-17T09:31:12 | gha_created_at: 2014-11-07T09:08:48 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 889 | extension: py
- content:

```python
from django.urls import path

from . import views_internal

app_name = 'poem'

urlpatterns = [
    path('metrics/<str:group>', views_internal.ListMetricsInGroup.as_view(), name='metrics'),
    path('tokens/', views_internal.ListTokens.as_view(), name='tokens'),
    path('tokens/<str:name>', views_internal.ListTokenForTenant.as_view(), name='tokens'),
    path('users/', views_internal.ListUsers.as_view(), name='users'),
    path('groups/', views_internal.ListGroupsForUser.as_view(), name='groups'),
    path('groups/<str:group>', views_internal.ListGroupsForUser.as_view(), name='groups'),
    path('probes/<str:probe_name>', views_internal.ListProbes.as_view(), name='probes'),
    path('aggregations/', views_internal.ListAggregations.as_view(), name='aggregations'),
    path('aggregations/<str:aggregation_name>', views_internal.ListAggregations.as_view(), name='aggregations'),
]
```

- authors: ["daniel.vrcic@gmail.com"] | author_id: daniel.vrcic@gmail.com
---

- blob_id: 83f39d930bf9b33f24dae1eff820a417779e8ba2 | directory_id: a7b6741f345aad73117bc747c4e93e148f5fe769 | content_id: 80d17e2cd51af9d4af3b4d76b35812651d483995
- path: /Basic-Course/07-Flask-JWT-Extended/resources/user.py | repo_name: suzynakayama/python-flask-udemy | branch_name: refs/heads/master
- snapshot_id: e07f2d30d1f4b66aae06d1dcd775bd58ed5d2083 | revision_id: 95d2c5fa328e2f50d0893d73fd386fb713d1f12b
- detected_licenses: ["MIT"] | license_type: permissive
- visit_date: 2022-12-28T16:51:30.507702 | revision_date: 2020-10-09T22:52:59 | committer_date: 2020-10-09T22:52:59
- github_id: 299,960,039 | star_events_count: 2 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,889 | extension: py
- content:

```python
from flask_restful import Resource, reqparse
from werkzeug.security import safe_str_cmp
from flask_jwt_extended import (
    create_access_token,
    create_refresh_token,
    jwt_refresh_token_required,
    get_jwt_identity,
    jwt_required,
    get_raw_jwt
)

from models.user import UserModel
from blacklist import BLACKLIST

_user_parser = reqparse.RequestParser()
_user_parser.add_argument('username',
                          type=str,
                          required=True,
                          help="This field cannot be blank."
                          )
_user_parser.add_argument('password',
                          type=str,
                          required=True,
                          help="This field cannot be blank."
                          )


class UserRegister(Resource):
    def post(self):
        data = _user_parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {"message": "A user with that username already exists"}, 400

        user = UserModel(**data)
        user.save_to_db()

        return {"message": "User created successfully."}, 201


class User(Resource):
    @classmethod
    def get(cls, user_id):
        user = UserModel.find_by_id(user_id)
        if not user:
            return {'message': 'User not found.'}, 404
        return user.json()

    @classmethod
    def delete(cls, user_id):
        user = UserModel.find_by_id(user_id)
        if not user:
            return {'message': 'User not found.'}, 404
        user.delete_from_db()
        return {'message': 'User deleted from db.'}, 200


class UserLogin(Resource):
    @classmethod
    def post(cls):
        # get data from parser
        data = _user_parser.parse_args()

        # find user in Db
        user = UserModel.find_by_username(data['username'])

        # check password / create access token / create refresh token / return tokens
        if user and safe_str_cmp(user.password, data['password']):
            access_token = create_access_token(identity=user.id, fresh=True)  # `identity=` is the same as `identity()`
            refresh_token = create_refresh_token(user.id)
            return {
                'access_token': access_token,
                'refresh_token': refresh_token
            }, 200

        return {'message': 'Invalid credentials.'}, 401


class UserLogout(Resource):
    @jwt_required
    def get(self):
        # we only want to blacklist the token, not the user itself, so they will have to login again
        jti = get_raw_jwt()['jti']  # jwt id, a unique identifier for a jwt
        BLACKLIST.add(jti)
        return {'message': 'Successfully logged out.'}, 200


class TokenRefresh(Resource):
    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        new_token = create_access_token(identity=current_user, fresh=False)
        return {'access_token': new_token}, 200
```

- authors: ["suzy.nakayama@gmail.com"] | author_id: suzy.nakayama@gmail.com
---

- blob_id: 3e2a3416e9043be473464f5178214bc1d21ac811 | directory_id: 2dfbb97b47fd467f29ffb26faf9a9f6f117abeee | content_id: 5ed59c72ca216a929c05706680799872276990c7
- path: /leetcode/151.py | repo_name: liuweilin17/algorithm | branch_name: refs/heads/master
- snapshot_id: 0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5 | revision_id: d3e8669f932fc2e22711e8b7590d3365d020e189
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-12-30T11:03:40.085105 | revision_date: 2020-04-10T03:46:01 | committer_date: 2020-04-10T03:46:01
- github_id: 98,844,919 | star_events_count: 3 | fork_events_count: 1
- gha_license_id: null | gha_event_created_at: 2018-10-05T03:01:02 | gha_created_at: 2017-07-31T03:35:14 | gha_language: C++
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 688 | extension: py
- content:

```python
###########################################
# Let's Have Some Fun
# File Name: 151.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Mon Aug 26 22:38:31 2019
###########################################
#coding=utf-8
#!/usr/bin/python

# 151. Reverse Words in a String


class Solution:
    def reverseWords(self, s: str) -> str:
        s = s.strip()
        N = len(s)
        ret = []
        t = ''
        for i in range(N - 1, -1, -1):
            if s[i] != ' ':
                t = s[i] + t
            elif t != '':
                ret.append(t)
                t = ''
            else:
                pass
        if t:
            ret.append(t)
        return ' '.join(ret)
```

- authors: ["liuweilin17@qq.com"] | author_id: liuweilin17@qq.com
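The solution above assembles each word back-to-front, character by character. For comparison, a sketch of the same behavior with plain string methods; `str.split()` with no argument already discards leading, trailing, and repeated spaces:

```python
# Equivalent one-liner sketch: split() collapses whitespace runs, so
# reversing the word list is enough.
def reverse_words(s: str) -> str:
    return ' '.join(reversed(s.split()))

assert reverse_words("  hello   world  ") == "world hello"
```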
---

- blob_id: d27a7cc8735ca16e17b623666767925f67dc0aa9 | directory_id: 3bdf35a266547425501fdfe684ba6049f38b6690 | content_id: 7f93d5a7ecb0205720eb071b89616eab5485c1cb
- path: /Paddy/api/serializers.py | repo_name: SIBU99/ChhatraViswaKarmaOurServerCode | branch_name: refs/heads/master
- snapshot_id: a7b95a2a3b27c97b4488b1825721cc2b4063568d | revision_id: b36e0f9e866f76b7d6a104897f62eacceb2faa22
- detected_licenses: [] | license_type: no_license
- visit_date: 2023-01-07T09:22:24.428522 | revision_date: 2020-11-10T09:02:13 | committer_date: 2020-11-10T09:02:13
- github_id: 311,376,593 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 633 | extension: py
- content:

```python
from rest_framework import serializers
from ..models import Paddy
from Account.api.serializers import FarmerSerializer
from Account.models import Farmer
from rest_framework.exceptions import ValidationError


class PaddySerializer(serializers.ModelSerializer):
    """This is the serializer for the model: Paddy."""  # original docstring said "Corn"

    farmer = FarmerSerializer(read_only=True)

    class Meta:
        model = Paddy
        fields = [
            "id",
            "farmer",
            "image",
            "result_tag",
            "disease1",
            "disease2",
            "disease3",
            "when",
            "map_disease",
        ]
```

- authors: ["kumarmishra678@gmail.com"] | author_id: kumarmishra678@gmail.com
---

- blob_id: 0438f4c787caade1a5b8e1a3ef8e3b498ef03a8d | directory_id: d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | content_id: fb2a9442dbc8a4731f3e87bf0942881517feb6bd
- path: /tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Fisher_Lag1Trend_30_12_0.py | repo_name: jmabry/pyaf | branch_name: refs/heads/master
- snapshot_id: 797acdd585842474ff4ae1d9db5606877252d9b8 | revision_id: afbc15a851a2445a7824bf255af612dc429265af
- detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
- visit_date: 2020-03-20T02:14:12.597970 | revision_date: 2018-12-17T22:08:11 | committer_date: 2018-12-17T22:08:11
- github_id: 137,104,552 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: BSD-3-Clause | gha_event_created_at: 2018-12-17T22:08:12 | gha_created_at: 2018-06-12T17:15:43 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 266 | extension: py
- content:

```python
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12);
```

- authors: ["antoine.carme@laposte.net"] | author_id: antoine.carme@laposte.net
---

- blob_id: bda7687bc1bb236ab785aa669b8d8f79f222c32c | directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 | content_id: 156a15c662f97716fc2ceae82faeba7aa8cdc092
- path: /xai/brain/wordbase/otherforms/_coffined.py | repo_name: cash2one/xai | branch_name: refs/heads/master
- snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 | revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
- detected_licenses: ["MIT"] | license_type: permissive
- visit_date: 2021-01-19T12:33:54.964379 | revision_date: 2017-01-28T02:00:50 | committer_date: 2017-01-28T02:00:50
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 224 | extension: py
- content:

```python
# class header
class _COFFINED():

    def __init__(self,):
        self.name = "COFFINED"
        self.definitions = ['coffin']  # original assigned the bare, undefined name `coffin`; quoted here as an assumption
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['coffin']
```

- authors: ["xingwang1991@gmail.com"] | author_id: xingwang1991@gmail.com
---

- blob_id: 1d4a679d7e01b82f721f9675a3b7087c03fc0dfe | directory_id: b000127408f96db7411f301553585f5da0e426cd | content_id: faf5ee09133f12527c8ee128149d054486b16934
- path: /code/Letter-Tile-Possibilities.py | repo_name: SaiVK/Leetcode-Archive | branch_name: refs/heads/master
- snapshot_id: 5f758faf97d1ab559c4c75d26ae5cf7a256baef8 | revision_id: 56bafeaaced2d0fd3b3d2f1a0365d24d5b41e504
- detected_licenses: [] | license_type: no_license
- visit_date: 2022-11-23T08:50:17.610247 | revision_date: 2020-07-27T02:09:53 | committer_date: 2020-07-27T02:09:53
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 248 | extension: py
- content:

```python
import itertools


class Solution(object):
    def numTilePossibilities(self, tiles):
        a = 0
        for e in range(1, (len(tiles) + 1)):
            for val in set(itertools.permutations(tiles, e)):
                a += 1
        return a
```

- authors: ["christopherlambert106@gmail.com"] | author_id: christopherlambert106@gmail.com
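A quick sanity check of the permutation-set counting above, using LeetCode 1079's sample inputs (assumes the `Solution` class from the file above is in scope):

```python
# "AAB" admits 8 distinct non-empty sequences:
# A, B, AA, AB, BA, AAB, ABA, BAA.
assert Solution().numTilePossibilities("AAB") == 8
assert Solution().numTilePossibilities("V") == 1
```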
---

- blob_id: c63f96624ea7308bc8b1af5b80a4d286f0439386 | directory_id: 06476bc4cb7fc3ce378beb357fac7d5aacb87b3b | content_id: ea96d6f1ca70909b6076a14e89b3e8c77d95a2cb
- path: /Prototype/env/lib/python3.8/site-packages/pyfilterbank/rbj_audio_eq.py | repo_name: marc-ortuno/VOPEC | branch_name: refs/heads/main
- snapshot_id: 44d3a74d3e0686474dd57fcb21e845fd5fd48897 | revision_id: e7ed1f13cc1868a824f4036dd08ec6bed4266c08
- detected_licenses: ["MIT"] | license_type: permissive
- visit_date: 2023-06-12T19:15:18.060897 | revision_date: 2021-07-01T17:15:03 | committer_date: 2021-07-01T17:15:03
- github_id: 344,433,646 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: MIT | gha_event_created_at: 2021-06-14T19:15:47 | gha_created_at: 2021-03-04T10:22:05 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,621 | extension: py
- content:

```python
from numpy import array, sqrt, pi, cos, sin, sinh, log  # `array` was missing from the original import


def rbj_sos(filtertype, sample_rate, f0, gain_db=None,
            q_factor=None, band_width=None, shelf_slope=None):
    if 'shelf' in filtertype and not shelf_slope:
        raise(ValueError('shelf_slope must be specified.'))
    w0 = 2*pi * f0/sample_rate
    amplitude = None if not gain_db else sqrt(10**(gain_db/20.0))
    alpha = _compute_alpha(amplitude, w0, q_factor, band_width, shelf_slope)
    params = {'w0': w0, 'alpha': alpha}
    if amplitude is not None:
        # only peq and shelf filter functions take an amplitude argument;
        # the original always passed it, which broke e.g. _lowpass(**params)
        params['amplitude'] = amplitude
    filterfun = _filtertype_to_filterfun_dict[filtertype]
    sos = filterfun(**params)
    return sos


class RbjEqCascade:

    def __init__(self, sample_rate):
        self._sample_rate = sample_rate
        self._sosmat = []
        self._filterlist = []
        self._filtertypelist = []  # was never initialized, though add() appends to it

    def add(self, filtertype):
        self._filtertypelist += [filtertype]
        filtobj = RbjEq(filtertype, self._sample_rate)
        self._filterlist += [filtobj]
        self._sosmat += [filtobj.sos]


class RbjEq:

    def __init__(self, filtertype, sample_rate, params=None):
        self._filtertype = filtertype
        self._sample_rate = sample_rate
        self._filterfun = _filtertype_to_filterfun_dict[filtertype]
        if not params:
            # _get_params_filtertype is not defined in this module
            params, param_names = _get_params_filtertype(filtertype)
        self._params = params
        self.update(**params)  # original called self._update, which does not exist

    def update(self, f0,
               gain_db=None,
               q_factor=None,
               band_width=None,
               shelf_slope=None):
        w0 = 2*pi * f0/self._sample_rate  # original used self.sample_rate (no such attribute)
        amplitude = None if not gain_db else sqrt(10**(gain_db/20.0))
        alpha = _compute_alpha(amplitude, w0, q_factor, band_width, shelf_slope)
        params = {'w0': w0, 'alpha': alpha}
        if amplitude is not None:
            params['amplitude'] = amplitude
        self._sos = self._filterfun(**params)

    @property
    def sos(self):
        return self._sos

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, value):
        self._params = value
        self.update(**self.params)


def _compute_alpha(amplitude=None, w0=None, q_factor=None,
                   band_width=None,
                   shelf_slope=None):
    if q_factor:
        return sin(w0) / (2*q_factor)
    elif band_width:
        return sin(w0) * sinh(0.5*log(2.0) * band_width * w0/sin(w0))
    elif shelf_slope:
        # RBJ cookbook: alpha = sin(w0)/2 * sqrt((A + 1/A)*(1/S - 1) + 2);
        # the original divided by `alpha`, which is undefined at this point.
        return sin(w0) / 2 * sqrt((amplitude + 1/amplitude) * (1/shelf_slope - 1) + 2)
    else:
        raise(ValueError(
            '''You need to specify at least one of:
            q_factor, band_width or shelf_slope.'''))


def _lowpass(w0, alpha):
    b0 = (1 - cos(w0)) / 2.0
    b1 = 1 - cos(w0)
    b2 = (1 - cos(w0)) / 2.0
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _highpass(w0, alpha):
    b0 = (1 + cos(w0)) / 2.0
    b1 = -(1 + cos(w0))
    b2 = (1 + cos(w0)) / 2.0
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _bandpassQ(w0, alpha):
    b0 = sin(w0) / 2.0   # = Q*alpha
    b1 = 0.0
    b2 = -sin(w0) / 2.0  # = -Q*alpha
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _bandpass(w0, alpha):
    b0 = alpha
    b1 = 0.0
    b2 = -alpha
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _notch(w0, alpha):
    b0 = 1.0
    b1 = -2 * cos(w0)
    b2 = 1.0
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _apf(w0, alpha):
    b0 = 1 - alpha
    b1 = -2 * cos(w0)
    b2 = 1 + alpha
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _peq(amplitude, w0, alpha):
    b0 = 1 + alpha*amplitude
    b1 = -2 * cos(w0)
    b2 = 1 - alpha*amplitude
    a0 = 1 + alpha/amplitude
    a1 = -2 * cos(w0)
    a2 = 1 - alpha/amplitude
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _lowshelf(amplitude, w0, alpha):
    b0 = amplitude*((amplitude+1) - (amplitude-1)*cos(w0) + 2*sqrt(amplitude)*alpha)
    b1 = 2*amplitude*((amplitude-1) - (amplitude+1)*cos(w0))
    b2 = amplitude*((amplitude+1) - (amplitude-1)*cos(w0) - 2*sqrt(amplitude)*alpha)
    a0 = (amplitude+1) + (amplitude-1)*cos(w0) + 2*sqrt(amplitude)*alpha
    a1 = -2*((amplitude-1) + (amplitude+1)*cos(w0))
    a2 = (amplitude+1) + (amplitude-1)*cos(w0) - 2*sqrt(amplitude)*alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


def _highshelf(amplitude, w0, alpha):
    b0 = amplitude*((amplitude+1) + (amplitude-1)*cos(w0) + 2*sqrt(amplitude)*alpha)
    b1 = -2*amplitude*((amplitude-1) + (amplitude+1)*cos(w0))
    b2 = amplitude*((amplitude+1) + (amplitude-1)*cos(w0) - 2*sqrt(amplitude)*alpha)
    a0 = (amplitude+1) - (amplitude-1)*cos(w0) + 2*sqrt(amplitude)*alpha
    a1 = 2*((amplitude-1) - (amplitude+1)*cos(w0))
    a2 = (amplitude+1) - (amplitude-1)*cos(w0) - 2*sqrt(amplitude)*alpha
    sos = array([b0, b1, b2, a0, a1, a2]) / a0
    return sos


_filtertype_to_filterfun_dict = {
    'lowpass': _lowpass,
    'highpass': _highpass,
    'bandpassQ': _bandpassQ,
    'bandpass': _bandpass,
    'notch': _notch,
    'apf': _apf,
    'peq': _peq,
    'lowshelf': _lowshelf,
    'highshelf': _highshelf,
}

available_filtertypes = list(_filtertype_to_filterfun_dict.keys())
```

- authors: ["you@example.com"] | author_id: you@example.com
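A minimal usage sketch for the module above, assuming the import and `alpha` fixes noted in the code have been applied and that the file is importable as `rbj_audio_eq`. Q = 0.707 gives the familiar Butterworth-style lowpass, and the returned array holds `[b0, b1, b2, a0, a1, a2]` normalized so that the `a0` entry is 1.0:

```python
# Usage sketch (assumes the fixed module above is importable as rbj_audio_eq).
from rbj_audio_eq import rbj_sos

coeffs = rbj_sos('lowpass', sample_rate=44100, f0=1000.0, q_factor=0.707)
b, a = coeffs[:3], coeffs[3:]  # a[0] is 1.0 after normalization by a0
print("b =", b, "a =", a)
```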
---

- blob_id: c9886d0249b8595088fa4db7338186268e2c81b8 | directory_id: 8c77b0d14dd720a89470f2aa6243a8e8d4167424 | content_id: 619fba19b76681461ab606ae0c0cac0991d2d90d
- path: /py4e/CodeUp/1072_repeat_execute.py | repo_name: oshsage/Python_Pandas | branch_name: refs/heads/master
- snapshot_id: 64909c10fd98b0f2290c081dde9e6b87e17032e4 | revision_id: a131598d62c834d63979eda56ea15763e35fab4e
- detected_licenses: [] | license_type: no_license
- visit_date: 2022-12-19T19:32:20.436935 | revision_date: 2020-10-24T07:17:10 | committer_date: 2020-10-24T07:17:10
- github_id: 298,832,643 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 463 | extension: py
- content:

```python
# n integers are entered in order.
# Range -2147483648 to +2147483647; the maximum count n is not known in advance.
# Print the n entered integers in order.
# Loops such as while(), for(), do~while() may not be used.

cnt = int(input())
i = 1
nums = list(input().split(' '))  # renamed from `list`, which shadowed the builtin
while i < cnt + 1:
    print(nums[i - 1])
    i += 1

# Concepts used: while, +=, list[n]
# list[n]: the n-th item of a list; n starts at 0!
```

- authors: ["oh12sung@naver.com"] | author_id: oh12sung@naver.com
---

- blob_id: 0908c5536c97723509afee287b17b5b5981324ec | directory_id: 5cec1ff43bf38cf31316254dabe3f972d38744ad | content_id: f471c9e8b1665a01199d88be5270701ded21ba07
- path: /src/hydrat/classifier/scikits_learn.py | repo_name: eyadsibai/hydrat | branch_name: refs/heads/master
- snapshot_id: 7fb63f3c54f1fca25d04ab7266712c1077ffa2e3 | revision_id: 5a68c6b8f32bc6bad59c3f002340bf7ef62e868c
- detected_licenses: [] | license_type: no_license
- visit_date: 2016-09-06T14:16:46.082697 | revision_date: 2013-08-06T05:14:02 | committer_date: 2013-08-06T05:14:02
- github_id: 33,199,904 | star_events_count: 1 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,423 | extension: py
- content (Python 2):

```python
"""
hydrat's interface to scikit.learn
http://scikit-learn.sourceforge.net
Marco Lui <saffsd@gmail.com> October 2010
"""
import numpy
from hydrat.task.sampler import isOneofM
from hydrat.classifier.abstract import Learner, Classifier, NotInstalledError
class ScikitL(Learner):
"""
Lightweight wrapper for scikit's learner interface
"""
__name__ = 'scikit'
def __init__(self, learn_class, **kwargs):
Learner.__init__(self)
self.learn_class = learn_class
self.kwargs = kwargs
def _check_installed(self):
try:
import sklearn
except ImportError:
raise NotInstalledError("sklearn not installed")
def is_pickleable(self):
# TODO: Mark as false until we look into this more closely
return False
def _params(self):
md = dict(self.kwargs)
md['learner'] = self.learn_class.__name__
return md
def _learn(self, feature_map, class_map):
if not isOneofM(class_map):
raise ValueError, "can only use one-of-m classmaps"
learner = self.learn_class(**self.kwargs)
targets = class_map.argmax(axis=1)
learner.fit(feature_map.todense(), targets)
return ScikitC(learner, class_map.shape[1])
class ScikitC(Classifier):
__name__ = 'scikits'
def __init__(self, learner, num_class):
Classifier.__init__(self)
self.learner = learner
self.num_class = num_class
def _classify(self, feature_map):
if hasattr(self.learner, 'predict_proba'):
# use probabilistic output
classif = self.learner.predict_proba(feature_map.todense())
else:
pred = self.learner.predict(feature_map.todense())
classif = numpy.zeros((feature_map.shape[0], self.num_class), dtype='bool')
for i,p in enumerate(pred):
classif[i,p] = True
return classif
# Convenience methods
from sklearn import svm
def SVC(**kwargs): return ScikitL(svm.sparse.SVC, **kwargs)
def NuSVC(**kwargs): return ScikitL(svm.sparse.NuSVC, **kwargs)
def LinearSVC(**kwargs): return ScikitL(svm.sparse.LinearSVC, **kwargs)
from sklearn.ensemble import RandomForestClassifier
def RandomForest(**kwargs): return ScikitL(RandomForestClassifier, **kwargs)
# TODO: There are generalized linear models available for sparse features
# TODO: Some of the classifiers are only implemented for dense features, could investigate
# using them but would need to be careful of very large spaces.
# TODO: Warn if scikits.learn is not installed
```

- authors: ["saffsd@gmail.com"] | author_id: saffsd@gmail.com
---

- blob_id: 8c1f992909d87f31cc7c977d65ad54a7e5165a10 | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3 | content_id: 3bb8da266eec20606492b32fc65225b1b0ffa5ff
- path: /Python_codes/p03200/s727157767.py | repo_name: Aasthaengg/IBMdataset | branch_name: refs/heads/main
- snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
- detected_licenses: [] | license_type: no_license
- visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
- github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 141 | extension: py
- content:

```python
s = list(input())
ans = 0
cntB = 0
for i in range(len(s)):
    if s[i] == 'B':
        cntB += 1
    else:
        ans += cntB
print(ans)
```

- authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com
---

- blob_id: 5c4983c27b4cef394e0a0bcb7f0283990cd5809d | directory_id: df7b40e95718ac0f6071a0ba571b42efc81cf6de | content_id: df79a9cf13963d26384b00ced0cf5efa9f68a420
- path: /configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py | repo_name: shinianzhihou/ChangeDetection | branch_name: refs/heads/master
- snapshot_id: 87fa2c498248e6124aeefb8f0ee8154bda36deee | revision_id: 354e71234bef38b6e142b6ba02f23db958582844
- detected_licenses: ["Apache-2.0"] | license_type: permissive
- visit_date: 2023-01-23T20:42:31.017006 | revision_date: 2023-01-09T11:37:24 | committer_date: 2023-01-09T11:37:24
- github_id: 218,001,748 | star_events_count: 162 | fork_events_count: 29
- gha_license_id: Apache-2.0 | gha_event_created_at: 2022-11-03T04:11:00 | gha_created_at: 2019-10-28T08:41:54 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,118 | extension: py
- content:

```python
_base_ = [
    '../_base_/models/ocrnet_hr18.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(decode_head=[
    dict(
        type='FCNHead',
        in_channels=[18, 36, 72, 144],
        channels=sum([18, 36, 72, 144]),
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        kernel_size=1,
        num_convs=1,
        concat_input=False,
        dropout_ratio=-1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    dict(
        type='OCRHead',
        in_channels=[18, 36, 72, 144],
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        channels=512,
        ocr_channels=256,
        dropout_ratio=-1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
```

- authors: ["1178396201@qq.com"] | author_id: 1178396201@qq.com
---

- blob_id: d13d6b5acc64af0811538ebda36d881d4dcf63ca | directory_id: 88ae8695987ada722184307301e221e1ba3cc2fa | content_id: a66f70d0b3480620b2a4fbb9330bd1a14a74e6d8
- path: /third_party/catapult/dashboard/dashboard/email_sheriff_test.py | repo_name: iridium-browser/iridium-browser | branch_name: refs/heads/master
- snapshot_id: 71d9c5ff76e014e6900b825f67389ab0ccd01329 | revision_id: 5ee297f53dc7f8e70183031cff62f37b0f19d25f
- detected_licenses: ["BSD-3-Clause", "Apache-2.0", "LGPL-2.0-or-later", "MIT", "GPL-1.0-or-later"] | license_type: permissive
- visit_date: 2023-08-03T16:44:16.844552 | revision_date: 2023-07-20T15:17:00 | committer_date: 2023-07-23T16:09:30
- github_id: 220,016,632 | star_events_count: 341 | fork_events_count: 40
- gha_license_id: BSD-3-Clause | gha_event_created_at: 2021-08-13T13:54:45 | gha_created_at: 2019-11-06T14:32:31 | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,169 | extension: py
- content:

```python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import sys
import unittest

import mock

from dashboard import email_sheriff
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import bug_label_patterns
from dashboard.models.subscription import Subscription

_SHERIFF_URL = 'http://chromium-build.appspot.com/p/chromium/sheriff_perf.js'
_SHERIFF_EMAIL = 'perf-sheriff-group@google.com'


class EmailSheriffTest(testing_common.TestCase):

  def _AddTestToStubDataStore(self):
    """Adds a test which will be used in the methods below."""
    bug_label_patterns.AddBugLabelPattern('label1', '*/*/dromaeo/dom')
    bug_label_patterns.AddBugLabelPattern('label2', '*/*/other/test')
    testing_common.AddTests(['ChromiumPerf'], ['Win7'],
                            {'dromaeo': {
                                'dom': {}
                            }})
    test = utils.TestKey('ChromiumPerf/Win7/dromaeo/dom').get()
    test.improvement_direction = anomaly.DOWN
    return test

  def _GetDefaultMailArgs(self):
    """Adds an Anomaly and returns arguments for email_sheriff.EmailSheriff."""
    test_entity = self._AddTestToStubDataStore()
    subscription_url = Subscription(
        name='Chromium Perf Sheriff URL',
        rotation_url=_SHERIFF_URL,
        bug_labels=['Performance-Sheriff-URL'])
    subscription_email = Subscription(
        name='Chromium Perf Sheriff Mail',
        notification_email=_SHERIFF_EMAIL,
        bug_labels=['Performance-Sheriff-Mail'])
    anomaly_entity = anomaly.Anomaly(
        median_before_anomaly=5.0,
        median_after_anomaly=10.0,
        start_revision=10002,
        end_revision=10004,
        subscription_names=[
            subscription_url.name,
            subscription_email.name,
        ],
        subscriptions=[subscription_url, subscription_email],
        test=utils.TestKey('ChromiumPerf/Win7/dromaeo/dom'))
    return {
        'subscriptions': [subscription_url, subscription_email],
        'test': test_entity,
        'anomaly': anomaly_entity
    }

  @mock.patch('google.appengine.api.urlfetch.fetch',
              mock.MagicMock(
                  return_value=testing_common.FakeResponseObject(
                      200, 'document.write(\'sullivan\')')))
  def testEmailSheriff_ContentAndRecipientAreCorrect(self):
    email_sheriff.EmailSheriff(**self._GetDefaultMailArgs())

    messages = self.mail_stub.get_sent_messages()
    self.assertEqual(1, len(messages))

    self.assertEqual('gasper-alerts@google.com', messages[0].sender)
    self.assertEqual({'perf-sheriff-group@google.com', 'sullivan@google.com'},
                     {s.strip() for s in messages[0].to.split(',')})

    name = 'dromaeo/dom on Win7'
    expected_subject = '100.0%% regression in %s at 10002:10004' % name
    self.assertEqual(expected_subject, messages[0].subject)
    body = str(messages[0].body)
    self.assertIn('10002 - 10004', body)
    self.assertIn('100.0%', body)
    self.assertIn('ChromiumPerf', body)
    self.assertIn('Win7', body)
    self.assertIn('dromaeo/dom', body)
    html = str(messages[0].html)
    self.assertIn('<b>10002 - 10004</b>', html)
    self.assertIn('<b>100.0%</b>', html)
    self.assertIn('<b>ChromiumPerf</b>', html)
    self.assertIn('<b>Win7</b>', html)
    self.assertIn('<b>dromaeo/dom</b>', html)

  @mock.patch('google.appengine.api.urlfetch.fetch',
              mock.MagicMock(
                  return_value=testing_common.FakeResponseObject(
                      200, 'document.write(\'sonnyrao, digit\')')))
  def testEmailSheriff_MultipleSheriffs_AllGetEmailed(self):
    email_sheriff.EmailSheriff(**self._GetDefaultMailArgs())
    messages = self.mail_stub.get_sent_messages()
    self.assertEqual(1, len(messages))
    self.assertEqual('gasper-alerts@google.com', messages[0].sender)
    self.assertEqual(
        {
            'perf-sheriff-group@google.com', 'sonnyrao@google.com',
            'digit@google.com'
        }, {s.strip() for s in messages[0].to.split(',')})

  def testEmail_NoSheriffUrl_EmailSentToSheriffRotationEmailAddress(self):
    args = self._GetDefaultMailArgs()
    args['subscriptions'][0].rotation_url = None
    email_sheriff.EmailSheriff(**args)
    messages = self.mail_stub.get_sent_messages()
    self.assertEqual(1, len(messages))
    # An email is only sent to the general sheriff rotation email;
    # There is no other specific sheriff to send it to.
    self.assertEqual('perf-sheriff-group@google.com', messages[0].to)

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(
          return_value=testing_common.FakeResponseObject(200, 'garbage')))
  def testEmailSheriff_RotationUrlHasInvalidContent_EmailStillSent(self):
    """Tests the email to list when the rotation URL returns garbage."""
    args = self._GetDefaultMailArgs()
    email_sheriff.EmailSheriff(**args)
    messages = self.mail_stub.get_sent_messages()
    self.assertEqual(1, len(messages))
    # An email is only sent to the general sheriff rotation email.
    self.assertEqual('perf-sheriff-group@google.com', messages[0].to)

  def testEmailSheriff_PercentChangeMaxFloat_ContentSaysAlertSize(self):
    """Tests the email content for "freakin huge" alert."""
    args = self._GetDefaultMailArgs()
    args['subscriptions'][0].rotation_url = None
    args['anomaly'].median_before_anomaly = 0.0
    email_sheriff.EmailSheriff(**args)
    messages = self.mail_stub.get_sent_messages()
    self.assertEqual(1, len(messages))
    self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].subject))
    self.assertNotIn(str(sys.float_info.max), str(messages[0].body))
    self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].body))
    self.assertNotIn(str(sys.float_info.max), str(messages[0].html))
    self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].html))


if __name__ == '__main__':
  unittest.main()
```

- authors: ["jengelh@inai.de"] | author_id: jengelh@inai.de
---

- blob_id: bd455dab671cde472cc785727e4d195135c38a5d | directory_id: 487ce91881032c1de16e35ed8bc187d6034205f7 | content_id: e11f2916855fa67449999b8443738a3141993f9f
- path: /codes/CodeJamCrawler/16_3_1_neat/16_3_1_ipince_senate.py | repo_name: DaHuO/Supergraph | branch_name: refs/heads/master
- snapshot_id: 9cd26d8c5a081803015d93cf5f2674009e92ef7e | revision_id: c88059dc66297af577ad2b8afa4e0ac0ad622915
- detected_licenses: [] | license_type: no_license
- visit_date: 2021-06-14T16:07:52.405091 | revision_date: 2016-08-21T13:39:13 | committer_date: 2016-08-21T13:39:13
- github_id: 49,829,508 | star_events_count: 2 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: 2021-03-19T21:55:46 | gha_created_at: 2016-01-17T18:23:00 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,445 | extension: py
- content (Python 2):

```python
#!/usr/bin/python

import operator
import sys
import random

ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';

# number -> party
def solve(senators):
    evacs = []
    tups = senators.items()
    while True:
        tups = sorted(tups, key=operator.itemgetter(1), reverse=True)
        #print tups
        if tups[0][1] == 0 and tups[1][1] == 0:  # was `is 0`; identity checks on ints are unreliable
            return evacs
        m1 = tups[0]
        m2 = tups[1]
        if m1[1] == 1 and m2[1] == 1:
            if len(tups) > 2 and tups[2][1] > 0:  # 1 1 1 ....
                evacs.append('' + m1[0])
                tups[0] = (m1[0], m1[1] - 1)
            else:  # 1 1 0 ...
                evacs.append('' + m1[0] + m2[0])
                tups[0] = (m1[0], m1[1] - 1)
                tups[1] = (m2[0], m2[1] - 1)
        elif m1[1] - m2[1] == 0:  # x x ...
            evacs.append('' + m1[0] + m2[0])
            tups[0] = (m1[0], m1[1] - 1)
            tups[1] = (m2[0], m2[1] - 1)
        else:  # 4 2 ...
            evacs.append('' + m1[0])
            tups[0] = (m1[0], m1[1] - 1)
        # check if all 0

if False:
    print 50
    for i in xrange(50):
        s = random.randint(2, 26)
        print s
        nums = []
        for j in xrange(s):
            nums.append(str(random.randint(1, 1000)))
        print ' '.join(nums)
    sys.exit()

lines = iter(sys.stdin.readlines())
cases = int(lines.next())
for i in xrange(cases):
    senators = {}
    s = int(lines.next())
    numbers = [int(num) for num in lines.next().split(' ')]
    for j in xrange(s):
        senators[ALPHA[j]] = numbers[j]
    print "Case #%d: %s" % (i + 1, ' '.join(solve(senators)))
```

- authors: ["[dhuo@tcd.ie]"] | author_id: [dhuo@tcd.ie]
---

- blob_id: 75240260a004d52337a86bfbe5cd8738addff1a1 | directory_id: 77f0f0156f27810269bd2542383af3cea97907e5 | content_id: 77e351840aaa74b869e1b1faadc7afc78482c83f
- path: /bafs/views/user.py | repo_name: schvin/freezingsaddles | branch_name: refs/heads/master
- snapshot_id: 2676808da9b5c025cedc5fc877d855efe211db83 | revision_id: 5b3e608a05eebf72925d8b8a50b8afa5141f575b
- detected_licenses: [] | license_type: no_license
- visit_date: 2021-09-02T12:36:00.265585 | revision_date: 2018-01-02T03:05:50 | committer_date: 2018-01-02T03:05:50
- github_id: 111,460,388 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: 2017-11-20T20:38:49 | gha_created_at: 2017-11-20T20:38:49 | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,107 | extension: py
- content:

```python
import json
import copy
import logging

from collections import defaultdict
from datetime import datetime, timedelta, date

from flask import render_template, redirect, url_for, current_app, request, Blueprint, session, g, jsonify
from sqlalchemy import text
from stravalib import Client
from stravalib import unithelper as uh

from bafs import app, db, data
from bafs.utils import gviz_api, auth
from bafs.model import Team, Athlete, RidePhoto, Ride, RideWeather
from bafs.utils.auth import requires_auth
from .people import people_list_users, people_show_person, ridedays
from .pointless import averagespeed, shortride, billygoat, tortoiseteam, weekendwarrior


def bt_jsonify(data):
    """
    Override to handle raw lists expected by bootstrap-table.
    """
    return current_app.response_class(json.dumps(data, default=json_serializer), mimetype='application/json')


blueprint = Blueprint('user', __name__)


def json_serializer(obj):  # renamed from the original misspelling json_seralizer
    if isinstance(obj, datetime):
        return obj.isoformat()
    else:
        return str(obj)


@blueprint.route("/rides")
@requires_auth
def rides():
    return render_template('user/rides.html')


@blueprint.route("/refetch_ride_photos", methods=['POST'])
@requires_auth
def ride_refetch_photos():
    ride_id = request.form['id']
    ride = db.session.query(Ride).filter(Ride.id == ride_id).filter(Ride.athlete_id == session.get('athlete_id')).one()
    ride.photos_fetched = False
    logging.info("Marking photos to be refetched for ride {}".format(ride))
    db.session.commit()
    return jsonify(success=True)  # I don't really have anything useful to spit back.


@blueprint.route("/rides.json")
@requires_auth
def rides_data():
    athlete_id = session.get('athlete_id')
    rides_q = db.session.query(Ride).filter(Ride.athlete_id == athlete_id).order_by(Ride.start_date.desc())
    results = []
    for r in rides_q:
        w = r.weather
        if w:
            avg_temp = w.ride_temp_avg
        else:
            avg_temp = None
        results.append(dict(id=r.id,
                            private=r.private,
                            name=r.name,
                            start_date=r.start_date,
                            elapsed_time=r.elapsed_time,
                            moving_time=r.moving_time,
                            distance=r.distance,
                            photos_fetched=r.photos_fetched,
                            avg_temp=avg_temp
                            ))
    #rides = db.session.query(Ride).all()
    return bt_jsonify(results)

# athlete_id = sa.Column(sa.BigInteger, sa.ForeignKey('athletes.id', ondelete='cascade'), nullable=False, index=True)
# elapsed_time = sa.Column(sa.Integer, nullable=False)  # Seconds
# # in case we want to convert that to a TIME type ... (using time for interval is kinda mysql-specific brokenness, though)
# # time.strftime('%H:%M:%S', time.gmtime(12345))
# moving_time = sa.Column(sa.Integer, nullable=False, index=True)
# elevation_gain = sa.Column(sa.Integer, nullable=True)  # 269.6 (feet)
# average_speed = sa.Column(sa.Float)  # mph
# maximum_speed = sa.Column(sa.Float)  # mph
# start_date = sa.Column(sa.DateTime, nullable=False, index=True)  # 2010-02-28T08:31:35Z
# distance = sa.Column(sa.Float, nullable=False, index=True)  # 82369.1 (meters)
# location = sa.Column(sa.String(255), nullable=True)
#
# commute = sa.Column(sa.Boolean, nullable=True)
# trainer = sa.Column(sa.Boolean, nullable=True)
#
# efforts_fetched = sa.Column(sa.Boolean, default=False, nullable=False)
#
# timezone = sa.Column(sa.String(255), nullable=True)
#
# geo = orm.relationship("RideGeo", uselist=False, backref="ride", cascade="all, delete, delete-orphan")
# weather = orm.relationship("RideWeather", uselist=False, backref="ride", cascade="all, delete, delete-orphan")
# photos = orm.relationship("RidePhoto", backref="ride", cascade="all, delete, delete-orphan")
#
# photos_fetched = sa.Column(sa.Boolean, default=False, nullable=False)
# private = sa.Column(sa.Boolean, default=False, nullable=False)
```

- authors: ["hans@xmpl.org"] | author_id: hans@xmpl.org
---

- blob_id: c16057f28176e9844ca0d10ae26e3b78d5f37bc3 | directory_id: b19a1baf69d1f7ba05a02ace7dfcba15c8d47cfb | content_id: 1d1cf8fda235c256858d262cedca33463379f949
- path: /liste.py | repo_name: MarkHofstetter/20191018-wifi-python | branch_name: refs/heads/master
- snapshot_id: 20ed5de1cf28996902cecf7cd681d054e0d06739 | revision_id: 7427b896783059a77c541e95df851a492ef5ebb9
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-08-15T03:43:42.964992 | revision_date: 2019-10-28T14:39:17 | committer_date: 2019-10-28T14:39:17
- github_id: 215,275,139 | star_events_count: 2 | fork_events_count: 2
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 397 | extension: py
- content (German comments translated to English):

```python
#    0  1  2  3  4  5  6
f = [1, 1, 2, 3, 5, 8, 13,]
print(f)
print(f[3])
print(f[-1])
print(f[0:3])  # exclusive of the last element
print(f[3:])
# f[4] = 'Hallo'
# print(f)
print(len(f))
print(f[3], f[5])
d = [f[3], f[5]]
f.append(21)
f.insert(0, 0)  # position, value(s)
print(f)
f.remove(1)  # removes the first matching value from the list!
print(f)
del(f[0])  # removes by index
print(f)
```

- authors: ["mark@hofstetter.at"] | author_id: mark@hofstetter.at
---

- blob_id: acb2f22c10e5a983885d5d763b965af07ef47eb9 | directory_id: ea1bb1d4b3227fde47ce1ebf3aadef696f355642 | content_id: 15ca6e3120ece11db4e3d16d806045fdf3867f01
- path: /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py | repo_name: petehwu/holbertonschool-higher_level_programming | branch_name: refs/heads/master
- snapshot_id: 4f0b0ea76699a60f47cf3dcb0b2b29da35ec2ef1 | revision_id: 3c1595d1011fcc3c0265f52a1b965e54f4edd94c
- detected_licenses: [] | license_type: no_license
- visit_date: 2021-07-13T18:41:06.413945 | revision_date: 2019-02-22T05:33:14 | committer_date: 2019-02-22T05:33:14
- github_id: 148,185,006 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 746 | extension: py
- content:

```python
#!/usr/bin/python3
"""python script to list all state objects
"""
import sys
from model_state import Base, State
from model_city import City
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker

if __name__ == "__main__":
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
                           .format(sys.argv[1], sys.argv[2],
                                   sys.argv[3]), pool_pre_ping=True)
    Base.metadata.bind = engine
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    cities = session.query(City).all()
    for c in cities:
        state = session.query(State).filter(State.id == c.state_id).first()
        print("{:s}: ({:d}) {:s}".format(state.name, c.id, c.name))
```

- authors: ["pete.h.wu@gmail.com"] | author_id: pete.h.wu@gmail.com
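The fetch loop above issues one extra `State` query per city, the classic N+1 pattern. A sketch of the same report as a single joined query, assuming the same `State`/`City` models and an open `session`:

```python
# Sketch: one joined query instead of N+1, under the same model assumptions.
rows = (session.query(State, City)
        .filter(State.id == City.state_id)
        .order_by(City.id)
        .all())
for state, city in rows:
    print("{:s}: ({:d}) {:s}".format(state.name, city.id, city.name))
```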
---

- blob_id: 2071d69796f799b401c9dc1bd122c662f52f5d0c | directory_id: 3d61905cb470e4918027d2b6d995246d60aab2b4 | content_id: d0baf17d94994bfbb82d1b191a19c80242170f51
- path: /python/brenpysandbox/fbx/fbx_import_settings_test_01.py | repo_name: brenainnJordan/brenpy-sandbox | branch_name: refs/heads/master
- snapshot_id: 6e36cfba617c4c9c8989bb36b78c3780b9d0959c | revision_id: 6dd20d8b7722719742613d2efec2c2992fcfdd9a
- detected_licenses: [] | license_type: no_license
- visit_date: 2020-08-28T17:16:24.041945 | revision_date: 2020-06-21T19:58:15 | committer_date: 2020-06-21T19:58:15
- github_id: 217,766,574 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,897 | extension: py
- content (Python 2):

```python
import fbx
import sys
import os

try:
    from Qt import QtCore
    from Qt import QtWidgets
    from Qt import QtGui
except ImportError:
    print "[ WARNING ] Cannot find Qt library, using PySide2 instead"
    from PySide2 import QtCore
    from PySide2 import QtWidgets
    from PySide2 import QtGui

# QtCore.SIGNAL doesn't seem to exist
# TODO investigate why
try:
    from PySide.QtCore import SIGNAL
except ImportError:
    from PySide2.QtCore import SIGNAL

from brenfbx.utils import bfFbxUtils
from brenfbx.qt.property import bfQtPropertyValueWidgets
from brenfbx.qt.property import bfQtPropertyTreeWidgets
from brenfbx.qt.property import bfQtPropertyItems
from brenfbx.qt.property import bfQtPropertyModels


def inspect_child_properties(fbx_property, indent=0):
    """Recursively debug properties
    """
    child_property = fbx_property.GetChild()

    while child_property.IsValid():
        print "-" * indent, child_property.GetName()
        inspect_child_properties(child_property, indent=indent + 1)
        child_property = child_property.GetSibling()


def test_1():
    fbx_manager = fbx.FbxManager.Create()

    settings = fbx.FbxIOSettings.Create(
        fbx_manager, fbx.IOSROOT
    )

    print settings

    root_properties = bfFbxUtils.get_root_properties(settings)

    for property in root_properties:
        print property.GetName()
        inspect_child_properties(property, indent=1)

    # fbx_property = settings.GetFirstProperty()
    #
    # while fbx_property.IsValid():
    #     print fbx_property.GetName()
    #
    #     fbx_property = settings.GetNextProperty(fbx_property)


class Test2(object):
    def __init__(self):
        self.fbx_manager = fbx.FbxManager.Create()

        self.settings = fbx.FbxIOSettings.Create(
            self.fbx_manager, fbx.IOSROOT
        )

        item_manager = bfQtPropertyItems.FbxPropertyTreeItemManager(self.fbx_manager)
        item_manager.set_debug_level(item_manager.LEVELS.mid())
        item_manager.set_fbx_object(self.settings)

        model = bfQtPropertyModels.BfFbxPropertyModel()
        model.set_item_manager(item_manager)

        import_property = self.settings.FindProperty("Import")
        print import_property, import_property.IsValid()

        model.set_root_fbx_property(import_property)

        # self._properties_widget = bfQtPropertyWidgets.BfPropertiesWidget(self.fbx_manager)
        self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeWidget()
        self._properties_widget.set_property_model(model)
        # self._properties_widget.set_fbx_object(self.settings)

        # for child_property in bfUtils.get_child_properties(import_property):
        #     print child_property.GetName()

        self._properties_widget.show()


if __name__ == "__main__":
    # test_1()
    app = QtWidgets.QApplication(sys.argv)
    test = Test2()
    sys.exit(app.exec_())
|
[
"brenainnjordan@googlemail.com"
] |
brenainnjordan@googlemail.com
|
2e94f6588aa5777aec5c16eee7d4ea0ca17dd4ea
|
7882860350c714e6c08368288dab721288b8d9db
|
/2407_조합.py
|
255617b4bcd46fc1eb2cfadaf6a18e1d263ead7a
|
[] |
no_license
|
park-seonju/Algorithm
|
682fca984813a54b92a3f2ab174e4f05a95921a8
|
30e5bcb756e9388693624e8880e57bc92bfda969
|
refs/heads/master
| 2023-08-11T18:23:49.644259
| 2021-09-27T10:07:49
| 2021-09-27T10:07:49
| 388,741,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
# Computes the binomial coefficient C(n, m)
n, m = map(int, input().split())
cnt = 0
top = 1
while cnt < m:  # top = n * (n-1) * ... * (n-m+1)
    cnt += 1
    top *= n
    n -= 1
for i in range(m, 1, -1):  # divide by m!; each intermediate division is exact
    top //= i
print(top)
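# Sanity check (a sketch, assuming Python 3.8+ where math.comb exists):
#   import math
#   assert math.comb(5, 2) == 10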
|
[
"cucu9823@naver.com"
] |
cucu9823@naver.com
|
cb114711c20368e1af3bd2e23487fe45e5301fca
|
47c01a12f776928a041aee309841d5efc1622e18
|
/backend/dailigreen_3612/wsgi.py
|
0497660c86729db74a3c245c4c7812b7635d8e36
|
[] |
no_license
|
crowdbotics-apps/dailigreen-3612
|
2aa28e1197db65be35fe30a6838efe28d1f9890c
|
a38bf4be6eacb40d05cf0eee998bb3f0fdaf451e
|
refs/heads/master
| 2020-05-25T10:02:37.845216
| 2019-05-21T03:04:19
| 2019-05-21T03:04:19
| 187,751,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
"""
WSGI config for dailigreen_3612 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dailigreen_3612.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bd4d17989f1ebd69ef859132dbb57ac3f978ebb0
|
5abb52c3ee859ee5876601025479e9d3214f829f
|
/meiduo/meiduo/apps/contents/migrations/0001_initial.py
|
797d2d8fd8153a0ac6f469e3cd7a43e02e2b0e1a
|
[] |
no_license
|
RapperDream/meiduo-18
|
05ca46628f5575b31d6a0b2115786dd3f0e57f5a
|
d7f5aad879f0e420ac16e577d107236bdec816ee
|
refs/heads/master
| 2020-04-22T02:39:01.099998
| 2019-02-23T14:53:39
| 2019-02-23T14:53:39
| 170,057,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-02-23 07:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('url', models.CharField(max_length=300, verbose_name='内容链接')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='图片')),
('text', models.TextField(blank=True, null=True, verbose_name='内容')),
('sequence', models.IntegerField(verbose_name='排序')),
('status', models.BooleanField(default=True, verbose_name='是否展示')),
],
options={
'verbose_name_plural': '广告内容',
'db_table': 'tb_content',
'verbose_name': '广告内容',
},
),
migrations.CreateModel(
name='ContentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('key', models.CharField(max_length=50, verbose_name='类别键名')),
],
options={
'verbose_name_plural': '广告内容类别',
'db_table': 'tb_content_category',
'verbose_name': '广告内容类别',
},
),
migrations.AddField(
model_name='content',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contents.ContentCategory', verbose_name='类别'),
),
]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
71fd47eacb61850cec0364459140c654afb663c1
|
d9cc042eacfd65912781aad31a57a2709c1e27e4
|
/platypush/backend/camera/pi.py
|
a595c399c9fe28c4578fef15bb5134b787c138c7
|
[
"MIT"
] |
permissive
|
shineit/platypush
|
914ddb50371cee63669c74f775baf5ae652c9075
|
a67b301cd66235b41bff6bfb3de56b903bf9551d
|
refs/heads/master
| 2020-03-15T04:08:02.349413
| 2018-05-03T00:17:31
| 2018-05-03T00:17:31
| 131,958,375
| 1
| 0
|
MIT
| 2018-05-03T07:31:56
| 2018-05-03T07:31:56
| null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
import logging
import socket
import time
import picamera
from platypush.backend import Backend
class CameraPiBackend(Backend):
def __init__(self, listen_port, x_resolution=640, y_resolution=480,
framerate=24, hflip=False, vflip=False,
sharpness=0, contrast=0, brightness=50,
video_stabilization=False, ISO=0, exposure_compensation=0,
exposure_mode='auto', meter_mode='average', awb_mode='auto',
image_effect='none', color_effects=None, rotation=0,
crop=(0.0, 0.0, 1.0, 1.0), **kwargs):
""" See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options """
super().__init__(**kwargs)
self.listen_port = listen_port
self.server_socket = socket.socket()
self.server_socket.bind(('0.0.0.0', self.listen_port))
self.server_socket.listen(0)
self.camera = picamera.PiCamera()
self.camera.resolution = (x_resolution, y_resolution)
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.camera.sharpness = sharpness
self.camera.contrast = contrast
self.camera.brightness = brightness
self.camera.video_stabilization = video_stabilization
self.camera.ISO = ISO
self.camera.exposure_compensation = exposure_compensation
self.camera.exposure_mode = exposure_mode
self.camera.meter_mode = meter_mode
self.camera.awb_mode = awb_mode
self.camera.image_effect = image_effect
self.camera.color_effects = color_effects
self.camera.rotation = rotation
self.camera.crop = crop
logging.info('Initialized Pi camera backend')
def send_message(self, msg):
pass
def run(self):
super().run()
while True:
connection = self.server_socket.accept()[0].makefile('wb')
try:
self.camera.start_recording(connection, format='h264')
while True:
self.camera.wait_recording(60)
except ConnectionError as e:
pass
finally:
try:
self.camera.stop_recording()
connection.close()
except:
pass
# vim:sw=4:ts=4:et:
|
[
"blacklight86@gmail.com"
] |
blacklight86@gmail.com
|
9fbe0840c86114c15a28c5123e2a28710abe4740
|
2c32cf726e111b8625265c458feeaea436652e83
|
/pramp-condility-3month/random_O1.py
|
47221cf8aaec833a485f67d72f55e7c969e9e592
|
[] |
no_license
|
minhthe/practice-algorithms-and-data-structures
|
6fa3bf98e8e2fe98f4e32419fb797b1df4400364
|
488a82dd3a0c797859a6c9e1195d6d579d676073
|
refs/heads/master
| 2021-05-16T23:01:20.026475
| 2020-09-23T04:17:13
| 2020-09-23T04:17:13
| 250,505,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
'''https://leetcode.com/problems/insert-delete-getrandom-o1'''
import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.arr = []
self.mp = {}
self.cnt = 0
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.mp:
return False
else:
self.mp[val] = self.cnt
self.arr.append(val)
self.cnt +=1
# print(self.arr, self.mp, self.cnt )
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
        if val in self.mp:
            # swap val with the last element, then pop the tail, for O(1) removal
            idx = self.mp[val]
            tmp = self.arr[-1]
            self.arr[idx] = tmp
            self.mp[tmp] = idx
            del self.mp[val]
            self.arr.pop()
            self.cnt -= 1
            return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return self.arr[ random.randint(0, self.cnt-1) ]
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
|
[
"minhthe.007@gmail.com"
] |
minhthe.007@gmail.com
|
7ec4ecdfa91bbe1ffc8d0703684dbf39db4898ca
|
535503dc18c38b92f8520289da5b4fa42b0a722a
|
/code/exp_control/sequencer/sequences/obsolete/general_pulses2.py
|
dc379fb427becc1d61c5d12714b867915e729148
|
[] |
no_license
|
jamesbate/phd_code
|
fbbbf7657c428a0a1f18768edca1dfce56801cc1
|
7e71d7f041835497fb421dd741c644ab5c8e3805
|
refs/heads/master
| 2023-05-07T10:31:22.168217
| 2021-05-26T15:00:40
| 2021-05-26T15:00:40
| 371,073,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
# 729 pulse experiment
# 16.2.06 TK
<VARIABLES>
# test-tom=self.set_variable("float","test-tom",9000.123456,0.01,2e7)
phase1=self.set_variable("float","phase1",0,-10,10)
phase2=self.set_variable("float","phase2",0,-10,10)
gl_cam_time=self.set_variable("float","gl_cam_time",5000.000000,0,2e7)
det_time=self.set_variable("float","det_time",5000.000000,0.01,2e7)
doppler_length=self.set_variable("float","doppler_length",3000.000000,1,2e5)
pump_length=self.set_variable("float","pump_length",100.000000,1,2e5)
pump_length_729=self.set_variable("float","pump_length_729",500.000000,1,2e5)
pulse_3=self.set_variable("bool","pulse_3",0)
pulse_4=self.set_variable("bool","pulse_4",0)
</VARIABLES>
# The save form specifies which data will be saved and how, when a scan is performed.
# If this is omitted a standard form is used
<SAVE FORM>
.dat ; %1.6f
meanExc; 0; %1.3f
parity; 0; %1.3f
pn; 1;elements; (0:N); %1.3f
StartTime; 0; %1.3f
StopTime; 0; %1.3f
PMTcounts; 1;elements; (0:N); %1.0f
</SAVE FORM>
<TRANSITIONS>
t_carr={1 : 1.0, 2: 1.0, 3 : 1.0}
#Carrier=transition(transition_name="Carrier",t_rabi=t_carr,
# frequency=freq,sweeprange=sspan,amplitude=power_dB,slope_type="blackman",
# slope_duration=slope_dur,amplitude2=-1,frequency2=0,port=port_nr)
#set_transition(Carrier,"729")
</TRANSITIONS>
# Here the sequence can override program parameters. Syntax follows from "Write Token to Params.vi"
<PARAMS OVERRIDE>
AcquisitionMode excitation
</PARAMS OVERRIDE>
<SEQUENCE>
incl.DopplerCooling40(doppler_length,repump_length)
if opt_pumping : incl.OpticalPumping40(pump_length)
if sb_cool : incl.SBCooling40(SBCool_time,SBCool_reps)
if opt_pump_729 : incl.OpticalPumping40_729(pump_length_729)
else : seq_wait(pump_length_729)
seq_wait(700)
if pulse_1 : rf_729(1,0.5,0,"carrier1")
if pulse_2 : rf_729(1,1,0,"gate")
seq_wait(wait_time)
if pulse_3 : rf_729(1,1,phase2*math.pi,"gate")
if pulse_4 : rf_729(1,0.5,phase1*math.pi,"carrier1")
incl.PMTDetection(det_time,gl_cam_time)
</SEQUENCE>
<AUTHORED BY LABVIEW>
3
</AUTHORED BY LABVIEW>
|
[
"james.bate@oriel.ox.ac.uk"
] |
james.bate@oriel.ox.ac.uk
|
111e59e47e462a20ed1faf6392b577cdf83b5911
|
df8466745f40b2966c69744a78dce62e46e69503
|
/AULA7/exercicios/portas_006.py
|
b8e84668c02f01686b5f4f59a256b9507ef0ebb1
|
[] |
no_license
|
sandromelobrazil/YELLOW_aula1
|
42f0e8ffa673a5f95d6ee6330ef1d2b894339a14
|
c078fbe235979f365f72c6f160462b394a44765c
|
refs/heads/master
| 2023-02-05T00:52:43.719881
| 2021-08-10T16:00:03
| 2021-08-10T16:00:03
| 300,909,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
#!/usr/bin/python
portlist = set()
porteste1 = [ '11-443', 'iE100', 'XYZ','1000', '30', '111', '11', '11', '11', '25', '22', '21', '22', '22', 'Zzz' ]
def testaporta(_port):
if _port.isdigit():
portlist.add(int(_port))
elif '-' in _port:
portainicial, portafinal = _port.split('-')
for _rangeport in range(int(portainicial), int(portafinal)+1):
portlist.add(int(_rangeport))
else:
pass
for _port in porteste1:
testaporta(_port)
print('=' * 50)
print(portlist)
print('=' * 50)
print('=' * 50)
print('=' * 50)
portlist2 = [ str(port) for port in portlist ]
listport = ','.join(portlist2)
print(listport)
print('=' * 50)
print('=' * 50)
|
[
"sandromelo.brazil@gmail.com"
] |
sandromelo.brazil@gmail.com
|
11c7d56d8a24de673cf13849d9d7c1be58c71029
|
73b158f51285300c1d3456b7af9163939ee206f2
|
/DevOps/sprint03/t01_clear_words/clear_words.py
|
edc4ac2b62b175289d0754c33a8097cc31331b4f
|
[] |
no_license
|
nnocturnnn/DevOps
|
2e332b3552a5b294b36d2af7de854aa18f2da46f
|
173c75938e65be8fbbb5c02c3d655d09df9a2931
|
refs/heads/master
| 2023-06-11T07:21:14.097930
| 2021-06-30T13:58:15
| 2021-06-30T13:58:15
| 352,070,911
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
import re
def re_sub_clean(string):
    return re.sub(r"[ ?!.:;,-]", "", string)
def clear_words(s):
return list(map(re_sub_clean, s.split()))
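# Example (sketch):
#   clear_words("Hello, world!")  # -> ['Hello', 'world']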
|
[
"vikchehovich@gmail.com"
] |
vikchehovich@gmail.com
|
4d943666d27082a916fe19297f81966b1985dcc9
|
72dc7d124cdac8f2dcab3f72e95e9a646154a6a0
|
/byceps/services/shop/order/models/order_item.py
|
bdf7f5515c1aaaa9557530f6c771a730c2c0e82d
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
m-ober/byceps
|
e6569802ee76e8d81b892f1f547881010359e416
|
4d0d43446f3f86a7888ed55395bc2aba58eb52d5
|
refs/heads/master
| 2020-11-30T23:31:33.944870
| 2020-02-12T23:53:55
| 2020-02-12T23:56:04
| 40,315,983
| 0
| 0
| null | 2015-08-06T16:41:36
| 2015-08-06T16:41:36
| null |
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
"""
byceps.services.shop.order.models.order_item
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from decimal import Decimal
from .....database import db, generate_uuid
from ...article.models.article import Article
from ...article.transfer.models import ArticleNumber
from ..transfer.models import OrderItem as OrderItemTransferObject
from .order import Order
class OrderItem(db.Model):
"""An item that belongs to an order."""
__tablename__ = 'shop_order_items'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
order_number = db.Column(db.UnicodeText, db.ForeignKey('shop_orders.order_number'), index=True, nullable=False)
order = db.relationship(Order, backref='items')
article_number = db.Column(db.UnicodeText, db.ForeignKey('shop_articles.item_number'), index=True, nullable=False)
article = db.relationship(Article, backref='order_items')
description = db.Column(db.UnicodeText, nullable=False)
unit_price = db.Column(db.Numeric(6, 2), nullable=False)
tax_rate = db.Column(db.Numeric(3, 3), nullable=False)
quantity = db.Column(db.Integer, db.CheckConstraint('quantity > 0'), nullable=False)
line_amount = db.Column(db.Numeric(7, 2), nullable=False)
shipping_required = db.Column(db.Boolean, nullable=False)
def __init__(
self,
order: Order,
article_number: ArticleNumber,
description: str,
unit_price: Decimal,
tax_rate: Decimal,
quantity: int,
line_amount: Decimal,
shipping_required: bool,
) -> None:
# Require order instance rather than order number as argument
# because order items are created together with the order – and
# until the order is created, there is no order number assigned.
self.order = order
self.article_number = article_number
self.description = description
self.unit_price = unit_price
self.tax_rate = tax_rate
self.quantity = quantity
self.line_amount = line_amount
self.shipping_required = shipping_required
def to_transfer_object(self) -> OrderItemTransferObject:
return OrderItemTransferObject(
self.order_number,
self.article_number,
self.description,
self.unit_price,
self.tax_rate,
self.quantity,
self.line_amount,
)
|
[
"homework@nwsnet.de"
] |
homework@nwsnet.de
|
bcbfda9b43d7eb4d22855efabe65b5b75b4b22a1
|
1dcea2a511f14a43701994f6a7785afd21a20d74
|
/Algorithm/389_FindTheDifference.py
|
429149c653494ecf639efcc4a8d316ffea12735d
|
[] |
no_license
|
lingtianwan/Leetcode2
|
66031e256a2928c6197516f83f14748c52e91b8c
|
80a604cc09d5d2d62dd05157d8b829de675e4404
|
refs/heads/master
| 2021-01-13T11:17:18.238465
| 2017-02-09T01:43:38
| 2017-02-09T01:43:38
| 81,395,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Given two strings s and t which consist of only lowercase letters.
#
# String t is generated by randomly shuffling string s and then adding one more letter at a random position.
#
# Find the letter that was added in t.
#
# Example:
#
# Input:
# s = "abcd"
# t = "abcde"
#
# Output:
# e
#
# Explanation:
# 'e' is the letter that was added.
import collections


class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
s_map = collections.Counter(s)
t_map = collections.Counter(t)
for key, val in t_map.items():
if s_map.get(key, 0) != val:
return key
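# Example driver (sketch; relies on the `collections` import above):
#   Solution().findTheDifference("abcd", "abcde")  # -> 'e'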
|
[
"lingtian.wan@gmail.com"
] |
lingtian.wan@gmail.com
|
33a4fb3e793637a89656c533d9a60bdc7161a7de
|
0f1849a97b00fefef26756884d1410df2bd3e484
|
/app/color.py
|
ea0f4e4ffb55d6867d91250f186479d3bd4248f2
|
[
"Apache-2.0"
] |
permissive
|
yumioi/ci_edit
|
62170f8e429f7183cea4f122112c790e3851b210
|
5af80d643e7b16e5e3270771bdbc6b322255d460
|
refs/heads/master
| 2020-04-09T06:16:07.197122
| 2018-11-28T18:55:10
| 2018-11-28T18:55:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import app.prefs
colors = 256
cache__ = {}
def reset():
global cache__
cache__ = {}
def get(colorType, delta=0):
global cache__
if type(colorType) == type(0):
colorIndex = colorType
else:
colorIndex = app.prefs.color[colorType]
colorIndex = min(colors - 1, colorIndex + delta)
color = cache__.get(colorIndex) or curses.color_pair(colorIndex)
cache__[colorIndex] = color
if colorType in ('error', 'misspelling'):
color |= curses.A_BOLD | curses.A_REVERSE
return color
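# Usage sketch (assumes curses colors have been initialized and app.prefs.color
# knows the requested colorType; `stdscr` is a hypothetical curses window):
#   attr = get('error')  # bold + reverse attribute for errors
#   stdscr.addstr(0, 0, 'oops', attr)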
|
[
"dschuyler@chromium.org"
] |
dschuyler@chromium.org
|
6487f8748c0f588a0cf9da0085b7944535d7e7f5
|
0d298b3aff627883b2eed85516bed7e61f174d74
|
/bin/delete_downstream.py
|
1754ee3365c81b8aba8b74ef4d7d8a1bd33065c9
|
[
"BSD-3-Clause"
] |
permissive
|
ellongley/TXPipe
|
be05bec21181a0d1e13cb2f3dcc2f55633a62369
|
785f93e876a00c528379b1bbacbbf5c42585b789
|
refs/heads/master
| 2023-08-21T13:53:50.732723
| 2021-10-04T14:28:06
| 2021-10-04T14:28:06
| 416,102,676
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
"""
This script prints out the commands to delete all files generated by a pipeline,
downstream of a specified stage.
If one stage was wrong and you need to re-run everything it affected, this script
will print out the commands to delete the relevant files so that re-running the pipeline
with resume=True will re-run the correct stages.
"""
import sys
sys.path.append('.')
import ceci
import txpipe
import yaml
import collections
import os
# start from a config file and a stage to delete
config = yaml.safe_load(open(sys.argv[1]))
stage_to_delete = sys.argv[2]
# get the stages we need
stage_names = [s['name'] for s in config['stages']]
pipeline = ceci.Pipeline(config['stages'], None)
stages = [ceci.PipelineStage.get_stage(stage_name) for stage_name in stage_names]
# build the mapping tag => stages depending on that tag
dependencies = collections.defaultdict(list)
for stage in stages:
for tag in stage.input_tags():
dependencies[tag].append(stage)
# initialize with deleting one stage and the tags it makes
tags_to_delete = ceci.PipelineStage.get_stage(stage_to_delete).output_tags()
stages_to_delete = {ceci.PipelineStage.get_stage(stage_to_delete)}
# loop through nstage times (the maximum it could be)
for i in range(len(stage_names)):
# take all tags we currently know we have to delete
for tag in tags_to_delete[:]:
# find out which stages to clear because they need
# this tag which we are deleting
deps = set(dependencies[tag])
for s in stages:
if s in deps:
# if we need to delete this stage,
# add its outputs to the tags to delete
tags_to_delete += s.output_tags()
# and it to the stages to delete
stages_to_delete.add(s)
tags_to_delete = list(set(tags_to_delete))
# now at the end we delete all tags output by stage to delete
for s in stages_to_delete:
for f in pipeline.find_outputs(s, config).values():
print(f"rm -f {f}")
|
[
"joezuntz@googlemail.com"
] |
joezuntz@googlemail.com
|
b642949b0d7101af9212e049ba6f11ca4f2ae132
|
08f087f8d07aac2f93f5adccc48bfa7172e7ae6d
|
/trash/servers_action.py
|
a5b17eb341b9844e549ba21b6bf4cbc71a54c880
|
[] |
no_license
|
bsdpunk/trash
|
78a49e2255dbffb212e227ea6fe1d5fbbbf60158
|
2a1f0f1eb4456d9c5ca418a34e94fa61a88f00b6
|
refs/heads/master
| 2020-04-12T09:34:35.638883
| 2017-01-05T02:53:26
| 2017-01-05T02:53:26
| 64,771,900
| 8
| 2
| null | 2016-09-02T19:52:41
| 2016-08-02T16:05:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,432
|
py
|
import sys
import requests
import json
import re
from pprint import pprint
def linode_list(api_key):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.list"
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_list_ip(api_key, argument=0):
    headers = {'content-type': 'application/json'}
    ip_addy = re.compile('(\d+|\d)\.(\d+|\d)\.(\d+|\d)\.(\d+|\d)')
    lin_name = re.compile('(\d+)')
    #print(argument)
    if(argument == 0):
        endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.ip.list"
        r = requests.get(endpoint, headers=headers)
        json_data = json.loads(r.text)
    elif re.match(ip_addy, argument) is not None:
        json_data = "not implemented yet"
    elif re.match(lin_name, argument) is not None:
        p = re.match(lin_name, argument)
        lin_id = p.group(1)
        #print(lin_id)
        endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.ip.list&LinodeID=" + lin_id
        r = requests.get(endpoint, headers=headers)
        json_data = json.loads(r.text)
        #pprint(json_data)
        json_data = json_data["DATA"][0]["IPADDRESS"]
    else:
        json_data = "Invalid"
    return(json_data)
def linode_create(api_key, dc_id, plan_id, pay_term_id=0):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.create&DatacenterID="+ dc_id +"&PlanID=" +plan_id
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_disk_create(api_key, l_id, size, dst_id=0, root=0, label=0, formatt=0, ro=0):
    headers = {'content-type': 'application/json'}
    # Assumes the Linode v3 "linode.disk.create" action and its
    # LinodeID / Label / Type / Size parameters.
    endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.create&LinodeID=" + l_id + "&Label=" + str(label) + "&Type=" + str(formatt) + "&Size=" + str(size)
    r = requests.get(endpoint, headers=headers)
    json_data = json.loads(r.text)
    return(json_data)
def linode_disk_dist(api_key, l_id, dst_id, label, size, root, ssh_key=0):
headers = {'content-type': 'application/json'}
if ssh_key == 0:
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&LinodeID="+ l_id +"&DistributionID=" +dst_id+"&Label="+label+"&Size="+size+"&rootPass="+root
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
else:
json_data = "Invalid"
return(json_data)
#def linode_config_create(api_key, l_id, k_id, label, size, root, ssh_key=0):
# headers = {'content-type': 'application/json'}
# if ssh_key == 0:
# endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&LinodeID="+ l_id +"&DistributionID=" +dst_id+"&Label="+label+"&Size="+size+"&rootPass="+root
# r = requests.get(endpoint, headers=headers)
# json_data = json.loads(r.text)
# else:
# json_data = "Invalid"
# return(json_data)
#def linode_disk_image(api_key, i_id, l_id, label, size, root, ssh_key=0):
# headers = {'content-type': 'application/json'}
# if ssh_key == 0:
# endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&i_id="+i_id+"&LinodeID="+ l_id+"&Label="+label+"&Size="+size+"&rootPass="+root
#
# r = requests.get(endpoint, headers=headers)
# json_data = json.loads(r.text)
# else:
# json_data = "Invalid"
# return(json_data)
def list_images(api_key):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=image.list"
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_shutdown(api_key, numeric_lin_id):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.shutdown&LinodeID="+ numeric_lin_id
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
|
[
"bsdpunk@gmail.com"
] |
bsdpunk@gmail.com
|
b696eccd4a16cc676cb7faa3e3ac14ad5a53df25
|
d2da19b86a134c4213311af568c7166bd10fbc8a
|
/OGBL_Collab/utils/logger.py
|
3daed2535eae606a66945ac26b1f14d17234d746
|
[
"MIT"
] |
permissive
|
x-zho14/Unified-LTH-GNN
|
bc96f89e6bdff18c45cc050de2cbdee8d425fcbf
|
edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
|
refs/heads/main
| 2023-07-19T22:06:57.653044
| 2021-08-22T03:44:53
| 2021-08-22T03:44:53
| 398,705,892
| 0
| 0
|
MIT
| 2021-08-22T03:33:27
| 2021-08-22T03:33:27
| null |
UTF-8
|
Python
| false
| false
| 891
|
py
|
import os
import shutil
import csv
def save_best_result(list_of_dict, file_name, dir_path='best_result'):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
print("Directory ", dir_path, " is created.")
csv_file_name = '{}/{}.csv'.format(dir_path, file_name)
with open(csv_file_name, 'a+') as csv_file:
csv_writer = csv.writer(csv_file)
for _ in range(len(list_of_dict)):
csv_writer.writerow(list_of_dict[_].values())
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
# if scripts_to_save is not None:
# os.mkdir(os.path.join(path, 'scripts'))
# for script in scripts_to_save:
# dst_file = os.path.join(path, 'scripts', os.path.basename(script))
# shutil.copyfile(script, dst_file)
|
[
"wiwjp619@gmail.com"
] |
wiwjp619@gmail.com
|
ce27d4f1720ab3ca03e03f54a6859697aa0b7d55
|
8cd098de61bc569e4247d15ca25bfbd0fb9ef793
|
/01_Jump_to_Python/Chap04/149.py
|
a55e73979cb20208806cdd471eb9977af696e106
|
[] |
no_license
|
gaeunPark/Bigdata
|
67b8df7a4988f130ed7b45750c5447cf0394c49b
|
31388b2ac3a28306caae6d299bb5090aef849403
|
refs/heads/master
| 2020-03-15T09:27:23.537060
| 2018-11-22T02:18:45
| 2018-11-22T02:18:45
| 132,075,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
def sum_and_num(a,b):
return a+b, a*b
result = sum_and_num(3,4)
print(result)
sum, mul = sum_and_num(3, 4)  # note: "sum" shadows the built-in sum() here
print("%d %d" %(sum, mul))
|
[
"USER@test.com"
] |
USER@test.com
|
c46a1fd73901ca7b1756224e155dcb5e1ee69bf9
|
1625edfe28b4b0979fd32b4a3c5e55249a993fd5
|
/baekjoon2953.py
|
dec2a6a3720be7c3939a31f421f2c940a1e4d1cc
|
[] |
no_license
|
beOk91/baekjoon2
|
b8bf504c506c6278899d4107ecfe51974ef13f5e
|
39569f8effb8e32405a7d74d98bdabcab783ec56
|
refs/heads/master
| 2023-05-11T20:11:19.015113
| 2020-09-14T23:58:49
| 2020-09-14T23:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
arr2 = []
for i in range(5):
    arr = list(map(int, input().strip().split()))
    arr2.append(sum(arr))
print(arr2.index(max(arr2)) + 1, max(arr2))
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
f8edeab01ab18d1ec01ca83f1c589413af276fb3
|
2abefb9b5df4a5fdd782ac43cd1f6e94198fe43a
|
/Misc/HoWdArEyOu.py
|
b09e270fc5a17a017c97dade02711916257adef0
|
[] |
no_license
|
jb1361/Class-files-repo
|
e63d339fd9c9add23b78571b2258d6c836c329d4
|
b29b80a93655348067a5146a6c7d2d5186ba184a
|
refs/heads/master
| 2023-01-30T09:38:45.786561
| 2021-06-14T22:42:04
| 2021-06-14T22:42:04
| 92,340,613
| 0
| 0
| null | 2023-01-06T11:20:52
| 2017-05-24T22:05:32
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
word = input()
def howdareyou(s):
ret = ""
i = True # capitalize
for char in s:
if i:
ret += char.upper()
else:
ret += char.lower()
if char != ' ':
i = not i
return ret
print(howdareyou(word))
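# Example (sketch): howdareyou("how dare you") -> "HoW dArE yOu"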
|
[
"justinbutler4@hotmail.com"
] |
justinbutler4@hotmail.com
|
951292454e8c84b2863f62cb932ec16fb27245f0
|
6dc80929dc2ef3dfdbde4f95fece88e68d4aa4aa
|
/catalog/migrations/0004_auto_20190623_2357.py
|
8bf172be2fb269d620f2dd42f01d270a9062bb65
|
[] |
no_license
|
shortnd/local_library
|
87d912cd885919d4f9db73cbef058ed7bccd6d11
|
5a8753131193183c81c448d36064c7481843da4e
|
refs/heads/master
| 2022-02-22T07:15:06.096747
| 2019-06-24T04:02:27
| 2019-06-24T04:02:27
| 190,504,303
| 0
| 0
| null | 2021-06-10T18:11:55
| 2019-06-06T03:00:23
|
Python
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 2.2.2 on 2019-06-24 03:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0003_bookinstance_borrower'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'ordering': ['due_back'], 'permissions': (('can_mark_returned', 'Set book as returned'),)},
),
]
|
[
"ococncol@gmail.com"
] |
ococncol@gmail.com
|
e14b578b7338dca6ead87edcb5371d81b75e155b
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/498245_LRU_and_LFU_cache_decorators/recipe-498245.py
|
9dd9b93cee6c70ce3a39cb4159b952253e391c9e
|
[
"Python-2.0",
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,183
|
py
|
import collections
import functools
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
class Counter(dict):
'Mapping where default values are zero'
def __missing__(self, key):
return 0
def lru_cache(maxsize=100):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
'''
maxqueue = maxsize * 10
def decorating_function(user_function,
len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
cache = {} # mapping of args to results
queue = collections.deque() # order that keys have been used
refcount = Counter() # times each key is in the queue
sentinel = object() # marker for looping around the queue
kwd_mark = object() # separate positional and keyword args
# lookup optimizations (ugly but fast)
queue_append, queue_popleft = queue.append, queue.popleft
queue_appendleft, queue_pop = queue.appendleft, queue.pop
@functools.wraps(user_function)
def wrapper(*args, **kwds):
# cache key records both positional and keyword args
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
# record recent use of this key
queue_append(key)
refcount[key] += 1
# get cache entry or compute if not found
try:
result = cache[key]
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
wrapper.misses += 1
# purge least recently used cache entry
if len(cache) > maxsize:
key = queue_popleft()
refcount[key] -= 1
while refcount[key]:
key = queue_popleft()
refcount[key] -= 1
del cache[key], refcount[key]
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access
if len(queue) > maxqueue:
refcount.clear()
queue_appendleft(sentinel)
for key in ifilterfalse(refcount.__contains__,
iter(queue_pop, sentinel)):
queue_appendleft(key)
refcount[key] = 1
return result
def clear():
cache.clear()
queue.clear()
refcount.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
def lfu_cache(maxsize=100):
'''Least-frequenty-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Least_Frequently_Used
'''
def decorating_function(user_function):
cache = {} # mapping of args to results
use_count = Counter() # times each key has been accessed
kwd_mark = object() # separate positional and keyword args
@functools.wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
use_count[key] += 1
# get cache entry or compute if not found
try:
result = cache[key]
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
wrapper.misses += 1
# purge least frequently used cache entry
if len(cache) > maxsize:
for key, _ in nsmallest(maxsize // 10,
use_count.iteritems(),
key=itemgetter(1)):
del cache[key], use_count[key]
return result
def clear():
cache.clear()
use_count.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
if __name__ == '__main__':
@lru_cache(maxsize=20)
def f(x, y):
return 3*x+y
domain = range(5)
from random import choice
for i in range(1000):
r = f(choice(domain), choice(domain))
print(f.hits, f.misses)
@lfu_cache(maxsize=20)
def f(x, y):
return 3*x+y
domain = range(5)
from random import choice
for i in range(1000):
r = f(choice(domain), choice(domain))
print(f.hits, f.misses)
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
60d9dd78e5b4036bf310616dd0cb6ccda32f0d3c
|
767b5482f3c5b9c2c85575c711e37561f5b8f198
|
/engine/plugins/TrojanCheckScript_yd.py
|
914bad369a45d68fa5f1bc5d0bb50d9fc094b964
|
[] |
no_license
|
zhupite233/scaner
|
8e39c903f295d06195be20067043087ec8baac4f
|
7c29c02bca2247a82bcbb91cc86955cc27998c95
|
refs/heads/master
| 2020-05-18T03:23:03.459222
| 2019-04-15T04:29:10
| 2019-04-15T04:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,165
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb
import MySQLdb.cursors
from engine.engineConfig import *
from engine.engine_utils.common import *
from engine.logger import scanLogger as logger
from urlparse import urlparse
from bs4 import BeautifulSoup
def find_friend_links(content):
friend_link_list = []
soup = BeautifulSoup(content, 'lxml')
friends = soup.find(text=re.compile(u'.*?(友情链接|合作伙伴).*?'))
if not friends:
return []
i = 0
while not friends.find_all('a') and i < 4:
try:
friends = friends.parent
except:
pass
i += 1
for friend in friends.find_all('a'):
friend_link = friend.get('href')
if friend_link:
net_loc = urlparse(friend_link).netloc
            if net_loc and not re.match('^(\d{1,3}\.){3}\d{1,3}(:\d{1,6})?$', net_loc):  # an IP address is not a friend link
friend_link_list.append(net_loc)
return friend_link_list
def run_domain(http, ob):
'''
黑链暗链检测插件
'''
result = []
try:
scheme = ob.get('scheme')
domain = ob.get('domain')
path = ob.get('path')
res, content = http.request('%s://%s%s' % (scheme, domain, path))
friend_link_list = find_friend_links(content)
friend_link_set = set(friend_link_list)
task_id = ob.get('taskId')
        # type = 1 means the domain differs from the site currently being scanned
sql = "SELECT spider_url_other.url FROM spider_url_other WHERE task_id=%s AND TYPE=%s" % (task_id, 1)
db = MySQLdb.connect(SCANER_DB_HOST, SCANER_DB_USER, SCANER_DB_PASSWORD, SCANER_DB_DATABASE, cursorclass=MySQLdb.cursors.DictCursor)
cursor = db.cursor()
cursor.execute(sql)
other_url_list = cursor.fetchall()
if other_url_list:
detail = '检测到外站链接,如果不是友情链接或其他已知来源,则可能是暗链黑链等恶意链接'
for other_url_dict in other_url_list:
other_url = other_url_dict.get('url')
other_domain = urlparse(other_url).netloc
other_domain = other_domain.split(':')[0]
                if other_domain.split('.', 1)[1] == domain.split('.', 1)[1]:  # subdomain of the scanned site
                    continue
                if other_domain not in friend_link_set:  # not a known friend link
result.append(getRecord(ob, other_url, ob['level'], detail, request=other_url, response=''))
except Exception, e:
logger.error("File:TrojanCheckScript_yd.py, run_domain function :%s" % (str(e)))
return result
# result = []
# domain = ob['domain']
# try:
# task_id = ob['taskId']
# # other_urls = db.session.query(SpiderUrlOther.url).filter(SpiderUrlOther.task_id == task_id, SpiderUrlOther.type == 1).all()
# sql = "SELECT spider_url_other.url FROM spider_url_other WHERE task_id=%s AND TYPE=%s" % (task_id, 1)
# db = MySQLdb.connect(SCANER_DB_HOST, SCANER_DB_USER, SCANER_DB_PASSWORD, SCANER_DB_DATABASE, cursorclass=MySQLdb.cursors.DictCursor)
# cursor = db.cursor()
# cursor.execute(sql)
# other_url_list = cursor.fetchmany()
# if other_url_list:
# for other_url_dict in other_url_list:
# other_url = other_url_dict.get('url')
# other_domain = urlparse(other_url).netloc
# other_domain = other_domain.split(':')[0]
# if domain.find(other_domain) == -1 and other_domain.find(domain) == -1 and domain.find(other_domain.split('.', 1)[1]) == -1:
# detail = '检测到外站链接,如果不是友情链接或其他已知来源,则可能是暗链黑链木马等恶意链接'
# # res, content = http.request(other_url.url,"HEAD")
# # request = getRequest(other_url)
# result.append(getRecord(ob, other_url, ob['level'], detail, request=other_url, response=''))
# except Exception, e:
# logger.error("File:TrojanCheckScript_yd.py, run_domain function :%s" % (str(e)))
#
# return result
|
[
"lixiang@yundun.com"
] |
lixiang@yundun.com
|
f639a2910f5084d10f088248ae867f33930c1c51
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/wiColorJ/pyr_Tcrop255_p60_j15/Sob_k23_s001/pyr_0s/L3/step10_a.py
|
89256e72274b6f1679ebbb7044cb26830e0dc4c7
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,983
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)  ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\")  ### split the path so we can find which level kong_model is at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")  ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys  ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 in the middle converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] would strip the "step1x_" prefix; keeping a meaningful name turned out to be fine, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] would strip the "mask_" prefix (added because a Python module name cannot start with a digit); the automatic ordering is acceptable, so it was changed to 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder ONE LEVEL ABOVE result_dir! A nested exp_dir works fine too~
For example, with exp_dir = "6_mask_unet/your_chosen_name", every result_dir lives under:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k23_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order is matched against step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so result_analyze can draw blank placeholder figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (press F5 or run `python step10_b1_exp_obj_load_and_train_and_test.py` with no arguments) so the subprocess branch below is skipped~~~
        ch032_0side.build().run()
        # print('no argument')
        sys.exit()
    ### The code below is used by step10_b_subprocess.py; it is equivalent to running `python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()` from cmd
    eval(sys.argv[1])
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
9ab9ef75853c91fce2d10e650eae0aec3515ce68
|
34792ccd315338b2bd12b4251cc8188967bfaf35
|
/linkdump/migrations/versions/e510de2bd585_.py
|
ac2a3c5327709abe4232c408efd0c3f024090750
|
[] |
no_license
|
puhoy/linkdump
|
a32dec709e10e8dda68c1dfbb326d9177d64dd96
|
ba03ef64c4286910ac1bb15fea8d4c431a738c2f
|
refs/heads/master
| 2022-12-13T20:35:41.913012
| 2020-07-26T12:55:22
| 2020-07-26T12:55:22
| 245,454,321
| 0
| 0
| null | 2022-12-08T03:45:01
| 2020-03-06T15:29:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
"""empty message
Revision ID: e510de2bd585
Revises:
Create Date: 2020-03-03 23:24:51.952624
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e510de2bd585'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('source', sa.String(), nullable=False),
sa.Column('date_added', sa.Date(), nullable=False),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('date_processing_started', sa.DateTime(), nullable=True),
sa.Column('date_processing_finished', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_items')),
sa.UniqueConstraint('source', 'date_added', name='_source_at_date')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=120), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_users')),
sa.UniqueConstraint('email', name=op.f('uq_users_email')),
sa.UniqueConstraint('username', name=op.f('uq_users_username'))
)
op.create_table('bookmarks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.Column('time_added', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['item_id'], ['items.id'], name=op.f('fk_bookmarks_item_id_items')),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_bookmarks_user_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_bookmarks'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('bookmarks')
op.drop_table('users')
op.drop_table('items')
# ### end Alembic commands ###
|
[
"jan@kwoh.de"
] |
jan@kwoh.de
|
b7ba385ad28955eb47603abdc1dda3b37f3712c6
|
b5a29700c3516cf12f837e2284e3844546205d09
|
/plugins/wywwzjj_web_plugin.py
|
dcec14bf5b7494fcb6a82521181cf0d1fd25b8e7
|
[] |
no_license
|
p1g3/Collect-Info-Research
|
f609823486f36460186cfde27f4be7c9c5a058ae
|
e8e7366677a8642c3bcf4b103e43378762e6673c
|
refs/heads/master
| 2020-12-24T03:59:01.190032
| 2020-01-31T06:47:35
| 2020-01-31T06:47:35
| 237,374,792
| 37
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
import asyncio
import feedparser
import ssl
import pymongo
from loguru import logger
import datetime
from dateutil import parser
class wywwzjj_web_plugin:
def __init__(self,loop,collection,lock):
ssl._create_default_https_context = ssl._create_unverified_context
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
self.loop = loop
self.rss = 'https://wywwzjj.top/atom.xml'
self.collection = collection
self.type = 'web'
self.lock = lock
async def return_result(self):
logger.info("{} is running.",self.__class__.__name__)
future = self.loop.run_in_executor(None,feedparser.parse,self.rss)
try:
parse_result = await asyncio.wait_for(future, 30, loop=self.loop)
except:
logger.warning("{} parse time out".format(self.rss))
return
        if 'entries' in parse_result:
entries = parse_result['entries']
format_time = datetime.date.today()
for entrie in entries:
article_time = parser.parse(entrie['updated'])
if (article_time.year == format_time.year) and (article_time.month == format_time.month) and (article_time.day == format_time.day):
add_dict = {'type':self.type,'title':entrie['title'],'link':entrie['link'],'is_send':0}
try:
await self.lock
if self.collection.count_documents({'link':entrie['link']}) < 1:
self.collection.insert_one(add_dict)
logger.info('[Web] {} {}'.format(entrie['title'],entrie['link']))
finally:
self.lock.release()
else:
logger.error('[Error Parse] {}',self.rss)
if __name__ == '__main__':
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.info_collect
collection = db['infos']
lock = asyncio.Lock()
loop = asyncio.get_event_loop()
class_name = wywwzjj_web_plugin(loop,collection,lock)
loop.run_until_complete(class_name.return_result())
|
[
"p1g3cyx@gmail.com"
] |
p1g3cyx@gmail.com
|
7f3012a25338bf5c6082fc3d3a1a539d859a756c
|
8c917dc4810e2dddf7d3902146280a67412c65ea
|
/v_7/GDS/shamil_v3/purchase_report/report/purchase_total.py
|
cef7ba15b5a6a9cdb3f2d74d780ea138dbd3257e
|
[] |
no_license
|
musabahmed/baba
|
d0906e03c1bbd222d3950f521533f3874434b993
|
0b997095c260d58b026440967fea3a202bef7efb
|
refs/heads/master
| 2021-10-09T02:37:32.458269
| 2018-12-20T06:00:00
| 2018-12-20T06:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,820
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2011-2012 NCTR (<http://www.nctr.sd>).
#
##############################################################################
import time
from report import report_sxw
# purchases total report
class purchase_total_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(purchase_total_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'line':self._getdata,
})
def _getdata(self,data):
product=data['form']['product']
if not product:
self.cr.execute("""
select
min(l.id) as id,
min(u.name) as uom_name,
min(l.name) as product_name,
sum(l.product_qty) as quantity,
count(*) as nbr,
(min(l.price_unit)*sum(l.product_qty))::decimal(16,2) as price_total
from purchase_order s
left join purchase_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_uom u on (u.id=l.product_uom)
            where s.state='done' and
(to_char(s.date_approve,'YYYY-mm-dd')>=%s and to_char(s.date_approve,'YYYY-mm-dd')<=%s)
group by
l.product_id
""",(data['form']['from_date'],data['form']['to_date']))
else:
self.cr.execute("""
select
min(l.id) as id,
min(u.name) as uom_name,
min(l.name) as product_name,
sum(l.product_qty) as quantity,
count(*) as nbr,
(min(l.price_unit)*sum(l.product_qty))::decimal(16,2) as price_total
from purchase_order s
left join purchase_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_uom u on (u.id=l.product_uom)
            where l.product_id is not null and s.state='done' and
(to_char(s.date_approve,'YYYY-mm-dd')>=%s and to_char(s.date_approve,'YYYY-mm-dd')<=%s)
and p.id = %s
group by
l.product_id
""",(data['form']['from_date'],data['form']['to_date'],product[0]))
res = self.cr.dictfetchall()
return res
report_sxw.report_sxw('report.purchase_total.report', 'purchase.order', 'addons/purchase_report/report/purchase_total.rml' ,parser=purchase_total_report )
|
[
"bakry@exp-sa.com"
] |
bakry@exp-sa.com
|
71ded94d8ec5f45f496da5eed363bb9ae2d74e09
|
fc00b177802c49cf04dc6a8e430093bc14ae9b53
|
/venv/Lib/site-packages/mypyc/test/test_tuplename.py
|
7f3fd2000d29058fac183cb8157e9c118dc775c8
|
[] |
permissive
|
artisakov/vigilant-journey
|
9c8264d36da5745374a0d08b0b0288a70f978a11
|
4fed9026071a64489d26422ba7cd1a9b9cb05e16
|
refs/heads/master
| 2022-11-16T03:10:06.418221
| 2020-07-16T07:33:06
| 2020-07-16T07:33:06
| 238,490,887
| 0
| 1
|
MIT
| 2020-03-01T10:12:22
| 2020-02-05T16:03:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 974
|
py
|
import unittest
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, int_rprimitive, bool_rprimitive, list_rprimitive,
RInstance, RUnion,
)
from mypyc.ir.class_ir import ClassIR
class TestTupleNames(unittest.TestCase):
def setUp(self) -> None:
self.inst_a = RInstance(ClassIR('A', '__main__'))
self.inst_b = RInstance(ClassIR('B', '__main__'))
def test_names(self) -> None:
assert RTuple([int_rprimitive, int_rprimitive]).unique_id == "T2II"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_a]).unique_id == "T3OOO"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_b]).unique_id == "T3OOO"
assert RTuple([]).unique_id == "T0"
assert RTuple([RTuple([]),
RTuple([int_rprimitive, int_rprimitive])]).unique_id == "T2T0T2II"
assert RTuple([bool_rprimitive,
RUnion([bool_rprimitive, int_rprimitive])]).unique_id == "T2CO"
|
[
"60698561+artisakov@users.noreply.github.com"
] |
60698561+artisakov@users.noreply.github.com
|
ca252f50d847acea12ba55205c8fe4b0e04c4158
|
75a009b47851ebe8879a61d716bb64c0b1fc4a0d
|
/app/config/settings/dev.py
|
2f02da674fdba7c55f4db0d65627fc5a10b2dc4e
|
[] |
no_license
|
smallbee3/Airbnb
|
c55bf97f5ff16105328d44c485f6d2c017c90ea7
|
e8f79c15055c53e51fa2b3be549f9896680f63a7
|
refs/heads/master
| 2020-03-07T13:37:04.668580
| 2018-04-01T22:51:15
| 2018-04-01T22:51:15
| 127,505,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from .base import *
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS += [
'django_extensions',
]
SECRETS_DEV = os.path.join(SECRETS_DIR, 'dev.json')
secrets_dev = json.loads(open(SECRETS_DEV, 'rt').read())
# DATABASES = secrets_dev['DATABASES']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
[
"smallbee3@gmail.com"
] |
smallbee3@gmail.com
|
4dfefd806d30ea59c8d44aa2cec59209bc551910
|
5bb1ae9b9e6592def632b8a95def32b3a2d742d5
|
/headfirst/ch6_sarah.py
|
75490cb66f1cb9f23363d8e7c9377d5feff910a7
|
[] |
no_license
|
fiso0/my_python
|
af1132637a4ad92036ea0a949fa93df6f904b190
|
391def01ecdb97b8e3008235910a596bb5a9b52c
|
refs/heads/master
| 2021-01-17T15:52:36.745999
| 2016-10-29T08:37:51
| 2016-10-29T08:37:51
| 58,641,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
def sanitize(time_string):
if '-' in time_string:
splitter = '-'
elif ':' in time_string:
splitter = ':'
else:
return(time_string)
(mins,secs) = time_string.split(splitter)
return(mins+'.'+secs)
def get_coach_data(file_name):
try:
with open(file_name) as f:
data = f.readline().strip().split(',')
mydict = {'name' : data.pop(0), \
'dob' : data.pop(0), \
'tops' : str(sorted(set([sanitize(t) for t in data]))[0:3])}
return(mydict)
except IOError as ioerr:
print('File error:' + str(ioerr))
return(None)
sarah = get_coach_data('sarah2.txt')
print(sarah['name'] + "'s fastest times are: " + sarah['tops'])
|
[
"fiso0@126.com"
] |
fiso0@126.com
|
2693632938ac97d96d8d251bfd165cd7dca0f66f
|
428ca6903cc085a0ff51d3d0d85e757bed412330
|
/accounts/migrations/0011_auto_20190310_1744.py
|
71f9a82d8c822d70c5f6b8dbda3ed92e671a997e
|
[] |
no_license
|
privalytics/privalytics.io
|
a261603f51bcf7ec5c8946de88bb240ef1e76666
|
6f5121c798656bc6c6993e873ea56e77fa254a1d
|
refs/heads/master
| 2021-08-07T02:47:50.708936
| 2021-06-20T08:55:28
| 2021-06-20T08:55:28
| 171,507,151
| 4
| 5
| null | 2021-06-20T08:55:47
| 2019-02-19T16:17:16
|
CSS
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
# Generated by Django 2.1.7 on 2019-03-10 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0010_auto_20190308_0856'),
('subscriptions', '0001_initial')
]
operations = [
migrations.AlterField(
model_name='subscription',
name='subscription_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='subscriptions.SubscriptionType'),
),
]
|
[
"aquiles@aquicarattino.com"
] |
aquiles@aquicarattino.com
|
b55934894d57fb4858d49c01cf2bdeeff5735d0b
|
2eb6d57b4f97fe2ea2cd6ab78512dd2c7a6e6ecc
|
/chapter5/exercise_2.py
|
52c87f388582138ef431ee7857c65c9bb4397dd3
|
[] |
no_license
|
afcarl/PythonDataStructures
|
4ba98bca168f535dc9c8ed9392ed313592850101
|
a620af0a1e0d707556a8883ecb5b79a6f1df56c7
|
refs/heads/master
| 2020-03-26T06:26:38.637461
| 2014-04-26T15:53:46
| 2014-04-26T15:53:46
| 144,605,553
| 1
| 0
| null | 2018-08-13T16:26:42
| 2018-08-13T16:26:42
| null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
import math
def hypotenuse(a,b):
"""
>>> hypotenuse(3,4)
5.0
>>> hypotenuse(12,5)
13.0
>>> hypotenuse(7,24)
25.0
>>> hypotenuse(9,12)
15.0
"""
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
return math.sqrt(a**2 + b**2)
else:
return "distance function undefined for associated types!!"
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"ericschles@gmail.com"
] |
ericschles@gmail.com
|
56140d86cc58f32f4304d03463614711db1e4edb
|
60cf862c8b63394d244fbf20f004ede014c26687
|
/simple_calculator/Ex2 Simple calculator2.py
|
c4d92a8ea7016fd58b932f34f3023b9ef1ea5f88
|
[] |
no_license
|
bartoszmaleta/2nd-Self-instructed-week-exercises-
|
bda9af5eabf04f8fa07ebba00b5a382140941fb0
|
22c33b6cc047ad0e293b5b1e04647e741502f9d7
|
refs/heads/master
| 2020-08-12T01:33:57.630782
| 2019-10-14T09:08:34
| 2019-10-14T09:08:34
| 214,664,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
first_number = input('Enter a number (or a letter to exit): ')
second_number = input('Enter another number: ')
sign = input('Enter an operation: ')
if sign == '+':
first_number_as_int = int(first_number)
second_number_as_int = int(second_number)
sum = first_number_as_int + second_number_as_int
sum_as_str = str(sum)
print('Result: ' + sum_as_str)
elif sign == '-':
first_number_as_int = int(first_number)
second_number_as_int = int(second_number)
difference = first_number_as_int - second_number_as_int
difference_as_str = str(difference)
print('Result: ' + difference_as_str)
elif sign == '*':
first_number_as_int = int(first_number)
second_number_as_int = int(second_number)
product = first_number_as_int * second_number_as_int
product_as_str = str(product)
print('Result: ' + product_as_str)
elif sign == '/':
    first_number_as_int = int(first_number)
    second_number_as_int = int(second_number)
    quotient = first_number_as_int / second_number_as_int
    quotient_as_str = str(quotient)
    print('Result: ' + quotient_as_str)
else:
    print('Unknown operation: ' + sign)
|
[
"bartosz.maleta@gmail.com"
] |
bartosz.maleta@gmail.com
|
ea66a53f1ec41732f03fa530e7cff39b927d7b1b
|
2da8bcfb9a72e507812a8723e38ad6d030c300f1
|
/check_if_a_string_contains_all_binary_codes_of_size_k_1461.py
|
d908c35cb2d2ea1b12616cc16df50e74de78084a
|
[] |
no_license
|
aditya-doshatti/Leetcode
|
1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634
|
eed20da07896db471ea6582785335e52d4f04f85
|
refs/heads/master
| 2023-04-06T02:18:57.287263
| 2023-03-17T03:08:42
| 2023-03-17T03:08:42
| 218,408,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
'''
1461. Check If a String Contains All Binary Codes of Size K
Medium
Given a binary string s and an integer k.
Return True if every binary code of length k is a substring of s. Otherwise, return False.
Example 1:
Input: s = "00110110", k = 2
Output: true
Explanation: The binary codes of length 2 are "00", "01", "10" and "11". They can all be found as substrings at indices 0, 1, 3 and 2 respectively.
https://leetcode.com/problems/check-if-a-string-contains-all-binary-codes-of-size-k/
'''
class Solution:
def hasAllCodes(self, s: str, k: int) -> bool:
required = 2 ** k
done = set()
for i in range(k, len(s)+1):
temp = s[i-k:i]
if temp not in done:
done.add(temp)
required -=1
if required == 0:
return True
return False
# if len(s) < k:
# return False
# for i in range(2 **k):
# binary = str(bin(i)[2:])
# checkVal = '0'*(k-len(binary)) + binary
# if checkVal in s:
# continue
# else:
# return False
# return True
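# Quick sanity check mirroring the docstring example:
if __name__ == "__main__":
    print(Solution().hasAllCodes("00110110", 2))  # expected: True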
|
[
"aditya.doshatti@sjsu.edu"
] |
aditya.doshatti@sjsu.edu
|
00b88efee486df786fb91d829273704caaab765d
|
071017425dbb9a175c3b4c5e090501e35b31d4f9
|
/docqa/allennlp_custom/modules/similarity_functions/constant_tri.py
|
0aaee645c881037f642bef330bfe274c181a0823
|
[
"Apache-2.0"
] |
permissive
|
debjitpaul/discourse-aware-semantic-self-attention
|
9bf22f3a8dcf61cc85ba56a3cbbc4dd9b371e55d
|
5851c95bbe761c980177b2c3e769c9e716551d5f
|
refs/heads/master
| 2022-04-20T17:05:53.216321
| 2020-04-24T09:24:15
| 2020-04-24T09:24:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,087
|
py
|
import numpy as np
import torch
from allennlp.common import Registrable, Params
from allennlp.modules import SimilarityFunction
from allennlp.nn import Activation
from typing import List
from torch.nn import Parameter
from functools import reduce
@SimilarityFunction.register("constant_tri")
class ConstantTriParams(SimilarityFunction):
"""
This function applies linear transformation for each of the input tensors and takes the sum.
If output_dim is 0, the dimensions of tensor_1_dim and tensor_2_dim of the two input tensors are expected
to be equal and the output_dim is set to be their size. This is used since we might want to automatically infer
the size of the output layer from automatically set values for tensor1 without explicitly knowing the semantic of
the similarity function.
Then the output is `W1x + W2y`` where W1 and W2 are linear transformation matrices.
Parameters
----------
tensor_1_dim : ``int``
The dimension of the first tensor, ``x``, described above. This is ``x.size()[-1]`` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : ``int``
The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_3_dim : ``int``
The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
output_dim : ``int``
The dimension of the output tensor.
activation : ``Activation``, optional (default=linear (i.e. no activation))
An activation function applied after the ``w^T * [x;y] + b`` calculation. Default is no
activation.
"""
def __init__(self,
tensor_1_dim: int,
tensor_2_dim: int,
tensor_3_dim: int,
output_constant: List[float],
):
super().__init__()
output_constant = np.array(output_constant)
self._output_constant = torch.tensor(output_constant, requires_grad=False, dtype=torch.float32)
self._output_dim = self._output_constant.shape[-1]
def forward(self, tensor1:torch.LongTensor, tensor2:torch.LongTensor, tensor3:torch.LongTensor) -> torch.Tensor:
# pylint: disable=arguments-differ
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Transforms both tensor to a target output dimensions and returns a sum tensor with same
number of dimensions, such as ``(batch_size, length, out_dim)``.
"""
tile_size = reduce(lambda x, y: x * y, tensor1.shape[:-1])
res_repr = self._output_constant.unsqueeze(0).repeat(tile_size, 1)
return res_repr
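# A minimal usage sketch (illustrative shapes; assumes torch and allennlp are installed):
if __name__ == "__main__":
    f = ConstantTriParams(tensor_1_dim=4, tensor_2_dim=4, tensor_3_dim=4,
                          output_constant=[0.1, 0.2])
    x = torch.zeros(2, 3, 4)
    print(f(x, x, x).shape)  # torch.Size([6, 2]): the constant tiled over 2 * 3 positions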
|
[
"tbmihailov@gmail.com"
] |
tbmihailov@gmail.com
|
0671af8fd937da608bdee27126acbc05573e7a2b
|
c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6
|
/Fundamentos Python/ecuacion_gauss_2.py
|
7e786edf7e6daa2a8e045011a83ff06ba796512f
|
[] |
no_license
|
mecomontes/Python
|
a0b4a0b69ae33ad3623e908731710563392d1615
|
daba4247cca90c43a979e3e3f292cd7b8951b3d0
|
refs/heads/master
| 2023-05-30T05:24:41.999196
| 2020-03-23T02:30:09
| 2020-03-23T02:30:09
| 249,317,310
| 1
| 0
| null | 2023-05-22T22:42:36
| 2020-03-23T02:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 352
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Sum of the first n natural numbers
# Method proposed by Gauss: works for any number
Numero = float(input("Enter the last number to include in the sum: "))
Sumatoria = ((1 + Numero)/2) * Numero
print("Sum of the first", Numero, "integers:", Sumatoria)
|
[
"1574@holbertonschool.com"
] |
1574@holbertonschool.com
|
2b2530136d5c1176b142bb6e61260cf9c0562804
|
50604d2b98220ea485c1ada1d6e5b7c230a621db
|
/src/python/labbox_ephys/extensions/workspaceview/WorkspaceView/sortingExamples/mountainsort4_example.py
|
c6d31e33e8bec6b8c5f1bbdfe498d57d20bc2d05
|
[] |
no_license
|
stjordanis/labbox-ephys
|
e9f4ca783947be9c6ab4caf19267d75577cbba10
|
563d112346e4a557f1aa04a052d245b07a0e9ce4
|
refs/heads/main
| 2023-08-11T21:18:40.122003
| 2021-07-26T18:35:33
| 2021-07-26T18:35:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
import spikeextractors as se
import numpy as np
import labbox_ephys as le
from labbox_ephys import sorters
import kachery_client as kc
if __name__ == '__main__':
# adjust these values
workspace_uri = '{workspaceUri}'
recording_id = '{recordingId}' # {recordingLabel}
workspace = le.load_workspace(workspace_uri)
le_recording = workspace.get_recording(recording_id)
recording_object = le_recording['recordingObject']
sorting_object = sorters.mountainsort4(
recording_object=recording_object,
num_workers=1
)
sorting = le.LabboxEphysSortingExtractor(sorting_object)
S_id = workspace.add_sorting(
sorting=sorting,
recording_id=recording_id,
label='mountainsort4'
)
|
[
"jeremy.magland@gmail.com"
] |
jeremy.magland@gmail.com
|
7739878c949d613861d03ac303411425ddf611ba
|
4c4b4076f960a1e1d0203cc58621090cc3dc45f3
|
/architect/manager/urls.py
|
376aa0b86346aa1fad7b7d25624b89d0e2854bfb
|
[] |
no_license
|
michaelkuty/architect-api
|
6c62f8a8ada4dc9a62b12ea7b38ae9a70d94290e
|
30ebfdafdbdcff4098bf080f00957949e20a71cf
|
refs/heads/master
| 2021-05-02T16:17:53.016373
| 2018-02-08T22:08:24
| 2018-02-08T22:08:24
| 120,673,129
| 0
| 0
| null | 2018-02-07T21:20:43
| 2018-02-07T21:20:43
| null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
from django.urls import path
from . import views
app_name = 'manager'
urlpatterns = [
path('v1', views.ManagerListView.as_view(),
name='manager_list'),
path('v1/manager-check',
views.ManagerCheckView.as_view(),
name='manager_check'),
path('v1/<manager_name>',
views.ManagerDetailView.as_view(),
name='manager_detail'),
path('v1/<manager_name>/sync',
views.ManagerSyncView.as_view(),
name='manager_sync'),
path('v1/<manager_name>/query/<query_name>',
views.ManagerQueryJSONView.as_view(),
name='manager_query'),
path('v1/<manager_name>/action/<resource_kind>/<resource_action>',
views.ManagerActionView.as_view(),
name='manager_action'),
path('v1/<manager_name>/resource/<resource_uid>',
views.ResourceDetailView.as_view(),
name='resource_detail'),
path('v1/<manager_name>/resource/<resource_uid>/<resource_action>',
views.ResourceActionView.as_view(),
name='resource_action'),
]
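# Illustrative reverse() usage (the 'salt' value is hypothetical):
# reverse('manager:manager_detail', kwargs={'manager_name': 'salt'})  # -> '.../v1/salt'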
|
[
"mail@newt.cz"
] |
mail@newt.cz
|
eea7f772f28657f31564888e9c1bda1ae6088163
|
71bc873c20fbc45bb5e13095d2474496818a23f9
|
/code_video clean/query_url.py
|
682d51ed8e0b8b6c493923ee3aed28198d7f1e10
|
[] |
no_license
|
2877992943/lianyun
|
f31c44ea2e266bae51cae4fa464d1bae368c8d3f
|
a872d6cd1b2eff402bcccb326d33d086816d87af
|
refs/heads/master
| 2021-01-20T16:17:20.226401
| 2017-05-10T06:49:31
| 2017-05-10T06:49:31
| 90,830,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,300
|
py
|
#! -*- coding:utf-8 -*-
import urllib2,time,os,urllib
import pandas as pd
import json
import MySQLdb
import random,cPickle
import csv,re
from MySQLdb import cursors
import numpy as np
import sys,time,os
read_host='rr-2zeg40364h2thw9m6o.mysql.rds.aliyuncs.com'
write_host='rds5943721vp4so4j16ro.mysql.rds.aliyuncs.com'
db_product = MySQLdb.connect(host=read_host,
user='yunker',
passwd="yunker2016EP",
db="xddb",
use_unicode=True,
charset='utf8',
cursorclass=cursors.DictCursor)
## Test database
db = MySQLdb.connect(host='rds0710650me01y6d3ogo.mysql.rds.aliyuncs.com',
user='yunker',
passwd="yunke2016",
db="yunketest",
use_unicode=True,
charset='utf8',
cursorclass=cursors.DictCursor)
def get_request_helloWorld():
full_url='http://localhost:9300'
data=urllib2.urlopen(full_url)
Data=data.read()
print Data
def get_request_testDB():
try:
full_url='http://localhost:9300/?userId=E8B63FF5BB1840EABB9BEB2F9DCCA731'
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def send_request(cid):
try:
full_url='http://yunkecn.com/xdapi/esData/updateAlreadyDownload.action?userAccount=%s&clueId=%s'%(companyCode,cid)
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def requst_crawler(comname):
try:
full_url='http://101.200.139.60:8088/crawler/QiDuoWeiSpider?companyName=%s'%(comname)
print full_url
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def requst_mp3(url,mp3Name):
try:
full_url=url
data=urllib.urlretrieve(url, mp3Name+".mp3")
return data
except:
return None
def query_call_by_att(att, companyCode):
sql1 ="""SELECT %s from crm_t_call_action c left join crm_t_portaluser p
on c.User_Id=p.User_Id
#WHERE c.recordFile LIKE '%s' AND c.record_from = 1
where c.record_from = 1
limit 10000
#where p.User_Company_Id in ('%s')
"""
sql ="""SELECT %s from crm_t_call_action c left join crm_t_portaluser p
on c.User_Id=p.User_Id
WHERE c.recordFile LIKE '%s' AND c.record_from = 1
limit 100000
"""
cur = db_product.cursor()
#cur.execute(sql % (att, "','".join(companyCode)))
#print sql % (att, '%http://yunke-pcfile.oss-cn-beijing%')
cur.execute(sql % (att, '%http://yunke-pcfile.oss-cn-beijing%'))
ret = {}
for r in cur.fetchall():
ret[r['Call_Action_Id']] = r #{id:{record},...}
return ret
def strDuration2second(duration):
duration=re.sub('[\s+]','',duration)
duration=re.sub('[\'\"]',' ',duration)
ll=duration.split(' ')
ll=[int(i) for i in ll if len(i)>=1]
#print ll
minute,second=ll
second+=60*minute
return second
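# e.g. strDuration2second("1' 30\"") == 90  (1 minute 30 seconds)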
if __name__=='__main__':
#### query
attList=['c.Call_Action_Id','c.recordFile','c.Call_Duration','c.Tip_Type','c.Tip_Name']
companyCode=['bjnbm3','jjjeva','vnraea','ffz3ai','invjvi','mmbnn3']
companyCode=companyCode[5:]
ret=query_call_by_att(','.join(attList),companyCode)
print len(ret)
pd.to_pickle(ret,'../data/ret')
##### get second >60 url
ret=pd.read_pickle('../data/ret')
url_second_feedback={}
for id,r in ret.items()[:]:
recordFile=r['recordFile']
duration=r['Call_Duration']
tiptype=r['Tip_Type']
tipname=r['Tip_Name']
second=strDuration2second(duration)
if second<=0:continue
url_second_feedback[recordFile]=[second,str(tiptype)+' '+tipname]
########
print len(url_second_feedback)
pd.to_pickle(url_second_feedback,'../data/url_second')
####
"""
record_from 录音来源 1app 2pc 3yunkecc 4电话盒子
现在yunkecc渠道的录音应该是没有提示音的,电话盒子还没上线,没数据。pc的应该是null,app的录音是安卓的,带提示音
"""
|
[
"2877992943@qq.com"
] |
2877992943@qq.com
|
eaddb2f9ccb5d75e4fedfba55196b323ca4778ac
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/beer-song/58a6d9e0b5c9402b8236f1b6d329c093.py
|
6c7d5a0ab15761501e144b12e4ed2ee1df7d5554
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
def verse(number):
it = lambda n: "it" if n == 1 else "one"
suffix = lambda n: "no more bottles" if n < 1 else \
"1 bottle" if n < 2 else str(n) + " bottles"
if number == 0:
return "No more bottles of beer on the wall, no more bottles of beer.\n" \
"Go to the store and buy some more, 99 bottles of beer on the wall.\n"
verse = "{0} of beer on the wall, {0} of beer.\n" \
"Take {2} down and pass it around, {1} of beer on the wall.\n"
return verse.format(suffix(number), suffix(number-1), it(number))
def song(start, end=0):
return "\n".join([verse(number)
for number in range(start, end - 1, -1)]) + "\n"
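# Quick check: song(1) == verse(1) + "\n" + verse(0) + "\n"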
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
be0ea72d88b880a4422662c70a42cd30c368ac6d
|
12a5b72982291ac7c074210afc2c9dfe2c389709
|
/online_judges/Codeforces/271/B/code.py
|
186d8f4e41e3bac292297f62acafa3f9103c1638
|
[] |
no_license
|
krantirk/Algorithms-and-code-for-competitive-programming.
|
9b8c214758024daa246a1203e8f863fc76cfe847
|
dcf29bf976024a9d1873eadc192ed59d25db968d
|
refs/heads/master
| 2020-09-22T08:35:19.352751
| 2019-05-21T11:56:39
| 2019-05-21T11:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
def bin_search(x, lista):
meio = (len(lista))/2
inicio = 0
fim = len(lista)
while inicio < fim:
if lista[meio] < x: inicio = meio + 1
elif lista[meio] > x: fim = meio
else: return meio
meio = (inicio + fim) / 2
return fim
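# e.g. bin_search(3, [1, 3, 6]) == 1; a value larger than every element returns len(lista)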
n = int(raw_input())
a = map(int,raw_input().split())
m = int(raw_input())
q = map(int,raw_input().split())
aux = []
soma_ac = 0
for e in a:
soma_ac += e
aux.append(soma_ac)
for e in q:
print bin_search(e,aux) + 1
|
[
"mariannelinharesm@gmail.com"
] |
mariannelinharesm@gmail.com
|
3e58da2d6dd384b3d47fb8eba862f3472aae4e64
|
4aa6b7c3a5ae3817007e09ad1289c1e9f7a355c0
|
/剑指offer/superJumpFloor.py
|
a4fe01802a8158a111a6560953ab7e0e854d2e99
|
[] |
no_license
|
liuhuipy/Algorithm-python
|
8f5143e06cf5fa2de2c178e3ba9e5fd12b9bcdf7
|
4e92a0b874f956d1df84d1493f870a5d1f06cde2
|
refs/heads/master
| 2021-06-03T04:19:01.946149
| 2021-01-08T07:44:40
| 2021-01-08T07:44:40
| 99,838,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'liuhui'
'''
A frog can jump up 1 step at a time, 2 steps at a time, ... or even n steps at a time.
Find the total number of distinct ways the frog can jump up a staircase with n steps.
'''
class Solution:
def jumpFloorII(self, number):
# write code here
ans = 1
if number >= 2:
for i in range(number-1):
ans = ans * 2
return ans
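    # Equivalently, ans == 2 ** (number - 1) for number >= 1: each of the first
    # number-1 steps is independently either landed on or skipped.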
if __name__ == '__main__':
solut = Solution()
res = solut.jumpFloorII(10)
print(res)
|
[
"liuhui_py@163.com"
] |
liuhui_py@163.com
|
81464fa1fbd45533b5eca02d118798b2f058e87a
|
54fdaa05078261180cbd7cc94c132527725b189d
|
/test/crab_ElectronPlots_newskim_eraF_70110.py
|
6738548559045b4ba874512b81343c4f103a78bd
|
[] |
no_license
|
psiddire/ZeeAnalyzer
|
e488d3b65108ca923bd459cda41e61f3bd746a5b
|
d94b1fd4f4de19f5cdeaf405e4c0d6629b889888
|
refs/heads/master
| 2021-09-07T12:20:36.554253
| 2018-02-22T18:31:52
| 2018-02-22T18:31:52
| 113,574,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# from https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'plots_Zee_newskim_eraF_70110'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runElectronPlots_newSkim_eraF.py'
config.Data.inputDataset = '/DoubleEG/Run2017F-PromptReco-v1/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1000
config.Data.lumiMask = 'eraF.txt'
config.Data.runRange = '305044-306126'
#config.Data.totalUnits = 1
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'Zee_ElectronPlots_newskim_eraF_70110'
config.Site.storageSite = 'T2_CH_CERN'
#all the configuration parameters https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile
#all crab commands https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3Commands
|
[
"psiddire@nd.edu"
] |
psiddire@nd.edu
|
10b1c6ebba927444ab24d8082e2cd7350b1d7db2
|
6abc9b7e59aa2bc77d16bf0579bc2319db4fa20c
|
/miniverse/dataset/models.py
|
8a5b1134e6babae592efd06546ed04c4a3163e1a
|
[
"MIT"
] |
permissive
|
IQSS/old-miniverse
|
b05823891fafd40a5b12f18894f3dff19404fe37
|
daabcad2fbd6cc29cc05f0091f51157e4fe9e46a
|
refs/heads/master
| 2021-01-21T03:15:54.392430
| 2014-06-27T16:05:55
| 2014-06-27T16:05:55
| 19,803,423
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
import os
from hashlib import md5
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.db import models
from django.template.defaultfilters import slugify
from dataverse.models import Dataverse
from core.models import TimeStampedModel
class DatasetState(models.Model):
"""
Version states for the DatasetVersion object
DRAFT, IN REVIEW, RELEASED, ARCHIVED, DEACCESSIONED
"""
name = models.CharField(max_length=70)
sort_order = models.IntegerField()
slug = models.SlugField(blank=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(DatasetState, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Meta:
ordering = ('sort_order', 'name',)
class Dataset(TimeStampedModel):
"""Expects a .zip file upload
Modify in the future for shapefiles loaded separately
"""
name = models.CharField(max_length=255)
dataverse = models.ForeignKey(Dataverse)
version_state = models.ForeignKey(DatasetState)
version_number = models.IntegerField(default=1)
minor_version_number = models.IntegerField(default=0)
description = models.TextField(blank=True)
md5 = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
#def get_geographic_metadata(self):
#GeographicMetadata.objects.select_related('datafile').all()
def get_dv_api_params(self):
if not self.id:
return {}
p = { 'dataset_id' : self.id\
, 'dataset_version_id' : self.version_number\
, 'dataset_name' : self.name\
, 'dataset_description' : self.description\
}
p.update(self.dataverse.get_dv_api_params())
return p
def save(self, *args, **kwargs):
if not self.id:
super(Dataset, self).save(*args, **kwargs)
self.md5 = md5('%s%s' % (self.id, self.name)).hexdigest()
super(Dataset, self).save(*args, **kwargs)
def natural_key(self):
return '%s-%s' % (self.name, self.dataverse)
def view_dataset_list(self):
lnk = reverse('view_dataset_list', kwargs={})
return '<a href="%s">view dataset</a>' % lnk
view_dataset_list.allow_tags = True
def get_files(self):
return self.datafile_set.all()
def __unicode__(self):
return self.name
class Meta:
ordering = ('name', )
#verbose_name = 'COA File Load Log'
class DataFile(TimeStampedModel):
"""Used for working with a selected shapefile, specifically using the extensions specified in WORLDMAP_MANDATORY_IMPORT_EXTENSIONS
"""
dataset_file = models.FileField(upload_to='datafile/%Y/%m/%d')# max_length=255)
dataset = models.ForeignKey(Dataset)
has_gis_data = models.BooleanField(default=False)
file_checksum = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
#mime_type = models.CharField(max_length=255, blank=True)
md5 = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
def get_geographic_metadata(self):
return self.geographicmetadata_set.filter(links_working=True)
def get_dv_api_params(self, request=None):
"""
Params to respond to API call from GeoConnect
"""
if not self.id:
return {}
# Params from Datafile
p = { 'datafile_id' : self.id\
, 'datafile_label': self.get_basename()\
#, 'has_gis_data' : self.has_gis_data
,'filename' : self.get_basename()\
,'filesize' : self.dataset_file.size\
,'created' : str(self.created)\
,'datafile_type': '--file-type--'\
,'datafile_expected_md5_checksum': self.file_checksum\
}
# Full url to file, if available
if request:
p['datafile_download_url'] = request.build_absolute_uri(self.dataset_file.url)
# Add params from owning Dataset and Dataverse
p.update(self.dataset.get_dv_api_params())
return p
def get_mapit_link(self):
return 'http://127.0.0.1:8000/shapefile/examine-dvn-file/%s/%s' % (self.dataset.id, self.id)
def dataverse_name(self):
return self.dataset.dataverse.name
dataverse_name.allow_tags = True
def get_basename(self):
return os.path.basename(self.dataset_file.name)
def save(self, *args, **kwargs):
if not self.id:
super(DataFile, self).save(*args, **kwargs)
self.md5 = md5('%s%s' % (self.id, self.dataset_file)).hexdigest()
self.file_checksum = self.md5 # fake, need to add real md5
super(DataFile, self).save(*args, **kwargs)
def __unicode__(self):
return self.get_basename()
class Meta:
ordering = ('dataset_file',)
|
[
"raman_prasad@harvard.edu"
] |
raman_prasad@harvard.edu
|
81955d510e5da2c20cc455a0595ac502556be959
|
d402525075cec8d8b1564eadf03024bcc45ada57
|
/map_api/app.py
|
82162354a9dad36de96f5d9e316d705302a8b7bd
|
[] |
no_license
|
DevHerles/mapa_telesalud_api
|
3d5e0589b88e7178d10edeb798e13d1745d33062
|
de361f9857500b594cc16abffed7987777911673
|
refs/heads/master
| 2023-06-26T00:16:05.174034
| 2021-07-20T20:03:51
| 2021-07-20T22:37:15
| 387,901,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,924
|
py
|
"""APP
FastAPI app definition, initialization and definition of routes
"""
# # Installed # #
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi import status as statuscode
# # Package # #
from .models import *
from .exceptions import *
from .repositories import PeopleRepository, SymptomsRepository
from .middlewares import request_handler
from .settings import api_settings as settings
__all__ = ("app", "run")
app = FastAPI(title=settings.title)
app.middleware("http")(request_handler)
origins = [
"http://localhost",
"http://localhost:8080",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/imei/{imei}",
response_model=PersonRead,
description="Get a single person by its unique IMEI",
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _get_person_by_imei(imei: str):
    return PeopleRepository.getByImei(imei)
@app.get("/people",
response_model=PeopleRead,
description="List all the available persons",
tags=["people"])
def _list_people():
# TODO Filters
return PeopleRepository.list()
@app.get("/people/{person_id}",
response_model=PersonRead,
description="Get a single person by its unique ID",
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _get_person(person_id: str):
return PeopleRepository.get(person_id)
@app.post("/people",
description="Create a new person",
response_model=PersonRead,
status_code=statuscode.HTTP_201_CREATED,
responses=get_exception_responses(PersonAlreadyExistsException),
tags=["people"])
def _create_person(create: PersonCreate):
return PeopleRepository.create(create)
@app.patch(
"/people/{person_id}",
description="Update a single person by its unique ID, providing the fields to update",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(PersonNotFoundException,
PersonAlreadyExistsException),
tags=["people"])
def _update_person(person_id: str, update: PersonUpdate):
PeopleRepository.update(person_id, update)
# Symtoms
@app.get("/person-symptoms/{person_id}",
response_model=SymptomsRead,
description="List all the available symptoms",
tags=["symptoms"])
def _list_person_symptoms(person_id: str):
# TODO Filters
print(person_id)
return SymptomsRepository.list(person_id)
@app.get("/symptoms",
response_model=SymptomsRead,
description="List all the available symptoms",
tags=["symptoms"])
def _list_symptoms():
# TODO Filters
return SymptomsRepository.list()
@app.get("/symptoms/{symptom_id}",
response_model=SymptomRead,
description="Get a single symptom by its unique ID",
responses=get_exception_responses(SymptomNotFoundException),
tags=["symptoms"])
def _get_symptom(symptom_id: str):
    return SymptomsRepository.get(symptom_id)
@app.post("/symptoms",
description="Create a new symptom",
response_model=SymptomRead,
status_code=statuscode.HTTP_201_CREATED,
responses=get_exception_responses(SymptomAlreadyExistsException),
tags=["symptoms"])
def _create_symptom(create: SymptomCreate):
return SymptomsRepository.create(create)
@app.patch(
"/symptoms/{symptoms_id}",
description="Update a single symptom by its unique ID, providing the fields to update",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(SymptomNotFoundException,
SymptomAlreadyExistsException),
tags=["symptoms"])
def _update_symptom(symptom_id: str, update: SymptomUpdate):
    SymptomsRepository.update(symptom_id, update)
@app.delete("/symptoms/{symptom_id}",
description="Delete a single symptom by its unique ID",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(SymptomNotFoundException),
tags=["symptoms"])
def _delete_symptom(symptom_id: str):
SymptomsRepository.delete(symptom_id)
@app.delete("/people/{person_id}",
description="Delete a single person by its unique ID",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _delete_person(person_id: str):
PeopleRepository.delete(person_id)
def run():
"""Run the API using Uvicorn"""
uvicorn.run(app,
host=settings.host,
port=settings.port,
log_level=settings.log_level.lower())
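# A quick smoke-test sketch (hypothetical; assumes a reachable database behind the repositories):
# from fastapi.testclient import TestClient
# client = TestClient(app)
# assert client.get("/people").status_code == 200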
|
[
"herles.incalla@gmail.com"
] |
herles.incalla@gmail.com
|
1c4ffdb96e1a0dc36fd40511b6292b9147273e6f
|
d8b93e08fdf884ebed89a38831e26e3753efea72
|
/recalculation.py
|
5aaa0153de63bce8736dbec8ec35e696db6b8732
|
[] |
no_license
|
wolfbolin/Everyclass-Occam
|
68398ece9f5812aa1a0a31946e98181d559cc7ec
|
de347014c4237c88e99207fa05cb7fecb5325d1d
|
refs/heads/master
| 2022-12-14T08:25:30.484056
| 2020-04-27T07:30:26
| 2020-04-27T07:30:26
| 156,236,558
| 2
| 1
| null | 2022-12-08T03:37:45
| 2018-11-05T15:08:01
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
# coding=utf-8
import Util
import Room
import Config
import Course
import Student
import Teacher
import Preprocess
if __name__ == "__main__":
config = Config.load_config("./Config")
    # Recalculate the data for every semester
    for semester in config["schedule"]:
        Util.print_blue("Current semester: %s" % semester)
        # Recalculate the per-object information
Room.update(config, semester, config["schedule"][semester])
Course.update(config, semester, config["schedule"][semester])
Student.update(config, semester, config["schedule"][semester])
Teacher.update(config, semester, config["schedule"][semester])
        # Redo the data preprocessing
Preprocess.lesson_data_oc(config, semester)
Preprocess.search_data(config)
|
[
"wolfbolin@foxmail.com"
] |
wolfbolin@foxmail.com
|
7d77d28bee25dffd3bf67e2bbdf27ef676d79359
|
7d5075610b6358dd9fd57132c8876d533813807c
|
/bigtempo/processors/tests/simple_task_tests.py
|
cfe810b0931571c775d7c144cde8515c3a29a7ed
|
[
"MIT"
] |
permissive
|
rhlobo/bigtempo3
|
152e1f76c94939e0a2d69e264e0b66f24f007731
|
848eda5f07f7e61f7659bac335726c567b41083e
|
refs/heads/main
| 2023-07-05T02:08:15.749854
| 2021-08-11T21:35:38
| 2021-08-11T21:35:38
| 394,079,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
# -*- coding: utf-8 -*-
import unittest
from mockito import mock, when, any as anyx, verify
import bigtempo.processors.simple_task as task
class TestModuleFunctions(unittest.TestCase):
def test_processingtask_factory_should_return_processing_task(self):
instance = mock()
registration = mock()
dependencies = mock()
result = task.factory(instance, registration, dependencies)
assert isinstance(result, task.SimpleDatasourceTask)
class TestSimpleDatasourceTask(unittest.TestCase):
def test_process_should_process_dependencies(self):
instance = mock()
registration = mock()
dependencies = {
'a': mock(task.SimpleDatasourceTask),
'b': mock(task.SimpleDatasourceTask),
'c': mock(task.SimpleDatasourceTask),
}
when(dependencies['a']).process(...).thenReturn(None)
when(dependencies['b']).process(...).thenReturn(None)
when(dependencies['c']).process(...).thenReturn(None)
task.SimpleDatasourceTask(instance, registration, dependencies).process()
verify(dependencies['a'], times=1).process()
verify(dependencies['b'], times=1).process()
verify(dependencies['c'], times=1).process()
verify(instance, times=1).evaluate(anyx(dict))
def test_process_should_receive_dependencies_process_results_as_context(self):
class DatasourceMock():
def evaluate(self, context):
assert isinstance(context, dict)
assert len(context) == 2
assert context['a'] == '1'
assert context['b'] == '2'
dependencies = {
'a': mock(task.SimpleDatasourceTask),
'b': mock(task.SimpleDatasourceTask),
}
when(dependencies['a']).process().thenReturn('1')
when(dependencies['b']).process().thenReturn('2')
task.SimpleDatasourceTask(DatasourceMock(), mock(), dependencies).process()
|
[
"rhlobo+github@gmail.com"
] |
rhlobo+github@gmail.com
|
d03b33dff1747f43c63760a1a272b4708f3aca49
|
c5c56d7c14b4518e53bcde2527b9cc6e53a7e1b9
|
/custom_assert/tennis.py
|
5df7831abac8b0ca8585aea91cdde3d40996ccc3
|
[] |
no_license
|
lancelote/pluralsight-unit-testing-python
|
0402a39e3800eec49f2be529e684d028689d3b47
|
fd5ce8264bc95ed66109c4fa575a177248c3d49a
|
refs/heads/master
| 2021-01-10T08:06:39.605195
| 2016-03-23T08:15:25
| 2016-03-23T08:15:25
| 51,952,064
| 4
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
SCORE_NAMES = ('Love', 'Fifteen', 'Thirty', 'Forty')
def tennis_score(player1, player2):
if player1 == player2:
return '%s-All' % SCORE_NAMES[player1]
else:
return '%s-%s' % (SCORE_NAMES[player1], SCORE_NAMES[player2])
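# e.g. tennis_score(2, 2) == 'Thirty-All'; tennis_score(3, 1) == 'Forty-Fifteen'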
|
[
"karateev.pavel@ya.ru"
] |
karateev.pavel@ya.ru
|
bf9470c3ab98fc4a2ed3b629dd2537ada28fcb7e
|
6cad5c613306789b9bd6387c2e7af02515b1c0ad
|
/django_document/inheritance/models/abstract_base_class.py
|
a42ba4fde37800e90813d8caad6d4f2461dfe01b
|
[] |
no_license
|
Isaccchoi/django_document_project
|
ead5eb7b2e932ae5401d5a3cdb3672d3dfd8f9f5
|
980f25c98f99994e6148af16ed82ae4f12d50870
|
refs/heads/master
| 2021-05-08T06:12:51.261138
| 2017-10-13T05:14:58
| 2017-10-13T05:14:58
| 106,355,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
from django.db import models
__all__ = (
'School',
'CommonInfo',
'Student',
'Teacher',
)
class School(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class CommonInfo(models.Model):
    # When inheriting from an abstract base class, giving the ForeignKey a fixed
    # related_name raises an error: every concrete child model would get the same
    # reverse accessor name. Using %(app_label)s and %(class)s keeps the names
    # unique and removes the collision.
school = models.ForeignKey(School, blank=True, null=True, related_name='%(app_label)s_%(class)s_set')
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class Meta:
abstract = True
class Student(CommonInfo):
home_group = models.CharField(max_length=5)
def __str__(self):
return self.name
class Teacher(CommonInfo):
subject = models.CharField(max_length=30)
def __str__(self):
return self.name
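# Note (illustrative): with app label "inheritance", the reverse accessors become
# school.inheritance_student_set and school.inheritance_teacher_set.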
|
[
"isaccchoi@naver.com"
] |
isaccchoi@naver.com
|
adf1a0335935312323435fd90f890423097b9fad
|
e1fac9437a480e5d1ab9527a28c28f6ee3d7af6e
|
/skyrock/migrations/0014_auto_20190822_0913.py
|
db8d986ed406f1fb4ac05d1783a3beab3225eef2
|
[] |
no_license
|
LuLue7775/Skyrock-Backend-Training
|
3e4c41bcc78fbfabb8a8c1114dd15ca94bc1055e
|
3b6d3d697be1875442eeba5127c8798de1ca6499
|
refs/heads/master
| 2022-06-19T04:26:10.129558
| 2019-09-12T06:41:58
| 2019-09-12T06:41:58
| 207,708,169
| 1
| 1
| null | 2022-05-25T02:24:55
| 2019-09-11T02:45:41
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-08-22 09:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('skyrock', '0013_auto_20190822_0909'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='age',
),
migrations.AddField(
model_name='student',
name='birth_date',
field=models.DateTimeField(blank=True, null=True),
),
]
|
[
"astephanerasmus@gmail.com"
] |
astephanerasmus@gmail.com
|
e2e1180c912988844c5fe890a9b70135731ea883
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pQavNkBbdmvSMmx5x_2.py
|
3582e4157098df37fe07a238039d40ead9325681
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
"""
Create a function that returns the **majority vote** in a list. A majority
vote is an element that occurs **> N/2** times in a list (where **N** is the
length of the list).
### Examples
majority_vote(["A", "A", "B"]) ➞ "A"
majority_vote(["A", "A", "A", "B", "C", "A"]) ➞ "A"
majority_vote(["A", "B", "B", "A", "C", "C"]) ➞ None
### Notes
* The frequency of the majority element must be **strictly greater** than 1/2.
* If there is no majority element, return `None`.
* If the list is empty, return `None`.
"""
def majority_vote(lst):
for i in lst:
if lst.count(i) > len(lst)/2:
return i
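# Quick checks mirroring the examples above:
# majority_vote(["A", "A", "B"]) == "A"
# majority_vote(["A", "B", "B", "A", "C", "C"]) is None  (no majority: falls through)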
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b55d40212c755128e12ddd2efb8d0f9d653c8573
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/sphinx/venv/Lib/site-packages/setuptools/msvc.py
|
467c8192bd17c3c14a577e9b90a44ffaec5dc861
|
[
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:1df5ddb5b9a19b10195da6054f634166b5d3f12771ddf66587cc886e594b199d
size 51126
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
359f5287850dad160771b45c8dccdd1bd9ad768b
|
59381d3e69e4a288cdeb4aeecc2e9c84a28759b2
|
/selvbetjening/sadmin2/tests/ui/dashboard.py
|
56194920cccb403216b896dfd25cd46000b0f2ab
|
[
"MIT"
] |
permissive
|
animekita/selvbetjening
|
88cb75164f8ab0b3341a6ba4dd85d425c601ee4d
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
refs/heads/master
| 2021-07-05T01:10:59.900369
| 2015-09-17T15:15:29
| 2015-09-17T15:15:29
| 4,826,342
| 0
| 1
|
MIT
| 2021-06-10T17:35:22
| 2012-06-28T22:17:15
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
from django.core.urlresolvers import reverse
from common import UITestCase
class DashboardTestCase(UITestCase):
fixtures = ['sdemo-example-site.json']
def test_load(self):
self.login_admin()
# Check that the dashboard is the first page we see after login
self.assertTrue(self.wd.is_text_present('Dashboard'))
|
[
"casper@svenningjensen.dk"
] |
casper@svenningjensen.dk
|
271de39a148eac6bca2cf614057de6f6b38f1002
|
8cf427b0574e8e41e5201cc02c3e736f264a2000
|
/original/yolo3_auto_label/FLIR_ws/build/catkin_generated/order_packages.py
|
34f5c1ed3463d16400524b16c632308592765db4
|
[] |
no_license
|
Lin1225/Data_amplification_all
|
a88561b9cae481561683b32b6cede35461fa0e3e
|
e988990ea8dd53b28ed2da6046ea7aeeda6a01b6
|
refs/heads/master
| 2023-05-06T03:41:00.002786
| 2021-05-28T02:48:38
| 2021-05-28T02:48:38
| 296,257,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/lab/Documents/Data_reforement_code/yolo3_auto_label/FLIR_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/lab/Desktop/NEW_hiwin_control/devel;/opt/ros/kinetic".split(';') if "/home/lab/Desktop/NEW_hiwin_control/devel;/opt/ros/kinetic" != "" else []
|
[
"a3ie9981@gmail.com"
] |
a3ie9981@gmail.com
|
b80b2f3fb6d3d9ea31fe7f2a79ebb0112b4efb2a
|
54ed8b1e0f9d0ae2d67cd86067fd920e82a4d441
|
/litex_boards/platforms/gsd_butterstick.py
|
c1c2849cc2f012f8b4294bd7e6a8f1e8d075b2a5
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
doraemoncito/litex-boards
|
88588260371666f23d17b3709794a020084dd7ff
|
55ea71bd0199226e3e993fb7bd224b9c6d5d10ef
|
refs/heads/master
| 2023-07-18T19:35:07.831531
| 2021-09-01T17:21:16
| 2021-09-01T17:21:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,072
|
py
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Greg Davill <greg.davill@gmail.com>
# Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import OpenOCDJTAGProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io_r1_0 = [
# Clk
("clk30", 0, Pins("B12"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("C13"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("D12"), IOStandard("LVCMOS33")),
("user_led", 2, Pins(" U2"), IOStandard("LVCMOS33")),
("user_led", 3, Pins(" T3"), IOStandard("LVCMOS33")),
("user_led", 4, Pins("D13"), IOStandard("LVCMOS33")),
("user_led", 5, Pins("E13"), IOStandard("LVCMOS33")),
("user_led", 6, Pins("C16"), IOStandard("LVCMOS33")),
("user_led_color", 0, Pins("T1 R1 U1"), IOStandard("LVCMOS33")),
# Buttons
("user_btn", 0, Pins("U16"), IOStandard("SSTL135_I")),
("user_btn", 1, Pins("T17"), IOStandard("SSTL135_I")),
# DDR3 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"G16 E19 E20 F16 F19 E16 F17 L20 "
"M20 E18 G18 D18 H18 C18 D17 G20 "),
IOStandard("SSTL135_I")),
Subsignal("ba", Pins("H16 F20 H20"), IOStandard("SSTL135_I")),
Subsignal("ras_n", Pins("K18"), IOStandard("SSTL135_I")),
Subsignal("cas_n", Pins("J17"), IOStandard("SSTL135_I")),
Subsignal("we_n", Pins("G19"), IOStandard("SSTL135_I")),
Subsignal("cs_n", Pins("J20 J16"), IOStandard("SSTL135_I")),
Subsignal("dm", Pins("U20 L18"), IOStandard("SSTL135_I")),
Subsignal("dq", Pins(
"U19 T18 U18 R20 P18 P19 P20 N20",
"L19 L17 L16 R16 N18 R17 N17 P17"),
IOStandard("SSTL135_I"),
Misc("TERMINATION=75")),
Subsignal("dqs_p", Pins("T19 N16"), IOStandard("SSTL135D_I"),
Misc("TERMINATION=OFF"),
Misc("DIFFRESISTOR=100")),
Subsignal("clk_p", Pins("C20 J19"), IOStandard("SSTL135D_I")),
Subsignal("cke", Pins("F18 J18"), IOStandard("SSTL135_I")),
Subsignal("odt", Pins("K20 H17"), IOStandard("SSTL135_I")),
Subsignal("reset_n", Pins("E17"), IOStandard("SSTL135_I")),
Misc("SLEWRATE=FAST")
),
# RGMII Ethernet
("eth_clocks", 0,
Subsignal("tx", Pins("E15")),
Subsignal("rx", Pins("D11")),
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST"),
),
("eth", 0,
Subsignal("rst_n", Pins("B20")),
Subsignal("mdio", Pins("D16")),
Subsignal("mdc", Pins("A19")),
Subsignal("rx_data", Pins("A16 C17 B17 A17")),
Subsignal("tx_ctl", Pins("D15")),
Subsignal("rx_ctl", Pins("B18")),
Subsignal("tx_data", Pins("C15 B16 A18 B19")),
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors_r1_0 = []
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
default_clk_name = "clk30"
default_clk_period = 1e9/30e6
def __init__(self, revision="1.0", device="85F", toolchain="trellis", **kwargs):
assert revision in ["1.0"]
self.revision = revision
io = {"1.0": _io_r1_0}[revision]
connectors = {"1.0": _connectors_r1_0}[revision]
LatticePlatform.__init__(self, f"LFE5UM5G-{device}-8BG381C", io, connectors, toolchain=toolchain, **kwargs)
def create_programmer(self):
return OpenOCDJTAGProgrammer("openocd_butterstick.cfg")
def do_finalize(self, fragment):
LatticePlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk30", loose=True), 1e9/30e6)
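# A typical usage sketch (illustrative):
# platform = Platform(revision="1.0", device="85F")
# led = platform.request("user_led", 0)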
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
70d99d01332ce75b3a408d0d03328a3232c04d66
|
340b5d95c9dd0cfc3ff487a7bb927944ac40aa51
|
/ch_3 (functions)/005_brote_force.py
|
16d537d9bea57e70fa30621463bddcc0ce8d83bf
|
[] |
no_license
|
sc-199/199
|
200970fb8bf0662755cda9c50599504392b3882f
|
618d8e1136c188276135f9a685a878984c3ea644
|
refs/heads/master
| 2020-05-04T18:33:34.722852
| 2019-03-22T08:13:32
| 2019-03-22T08:13:32
| 167,672,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
'''
Algorithms
====================
The brute force method
(Brute force)
We have a domain and a set of candidate values.
For example: a specific number is given, and for it there is a set of candidate solutions.
After going through certain steps, we try candidates from the solution set one at a time
until one fits the given number. This is called the brute force method.
Clearly, exactly one solution will be found in the set of values.
'''
# ----------------------
def is_simple_number(x):
    ''' Determines whether the number x is prime or composite.
    x is a positive (natural) integer.
    Returns True if the number is prime, False otherwise.
    '''
divizor = 2
while divizor <= x**0.5:
if x % divizor == 0:
return False
divizor += 1
return True
# -----------------------
def factorize_number(x):
    ''' Decomposes the number x into its prime factors.
    Prints the factors to the screen.
    x is a positive (natural) integer.
    '''
divizor = 2
while x > 1:
if x % divizor == 0:
print(divizor, end =' ')
x //= divizor
else:
divizor += 1
# ========================
print("Is simple number:", is_simple_number(19))
print()
factorize_number(1024)
print()
factorize_number(999)
print()
# ------------------------
input("\nDone!..")
|
[
"pm72github@yahoo.com"
] |
pm72github@yahoo.com
|
4919d2fe56f781fbadaf6d51a0998a84e7c5aa3c
|
491d2fd36f2ca26975b3eb302a3d5600415bf7c4
|
/TensorFlow/computer_vision/Resnets/utils/logs/hooks_helper.py
|
89b28ade2ae8aeaccd57914ccadefd223d2479c4
|
[
"Apache-2.0"
] |
permissive
|
kmanchella-habana/Model-References
|
9fa42654d57a867d82f417e9fff668946f9105f6
|
460d3b23ce75f30561e8f725ebcb21298897d163
|
refs/heads/master
| 2023-08-28T17:42:48.866251
| 2021-09-18T21:38:04
| 2021-09-18T21:38:04
| 411,371,667
| 0
| 0
| null | 2021-09-28T17:08:13
| 2021-09-28T17:08:13
| null |
UTF-8
|
Python
| false
| false
| 6,251
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from TensorFlow.computer_vision.Resnets.utils.logs import hooks
from TensorFlow.computer_vision.Resnets.utils.logs import logger
from TensorFlow.computer_vision.Resnets.utils.logs import metric_hook
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate_1',
'cross_entropy_1',
'accuracy_1'])
PROFILE = False
def get_train_hooks(name_list, use_tpu=False, **kwargs):
"""Factory for getting a list of TensorFlow hooks for training by name.
Args:
name_list: a list of strings to name desired hook classes. Allowed:
LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
as keys in HOOKS
use_tpu: Boolean of whether computation occurs on a TPU. This will disable
hooks altogether.
**kwargs: a dictionary of arguments to the hooks.
Returns:
list of instantiated hooks, ready to be used in a classifier.train call.
Raises:
ValueError: if an unrecognized name is passed.
"""
if not name_list:
return []
if use_tpu:
tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
'TPU is specified. No hooks will be used.'
.format(name_list))
return []
train_hooks = [ tf.estimator.ProfilerHook(save_steps=50, output_dir=".") ] if PROFILE else []
for name in name_list:
hook_name = HOOKS.get(name.strip().lower())
if hook_name is None:
raise ValueError('Unrecognized training hook requested: {}'.format(name))
else:
train_hooks.append(hook_name(**kwargs))
return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument
"""Function to get LoggingTensorHook.
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return tf.estimator.LoggingTensorHook(
tensors=tensors_to_log,
every_n_iter=every_n_iter)
def get_profiler_hook(model_dir, save_steps=10, **kwargs): # pylint: disable=unused-argument
"""Function to get ProfilerHook.
Args:
model_dir: The directory to save the profile traces to.
save_steps: `int`, print profile traces every N steps.
**kwargs: a dictionary of arguments to ProfilerHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
"""
return tf.estimator.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
def get_examples_per_second_hook(every_n_steps=100,
batch_size=128,
warm_steps=5,
**kwargs): # pylint: disable=unused-argument
"""Function to get ExamplesPerSecondHook.
Args:
every_n_steps: `int`, print current and average examples per second every
N steps.
batch_size: `int`, total batch size used to calculate examples/second from
global time.
warm_steps: skip this number of steps before logging and running average.
**kwargs: a dictionary of arguments to ExamplesPerSecondHook.
Returns:
    Returns an ExamplesPerSecondHook that logs the current and average number of
    examples processed per second.
"""
return hooks.ExamplesPerSecondHook(
batch_size=batch_size, every_n_steps=every_n_steps,
warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
def get_logging_metric_hook(tensors_to_log=None,
every_n_secs=600,
**kwargs): # pylint: disable=unused-argument
"""Function to get LoggingMetricHook.
Args:
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins.
**kwargs: a dictionary of arguments.
Returns:
Returns a LoggingMetricHook that saves tensor values in a JSON format.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return metric_hook.LoggingMetricHook(
tensors=tensors_to_log,
metric_logger=logger.get_benchmark_logger(),
every_n_secs=every_n_secs)
def get_step_counter_hook(**kwargs):
"""Function to get StepCounterHook."""
del kwargs
return tf.estimator.StepCounterHook()
# A dictionary to map one hook name and its corresponding function
HOOKS = {
'loggingtensorhook': get_logging_tensor_hook,
'profilerhook': get_profiler_hook,
'examplespersecondhook': get_examples_per_second_hook,
'loggingmetrichook': get_logging_metric_hook,
'stepcounterhook': get_step_counter_hook
}
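# A minimal usage sketch (illustrative values; every kwarg is forwarded to each hook):
# train_hooks = get_train_hooks(['LoggingTensorHook', 'ExamplesPerSecondHook'],
#                               batch_size=256, every_n_iter=10)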
|
[
"mpandit@habana.ai"
] |
mpandit@habana.ai
|
adf629c13d106ceb433534d50425de94aee5c25f
|
6c2ddf52efccdfa15ce073da0e74d3352d5108c4
|
/idact/detail/config/validation/validate_bool.py
|
f5e1aea27bb8050ac7dc15bc157df871549484c8
|
[
"MIT"
] |
permissive
|
intdata-bsc/idact
|
4bff248e644629b7ec634b282d790c305fc6703d
|
54cb65a711c145351e205970c27c83e6393cccf5
|
refs/heads/develop
| 2020-05-17T20:33:52.890970
| 2019-12-26T00:03:58
| 2019-12-26T00:03:58
| 183,949,088
| 0
| 0
|
MIT
| 2019-12-26T00:03:59
| 2019-04-28T19:18:58
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
"""This module contains a function for validating a boolean config entry."""
from typing import Optional
from idact.detail.config.validation.validation_error_message import \
validation_error_message
def validate_bool(value, label: Optional[str] = None) -> bool:
"""Returns the parameter, if it's a :class:`bool`, otherwise raises
an exception.
:param value: Object to validate.
:param label: Object label for error message.
:raises TypeError: On wrong type.
"""
if isinstance(value, bool):
return value
raise TypeError(validation_error_message(
label=label,
value=value))
|
[
"matt.garstka@gmail.com"
] |
matt.garstka@gmail.com
|
d102b7d9ef0861fbbb91042145d024d4eedb4eab
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/references/discern/pusher.py
|
43f5bb6436960491846009eeeb34fb37c8416c8e
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,797
|
py
|
import rlkit.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
env_id='SawyerPushNIPSEasy-v0',
grill_variant=dict(
use_discern_sampling=True,
save_video=True,
online_vae_beta=20,
save_video_period=50,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=1000,
num_steps_per_epoch=500,
num_steps_per_eval=500,
min_num_steps_before_training=10000,
batch_size=256,
max_path_length=50,
discount=0.99,
num_updates_per_env_step=2,
# collection_mode='online-parallel',
parallel_env_params=dict(
num_workers=1,
),
reward_scale=1,
),
her_kwargs=dict(
),
twin_sac_kwargs=dict(
train_policy_with_reparameterization=True,
soft_target_tau=1e-3, # 1e-2
policy_update_period=1,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
online_vae_kwargs=dict(
vae_training_schedule=vae_schedules.custom_schedule_2,
oracle_data=False,
vae_save_period=50,
parallel_vae_train=False,
),
diverse_kwargs=dict(
p_replace=.05,
p_add_non_diverse=.05,
goal_buffer_size=1024,
),
),
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
# decoder_distribution='bernoulli',
num_latents_to_sample=10,
),
power=.1,
),
normalize=False,
render=False,
exploration_noise=0.0,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
# generate_uniform_dataset_kwargs=dict(
# init_camera=sawyer_init_camera_zoomed_in,
# env_id='SawyerPushNIPS-v0',
# num_imgs=1000,
# use_cached_dataset=False,
# show=False,
# save_file_prefix='pusher',
# ),
# generate_uniform_dataset_fn=generate_uniform_dataset_reacher,
),
train_vae_variant=dict(
representation_size=4,
beta=20,
num_epochs=0,
dump_skew_debug_plots=False,
decoder_activation='gaussian',
# decoder_activation='sigmoid',
generate_vae_dataset_kwargs=dict(
N=40,
test_p=.9,
use_cached=True,
show=False,
oracle_dataset=True,
oracle_dataset_using_set_to_goal=True,
n_random_steps=100,
non_presampled_goal_img_is_garbage=True,
),
vae_kwargs=dict(
input_channels=3,
architecture=imsize48_default_architecture,
decoder_distribution='gaussian_identity_variance',
),
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
skew_dataset=True,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
version='no force',
)
search_space = {
'grill_variant.algo_kwargs.diverse_kwargs.p_replace': [.01]
}
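# Keys are dotted paths into the variant dict; the sweeper should yield one full variant per combination of the listed values.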
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 2
mode = 'gcp'
exp_prefix = 'steven-door-discern-new-visuals-comp'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_twin_sac_online_vae_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
snapshot_gap=50,
snapshot_mode='gap_and_last',
num_exps_per_instance=3,
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
)
)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
f6495eb9f687d1146f820c4e46e6dcc9b4e71e7e
|
f64ac4dfdf43d5535c30fd7b58f9e80d45b884db
|
/GAN/conditional_gan/cifar100_cgan.py
|
4a6f02fa7a5ab6843cc19a17df9baa77a8436d30
|
[
"Unlicense"
] |
permissive
|
beckybai/generative-models
|
e13fe109d2a589e90cedba35c1c1614eaef12d03
|
dc057e788b8b65ff2dfa7510cb717de6c1bd0b75
|
refs/heads/master
| 2021-01-20T01:50:03.585856
| 2017-07-08T17:43:03
| 2017-07-08T17:43:03
| 89,329,854
| 1
| 0
| null | 2017-04-25T07:18:31
| 2017-04-25T07:18:31
| null |
UTF-8
|
Python
| false
| false
| 4,191
|
py
|
import torch
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
from datetime import datetime
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
import torch.nn as nn
import torch.nn.functional as F
import shutil,sys
import mutil
import model
import data_convert
import owntool
gpu = 2
ngpu = 2
torch.cuda.set_device(gpu)
# mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
cifar_d = data_convert.cifar100()
mb_size = 100 # mini-batch_size
Z_dim = 100
label_dim = 100
X_dim = 32
y_dim = 1
cnt = 0
num = '0'
out_dir = './cifar100_result/basic_{}_{}/'.format(datetime.now(),num)
out_dir = out_dir.replace(" ", "_")
if not os.path.exists(out_dir):
os.makedirs(out_dir)
shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')
sys.stdout = mutil.Logger(out_dir)
in_channel=4
d_num = 3
G = model.G_Net_conv_32(ngpu,in_channel = Z_dim+label_dim, out_channel = 3).cuda()
D = model.D_Net_conv(ngpu,in_channel).cuda()
"""Weight Initialization"""
# def weights_init(m):
# classname = m.__class__.__name__
# if classname.find('Conv') != -1:
# m.weight.data.normal_(0.0, 0.02)
""" ===================== TRAINING ======================== """
d_num = 3
# avd_num = 1/d_num
G_solver = optim.Adam(G.parameters(), lr=1e-4)
D_solver = optim.Adam(D.parameters(), lr=1e-4)
ones_label = Variable(torch.ones(mb_size)).cuda()
zeros_label = Variable(torch.zeros(mb_size)).cuda()
criterion = nn.BCELoss()
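# Standard GAN objective via BCE: D is pushed toward 1 on real and 0 on fake samples, while G is trained toward 1 on its own samples.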
c_label = np.array(range(100))
def reset_d_grad():
D.zero_grad()
def step_d_optim():
D_solver.step()
for it in range(100000):
# Sample data
z = Variable(torch.randn(mb_size, Z_dim)).cuda()
X, c = cifar_d.batch_next(mb_size)
X = Variable(torch.from_numpy(X)).cuda()
# label_m = np.nonzero(c)[1]
c_v = Variable(torch.from_numpy(model.set_label_ve_ma(c,100).astype('float32'))).cuda() # for the condition of the generator
label_m = model.set_label_cifar(c.astype('int'),mb_size,X_dim)
c = Variable(label_m).cuda()
# Discriminator forward-loss-backward-update
D.zero_grad()
G.zero_grad()
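# Conditional generator input: concatenate noise z with the one-hot class vector, then reshape to (mb_size, Z_dim+label_dim, 1, 1) for the conv generator.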
x_g = torch.cat([z,c_v],1).t()
x_g.data.resize_(mb_size, Z_dim+label_dim, 1, 1)
G_sample = G(x_g).detach()
# X.data.resize_(mb_size, 1, X_dim, X_dim)
D_real = D(torch.cat([X,c],1))
D_fake = D(torch.cat([G_sample,c],1))
D_loss_fake = criterion(D_fake, zeros_label)
D_loss_real = criterion(D_real, ones_label)
D_loss_real.backward()
D_loss_fake.backward()
D_solver.step()
# step_d_optim()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# Generator forward-loss-backward-update
z = Variable(torch.randn(mb_size, Z_dim)).cuda()
x_g = torch.cat([z,c_v],1).t()
x_g.data.resize_(mb_size, Z_dim+ label_dim, 1, 1)
G_sample = G(x_g)
DG_loss = D(torch.cat([G_sample, c],1))
G_loss = criterion(DG_loss, ones_label)
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# Print and plot every now and then
if it % 500 == 0:
print('Iter-{}; D_loss_real/fake: {}/{}; G_loss: {}'.format(it, D_loss_real.data.tolist(),
D_loss_fake.data.tolist(), G_loss.data.tolist()))
c = c_label
c_v = Variable(torch.from_numpy(model.set_label_ve_ma(c,100).astype('float32'))).cuda()
x_g = torch.cat([z, c_v], 1).t()
x_g.data.resize_(mb_size, Z_dim + label_dim, 1, 1)
samples = G(x_g)
samples = samples.data.tolist()[:100]
output_path = out_dir + "{}.png".format(it)
owntool.save_color_picture_pixel(samples,output_path)
if it % 10000==0:
torch.save(G.state_dict(),'{}/G_{}.model'.format(out_dir,str(it)))
torch.save(D.state_dict(),'{}/D_{}.model'.format(out_dir,str(it)))
|
[
"kebai0624@gmail.com"
] |
kebai0624@gmail.com
|
3670ebd37aad77b33e512dbf343658f1219a3ec3
|
7ce076dd764fe4b5c7881734f157bc6f77a99ead
|
/tests/providers/google/ads/hooks/test_ads.py
|
a2c1a4f06188115057442b3817c57f2dd2819315
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] |
permissive
|
kaxil/airflow
|
db31c98e23f2e0d869d857484e56a7c58acef231
|
42f1da179db00491610946a0b089dd82269adc74
|
refs/heads/master
| 2023-04-28T04:46:38.478352
| 2020-09-28T20:51:16
| 2020-09-28T20:51:16
| 112,322,392
| 1
| 1
|
Apache-2.0
| 2020-08-27T20:15:22
| 2017-11-28T10:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import pytest
from airflow.providers.google.ads.hooks.ads import GoogleAdsHook
API_VERSION = "api_version"
ADS_CLIENT = {"key": "value"}
SECRET = "secret"
EXTRAS = {
"extra__google_cloud_platform__keyfile_dict": SECRET,
"google_ads_client": ADS_CLIENT,
}
@pytest.fixture()
def mock_hook():
with mock.patch("airflow.hooks.base_hook.BaseHook.get_connection") as conn:
hook = GoogleAdsHook(api_version=API_VERSION)
conn.return_value.extra_dejson = EXTRAS
yield hook
class TestGoogleAdsHook:
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_customer_service(self, mock_client, mock_hook):
mock_hook._get_customer_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("CustomerService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_service(self, mock_client, mock_hook):
mock_hook._get_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("GoogleAdsService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_search(self, mock_client, mock_hook):
service = mock_client.load_from_dict.return_value.get_service.return_value
rows = ["row1", "row2"]
service.search.side_effect = rows
# Here we mock _extract_rows to assert calls and
# avoid additional __iter__ calls
mock_hook._extract_rows = list
query = "QUERY"
client_ids = ["1", "2"]
mock_hook.search(client_ids=client_ids, query="QUERY", page_size=2)
expected_calls = [mock.call(c, query=query, page_size=2) for c in client_ids]
service.search.assert_has_calls(expected_calls)
def test_extract_rows(self, mock_hook):
iterators = [[1, 2, 3], [4, 5, 6]]
assert mock_hook._extract_rows(iterators) == sum(iterators, [])
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_list_accessible_customers(self, mock_client, mock_hook):
accounts = ["a", "b", "c"]
service = mock_client.load_from_dict.return_value.get_service.return_value
service.list_accessible_customers.return_value = mock.MagicMock(resource_names=accounts)
result = mock_hook.list_accessible_customers()
service.list_accessible_customers.assert_called_once_with()
assert accounts == result
|
[
"noreply@github.com"
] |
kaxil.noreply@github.com
|
5d9a50036af925c52e137946772a68b72ec1bfc2
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/vitrage-1.5.2/vitrage/datasources/neutron/port/__init__.py
|
ba3d53285a9c3d37c9cc95bffce2f122fa6717ec
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import UpdateMethod
NEUTRON_PORT_DATASOURCE = 'neutron.port'
OPTS = [
cfg.StrOpt('transformer',
default='vitrage.datasources.neutron.port.'
'transformer.PortTransformer',
help='Neutron port transformer class path',
required=True),
cfg.StrOpt('driver',
default='vitrage.datasources.neutron.port.driver.PortDriver',
help='Neutron port driver class path',
required=True),
cfg.StrOpt('update_method',
default=UpdateMethod.PUSH,
help='None: updates only via Vitrage periodic snapshots.'
'Pull: updates every [changes_interval] seconds.'
'Push: updates by getting notifications from the'
' datasource itself.',
required=True),
]
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
213dd540eb8864eec2bd1888ba1fac73949e86c7
|
8200e9869cae6699d186a4cf9172800f95bede50
|
/rainman/testing.py
|
3158b7e3ba15826b2b6982e7f0379b854aa1e8d0
|
[] |
no_license
|
wickman/rainman
|
d3684f0b4fa834b8800650036caf69093ff0504a
|
14dec93c76f8c3f902fe769f5e9d7a0ee1db95ca
|
refs/heads/master
| 2016-09-05T12:57:25.794351
| 2014-08-13T22:27:19
| 2014-08-13T22:27:19
| 16,630,229
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,986
|
py
|
from __future__ import print_function
import os
import random
import socket
from .client import Client
from .fileset import FileSet, Fileslice
from .fs import DISK
from .metainfo import MetaInfoBuilder
from .peer_id import PeerId
from .torrent import Torrent
from .scheduler import Scheduler
from tornado.testing import bind_unused_port
from twitter.common.dirutil import safe_mkdtemp, safe_mkdir
from twitter.common.quantity import Amount, Data, Time
class SocketClient(Client):
def __init__(self, sock, port, io_loop, peer_id=None, **kw):
self.__sock = sock
self.__port = port
super(SocketClient, self).__init__(peer_id or PeerId.generate(), io_loop=io_loop, **kw)
def listen(self):
self._port = self.__port
self.add_sockets([self.__sock])
def make_fileset(filelist, piece_size, fs=DISK):
"Given (filename, contents) list, return dir, FileSet pair."
td = safe_mkdtemp()
for filename, contents in filelist:
sl = Fileslice(os.path.join(td, filename), slice(0, len(contents)))
fs.fill(sl)
fs.write(sl, contents)
filelist = [(filename, len(contents)) for (filename, contents) in filelist]
return td, FileSet(filelist, piece_size)
def make_metainfo(filelist, piece_size, fs=DISK):
td, fileset = make_fileset(filelist, piece_size, fs=fs)
mib = MetaInfoBuilder(fileset.rooted_at(td), relpath=td)
return td, fileset, mib.build(fs)
def make_torrent(filelist, piece_size, tracker, fs=DISK):
td, fileset, metainfo = make_metainfo(filelist, piece_size, fs=fs)
torrent = Torrent()
torrent.info = metainfo
torrent.announce = tracker
return td, fileset, torrent
def random_stream(N):
return os.urandom(N)
def make_ensemble(
io_loop,
num_seeders=1,
num_leechers=1,
piece_size=16384,
max_filesize=32768,
total_filesize=1048576,
seed=31337,
scheduler_impl=Scheduler,
fs=DISK):
root = safe_mkdtemp()
seeder_sockets = [(PeerId.generate(), bind_unused_port()) for _ in range(num_seeders)]
leecher_sockets = [(PeerId.generate(), bind_unused_port()) for _ in range(num_leechers)]
tracker_info = os.path.join(root, 'tracker_info.txt')
with open(tracker_info, 'w') as fp:
for peer_id, (_, port) in seeder_sockets + leecher_sockets:
print('%s 127.0.0.1 %d' % (peer_id, port), file=fp)
tracker_info = 'file://' + tracker_info
random.seed(seed)
filelist = []
files = 0
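# Carve total_filesize into randomly sized files, writing each one into the dataset root and every seeder root so all seeders start fully populated.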
while total_filesize > 0:
filesize = min(total_filesize, random.randrange(0, max_filesize))
total_filesize -= filesize
filename = '%x.txt' % files
filelist.append((filename, filesize))
content = random_stream(filesize)
for replica in ['dataset'] + ['seeder%d' % k for k in range(num_seeders)]:
safe_mkdir(os.path.join(root, replica))
real_path = os.path.join(root, replica, filename)
slice_ = Fileslice(real_path, slice(0, filesize))
fs.fill(slice_)
fs.write(slice_, content)
files += 1
fileset = FileSet(filelist, piece_size)
mib = MetaInfoBuilder(
fileset.rooted_at(os.path.join(root, 'dataset')),
relpath=os.path.join(root, 'dataset'),
piece_size=piece_size)
torrent = Torrent()
torrent.info = mib.build(fs)
torrent.announce = tracker_info
seeder_clients = []
leecher_clients = []
def make_peer(peer_id, listener, port, chroot):
client = SocketClient(listener, port, io_loop, peer_id, fs=fs)
scheduler = scheduler_impl(client, request_size=Amount(piece_size // 4, Data.BYTES))
client.listen()
client.register_torrent(torrent, root=chroot)
return scheduler
for index, (peer_id, (listener, port)) in enumerate(seeder_sockets):
seeder_clients.append(
make_peer(peer_id, listener, port, os.path.join(root, 'seeder%d' % index)))
for index, (peer_id, (listener, port)) in enumerate(leecher_sockets):
leecher_clients.append(
make_peer(peer_id, listener, port, os.path.join(root, 'leecher%d' % index)))
return torrent, seeder_clients, leecher_clients
|
[
"wickman@twitter.com"
] |
wickman@twitter.com
|
dcee632cb5018883a234ec0cd01f4f23bcc5dca7
|
448c533feb888fd2b64d0d4f130ca4bc1c82e6c2
|
/juparc/cli/select_cmd.py
|
2f1e52e4d5465b1118e619e2e321a474887eaab1
|
[] |
no_license
|
gems-uff/jupyter-archaeology
|
e2433f745c70cbcc502cfc762127e75e3762ecb4
|
3bf3d6fdf672d2123c599fefa65292de3c3dc1c9
|
refs/heads/main
| 2023-03-24T21:23:06.999836
| 2021-03-12T00:40:50
| 2021-03-12T00:40:50
| 304,041,226
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
"""Select command: select notebooks"""
import json
import sys
import re
from ..extract import load, create_default
def value(original):
"""Convert value to int, float, tuple or str"""
if isinstance(original, (float, int, tuple)):
return original
if isinstance(original, str):
try:
return int(original)
except ValueError:
try:
return float(original)
except ValueError:
if "." in original:
try:
return tuple(int(x) for x in original.split('.'))
except ValueError:
pass
return original
return json.dumps(original)
def compare(notebook_arg, attr):
"""Compare argument to notebook value"""
if attr == "null":
return notebook_arg is None
nval = value(notebook_arg)
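    # A leading >, >=, <, <=, == or != selects an ordered comparison; any other attribute string is treated as a regular expression.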
if attr.startswith(">"):
if attr.startswith(">="):
return nval >= value(attr[2:])
return nval > value(attr[1:])
if attr.startswith("<"):
if attr.startswith("<="):
return nval <= value(attr[2:])
return nval < value(attr[1:])
if attr.startswith("=="):
return nval == value(attr[2:].lstrip())
if attr.startswith("!="):
return nval != value(attr[2:].lstrip())
return re.match(attr, str(nval)) is not None
def select_cmd(args, _):
"""select cmd"""
if not args.notebooks:
lines = list(sys.stdin)
notebooks = json.loads("\n".join(lines))
else:
notebooks = [load(notebook) for notebook in args.notebooks]
attributes = create_default()
result = []
for notebook in notebooks:
add = True
for arg in attributes:
attr = getattr(args, arg, None)
if attr is None:
continue
attr = attr.strip()
if not compare(notebook[arg], attr):
add = False
continue
if add:
result.append(notebook)
if args.count:
print(len(result))
else:
print(json.dumps(result, indent=2))
def create_subparsers(
subparsers,
cmd='select',
helper='Select notebooks that match condition',
**defaults
):
"""create subcommands"""
parser = subparsers.add_parser(cmd, help=helper)
parser.set_defaults(func=select_cmd, command=parser)
parser.add_argument(
"-n", "--notebooks", default=None, nargs="*",
help="List of notebooks. If empty, it will read json from input"
)
parser.add_argument(
"-c", "--count", action="store_true",
help="Show count instead of notebooks"
)
attributes = create_default()
for attr in attributes:
default = defaults.get(attr, None)
parser.add_argument(
"--" + attr.replace('_', '-'), default=default,
help="Select " + attr
)
|
[
"joaofelipenp@gmail.com"
] |
joaofelipenp@gmail.com
|
2a4a35023e91fbd52dc123b0da9b6c1f6dfcc06d
|
1a2adb80f326435ec4f60bacfcc812566e687f12
|
/unit_tests/bigquery/test_schema.py
|
44cd8816e277b3a63a12bedf819d6675150fa099
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
runt18/gcloud-python
|
66d92095c2b9541cffcb18531f5f5ad3d1adf37f
|
3f836eb728488d78793f7aecaa573bd37f1f2d3c
|
refs/heads/master
| 2021-01-23T15:41:53.642420
| 2016-09-06T20:36:13
| 2016-09-06T20:36:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,745
|
py
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestSchemaField(unittest.TestCase):
def _getTargetClass(self):
from gcloud.bigquery.schema import SchemaField
return SchemaField
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
field = self._makeOne('test', 'STRING')
self.assertEqual(field.name, 'test')
self.assertEqual(field.field_type, 'STRING')
self.assertEqual(field.mode, 'NULLABLE')
self.assertEqual(field.description, None)
self.assertEqual(field.fields, None)
def test_ctor_explicit(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
self.assertEqual(field.name, 'test')
self.assertEqual(field.field_type, 'STRING')
self.assertEqual(field.mode, 'REQUIRED')
self.assertEqual(field.description, 'Testing')
self.assertEqual(field.fields, None)
def test_ctor_subfields(self):
field = self._makeOne('phone_number', 'RECORD',
fields=[self._makeOne('area_code', 'STRING'),
self._makeOne('local_number', 'STRING')])
self.assertEqual(field.name, 'phone_number')
self.assertEqual(field.field_type, 'RECORD')
self.assertEqual(field.mode, 'NULLABLE')
self.assertEqual(field.description, None)
self.assertEqual(len(field.fields), 2)
self.assertEqual(field.fields[0].name, 'area_code')
self.assertEqual(field.fields[0].field_type, 'STRING')
self.assertEqual(field.fields[0].mode, 'NULLABLE')
self.assertEqual(field.fields[0].description, None)
self.assertEqual(field.fields[0].fields, None)
self.assertEqual(field.fields[1].name, 'local_number')
self.assertEqual(field.fields[1].field_type, 'STRING')
self.assertEqual(field.fields[1].mode, 'NULLABLE')
self.assertEqual(field.fields[1].description, None)
self.assertEqual(field.fields[1].fields, None)
def test___eq___name_mismatch(self):
field = self._makeOne('test', 'STRING')
other = self._makeOne('other', 'STRING')
self.assertNotEqual(field, other)
def test___eq___field_type_mismatch(self):
field = self._makeOne('test', 'STRING')
other = self._makeOne('test', 'INTEGER')
self.assertNotEqual(field, other)
def test___eq___mode_mismatch(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED')
other = self._makeOne('test', 'STRING', mode='NULLABLE')
self.assertNotEqual(field, other)
def test___eq___description_mismatch(self):
field = self._makeOne('test', 'STRING', description='Testing')
other = self._makeOne('test', 'STRING', description='Other')
self.assertNotEqual(field, other)
def test___eq___fields_mismatch(self):
sub1 = self._makeOne('sub1', 'STRING')
sub2 = self._makeOne('sub2', 'STRING')
field = self._makeOne('test', 'RECORD', fields=[sub1])
other = self._makeOne('test', 'RECORD', fields=[sub2])
self.assertNotEqual(field, other)
def test___eq___hit(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
other = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
self.assertEqual(field, other)
def test___eq___hit_case_diff_on_type(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
other = self._makeOne('test', 'string', mode='REQUIRED',
description='Testing')
self.assertEqual(field, other)
def test___eq___hit_w_fields(self):
sub1 = self._makeOne('sub1', 'STRING')
sub2 = self._makeOne('sub2', 'STRING')
field = self._makeOne('test', 'RECORD', fields=[sub1, sub2])
other = self._makeOne('test', 'RECORD', fields=[sub1, sub2])
self.assertEqual(field, other)
|
[
"tseaver@palladion.com"
] |
tseaver@palladion.com
|
291e27fa3dca7be5618fa79c258cbc506a56be91
|
4c535d2c7b76955b014ed87f06f5a9f078730fa7
|
/10008_2.py
|
20590de890ded052a6f4f8fdd8ae246f9e3df8de
|
[] |
no_license
|
jennyChing/onlineJudge
|
cf750b5b68456f8949bed0429c3b4a328ca2eeea
|
0d317db29c21a9faec7a7cf43de5fa03fda50b3e
|
refs/heads/master
| 2020-04-06T06:56:34.850903
| 2016-08-21T23:38:08
| 2016-08-21T23:38:08
| 52,972,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import re
from collections import Counter
n = int(input())
line = []
for i in range(n):
line += re.findall('[a-zA-Z]', input().strip())
c = Counter(c.upper() for c in line)
for k, v in c.most_common():
print(k, v)
|
[
"jklife3@gmail.com"
] |
jklife3@gmail.com
|
de5ce76ee7e89d5a5155a27cb0dd2b9425b6015a
|
b18fb2f2f7955e8830ec1c615ab82c14cd52c8f5
|
/server/src/app/http/mods/admin/admin_tools.py
|
aa5906344bb4426eb3e4a256528544d7997c31fa
|
[] |
no_license
|
xyzmyall/spff_public
|
260492c6ee917093ba3e54364d61e989d184c810
|
9a9a0973bce715de8767e4991ad24515311752c9
|
refs/heads/master
| 2022-10-13T20:47:08.467510
| 2020-06-10T09:25:30
| 2020-06-10T09:25:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
import time
from werkzeug.security import check_password_hash, generate_password_hash
from app.http.http_tools.tools import http_tools
from data.server import Data
from common.common import common_tools as common, common_tools
class admin_tool():
@staticmethod
def upload_case(data,user_base):
content = common.get_base64(data['content'].encode('utf-8'))
content_md5 = common.get_md5(content)
user_id = user_base['id']
ctime = int(time.time())
event_time = common.str_to_time(data['event_time'] + ' 00:00:00')
title = common.get_base64(data['title'].encode('utf-8'))
if Data.find('case_info', [('title', '=', title)]) is not None:
# self.send_faild(error.ERROR_CASE_EXIST)
return
params = {
'user_id': user_id,
'c_time': ctime,
# 'content': content,
'content_md5': content_md5,
'event_time': event_time,
'title': title,
}
Data.insert('case_info', params)
# Insert the main body content
cond = [
('user_id', '=', user_id),
('c_time', '=', ctime),
('content_md5', '=', content_md5)
]
res = Data.find('case_info', cond)
params = {
'case_id': res['id'],
'content': content
}
Data.insert('case_content',params)
# Obtained the id; insert the content into the table
res['content'] = content
http_tools.split_case_info(res)
return res
@staticmethod
def update_case(self):
# id = case_id
return
@staticmethod
def check_pw(user_base,pw):
pw_md5 = common_tools.get_md5(pw)
return check_password_hash(user_base['pwhash'], pw_md5)
@staticmethod
def create_pw(pw):
pw_md5 = common_tools.get_md5(pw)
return generate_password_hash(pw_md5)
|
[
"campanulamediuml@gmail.com"
] |
campanulamediuml@gmail.com
|
4b441cd8eaaa31d69a152fb2fafbe683a2a2aac3
|
dbf7910f178ff895516c5df286c3cf13e1dd48ca
|
/dashboard/urls.py
|
771e1e715e42ef2b30e0afc8c6b1f5e8ba8114fa
|
[] |
no_license
|
nimadorostkar/crop_stocks
|
df5b5ef42e746c4fe66416d33534685eb2d33053
|
b382f3d912ad4276882eb7bb47a6147f335e4faa
|
refs/heads/master
| 2023-05-15T00:48:46.889463
| 2021-06-10T08:26:58
| 2021-06-10T08:26:58
| 353,626,010
| 1
| 0
| null | 2021-04-25T07:08:59
| 2021-04-01T08:16:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.dashboard, name='dashboard'),
#path('payment/', views.payment, name='payment'),
path('ticket/', views.ticket, name='ticket'),
path('money_req/', views.money_req, name='money_req'),
]
|
[
"nimadorostkar97@gmail.com"
] |
nimadorostkar97@gmail.com
|
bf4a161f80efb1db0526dcffe7eb8a5ff38a2e0c
|
d41a7e63204b66b3d3b27dbf1e1a3266080c8396
|
/customers/models.py
|
32243ad6316aac73f2807db3cd38549951c7a132
|
[] |
no_license
|
SimonOkello/salesapp
|
552aa3c607a90ad9fea19aa08194750dccda1eca
|
3480965fed58c214177f56231e6bd835e47bb201
|
refs/heads/main
| 2023-03-23T12:08:08.354434
| 2021-03-23T12:48:19
| 2021-03-23T12:48:19
| 350,634,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.db import models
# Create your models here.
class Customer(models.Model):
name = models.CharField(max_length=120)
logo = models.ImageField(upload_to='customers', default='default.png')
def __str__(self):
return self.name
|
[
"simonokello93@gmail.com"
] |
simonokello93@gmail.com
|
d19f42f3b942444cd6f0918191526326b5240ab2
|
18631e9a657324ef1f83da58f4346e9f2c368d28
|
/test/functional/rpc_spentindex.py
|
d0528a1eb21af3a8430c831774faf5aa8af868f7
|
[
"MIT"
] |
permissive
|
thehomosapien/AMLBitcoin
|
1e68bf6621d9ee055385ef420b45c9dc289b4f8c
|
f097ca52c2e8039761f1927d83a9fe0b4c355b1c
|
refs/heads/master
| 2020-05-26T08:27:40.095034
| 2019-05-24T07:31:43
| 2019-05-24T07:31:43
| 188,166,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,158
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2017-2018 The AmlBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC addressindex generation and fetching
#
import time
from test_framework.test_framework import AmlBitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(AmlBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.add_nodes(4, [
# Nodes 0/1 are "wallet" nodes
["-debug"],
["-debug", "-spentindex"],
# Nodes 2/3 are used for testing
["-debug", "-spentindex"],
["-debug", "-spentindex", "-txindex"]])
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
# Check that
print("Testing spent index...")
feeAmlBits = 10000
privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
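# Standard P2PKH locking script: OP_DUP OP_HASH160 <pubkey hash> OP_EQUALVERIFY OP_CHECKSIG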
unspent = self.nodes[0].listunspent()
tx = CTransaction()
amount = int(unspent[0]["amount"] * 100000000 - feeAmlBits)
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
print("Testing getspentinfo method...")
# Check that the spentinfo works standalone
info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
assert_equal(info["txid"], txid)
assert_equal(info["index"], 0)
assert_equal(info["height"], 106)
print("Testing getrawtransaction method...")
# Check that verbose raw transaction includes spent info
txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
# Check that verbose raw transaction includes input values
txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeAmlBits)
# Check that verbose raw transaction includes address values and input values
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
amount = int(amount - feeAmlBits)
tx2.vout = [CTxOut(amount, scriptPubKey2)]
tx2.rehash()
self.nodes[0].importprivkey(privkey)
signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
# Check the mempool index
self.sync_all()
txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
assert_equal(txVerbose3["vin"][0]["address"], address2)
assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeAmlBits)
assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
# Check the database index
block_hash = self.nodes[0].generate(1)
self.sync_all()
txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
assert_equal(txVerbose4["vin"][0]["address"], address2)
assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeAmlBits)
assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
# Check block deltas
print("Testing getblockdeltas...")
block = self.nodes[3].getblockdeltas(block_hash[0])
assert_equal(len(block["deltas"]), 2)
assert_equal(block["deltas"][0]["index"], 0)
assert_equal(len(block["deltas"][0]["inputs"]), 0)
assert_equal(len(block["deltas"][0]["outputs"]), 0)
assert_equal(block["deltas"][1]["index"], 1)
assert_equal(block["deltas"][1]["txid"], txid2)
assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["inputs"][0]["AmlBits"], (amount + feeAmlBits) * -1)
assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["outputs"][0]["AmlBits"], amount)
print("Passed\n")
if __name__ == '__main__':
SpentIndexTest().main()
|
[
"rishabhshukla@opulasoft.com"
] |
rishabhshukla@opulasoft.com
|
63e68fda343dd77d5274e64276af8f9dbe8a4cf3
|
18219d0fc95936ded56fe44f9a65ecb27f015232
|
/195 processor function.py
|
1bb14d07ad6b1b96ff846c371a8b2dc8eb157242
|
[] |
no_license
|
JDavid121/Script-Curso-Cisco-Python
|
20a61b91b09376dcaef54f8ae5f86fe252de5c33
|
6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b
|
refs/heads/master
| 2021-05-18T04:54:59.948970
| 2020-03-29T20:19:53
| 2020-03-29T20:19:53
| 251,120,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 12:07:52 2020
processor function
@author: David
"""
from platform import processor
# The processor() function returns a string filled with
# the real processor name (if possible).
print(processor())
|
[
"noreply@github.com"
] |
JDavid121.noreply@github.com
|
bc994eebc5b04d3239183fd006857bffb91a1af8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/171/usersdata/265/81901/submittedfiles/decimal2bin.py
|
877f170e5cf66f6b01ad797c8e446c197b5ab56d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# -*- coding: utf-8 -*-
d=int(input('digite o valor de d: '))
contador=0
soma=0
# Build the binary representation of d, written with decimal digits
# (e.g. 5 -> 101): take the low bit each pass and place it at the
# next power of ten.
while d > 0:
    soma = soma + (d % 2) * (10 ** contador)
    d = d // 2
    contador = contador + 1
print(soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f40c73f9d512cabfc6000d3b2f2aba4bcf4280dc
|
210ecd63113ce90c5f09bc2b09db3e80ff98117a
|
/AbletonLive9_RemoteScripts/Oxygen8v2/__init__.py
|
31d05f0dde8d5b0ee169a7f5bebf6abf5dce0303
|
[] |
no_license
|
ajasver/MidiScripts
|
86a765b8568657633305541c46ccc1fd1ea34501
|
f727a2e63c95a9c5e980a0738deb0049363ba536
|
refs/heads/master
| 2021-01-13T02:03:55.078132
| 2015-07-16T18:27:30
| 2015-07-16T18:27:30
| 38,516,112
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/Oxygen8v2/__init__.py
from _Generic.GenericScript import GenericScript
import Live
from config import *
def create_instance(c_instance):
""" The generic script can be customised by using parameters (see config.py). """
return GenericScript(c_instance, Live.MidiMap.MapMode.absolute, Live.MidiMap.MapMode.absolute, DEVICE_CONTROLS, TRANSPORT_CONTROLS, VOLUME_CONTROLS, TRACKARM_CONTROLS, BANK_CONTROLS, CONTROLLER_DESCRIPTION, MIXER_OPTIONS)
|
[
"admin@scoopler.com"
] |
admin@scoopler.com
|
8b97c1e14adfcb09806e2d37e2f5c4f0b356c009
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc088/c.py
|
6f50226c2b81dbb528686b2a04839a3d3dee1e8c
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601
| 2021-12-14T08:19:11
| 2021-12-14T08:19:11
| 185,161,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
#
# abc088 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """1 0 1
2 1 2
1 0 1"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2 2 2
2 1 2
2 2 2"""
output = """No"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """0 8 8
0 8 8
0 8 8"""
output = """Yes"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """1 8 6
2 9 7
0 7 7"""
output = """No"""
self.assertIO(input, output)
def resolve():
c = []
for _ in range(3):
c.append(list(map(int, input().split())))
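# Fix a1 = 0: the first row then determines b1..b3 and the first column determines a2 and a3.
# The grid is consistent iff every cell satisfies c[i][j] == a_i + b_j.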
a1 = 0
b1 = c[0][0] - a1
b2 = c[0][1] - a1
b3 = c[0][2] - a1
a2 = c[1][0] - b1
a3 = c[2][0] - b1
if a2+b2 == c[1][1] and a2+b3 == c[1][2] and a3+b2 == c[2][1] and a3+b3 == c[2][2]:
print("Yes")
else:
print("No")
if __name__ == "__main__":
# unittest.main()
resolve()
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
75ff5701bb8130bd4c6b3ed2171d396f007ef5bc
|
e8cf96d2c1cef71c0bcbe200a0a65dee59d21d65
|
/molotov/quickstart/loadtest.py
|
4df0a04ebc981811bb4bc1b6b11fb733a40ba6bf
|
[
"Apache-2.0"
] |
permissive
|
ronnix/molotov
|
11ed38e699bf58ce469b7d96a617ffb98ab7f6f6
|
b2684ec13edb6d0ff901398467b16c885c5ff502
|
refs/heads/master
| 2020-06-20T05:41:36.070912
| 2017-06-09T10:02:01
| 2017-06-09T10:02:01
| 94,195,118
| 0
| 0
| null | 2017-06-13T09:25:58
| 2017-06-13T09:25:58
| null |
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
""" Molotov-based test.
"""
import json
from molotov import scenario, setup, global_setup, teardown, global_teardown
# This is the service you want to load test
_API = 'http://localhost:8080'
@global_setup()
def test_starts(args):
""" This functions is called before anything starts.
Notice that it's not a coroutine.
"""
pass
@setup()
async def worker_starts(worker_id, args):
""" This function is called once per worker.
If it returns a mapping, it will be used with all requests.
You can add things like Authorization headers for instance,
by setting a "headers" key.
"""
headers = {'SomeHeader': '1'}
return {'headers': headers}
@teardown()
def worker_ends(worker_id):
""" This functions is called when the worker is done.
Notice that it's not a coroutine.
"""
pass
@global_teardown()
def test_ends():
""" This functions is called when everything is done.
Notice that it's not a coroutine.
"""
pass
# each scenario has a weight. Molotov uses it to determine
# how often the scenario is picked.
@scenario(40)
async def scenario_one(session):
async with session.get(_API) as resp:
# if Molotov is called with --statsd
# you will have a statsd client set into the session
# you can use to add metrics
if session.statsd:
session.statsd.incr('BLEH')
# when you read the body, don't forget to use await
res = await resp.json()
assert res['result'] == 'OK'
assert resp.status == 200
# all scenarii are coroutines
@scenario(30)
async def scenario_two(session):
# a call to one of the session method should be awaited
# see aiohttp.Client docs for more info on this
async with session.get(_API) as resp:
assert resp.status == 200
@scenario(30)
async def scenario_three(session):
somedata = json.dumps({'OK': 1})
async with session.post(_API, data=somedata) as resp:
assert resp.status == 200
|
[
"tarek@ziade.org"
] |
tarek@ziade.org
|