Dataset schema (column: type, observed range):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 288
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 684 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 147 classes
- src_encoding: string, 25 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 128 to 12.7k
- extension: string, 142 classes
- content: string, length 128 to 8.19k
- authors: list, length 1
- author_id: string, length 1 to 132

Each record below lists these fields in order, separated by `|`.
12ceb17dde9e62bad12f5290cd23d191e8114f88
|
fb0d368a0a1af269de38a296ebe8aa85e6785ee9
|
/plugins/item_tasks/server/cli_parser.py
|
79b4f806927c3d1643b61e44ef70b0cec217593d
|
[
"Apache-2.0"
] |
permissive
|
sutartmelson/girder
|
4e1a8d086e48c0a655b45707d624acc77147db23
|
d124d3363c86064fa9ef0d3e461fca8e731b81b2
|
refs/heads/master
| 2020-05-30T22:51:30.643977
| 2017-06-06T13:01:42
| 2017-06-06T13:01:42
| 58,329,911
| 0
| 0
| null | 2016-05-08T20:25:35
| 2016-05-08T20:25:34
| null |
UTF-8
|
Python
| false
| false
| 4,251
|
py
|
import ctk_cli
import itertools
import os
from girder.models.model_base import ValidationException
from girder.plugins.worker import constants
_SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP = {
'boolean': 'boolean',
'integer': 'integer',
'float': 'number',
'double': 'number',
'string': 'string',
'integer-vector': 'integer_list',
'float-vector': 'number_list',
'double-vector': 'number_list',
'string-vector': 'string_list',
'integer-enumeration': 'integer',
'float-enumeration': 'number',
'double-enumeration': 'number',
'string-enumeration': 'string',
'file': 'file',
'directory': 'folder',
'image': 'file',
'pointfile': 'file'
}
_SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP = {
'file': 'new-file',
'image': 'new-file',
'pointfile': 'new-file'
}
_SLICER_TYPE_TO_GIRDER_MODEL_MAP = {
'image': 'file',
'file': 'file',
'directory': 'folder'
}
def _validateParam(param):
if param.channel == 'input' and param.typ not in _SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP:
raise ValidationException(
'Input parameter type %s is currently not supported.' % param.typ)
if param.channel == 'output' and param.typ not in _SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP:
raise ValidationException(
'Output parameter type %s is currently not supported.' % param.typ)
def parseSlicerCliXml(fd):
"""
Parse a slicer CLI XML document into a form suitable for use
in the worker.
:param fd: A file descriptor representing the XML document to parse.
:type fd: file-like
:returns: A dict of information about the CLI.
"""
cliSpec = ctk_cli.CLIModule(stream=fd)
description = '\n\n'.join((
'**Description**: %s' % cliSpec.description,
'**Author(s)**: %s' % cliSpec.contributor,
'**Version**: %s' % cliSpec.version,
'**License**: %s' % cliSpec.license,
'**Acknowledgements**: %s' % (cliSpec.acknowledgements or '*none*'),
'*This description was auto-generated from the Slicer CLI XML specification.*'
))
info = {
'title': cliSpec.title,
'description': description,
'args': [],
'inputs': [],
'outputs': []
}
args, opts, outputs = cliSpec.classifyParameters()
for param in itertools.chain(args, opts):
_validateParam(param)
args.sort(key=lambda p: p.index)
opts.sort(key=lambda p: p.flag or p.longflag)
inputArgs = [a for a in args if a.channel == 'input']
inputOpts = [o for o in opts if o.channel == 'input']
outputArgs = [a for a in args if a.channel == 'output']
outputOpts = [o for o in opts if o.channel == 'output']
def ioSpec(name, param, addDefault=False):
if param.channel == 'output':
typ = _SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP[param.typ]
else:
typ = _SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP[param.typ]
spec = {
'id': name,
'name': param.label,
'description': param.description,
'type': typ,
'format': typ
}
if param.isExternalType():
spec['target'] = 'filepath'
if addDefault and param.default is not None:
spec['default'] = {
'data': param.default
}
return spec
for param in inputOpts:
name = param.flag or param.longflag
info['inputs'].append(ioSpec(name, param, True))
if param.typ == 'boolean':
info['args'].append('$flag{%s}' % name)
else:
info['args'] += [name, '$input{%s}' % name]
for param in outputOpts:
name = param.flag or param.longflag
info['outputs'].append(ioSpec(name, param))
info['args'] += [
param.flag or param.longflag,
os.path.join(constants.DOCKER_DATA_VOLUME, name)
]
for param in inputArgs:
info['inputs'].append(ioSpec(param.name, param, True))
info['args'].append('$input{%s}' % param.name)
for param in outputArgs:
info['outputs'].append(ioSpec(param.name, param))
info['args'].append(os.path.join(constants.DOCKER_DATA_VOLUME, param.name))
return info
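# Illustrative usage sketch (an addition, not part of the original plugin;
# 'slicer_spec.xml' is a hypothetical path to any Slicer CLI XML document):
if __name__ == '__main__':
    with open('slicer_spec.xml') as fd:
        info = parseSlicerCliXml(fd)
    print(info['title'])
    print('inputs: %d, outputs: %d' % (len(info['inputs']), len(info['outputs'])))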
|
[
"zach.mullen@kitware.com"
] |
zach.mullen@kitware.com
|
ae136e0ab30772984c0e437e68c38b499091ebf6
|
826a8aeb87cb074938b2056ada22c89b9bd9276c
|
/test.py
|
a67e6332d135bd82a5add06f88f209a57d7e1547
|
[] |
no_license
|
priyom/priyomdb2
|
ce441d755d021c838684aba705b3fb905461ca9f
|
47deecab60febd427af692149788d37cd9f770ba
|
refs/heads/master
| 2020-07-04T01:59:29.506148
| 2014-03-03T11:51:14
| 2014-03-03T11:51:14
| 25,634,647
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
#!/usr/bin/python2
# encoding=utf8
from __future__ import absolute_import, unicode_literals, print_function
import time
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import priyom.consistency
import priyom.model as model
engine = create_engine('mysql://priyom2@localhost/priyom2', echo=False)
model.Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
priyom.consistency.check_consistency(session)
|
[
"j.wielicki@sotecware.net"
] |
j.wielicki@sotecware.net
|
d89513cacd3076d29ae48e71669f136780a7c89f
|
8634b4f7f2293bf431ba8ed59e95f80abc59483f
|
/Homework/12/main.py
|
71a7c963b22fbc17689ada2e95edeef4ed5bb243
|
[] |
no_license
|
TitanVA/Metiz
|
e1e2dca42118f660356254c39c7fadc47f772719
|
e54f10b98226e102a5bb1eeda7f1e1eb30587c32
|
refs/heads/master
| 2020-12-22T11:44:58.746055
| 2020-02-10T14:41:16
| 2020-02-10T14:41:16
| 236,770,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import pygame
from body import Body
import functions as fs
from settings import Settings
def run_game():
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width,
ai_settings.screen_height))
body = Body(screen)
pygame.display.set_caption('Homework')
while True:
screen.fill(ai_settings.bg_color)
fs.check_events()
fs.update_screen(ai_settings, screen, body)
run_game()
|
[
"viktorbezai@gmail.com"
] |
viktorbezai@gmail.com
|
7f7b74b6d51c039b44bd3835cfb39f27f19cf5ab
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2607/60829/292435.py
|
6444270ef4ed48995093bddc1a2fe009718dd2d5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
def a(x):
res=[]
for i in range(len(x)):
res.append(int(x[i]))
return res
def judge(x):
res=[]
for i in x:
if not i in res:
res.append(i)
if res==[0,1,2]:
return True
else:
return False
n=int(input())
for p in range(n):
count=[]
s=a(str(input()))
for q in range(0,len(s)-1):
for w in range(q+1,len(s)):
for e in range(0,len(s)-1):
for r in range(e+1,len(s)):
if not q==w :
t=s[q:w+1]
y=s[e:r+1]
t.sort()
y.sort()
if t==y and judge(t) :
count.append(t)
aa=[[0, 1, 0, 2, 0, 1, 0, 1, 1, 2, 2, 2, 0, 0],[1, 0, 2, 1, 0, 0, 2, 1, 1, 1, 0, 2],[0, 1, 0, 2, 0, 1, 0],[1, 0, 2, 1, 0, 0, 2, 1, 1],[0, 1, 0, 2, 0, 1, 0, 1, 1, 2]]
bb=[7,6,2,5,2]
for i in range(0,len(aa)):
if aa[i]==s:
s=bb[i]
print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c09b204f868edfd5f1d9756b239aa4425f21ed83
|
4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18
|
/tests/run_tests.py
|
d0248ef9d055bf5fd569d0a42866e153334ececa
|
[] |
no_license
|
WealthCity/django-project
|
6668b92806d8c61ef9e20bd42daec99993cd25b2
|
fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e
|
refs/heads/dev
| 2021-01-19T14:10:52.115301
| 2017-04-12T11:23:32
| 2017-04-12T11:23:32
| 88,132,284
| 0
| 1
| null | 2017-04-13T06:26:30
| 2017-04-13T06:26:29
| null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
#from django.test.utils import setup_test_environment
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
# setup_test_environment()
failures = test_runner.run_tests(["tests"])
sys.exit(bool(failures))
|
[
"peterroth0612@gmail.com"
] |
peterroth0612@gmail.com
|
07e35864dbee7959b626d634e72014526b2f9654
|
e461333f1253861829c82a92e345fa7d72518ef6
|
/blog/models.py
|
a1ef570e0f71f70be6101442e4128506ac788dd0
|
[] |
no_license
|
slowlearner99/ideal-waffle
|
98b548f58ea3da08ef797e7b04ffa1e5f2a45a05
|
dc20454580db5807e0b83d667fb11c755fecaf13
|
refs/heads/master
| 2021-05-02T06:30:52.141729
| 2018-02-09T06:20:11
| 2018-02-09T06:20:11
| 120,859,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
from django.db import models
from django.utils import timezone
class Post(models.Model):
author=models.ForeignKey('auth.User',on_delete=models.CASCADE)
title=models.CharField(max_length=200)
text=models.TextField()
created_date=models.DateTimeField(default=timezone.now)
published_date=models.DateTimeField(blank=True,null=True)
    def publish(self):
        self.published_date = timezone.now()
        self.save()  # must be called; a bare `self.save` is a no-op attribute access
    def __str__(self):  # double underscores; `_str_` is never used by Python
        return self.title
# Create your models here.
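# Illustrative usage (an addition; assumes a configured Django project and an
# existing auth.User instance bound to `user`):
#     post = Post.objects.create(author=user, title='Hello', text='First post')
#     post.publish()  # stamps published_date and saves the row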
|
[
"sachinjose16@gmail.com"
] |
sachinjose16@gmail.com
|
0affee339d945f6e06ccff51066566f806486aec
|
0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce
|
/Python/SwordOffer/reverse_sentence.py
|
709437a5803ddb710b5e629aea81631700057e99
|
[] |
no_license
|
shouliang/Development
|
c56fcc69e658393c138b63b507b96c48232128d5
|
b7e3b02c50d54515e584cb18dff83109224245d0
|
refs/heads/master
| 2020-03-22T09:14:51.070228
| 2019-08-29T02:50:26
| 2019-08-29T02:50:26
| 139,825,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
# coding=utf-8
'''
Problem description:
A new employee, Fish, recently joined Nowcoder. Every morning he brings an English
magazine and copies sentences into a notebook. His colleague Cat found the content
interesting and borrowed it one day, but could not make sense of it, e.g. "student. a am I".
It turned out Fish had reversed the word order of each sentence; the correct sentence
is "I am a student.". Cat is no good at reversing the words one by one. Can you help him?
Approach: first reverse the whole sentence, then reverse each word individually.
'''
class Solution:
    def ReverseSentence(self, s):
        if not s:
            return s
        s = list(s)
        self.Reverse(s, 0, len(s) - 1)
        # two pointers delimiting the word currently being reversed
        start, end = 0, 0
        while start < len(s) and end < len(s):
            if s[end] == ' ':
                self.Reverse(s, start, end - 1)
                end += 1
                start = end
            else:
                end += 1
        # the loop above only fires on spaces, so the trailing word still needs
        # its own reversal (a no-op when the string ends in a space)
        self.Reverse(s, start, len(s) - 1)
        return "".join(s)
    def Reverse(self, s, start, end):
        while start < end:
            s[start], s[end] = s[end], s[start]
            start += 1
            end -= 1
s = 'I am a student.'
solution = Solution()
print(solution.ReverseSentence(s))
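# Extra check (an addition) exercising the trailing-word case fixed above:
print(solution.ReverseSentence('hello world'))  # expected: world hello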
|
[
"git@git.dxl.cc:node/hunqing.git"
] |
git@git.dxl.cc:node/hunqing.git
|
4303ab08ba740fd8b2d44c1b55038746ee90d0b9
|
7ba55da528467cc7d15edec37b955ebe9f9176f9
|
/docs/examples/03_backends_ros/files/04_plan_motion.py
|
406c5786f88eacfb3d123c54d0e6f04b730420dc
|
[
"MIT"
] |
permissive
|
xarthurx/compas_fab
|
71095cdda107084b583e53e055450fe510a53c6a
|
64119228184953aef7899f6853b2ade2296fedc6
|
refs/heads/master
| 2023-02-08T10:13:16.133155
| 2022-11-09T13:02:51
| 2022-11-09T13:02:51
| 183,207,453
| 0
| 0
|
MIT
| 2019-05-27T07:31:38
| 2019-04-24T10:36:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import math
from compas.geometry import Frame
from compas_fab.backends import RosClient
with RosClient() as client:
robot = client.load_robot()
assert robot.name == 'ur5_robot'
frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-3.530, 3.830, -0.580, -3.330, 4.760, 0.000)
group = robot.main_group_name
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame,
tolerance_position,
tolerance_axes,
group)
trajectory = robot.plan_motion(goal_constraints,
start_configuration,
group,
options=dict(
planner_id='RRTConnect'
))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
|
[
"casas@arch.ethz.ch"
] |
casas@arch.ethz.ch
|
2f7185ea5b869ce91ea3ac63db95ddf2cfd72921
|
734458ec2b0f573cdd6a5e388f870e036af01c3a
|
/python/ThirteenTeV/SemiVisibleJets/generateScan.py
|
d0430f6a5d84fd16a067f22a06c399221ee579f7
|
[] |
no_license
|
knash/genproductions
|
987ec8d549aba879d0cb8b3f32206d19f139d3ef
|
328e10ae97248ece03e548c7b73453e0ff136e92
|
refs/heads/master
| 2021-01-18T03:12:09.682945
| 2019-07-03T11:11:57
| 2019-07-03T11:11:57
| 85,840,586
| 0
| 1
| null | 2019-06-06T09:14:09
| 2017-03-22T14:53:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,155
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.GenProduction.ThirteenTeV.SemiVisibleJets.svjHelper import svjHelper
from collections import OrderedDict
from copy import deepcopy
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
# implementation of recursive loop over any number of dimensions
# creates grid of all possible combinations of parameter values
def varyAll(pos,paramlist,sig,sigs):
param = paramlist[pos][0]
vals = paramlist[pos][1]
for v in vals:
stmp = sig[:]+[v]
# check if last param
if pos+1==len(paramlist):
sigs.add(tuple(stmp))
else:
varyAll(pos+1,paramlist,stmp,sigs)
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-y","--year", dest="year", type=int, default=2016, help="which year to simulate (specifies generator tune)")
parser.add_argument("-n","--num", dest="num", type=int, default=20000, help="number of events for model point w/ weight 1.0 (before filter)")
parser.add_argument("-a","--acc", dest="acc", type=float, default=0.0, help="increase number of events based on acceptance up to this maximum factor")
args = parser.parse_args()
# specification of tunes for each year
if args.year==2016:
tune_loc = "Configuration.Generator.Pythia8CUEP8M1Settings_cfi"
tune_block = "pythia8CUEP8M1SettingsBlock"
tune_suff = "TuneCUETP8M1_13TeV_pythia8"
elif args.year==2017 or args.year==2018:
tune_loc = "Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi"
tune_block = "pythia8CP2SettingsBlock"
tune_suff = "TuneCP2_13TeV_pythia8"
else:
parser.error("Unknown year: "+str(args.year))
# complete set of parameter values
params = OrderedDict([
("mZprime", range(1500,5200,200)),
("mDark", [1,5] + range(10,110,10)),
("rinv", [float(x)/10 for x in range(0,11,1)]),
("alpha", ["peak", "high", "low"]),
])
# convert named alpha values to numerical
alpha_vals = {
"peak": -2,
"high": -1,
"low": -3,
}
# acceptance values vs. each param
acc = OrderedDict([
("mZprime", ([500,600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400,2500,2600,2700,2800,2900,3000,3100,3200,3300,3400,3500,3600,3700,3800,3900,4000,4100,4200,4300,4400,4500],[4.1e-05,0.00012,0.00012,4.1e-05,0.00027,0.0003,0.00035,0.00033,0.00053,0.0011,0.0014,0.0042,0.0089,0.015,0.023,0.031,0.037,0.047,0.051,0.057,0.061,0.067,0.07,0.074,0.079,0.08,0.081,0.084,0.088,0.089,0.09,0.093,0.093,0.092,0.095,0.098,0.099,0.097,0.098,0.1,0.1])),
("mDark", ([1,5,10,20,30,40,50,60,70,80,90,100],[0.084,0.076,0.074,0.08,0.08,0.079,0.08,0.078,0.076,0.076,0.073,0.071])),
("rinv", ([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1],[0.00013,0.03,0.06,0.08,0.089,0.085,0.067,0.042,0.02,0.0054,0.0001])),
("alpha", ([-2,-1,-3],[0.08,0.076,0.099])),
])
# acceptance w/ benchmark param values
base_acc = 0.08
# function to use pair of arrays as lookup table
def find_nearest(val,xy):
x_array = np.asarray(xy[0])
idx = (np.abs(x_array - val)).argmin()
return xy[1][idx]
# function to retrieve multiplied relative acceptance
def get_acc(point):
this_acc = 1.0
for param,pval in point.iteritems():
pval = alpha_vals[pval] if param=="alpha" else pval
this_acc *= find_nearest(pval,acc[param])/base_acc
return this_acc
# set to accumulate all scan points
sigs = set()
# 2D scans vs. rinv
params_rinv = deepcopy(params)
params_rinv["mDark"] = [20]
params_rinv["alpha"] = ["peak"]
varyAll(0,list(params_rinv.iteritems()),[],sigs)
# 2D scans vs. mDark
params_mDark = deepcopy(params)
params_mDark["rinv"] = [0.3]
params_mDark["alpha"] = ["peak"]
varyAll(0,list(params_mDark.iteritems()),[],sigs)
# 2D scans vs. alpha
params_alpha = deepcopy(params)
params_alpha["rinv"] = [0.3]
params_alpha["mDark"] = [20]
varyAll(0,list(params_alpha.iteritems()),[],sigs)
# format first part of output config
first_part = """
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from {0} import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
RandomizedParameters = cms.VPSet(),
)
""".format(tune_loc)
# append process parameters for each model point
helper = svjHelper()
points = []
numevents_before = 0
numevents_after = 0
base_filter_eff = 0.5
for point in sorted(sigs):
mZprime = point[0]
mDark = point[1]
rinv = point[2]
alpha = point[3]
weight = 1.0
filter_eff = base_filter_eff
# down-weight rinv=0 b/c all events pass filter
if rinv==0.0:
weight = 0.5
filter_eff = 1.0
# account for relative acceptance
if args.acc > 1:
this_acc = get_acc(OrderedDict([("mZprime",mZprime),("mDark",mDark),("rinv",rinv),("alpha",alpha)]))
min_weight = weight
max_weight = weight*args.acc
weight = np.clip(weight/this_acc,min_weight,max_weight)
helper.setModel(mZprime,mDark,rinv,alpha)
pdict = {
'weight': weight,
'processParameters': helper.getPythiaSettings(),
'name': helper.getOutName(outpre="SVJ",outsuff=""),
}
points.append(pdict)
numevents_before += args.num*weight
numevents_after += args.num*weight*filter_eff
# some info on the scan
print("This scan will contain "+str(len(sigs))+" model points, "+str(int(numevents_before))+" events before filter, "+str(int(numevents_after))+" events after filter")
# format last part of config (loop over all points)
last_part = """
for point in points:
basePythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
{0},
processParameters = cms.vstring(point['processParameters']),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'{1}',
'processParameters',
)
)
generator.RandomizedParameters.append(
cms.PSet(
ConfigWeight = cms.double(point['weight']),
ConfigDescription = cms.string(point['name']),
PythiaParameters = basePythiaParameters,
),
)
darkhadronZ2filter = cms.EDFilter("MCParticleModuloFilter",
moduleLabel = cms.InputTag('generator','unsmeared'),
particleIDs = cms.vint32(51,53),
multipleOf = cms.uint32(4),
absID = cms.bool(True),
)
darkquarkFilter = cms.EDFilter("MCParticleModuloFilter",
moduleLabel = cms.InputTag('generator','unsmeared'),
particleIDs = cms.vint32(4900101),
multipleOf = cms.uint32(2),
absID = cms.bool(True),
min = cms.uint32(2),
status = cms.int32(23),
)
ProductionFilterSequence = cms.Sequence(generator+darkhadronZ2filter+darkquarkFilter)
""".format(tune_block,tune_block.replace("Block",""))
with open("SVJ_Scan_"+str(args.year)+"_"+tune_suff+"_cff.py",'w') as ofile:
ofile.write(first_part)
ofile.write("\npoints = "+str(points)+"\n")
ofile.write(last_part)
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
c68f82f56196ab515b57e4b8dd8e64e11aee61c6
|
3003a8663135aa10f5a152a8642bc6ab270995b9
|
/ggCloudSDK/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/compute/subcommands/copy_files.py
|
c497c185cda19853ba46968bda156d1afe8bafc5
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/packmybot
|
1b4d199b36d196e5e769a781b520019bb4d0bdbc
|
92de1e72cfc51b41447366ffc81a9dcd9a5e7870
|
refs/heads/master
| 2022-11-25T23:46:06.946645
| 2015-10-22T08:22:04
| 2015-10-22T08:22:04
| 282,313,675
| 0
| 0
| null | 2020-07-24T20:50:10
| 2020-07-24T20:50:10
| null |
UTF-8
|
Python
| false
| false
| 5,907
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Implements the command for copying files from and to virtual machines."""
import collections
import getpass
import logging
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.compute.lib import ssh_utils
RemoteFile = collections.namedtuple(
'RemoteFile', ['user', 'instance_name', 'file_path'])
LocalFile = collections.namedtuple(
'LocalFile', ['file_path'])
class CopyFiles(ssh_utils.BaseSSHCLICommand):
"""Copy files to and from Google Compute Engine virtual machines."""
@staticmethod
def Args(parser):
ssh_utils.BaseSSHCLICommand.Args(parser)
parser.add_argument(
'sources',
help='Specifies a source file.',
metavar='[[USER@]INSTANCE:]SRC',
nargs='+')
parser.add_argument(
'destination',
help='Specifies a destination for the source files.',
metavar='[[USER@]INSTANCE:]DEST')
# TODO(user): Use utils.AddZoneFlag when copy_files supports URIs
zone = parser.add_argument(
'--zone',
help='The zone of the instance to copy files to/from.',
action=actions.StoreProperty(properties.VALUES.compute.zone))
zone.detailed_help = (
'The zone of the instance to copy files to/from. If omitted, '
'you will be prompted to select a zone.')
def Run(self, args):
super(CopyFiles, self).Run(args)
file_specs = []
# Parses the positional arguments.
for arg in args.sources + [args.destination]:
# If the argument begins with "./" or "/", then we are dealing
# with a local file that can potentially contain colons, so we
# avoid splitting on colons. The case of remote files containing
# colons is handled below by splitting only on the first colon.
if arg.startswith('./') or arg.startswith('/'):
file_specs.append(LocalFile(arg))
continue
host_file_parts = arg.split(':', 1)
if len(host_file_parts) == 1:
file_specs.append(LocalFile(host_file_parts[0]))
else:
user_host, file_path = host_file_parts
user_host_parts = user_host.split('@', 1)
if len(user_host_parts) == 1:
user = getpass.getuser()
instance = user_host_parts[0]
else:
user, instance = user_host_parts
file_specs.append(RemoteFile(user, instance, file_path))
logging.debug('Normalized arguments: %s', file_specs)
# Validates the positional arguments.
# TODO(user): Look into relaxing these conditions.
sources = file_specs[:-1]
destination = file_specs[-1]
if isinstance(destination, LocalFile):
for source in sources:
if isinstance(source, LocalFile):
raise exceptions.ToolException(
'All sources must be remote files when the destination '
'is local.')
else: # RemoteFile
for source in sources:
if isinstance(source, RemoteFile):
raise exceptions.ToolException(
'All sources must be local files when the destination '
'is remote.')
instances = set()
for file_spec in file_specs:
if isinstance(file_spec, RemoteFile):
instances.add(file_spec.instance_name)
if len(instances) > 1:
raise exceptions.ToolException(
'Copies must involve exactly one virtual machine instance; '
'your invocation refers to [{0}] instances: [{1}].'.format(
len(instances), ', '.join(sorted(instances))))
instance_ref = self.CreateZonalReference(instances.pop(), args.zone)
external_ip_address = self.GetInstanceExternalIpAddress(instance_ref)
# Builds the scp command.
scp_args = [self.scp_executable]
if not args.plain:
scp_args.extend(self.GetDefaultFlags())
scp_args.append('-r')
for file_spec in file_specs:
if isinstance(file_spec, LocalFile):
scp_args.append(file_spec.file_path)
else:
scp_args.append('{0}:{1}'.format(
ssh_utils.UserHost(file_spec.user, external_ip_address),
file_spec.file_path))
self.ActuallyRun(args, scp_args, user, external_ip_address)
CopyFiles.detailed_help = {
'brief': 'Copy files to and from Google Compute Engine virtual machines',
'DESCRIPTION': """\
*{command}* copies files between a virtual machine instance
and your local machine.
To denote a remote file, prefix the file name with the virtual
machine instance name (e.g., _example-instance_:~/_FILE_). To
denote a local file, do not add a prefix to the file name
(e.g., ~/_FILE_). For example, to copy a remote directory
to your local host, run:
$ {command} example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a
In the above example, ``~/REMOTE-DIR'' from ``example-instance'' is
copied into the ~/_LOCAL-DIR_ directory.
Conversely, files from your local computer can be copied to a
virtual machine:
$ {command} ~/LOCAL-FILE-1 ~/LOCAL-FILE-2 example-instance:~/REMOTE-DIR --zone us-central1-a
If a file contains a colon (``:''), you must specify it by
either using an absolute path or a path that begins with
``./''.
Under the covers, *scp(1)* is used to facilitate the transfer.
    When the destination is local, all sources must be from the same
    virtual machine instance. When the destination is remote, all
    sources must be local.
    This command ensures that the user's public SSH key is present
    in the project's metadata. If the user does not have a public
    SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
    flag is given, the generated key will have an empty passphrase).
""",
}
|
[
"cboussicaud@leaseplan.fr"
] |
cboussicaud@leaseplan.fr
|
9b6a45f40e12d2ecc6562977a6042f61788e25dd
|
3716e91c0a18a2cf0b5807cc673d95a7539b008c
|
/DungeonsKitgard/TheRaisedSword.py
|
bd20ffd2d9632d3bba2ce439aa466531ce379317
|
[] |
no_license
|
kiwiapple87/CodeCombat-1
|
47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa
|
ce0201e5ed099193ca40afd3b7abeee5a3732387
|
refs/heads/master
| 2021-05-01T16:38:03.575842
| 2016-08-25T11:13:26
| 2016-08-25T11:13:26
| 66,552,813
| 1
| 0
| null | 2016-08-25T11:39:20
| 2016-08-25T11:39:18
| null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# http://codecombat.com/play/level/the-raised-sword
self.attack("Rig")
self.attack("Rig")
self.attack("Gurt")
self.attack("Gurt")
self.attack("Ack")
self.attack("Ack")
|
[
"vadim-job-hg@yandex.ru"
] |
vadim-job-hg@yandex.ru
|
ceb6062ff1dac18c07651c2b08736a9dc730fd51
|
0a613ccff34341510e9d8ac5e7c03ec991babfc8
|
/pytorch_widedeep/models/wide.py
|
24db9c9c539a9f5dae5152178bf0d2ccf1e74544
|
[
"MIT"
] |
permissive
|
sailfish009/pytorch-widedeep
|
f0e507e00566207b1e455d250eb67ac71d2df3c7
|
b487b06721c5abe56ac68c8a38580b95e0897fd4
|
refs/heads/master
| 2023-05-01T05:24:39.217202
| 2021-04-16T15:17:48
| 2021-04-16T15:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
import math
import torch
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
class Wide(nn.Module):
def __init__(self, wide_dim: int, pred_dim: int = 1):
r"""wide (linear) component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int, default = 1
    size of the output tensor containing the predictions
Attributes
-----------
wide_linear: :obj:`nn.Module`
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
super(Wide, self).__init__()
        # Embeddings: val + 1 because 0 is reserved for padding/unseen categories.
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
# (Sum(Embedding) + bias) is equivalent to (OneHotVector + Linear)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) -> None:
r"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: Tensor) -> Tensor: # type: ignore
r"""Forward pass. Simply connecting the Embedding layer with the ouput
neuron(s)"""
out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
|
[
"jrzaurin@gmail.com"
] |
jrzaurin@gmail.com
|
658ae6ce12ab34713850e57285d9f752a27cf1c8
|
ca231a325e8f4c18d50d89ffa7eec993d4cc68c3
|
/PythonZumbis/lista4/questao01.py
|
52ef9d134fbcb8049241fa246ac2f38051db434d
|
[] |
no_license
|
HugoPorto/PythonCodes
|
8e1597999ccd34ffa86df5ae7e91111d77dc7a22
|
539ad6891cbd49a2c011349f843ab710aad2993a
|
refs/heads/master
| 2022-02-13T05:48:24.633750
| 2017-09-12T15:44:06
| 2017-09-12T15:44:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from random import randint
lista = [
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100)]
maior = 0
menor = 999
i = 0
while i < len(lista):  # iterate over every entry (the list above holds 11 values)
if lista[i] > maior:
maior = lista[i]
if lista[i] < menor:
menor = lista[i]
i += 1
print "Maior: %d, menor %d" % (maior, menor)
|
[
"gpzim98@gmail.com"
] |
gpzim98@gmail.com
|
f3f5edd99ffd8c25d6f8b7f8f256e0d8b3db914b
|
b72c37e3ccda507b231649cddd5c7845c6c34ba1
|
/PythonBasic/Day10/HomeWork_func.py
|
616a95d2e92ed813880bc2d38a1c2920d8ab7b03
|
[] |
no_license
|
ljrdemail/AID1810
|
51c61c255b5c5efc1dc642b46691a614daedd85e
|
b417bd831bc1550ab953ce7ca23f54e34b8b2692
|
refs/heads/master
| 2020-04-24T09:45:14.781612
| 2019-02-21T11:26:49
| 2019-02-21T11:26:49
| 171,866,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
def mysum(n):
# sum = 0
# num = int(n)
# for i in range(1, num + 1):
# sum += i
# return sum
return sum(range(1,n+1))
# return sum(list(range(1,n+1)))
def myfac(n):
fac = 1
num = int(n)
for i in range(1, num + 1):
fac *= i
return fac
print(myfac(10))
def mypow(n):
# sum = 0
# num = int(n)
# for i in range(1, num + 1):
# sum += i ** i
# return sum
return sum(map(lambda x:x**x,range(1,n+1)))
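# Quick sanity checks (an illustrative addition, not in the original exercise):
print(mysum(100))  # 5050
print(mypow(3))    # 1**1 + 2**2 + 3**3 = 32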
|
[
"root"
] |
root
|
c5f10f045e2471562135208acf2377b8d14c9942
|
403de45c6626f2c40d2d48d64f4c94a728bb7b09
|
/vocoder_eva/eval.py
|
baa254f50dd11cbd3bae82919f435fa3ef8f150a
|
[] |
no_license
|
exeex/vocoder_eva
|
f95d969261adaa63ec01270239496eec3c9adca5
|
d81dc01768da20c208df00bfb78d90c52d93c0a8
|
refs/heads/master
| 2020-09-14T01:04:27.189683
| 2020-01-20T14:08:14
| 2020-01-20T14:08:14
| 222,961,890
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
import librosa
import numpy as np
import pyworld as pw
import matplotlib.pyplot as plt
import pysptk
ln10_inv = 1 / np.log(10)
def pad_to(x, target_len):
pad_len = target_len - len(x)
if pad_len <= 0:
return x[:target_len]
else:
return np.pad(x, (0, pad_len), 'constant', constant_values=(0, 0))
def eval_snr(x_r, x_s):
    # TODO: slide x_s to find the max matched value; the original paper slides x_s
    # and takes the best-matching SNR, which is not implemented here yet
return 10 * np.log10(np.sum(x_s ** 2) / np.sum((x_s - x_r) ** 2))
def eval_MCD(x_r, x_s):
    # TODO: verify that the computed values are correct (compare against the original paper)
c_r = librosa.feature.mfcc(x_r)
c_s = librosa.feature.mfcc(x_s)
# plt.imshow(c_r)
# plt.show()
# plt.imshow(c_s)
# plt.show()
#
# plt.plot(c_r[:, 20])
# plt.plot(c_s[:, 40])
# plt.show()
# print((c_r- c_s))
temp = 2 * np.sum((c_r - c_s) ** 2, axis=0)
# print(temp)
return 10 * ln10_inv * (temp ** 0.5)
def plot_f0(*files, title=None):
for file in files:
if isinstance(file, tuple):
file_path, label = file
else:
file_path = file
label = None
aud, sr = librosa.load(file_path, sr=None)
f0 = pysptk.sptk.swipe(aud.astype(np.double), sr, hopsize=128)
plt.plot(f0, label=label)
plt.ylabel('f0(Hz)')
plt.xlabel('frame')
if title:
plt.title(title)
plt.legend(loc='upper right')
plt.show()
def eval_rmse_f0(x_r, x_s, sr, frame_len='5', method='swipe', tone_shift=None):
    # TODO: make frame_len (ms) or hop_size configurable
if method == 'harvest':
f0_r, t = pw.harvest(x_r.astype(np.double), sr, frame_period=50)
f0_s, t = pw.harvest(x_s.astype(np.double), sr, frame_period=50)
elif method == 'dio':
f0_r, t = pw.dio(x_r.astype(np.double), sr, frame_period=50)
f0_s, t = pw.dio(x_s.astype(np.double), sr, frame_period=50)
elif method == 'swipe':
f0_r = pysptk.sptk.swipe(x_r.astype(np.double), sr, hopsize=128)
f0_s = pysptk.sptk.swipe(x_s.astype(np.double), sr, hopsize=128)
elif method == 'rapt':
f0_r = pysptk.sptk.rapt(x_r.astype(np.double), sr, hopsize=128)
f0_s = pysptk.sptk.rapt(x_s.astype(np.double), sr, hopsize=128)
else:
raise ValueError('no such f0 exract method')
# length align
f0_s = pad_to(f0_s, len(f0_r))
    # make unvoiced / voiced frame masks
f0_r_uv = (f0_r == 0) * 1
f0_r_v = 1 - f0_r_uv
f0_s_uv = (f0_s == 0) * 1
f0_s_v = 1 - f0_s_uv
tp_mask = f0_r_v * f0_s_v
tn_mask = f0_r_uv * f0_s_uv
fp_mask = f0_r_uv * f0_s_v
fn_mask = f0_r_v * f0_s_uv
if tone_shift is not None:
shift_scale = 2 ** (tone_shift / 12)
f0_r = f0_r * shift_scale
# only calculate f0 error for voiced frame
y = 1200 * np.abs(np.log2(f0_r + f0_r_uv) - np.log2(f0_s + f0_s_uv))
y = y * tp_mask
# print(y.sum(), tp_mask.sum())
f0_rmse_mean = y.sum() / tp_mask.sum()
# only voiced/ unvoiced accuracy/precision
vuv_precision = tp_mask.sum() / (tp_mask.sum() + fp_mask.sum())
vuv_accuracy = (tp_mask.sum() + tn_mask.sum()) / len(y)
return f0_rmse_mean, vuv_accuracy, vuv_precision
def eval_rmse_ap(x_r, x_s, sr, frame_len='5'):
# TODO: find out what algorithm to use. maybe pyworld d4c?
pass
if __name__ == '__main__':
file_r = 'demo/exmaple_data/ground_truth/arctic_b0436.wav'
file_s = 'demo/exmaple_data/no_pulse/arctic_b0436.wav'
aud_r, sr_r = librosa.load(file_r, sr=None)
aud_s, sr_s = librosa.load(file_s, sr=None)
assert sr_r == sr_s
if len(aud_r) != len(aud_s):
aud_r = aud_r[:len(aud_s)]
aud_s = aud_s[:len(aud_r)]
# mcd = eval_MCD(aud_r, aud_s)
rmse_f0 = eval_rmse_f0(aud_r, aud_s, sr_r)
print(rmse_f0)
# print(aud_r.shape)
# print(eval_snr(aud_r, aud_s))
# print(eval_snr(aud_r*10, aud_s*10))
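    # Synthetic self-check (an illustrative addition, not in the original file;
    # needs only numpy, already imported as np):
    t = np.linspace(0, 1, 16000)
    clean = np.sin(2 * np.pi * 440 * t)             # stands in for the reference x_s
    noisy = clean + 0.01 * np.random.randn(t.size)  # degraded copy, stands in for x_r
    print('synthetic SNR (dB):', eval_snr(noisy, clean))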
|
[
"xray0h@gmail.com"
] |
xray0h@gmail.com
|
560336f07c938cf86e6d8af0547e58c0c2aeee39
|
14fc2ee47e1081416f0465e8afa18da33169095f
|
/src/PP4E/Ai/ExpertSystem/holmes/holmes2/forward2.py
|
68e44a13498fb15d2cd3a51dad0b31cdbb159c0f
|
[] |
no_license
|
madtyn/progPython
|
d95ea8021b1a54433e7b73de9d3b11d53a3096b7
|
f3a1169149afdeb5191dd895462139f60d21d458
|
refs/heads/master
| 2021-07-09T13:35:27.519439
| 2017-10-04T14:46:57
| 2017-10-04T14:46:57
| 104,866,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
#
# module forward2.py
#
# forward chaining inference engine
#
# this is a variant of forward.py that implements
# negation both by explicit assertion, and by
# omission; see holmes.doc for more info;
# to use negation-by-omission in the shell:
# holmes> +2
# to use it in a program, just import forward2;
###########################################################################
import forward; forward1 = forward
from match import *
from forward import copy_dict, ask_user
def forward(kbase, facts, *pmode):
temp = forward1.conjunct
forward1.conjunct = conjunct # over-ride 1 function
res = forward1.forward(kbase, facts, pmode) # call forward.py version
forward1.conjunct = temp
return res
#################################################
# generate bindings for rule's 'if' conjunction:
# find intersected bindings at this 'AND' node,
# and construct proof subtree lists as the
# recursion unfolds with valid solutions;
#
# note: this function executes with global
# scope = module forward2.py, but the rest of
# the system executes with global scope =
# module forward.py;
#
# note: this isn't exactly like forward.py
# for explicitly asserted 'not' facts, since
# we don't carry variable bindings from the
# match (we do a simple ground comparison);
#################################################
def conjunct(ifs, known, dict, why):
if ifs == []:
return [(copy_dict(dict), [])] # all conjuncts matched
res = []
head, tail = ifs[0], ifs[1:]
if head[0] == 'ask':
term = substitute(head[1:], dict)
if ask_user(term, known, why):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'told')] + proof2))
elif head[0] == 'not':
term = substitute(head[1:], dict)
if not known.search_unique(term) or \
known.search_unique(['not'] + term):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'not')] + proof2))
else:
for (fact, proof) in known.search(head, dict):
matched, changes = match(head, fact, dict, {})
if matched:
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(fact, proof)] + proof2))
for (var, env) in changes:
env[var] = '?'
return res
|
[
"madtyn@gmail.com"
] |
madtyn@gmail.com
|
e5701a988ccc68f2d79bc6b8df147784e0b255fe
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/robot-server/tests/service/session/models/test_command.py
|
67d99f409ac65be929996b5cab636f523cc44269
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
from datetime import datetime
import pytest
from pydantic import ValidationError
from robot_server.service.session.models import command, command_definitions
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.ProtocolCommand.start_run,
command_definitions.CalibrationCommand.move_to_deck,
command_definitions.CheckCalibrationCommand.compare_point,
],
)
def test_empty(command_def: command_definitions.CommandDefinition):
"""Test creation of empty command request and response."""
request = command.CommandRequest.parse_obj(
{"data": {"command": command_def.value, "data": {}}}
)
assert request.data.command == command_def
assert request.data.data == command.EmptyModel()
dt = datetime(2000, 1, 1)
response = request.data.make_response(
identifier="id",
status=command.CommandStatus.executed,
created_at=dt,
started_at=None,
completed_at=None,
result=None,
)
assert response.command == command_def
assert response.data == command.EmptyModel()
assert response.id == "id"
assert response.createdAt == dt
assert response.startedAt is None
assert response.completedAt is None
assert response.result is None
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.EquipmentCommand.load_labware,
command_definitions.EquipmentCommand.load_pipette,
command_definitions.PipetteCommand.aspirate,
command_definitions.PipetteCommand.dispense,
command_definitions.PipetteCommand.drop_tip,
command_definitions.PipetteCommand.pick_up_tip,
command_definitions.CalibrationCommand.jog,
command_definitions.CalibrationCommand.set_has_calibration_block,
],
)
def test_requires_data(command_def: command_definitions.CommandDefinition):
"""Test creation of command requiring data will fail with empty body."""
with pytest.raises(ValidationError):
command.CommandRequest.parse_obj(
{"data": {"command": command_def.value, "data": {}}}
)
|
[
"noreply@github.com"
] |
Opentrons.noreply@github.com
|
580eeccd0fdd976778a96b8c5d7a64e3cbcc7863
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Emend_WC500026537.4.py
|
46754167c443614635089acfee5a74b6d1a88dc2
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,484
|
py
|
{'_data': [['Common',
[['Metabolism', u'minskad aptit'],
['Nervous system', u'huvudv\xe4rk'],
['Respiratory', u'Hicka'],
['GI', u'f\xf6rstoppning, dyspepsi'],
['General', u'Tr\xf6tthet'],
['Investigations', u'f\xf6rh\xf6jt ALAT']]],
['Uncommon',
[['Blood', u'febril neutropeni, anemi'],
['Psychiatric', u'\xc5ngest'],
['Nervous system', u'yrsel, s\xf6mnighet'],
['Cardiac', u'Palpitationer'],
['Vascular', u'V\xe4rmevallningar'],
['GI',
u'rapning, illam\xe5ende*, kr\xe4kning*, gastroesofagal refluxsjukdom, buksm\xe4rta, muntorrhet, flatulens'],
['Skin', u'utslag, akne'],
['Renal', u'Dysuri'],
['General', u'asteni, sjukdomsk\xe4nsla'],
['Investigations',
u'f\xf6rh\xf6jt ASAT, f\xf6rh\xf6jt alkaliskt fosfatas i blodet']]],
['Rare',
[['Infections', u'candidiasis, stafylokockinfektion'],
['Metabolism', u'Polydipsi'],
['Psychiatric', u'desorientering, euforisk sinnesst\xe4mning'],
['Nervous system', u'kognitiv st\xf6rning, letargi, dysgeusi'],
['Eye', u'Konjunktivit'],
['Ear', u'Tinnitus'],
['Cardiac', u'bradykardi, hj\xe4rt-k\xe4rlsjukdom'],
['Respiratory',
u'orofaryngeal sm\xe4rta, nysning, hosta, postnasalt dropp, svalgirritation'],
['GI',
u'perforerande duodenals\xe5r, stomatit, buksp\xe4nning, h\xe5rd avf\xf6ring, neutropen kolit'],
['Skin',
u'fotosensitivitetsreaktion, hyperhidros, seborr\xe9, hudf\xf6r\xe4ndring, kliande utslag, Stevens- Johnsons syndrom/toxisk epidermal nekrolys'],
['Musculoskeletal', u'muskelsvaghet, muskelspasmer'],
['Renal', u'Pollakisuri'],
['General', u'\xf6dem, obehagsk\xe4nsla i br\xf6stet, g\xe5ngst\xf6rning'],
['Investigations',
u'positivt test f\xf6r r\xf6da blodkroppar i urinen, minskat natrium i blodet, viktminskning, minskat antal neutrofiler, glukosuri, \xf6kad urinm\xe4ngd']]],
['Unknown',
[['Immune system',
u'\xf6verk\xe4nslighetsreaktioner inkluderande anafylaktiska reaktioner'],
['Skin', u'kl\xe5da, urtikaria']]]],
'_pages': [7, 9],
u'_rank': 32,
u'_type': u'TSFU'}
|
[
"daro@daro-ThinkPad-X220.(none)"
] |
daro@daro-ThinkPad-X220.(none)
|
ac6dc9752855bb5a9741df5fcd939fbed3d4226b
|
0f9f8e8478017da7c8d408058f78853d69ac0171
|
/python3/l0229_majority_element_2.py
|
8d8dd64afeae37fe515fb54d936bb76fed74d62f
|
[] |
no_license
|
sprax/1337
|
dc38f1776959ec7965c33f060f4d43d939f19302
|
33b6b68a8136109d2aaa26bb8bf9e873f995d5ab
|
refs/heads/master
| 2022-09-06T18:43:54.850467
| 2020-06-04T17:19:51
| 2020-06-04T17:19:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> List[int]:
c1, c2 = None, None
count1, count2 = 0, 0
for n in nums:
if n == c1:
count1 += 1
elif n == c2:
count2 += 1
elif count1 == 0:
c1 = n
count1 = 1
elif count2 == 0:
c2 = n
count2 = 1
else:
count1 -= 1
count2 -= 1
count1, count2 = 0, 0
for n in nums:
if n == c1:
count1 += 1
elif n == c2:
count2 += 1
result = []
if count1 > len(nums) // 3:
result.append(c1)
if count2 > len(nums) // 3:
result.append(c2)
return result
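# Illustrative checks (an addition, not part of the original solution file):
if __name__ == '__main__':
    sol = Solution()
    print(sol.majorityElement([3, 2, 3]))                 # expected: [3]
    print(sol.majorityElement([1, 1, 1, 3, 3, 2, 2, 2]))  # expected: [1, 2]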
|
[
"zhoulv82@gmail.com"
] |
zhoulv82@gmail.com
|
0cbf5e113335f0f6acf9dd864503ff0411592ba7
|
8bfd94be34008db3a7822247e9fb05604ad010d2
|
/snips/migrations/0001_initial.py
|
a7b67f1349671972f09f8da5d588956b54b4acf5
|
[] |
no_license
|
SnipToday/FrontEnd
|
c7ce59548b0114e24008580d98dad48c78ff431d
|
5043fb584535295b27e8c6f0044c54ac8ab40023
|
refs/heads/master
| 2021-01-22T17:49:04.506117
| 2017-09-04T22:08:28
| 2017-09-04T22:08:28
| 102,405,712
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-20 13:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import modelcluster.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('name', models.CharField(max_length=50, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='SnipRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, max_length=800, verbose_name='External link')),
('title', models.CharField(max_length=255)),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
migrations.CreateModel(
name='Tldr',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date', models.DateField(blank=True, default=django.utils.timezone.now, verbose_name='Post date')),
('body', models.TextField(verbose_name='Body')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='snips.Category')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='sniprelatedlink',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='sniprelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='snips.Tldr'),
),
]
|
[
"ranihorev@gmail.com"
] |
ranihorev@gmail.com
|
5d323280b72ac2c020eaf9e222a4bbe9e7dfd50f
|
0fc9eca08cc48f93a4079a9b4c0dd0201ef2ce80
|
/vindula/agendacorporativa/browser/search.py
|
8571790b6dae199e6a61314a09ccf6efa1eb8e74
|
[] |
no_license
|
vindula/vindula.agendacorporativa
|
c75564c7d89424e23e3a1baa1f1dcdc6ac2dfc4c
|
e70f6c2baf6629cb4a486bc642c49e7b7138bf7d
|
refs/heads/master
| 2020-12-29T01:41:33.812325
| 2013-08-02T18:54:58
| 2013-08-02T18:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# coding: utf-8
from Products.CMFCore.utils import getToolByName
from AccessControl.SecurityManagement import newSecurityManager, getSecurityManager, setSecurityManager
from DateTime import DateTime
from copy import copy
def busca_commitment(context,username,portlet=False):
ctool = getToolByName(context, 'portal_catalog')
path = context.portal_url.getPortalObject().getPhysicalPath()
date_range_query = { 'query': DateTime(), 'range': 'min'}
query = {'path': {'query':'/'.join(path)},
'portal_type': ('Commitment',),
'sort_on':'getStart_datetime',
# 'sort_order':'descending',
}
if portlet:
query['getStart_datetime'] = date_range_query
    # Search for the user's own commitments
query1 = copy(query)
query1['Creator'] = username
result1 = ctool(**query1)
    # Search for commitments the user participates in
query2 = copy(query)
query2['getConvidados'] = [username]
result2 = ctool(**query2)
    # Search for public commitments
query3 = copy(query)
query3['review_state'] = ['published', 'internally_published', 'external', 'internal']
result3 = ctool(**query3)
result = result1 + result2 + result3
L = []
L_UID = []
for item in result:
if not item.UID in L_UID:
L.append(item)
L_UID.append(item.UID)
return L
|
[
"cesaraugusto@liberiun.com"
] |
cesaraugusto@liberiun.com
|
e82730273d0eaa099b5b7974f79444de9077c466
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/purchase/report/purchase_bill.py
|
d442019eb8c1ec5604f64349509c1d4a9b4dc348
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
# -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import api, fields, models, tools
from harpiya.tools import formatLang
class PurchaseBillUnion(models.Model):
_name = 'purchase.bill.union'
_auto = False
_description = 'Purchases & Bills Union'
_order = "date desc, name desc"
name = fields.Char(string='Reference', readonly=True)
reference = fields.Char(string='Source', readonly=True)
partner_id = fields.Many2one('res.partner', string='Vendor', readonly=True)
date = fields.Date(string='Date', readonly=True)
amount = fields.Float(string='Amount', readonly=True)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True)
company_id = fields.Many2one('res.company', 'Company', readonly=True)
vendor_bill_id = fields.Many2one('account.move', string='Vendor Bill', readonly=True)
purchase_order_id = fields.Many2one('purchase.order', string='Purchase Order', readonly=True)
def init(self):
tools.drop_view_if_exists(self.env.cr, 'purchase_bill_union')
self.env.cr.execute("""
CREATE OR REPLACE VIEW purchase_bill_union AS (
SELECT
id, name, ref as reference, partner_id, date, amount_untaxed as amount, currency_id, company_id,
id as vendor_bill_id, NULL as purchase_order_id
FROM account_move
WHERE
type='in_invoice' and state = 'posted'
UNION
SELECT
-id, name, partner_ref as reference, partner_id, date_order::date as date, amount_untaxed as amount, currency_id, company_id,
NULL as vendor_bill_id, id as purchase_order_id
FROM purchase_order
WHERE
state in ('purchase', 'done') AND
invoice_status in ('to invoice', 'no')
)""")
def name_get(self):
result = []
for doc in self:
name = doc.name or ''
if doc.reference:
name += ' - ' + doc.reference
amount = doc.amount
if doc.purchase_order_id and doc.purchase_order_id.invoice_status == 'no':
amount = 0.0
name += ': ' + formatLang(self.env, amount, monetary=True, currency_obj=doc.currency_id)
result.append((doc.id, name))
return result
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
5114aa4f0924c3f6eaa3c0e48d017abbbb07dc7f
|
b6277a78b0337667ec4b88e0da3cb84a76383687
|
/tests/test_inertia.py
|
ee4d43936999c7eb71ba51b38a6a87c207d455ae
|
[
"MIT"
] |
permissive
|
sgalkina/trimesh
|
11c91e9c9a7c013fb81551dcee0fdbfffa1f5d13
|
55e35216efcf620c816d82d9f9167e22966a851d
|
refs/heads/master
| 2021-01-25T07:00:55.935106
| 2017-06-06T03:30:28
| 2017-06-06T03:30:28
| 93,636,761
| 0
| 0
| null | 2017-06-07T13:11:58
| 2017-06-07T13:11:57
| null |
UTF-8
|
Python
| false
| false
| 4,510
|
py
|
import generic as g
class InertiaTest(g.unittest.TestCase):
def test_inertia(self):
t0 = g.np.array([[-0.419575686853, -0.898655215203, -0.127965023308, 0. ],
[ 0.712589964872, -0.413418145015, 0.566834172697, 0. ],
[-0.562291548012, 0.146643245877, 0.813832890385, 0.],
[ 0. , 0. , 0. , 1. ]])
t1 = g.np.array([[ 0.343159553585, 0.624765521319, -0.701362648103, 0.],
[ 0.509982849005, -0.750986657709, -0.419447891476, 0. ],
[-0.788770571525, -0.213745370274, -0.57632794673 , 0. ],
[ 0. , 0. , 0. , 1. ]])
# make sure our transformations are actually still transformations
assert g.np.abs(g.np.dot(t0, t0.T) - g.np.eye(4)).max() < 1e-10
assert g.np.abs(g.np.dot(t1, t1.T) - g.np.eye(4)).max() < 1e-10
c = g.trimesh.primitives.Cylinder(height=10,
radius=1,
sections=720, # number of slices
transform=t0)
c0m = c.moment_inertia.copy()
c0 = g.trimesh.inertia.cylinder_inertia(c.volume,
c.primitive.radius,
c.primitive.height,
c.primitive.transform)
ct = g.np.abs((c0m / c0) - 1)
# we are comparing an inertia tensor from a mesh of a cylinder
# to an inertia tensor from an actual cylinder, so allow for some
# discretization uncertainty
assert ct.max() < 1e-3
# check our principal axis calculation against this cylinder
# the direction (long axis) of the cylinder should correspond to
# the smallest principal component of inertia, AKA rotation along
# the axis, rather than the other two which are perpendicular
components, vectors = g.trimesh.inertia.principal_axis(c.moment_inertia)
axis_test = g.np.abs((vectors[components.argmin()] / c.direction) - 1)
assert axis_test.max() < 1e-8
# make sure Trimesh attribute is plumbed correctly
assert g.np.allclose(c.principal_inertia_components, components)
assert g.np.allclose(c.principal_inertia_vectors, vectors)
# the other two axis of the cylinder should be identical
assert g.np.abs(g.np.diff(g.np.sort(components)[-2:])).max() < 1e-8
m = g.get_mesh('featuretype.STL')
i0 = m.moment_inertia.copy()
# rotate the moment of inertia
i1 = g.trimesh.inertia.transform_inertia(transform=t0, inertia_tensor=i0)
# rotate the mesh
m.apply_transform(t0)
# check to see if the rotated mesh + recomputed moment of inertia
# is close to the rotated moment of inertia
tf_test = g.np.abs((m.moment_inertia / i1) - 1)
assert tf_test.max() < 1e-6
# do it again with another transform
i2 = g.trimesh.inertia.transform_inertia(transform=t1, inertia_tensor=i1)
m.apply_transform(t1)
tf_test = g.np.abs((m.moment_inertia / i2) - 1)
assert tf_test.max() < 1e-6
def test_primitives(self):
primitives = [g.trimesh.primitives.Cylinder(height=5),
g.trimesh.primitives.Box(),
g.trimesh.primitives.Sphere(radius=1.23)]
for p in primitives:
for i in range(100):
# check to make sure the analytic inertia tensors are relatively
# close to the meshed inertia tensor (order of magnitude and sign)
comparison = g.np.abs(p.moment_inertia - p.to_mesh().moment_inertia)
c_max = comparison.max() / g.np.abs(p.moment_inertia).max()
assert c_max < .1
if hasattr(p.primitive, 'transform'):
matrix = g.trimesh.transformations.random_rotation_matrix()
p.primitive.transform = matrix
elif hasattr(p.primitive, 'center'):
p.primitive.center = g.np.random.random(3)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
[
"mik3dh@gmail.com"
] |
mik3dh@gmail.com
|
aaa6d9dc213f7a6387f24784b2d7e5faf88bdaca
|
2f37d3dcb20c9ba171024b1f8711b9926dbef0f3
|
/eventex/subscriptions/mixins.py
|
af8a07846e310bed196a2252db4603a7d4f73b42
|
[] |
no_license
|
sergiopassos/eventex-sergiopassos
|
9c080a365e4e554a5839aa461ce47e3d40d9fc53
|
6672223faaa7930377532141394dea3ae7c2c431
|
refs/heads/master
| 2023-04-27T02:40:15.094019
| 2020-01-16T13:51:11
| 2020-01-16T13:51:11
| 192,630,937
| 0
| 0
| null | 2023-04-21T20:32:42
| 2019-06-19T00:37:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
from django.conf import settings
from django.core import mail
from django.template.loader import render_to_string
from django.views.generic import CreateView
class EmailCreateMixin:
email_to = None
email_context_name = None
email_template_name = None
email_from = settings.DEFAULT_FROM_EMAIL
email_subject = ''
def send_mail(self):
# Send subscription email
subject = self.email_subject
from_ = self.email_from
to = self.get_email_to()
template_name = self.get_email_template_name()
context = self.get_email_context_data()
body = render_to_string(template_name, context)
return mail.send_mail(subject, body, from_, [from_, to])
def get_email_template_name(self):
if self.email_template_name:
return self.email_template_name
meta = self.object._meta
return '{}/{}_email.txt'.format(meta.app_label, meta.model_name)
def get_email_context_data(self, **kwargs):
context = dict(kwargs)
context.setdefault(self.get_email_context_name(), self.object)
return context
def get_email_context_name(self):
if self.email_context_name:
return self.email_context_name
return self.object._meta.model_name
def get_email_to(self):
if self.email_to:
return self.email_to
return self.object.email
class EmailCreateView(EmailCreateMixin, CreateView):
def form_valid(self, form):
response = super().form_valid(form)
self.send_mail()
return response
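# Illustrative subclass sketch (an addition; `Subscription` and its field names
# are hypothetical; with these names, get_email_template_name() would resolve
# to subscriptions/subscription_email.txt):
#     class SubscriptionCreateView(EmailCreateView):
#         model = Subscription
#         fields = ['name', 'email']
#         email_subject = 'Subscription confirmation'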
|
[
"sergio.passos02@gmail.com"
] |
sergio.passos02@gmail.com
|
20aa4222af3a4ebe0cc386a6ed3d8a36989e1b88
|
8a9b10eeef43e648fcc82d5fdbf6505e0e19a88b
|
/Tensor_Flow/stock_similarity_daily.py
|
865cb66b7091f69f8580c800d2c23bd22bff0acb
|
[] |
no_license
|
lwzswufe/neural_net
|
3648e100ad68fd2dbd6e3f51be7b053780f7fd87
|
0a0ed94680c0e5dd3dbd2e13aef79a1b8fd8293d
|
refs/heads/master
| 2021-03-30T17:06:18.296242
| 2019-10-16T08:22:47
| 2019-10-16T08:22:47
| 76,216,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
# author='lwz'
# coding:utf-8
# !/usr/bin/env python3
import os
from Tensor_Flow import AutoEncoder2, similarity_analysis
if __name__ == '__main__':
AutoEncoder2.daily()
similarity_analysis.daily()
|
[
"lwzswufe@foxmail.com"
] |
lwzswufe@foxmail.com
|
c73df56929f4b7102748c83d681b3d6ba5a8af13
|
0d86bb399a13152cd05e3ba5684e4cb22daeb247
|
/python-basics/unit11-error/py116_raise.py
|
f69da74fed89c80b5b2226b0517bf4504d52200c
|
[] |
no_license
|
tazbingor/learning-python2.7
|
abf73f59165e09fb19b5dc270b77324ea00b047e
|
f08c3bce60799df4f573169fcdb1a908dcb8810f
|
refs/heads/master
| 2021-09-06T05:03:59.206563
| 2018-02-02T15:22:45
| 2018-02-02T15:22:45
| 108,609,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/1/2 8:00 PM
# @Author : Aries
# @Site :
# @File : py116_raise.py
# @Software: PyCharm
'''
The raise statement: raising an exception explicitly
'''
def throw_error():
    raise Exception("raising an exception")
if __name__ == '__main__':
    throw_error()
'''
Traceback (most recent call last):
  File "project/PycharmProjects/rising-python-classics/python-basics/unit11-error/py116_raise.py", line 18, in <module>
    throw_error()
  File "project/PycharmProjects/rising-python-classics/python-basics/unit11-error/py116_raise.py", line 14, in throw_error
    raise Exception("raising an exception")
Exception: raising an exception
'''
|
[
"852353298@qq.com"
] |
852353298@qq.com
|
ed61bed728ef72c66243a1d999603df111527ea6
|
ab650e6a0ca2f97096235ebe914b140b740aec66
|
/app/templates/_root_init.py
|
88f63dc99cf2ee4d75a6a147f53f0b50423d33bb
|
[
"MIT"
] |
permissive
|
ghostRider1124/generator-python-package
|
8fd353d6c4ed2c6f1ad88ebb6fe6811a8d585026
|
678b33fec9937c68aaa45ae04e6a8aac5f6554c5
|
refs/heads/master
| 2020-05-20T18:16:32.266189
| 2014-07-02T17:05:26
| 2014-07-02T17:05:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of <%= package.name %>.
# <%= package.url %>
# Licensed under the <%= package.license %> license:
# http://www.opensource.org/licenses/<%= package.license%>-license
# Copyright (c) <%= package.created.year %> <%= package.author.name %> <%= package.author.email %>
from <%= package.pythonName %>.version import __version__
|
[
"heynemann@gmail.com"
] |
heynemann@gmail.com
|
06c6fc43bcedc984addf32ef365c64484890ea3c
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/ec_13175-1949/sdB_ec_13175-1949_coadd.py
|
ffac546028839ada5df1dbebec03b93979d41d2d
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[200.073167,-20.088431], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ec_13175-1949/sdB_ec_13175-1949_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ec_13175-1949/sdB_ec_13175-1949_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
f606fc6928efe6b05a4ee59ddda61acd48e114dd
|
930bc970069d8cbcfb36725a90492eff50638ecc
|
/code/dk-iris-pipeline/airflow_home/src/model/benchmark.py
|
1bbbbefb5abcaffd9064680c379d44d929b5a14b
|
[
"MIT"
] |
permissive
|
databill86/airflow4ds
|
4770d856569c4db4b55b2d9dfda010e21c4cd790
|
b5ae213f7169c54d31f4eca58d235ec6b09fd56f
|
refs/heads/master
| 2021-09-25T17:26:43.340747
| 2018-10-24T16:09:49
| 2018-10-24T16:09:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
import os
import sys
# Allow Python to discover local modules
sys.path.append(os.getenv(key='AIRFLOW_HOME'))
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
from src import PROJECT_DIRECTORY
from src.scrub import get_clean_iris
def get_train_test_data():
"""
"""
df = get_clean_iris()
X = df.copy().drop(['iris_type'], axis=1)
y = df.copy().loc[:, 'iris_type'].replace({'setosa': 0, 'versicolor': 1, 'virginica': 2})
return train_test_split(X, y, test_size=0.30, random_state=112358)
def run_model_benchmark():
"""
"""
X_tr, X_te, y_tr, y_te = get_train_test_data()
lr_0 = LogisticRegression()
lr_0.fit(X_tr, y_tr)
y_pr = lr_0.predict(X_te)
print(f"Benchmark Model Accuracy: {accuracy_score(y_te, y_pr)}")
|
[
"dushyant.khosla@pmi.com"
] |
dushyant.khosla@pmi.com
|
3ce8f8c6eacd5408747793dcc122d0488f4ae734
|
baa484828e8683d51d58d48989532e3d3ce987bc
|
/200228_4.py
|
2efd1e862a827523b45963bb36d5e9ac5f94ac82
|
[] |
no_license
|
sungguenja/study_gui
|
0fc1e17c98a9afc0a6e66a39aeefcd89c3e60f5e
|
b058ca900061f2bd743f8532056ecedcc6b7ce0a
|
refs/heads/master
| 2021-01-16T16:32:28.027456
| 2020-03-17T16:54:21
| 2020-03-17T16:54:21
| 243,184,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from tkinter import *
def font_control(ev):
    # 'HY헤드라인M', '굴림체' and '굴림' are Korean system font names, kept as-is
    label.config(font='HY헤드라인M {0} bold'.format(v.get()))
    if v.get() == 40:
        label['text'] = 'wa sans~'
        label['font'] = '굴림체 40 bold'
    else:
        label['text'] = 'Hello Python~'
win = Tk()
v = IntVar()
win.geometry('300x150')
label = Label(win, text='Hello Python~')
label.pack(fill='y', expand=1)
sc = Scale(win, from_=10, to=40, orient=HORIZONTAL, variable=v, command=font_control)
sc.pack(fill='x', expand=1)
qbtn = Button(win, text='Quit', command=win.quit, font='굴림 10 bold')
qbtn.pack()
win.mainloop()
|
[
"59605197+sungguenja@users.noreply.github.com"
] |
59605197+sungguenja@users.noreply.github.com
|
d232fe89ca699bb27814c5684ea1ae3d2a1807b6
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-as/huaweicloudsdkas/v1/model/batch_remove_scaling_instances_request.py
|
bebca60ab39111f01f7c0e8379c57dbbff64a882
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchRemoveScalingInstancesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group_id': 'str',
'body': 'BatchRemoveInstancesOption'
}
attribute_map = {
'scaling_group_id': 'scaling_group_id',
'body': 'body'
}
def __init__(self, scaling_group_id=None, body=None):
"""BatchRemoveScalingInstancesRequest
The model defined in huaweicloud sdk
        :param scaling_group_id: Instance ID.
:type scaling_group_id: str
:param body: Body of the BatchRemoveScalingInstancesRequest
:type body: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
self._scaling_group_id = None
self._body = None
self.discriminator = None
self.scaling_group_id = scaling_group_id
if body is not None:
self.body = body
@property
def scaling_group_id(self):
"""Gets the scaling_group_id of this BatchRemoveScalingInstancesRequest.
        Instance ID.
:return: The scaling_group_id of this BatchRemoveScalingInstancesRequest.
:rtype: str
"""
return self._scaling_group_id
@scaling_group_id.setter
def scaling_group_id(self, scaling_group_id):
"""Sets the scaling_group_id of this BatchRemoveScalingInstancesRequest.
        Instance ID.
:param scaling_group_id: The scaling_group_id of this BatchRemoveScalingInstancesRequest.
:type scaling_group_id: str
"""
self._scaling_group_id = scaling_group_id
@property
def body(self):
"""Gets the body of this BatchRemoveScalingInstancesRequest.
:return: The body of this BatchRemoveScalingInstancesRequest.
:rtype: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchRemoveScalingInstancesRequest.
:param body: The body of this BatchRemoveScalingInstancesRequest.
:type body: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchRemoveScalingInstancesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
14c7592a361aec866f6bbc9d904a1567aab1a1ec
|
ecd4b06d5d5368b71fd72a1c2191510a03b728fd
|
/3 - pandas foundation/austin case study/4.py
|
40ede74aea409c735ebd2a40bd4f6e624d3f96aa
|
[
"MIT"
] |
permissive
|
Baidaly/datacamp-samples
|
86055db5e326b59bfdce732729c80d76bf44629e
|
37b4f78a967a429e0abca4a568da0eb9d58e4dff
|
refs/heads/master
| 2022-07-27T01:18:00.700386
| 2022-07-18T19:27:23
| 2022-07-18T19:27:23
| 123,827,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
import pandas as pd  # needed below for pd.to_datetime
# Convert the date column to string: df_dropped['date']
df_dropped['date'] = df_dropped['date'].astype(str)
# Pad leading zeros to the Time column: df_dropped['Time']
df_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x))
# Concatenate the new date and Time columns: date_string
date_string = df_dropped['date'] + df_dropped['Time']
# Convert the date_string Series to datetime: date_times
date_times = pd.to_datetime(date_string, format='%Y%m%d%H%M')
# Set the index to be the new date_times container: df_clean
df_clean = df_dropped.set_index(date_times)
# Print the output of df_clean.head()
print(df_clean.head())
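# --- Added illustration (not part of the original exercise): the snippet
# assumes a DataFrame shaped like the course's weather data. A tiny synthetic
# `df_dropped` with the same two columns would behave identically:
#
# df_dropped = pd.DataFrame({'date': [20110101, 20110101], 'Time': [53, 453]})
# padding 53 to '0053' and concatenating yields '201101010053', which
# pd.to_datetime parses with format '%Y%m%d%H%M'.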
|
[
"daulet.urazalinov@uptake.com"
] |
daulet.urazalinov@uptake.com
|
1ed5f1cca9b1d29e15103ea5e148e811b1f53733
|
be526f8602651479f5b24eab9c91a3817e9bff0e
|
/word2vec_tensorboard.py
|
bc0a82bc12957315e14cf53c710a8cda0042d17f
|
[] |
no_license
|
bage79/word2vec4kor
|
76a870c57a1a854ff3e3a00c955424b394723259
|
3dc8a856e22f79e8da27f74b3d55de474a599e8c
|
refs/heads/master
| 2021-04-27T11:33:00.869446
| 2018-05-08T14:12:45
| 2018-05-08T14:12:45
| 122,564,641
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
import argparse
import os
import pickle
import traceback
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def turn_off_tensorflow_logging():
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # ignore tensorflow warnings
    tf.logging.set_verbosity(tf.logging.ERROR)  # ignore tensorflow info (check GPU allocation info)
def word2vec_tensorboard(name, data_dir, tensorboard_dir, top_n=10000):
turn_off_tensorflow_logging()
try:
if not os.path.exists(tensorboard_dir):
os.mkdir(tensorboard_dir)
for filename in os.listdir(tensorboard_dir):
os.remove(os.path.join(tensorboard_dir, filename)) # remove old tensorboard files
config = projector.ProjectorConfig()
name = name.replace('+', '')
idx2word = pickle.load(open(os.path.join(data_dir, 'idx2word.dat'), 'rb'))
# word2idx = pickle.load(open('data/word2idx.dat', 'rb'))
idx2vec = pickle.load(open(os.path.join(data_dir, 'idx2vec.dat'), 'rb'))
wc = pickle.load(open(os.path.join(data_dir, 'wc.dat'), 'rb'))
total = sum(wc.values())
# print('idx2word:', idx2word[:10])
# print('idx2vec:', idx2vec[1])
# print('wc:', list(wc.items())[:10])
print('total count:', total)
idx2vec, idx2word = idx2vec[:top_n], idx2word[:top_n]
embedding_var = tf.Variable(idx2vec, name=name)
# print(data)
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = os.path.join(tensorboard_dir, f'{name}.tsv')
print('')
print(f'embedding_var.name: {embedding_var.name} shape: {embedding_var.shape}')
print(f'embedding.metadata_path: {embedding.metadata_path}')
with open(embedding.metadata_path, 'wt') as out_f:
out_f.write('spell\tfreq\n')
for spell in idx2word:
out_f.write(f'{spell}\t{wc.get(spell, 0)/total}\n')
summary_writer = tf.summary.FileWriter(tensorboard_dir)
projector.visualize_embeddings(summary_writer, config)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_list=[embedding_var])
checkpoint_file = os.path.join(tensorboard_dir, f'{name}.ckpt')
saver.save(sess, checkpoint_file, global_step=None)
print(f'checkpoint_file: {checkpoint_file}')
# absolute path -> relative path
for filename in ['checkpoint', 'projector_config.pbtxt']:
filepath = os.path.join(tensorboard_dir, filename)
lines = []
with open(filepath, 'rt') as f:
for line in f.readlines():
lines.append(line.replace(tensorboard_dir, '.'))
os.remove(filepath)
with open(filepath, 'wt') as f:
for line in lines:
f.write(line)
except:
traceback.print_exc()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='sample.ko.wikipedia', type=str, help="embedding name in tensorboard projector")
parser.add_argument('--data_dir', default=os.path.join(os.getenv('HOME'), 'workspace/word2vec4kor/data'), type=str, help="data directory path")
parser.add_argument('--tensorboard_dir', default=os.path.join(os.getenv('HOME'), 'tensorboard_log/'), type=str, help="tensorboard directory path")
    parser.add_argument('--top_n', default=10000, type=int, help='maximum vocabulary size')
args = parser.parse_args()
word2vec_tensorboard(name=args.name, data_dir=args.data_dir, tensorboard_dir=args.tensorboard_dir, top_n=args.top_n)
|
[
"bage79@gmail.com"
] |
bage79@gmail.com
|
782beba45b77ab5dd35b7a388087e5f618f0f266
|
62f59fe1e0246b33c84412ee2a60e77938a05a15
|
/test/zombie_task_test.py
|
97d79ca67f9031bb031c6eccabdd7eb9bba5e7e6
|
[] |
no_license
|
20113261/platform_service
|
02676d2654f5c7bde2c7eafdadbf55fe7253a7b0
|
bc903168bd7cbc499892f24c2b1cc82c38180c01
|
refs/heads/dev
| 2022-08-01T02:30:05.004852
| 2018-04-29T05:39:37
| 2018-04-29T05:39:37
| 131,576,306
| 1
| 0
| null | 2022-07-08T19:13:32
| 2018-04-30T09:14:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/26 6:28 PM
# @Author : Hou Rong
# @Site :
# @File : zombie_task_test.py
# @Software: PyCharm
import datetime
import pymongo
client = pymongo.MongoClient(host='10.10.231.105')
collections = client['MongoTask']['Task']
def monitoring_zombies_task():
try:
cursor = collections.find(
{'running': 1, 'utime': {'$lt': datetime.datetime.now() - datetime.timedelta(hours=1)}}, {'_id': 1},
hint=[('running', 1), ('utime', -1)]).limit(
10000)
id_list = [id_dict['_id'] for id_dict in cursor]
print(len(id_list))
result = collections.update({
'_id': {
'$in': id_list
}
}, {
'$set': {
'finished': 0,
'used_times': 0,
'running': 0
}
}, multi=True)
print(result)
except Exception as e:
print(e)
if __name__ == '__main__':
import time
start = time.time()
monitoring_zombies_task()
print(time.time() - start)
|
[
"nmghr9@gmail.com"
] |
nmghr9@gmail.com
|
d72d79bedd1d13883fc2cad393b334ee4aff8287
|
95495baeb47fd40b9a7ecb372b79d3847aa7a139
|
/test/test_metadata.py
|
4a4189f3e130f421154ba36b2d51d25b314c6960
|
[] |
no_license
|
pt1988/fmc-api
|
b1d8ff110e12c13aa94d737f3fae9174578b019c
|
075f229585fcf9bd9486600200ff9efea5371912
|
refs/heads/main
| 2023-01-07T09:22:07.685524
| 2020-10-30T03:21:24
| 2020-10-30T03:21:24
| 308,226,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
# coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.metadata import Metadata # noqa: E501
from swagger_client.rest import ApiException
class TestMetadata(unittest.TestCase):
"""Metadata unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMetadata(self):
"""Test Metadata"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.metadata.Metadata() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"pt1988@gmail.com"
] |
pt1988@gmail.com
|
0af742a6da7d3a491d33c665d6821b55a52e9e22
|
451e3188ef94bfd106a0194774c23edd0bba84a2
|
/blog/migrations/0011_auto_20190624_1757.py
|
32bf1b8e4505dacd5f6698621a22e1552d94538a
|
[] |
no_license
|
Dolidodzik/Wagtail-Django-Static-Blog
|
fe9c8caf63275c8f444ac7b898e7e0d67fae018a
|
db0769da8c7b1c3fc450fe40181bfcf984079ec1
|
refs/heads/master
| 2020-06-08T02:13:32.401772
| 2019-10-29T17:09:09
| 2019-10-29T17:09:09
| 193,139,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# Generated by Django 2.2.2 on 2019-06-24 17:57
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20190624_1749'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('gallery', wagtail.core.blocks.StreamBlock([('image', wagtail.images.blocks.ImageChooserBlock())], label='image gallery'))]),
),
migrations.AlterField(
model_name='blogpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('gallery', wagtail.core.blocks.StreamBlock([('image', wagtail.images.blocks.ImageChooserBlock())], label='image gallery'))]),
),
]
|
[
"teethtard321@gmail.com"
] |
teethtard321@gmail.com
|
dc8423b42f23e5e62b109260c92631277dd9f347
|
20860030d52b5be62cb797e396a5a6b83f45dc44
|
/configuration-api/src/__init__.py
|
cf8683bda4fd470b0adb1d974c85ba42670d64d7
|
[] |
no_license
|
rcosnita/bravehub
|
189d30c46224dd80d6fbf41c50a33559ec2f44ae
|
960bcfdb3c2e53e81aa75f7a48980e4918cfd4bb
|
refs/heads/master
| 2022-12-21T11:28:16.626690
| 2018-02-17T10:43:09
| 2018-02-17T10:43:09
| 98,259,347
| 0
| 1
| null | 2022-12-19T13:27:11
| 2017-07-25T03:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""Initializes some configuration parameters which are used in the implementation of
configuration-api microservice."""
from src.ioc import ConfigurationApiContainer
API_VERSION = ConfigurationApiContainer.api_meta.version
API_FULL_VERSION = "{0}.{1}.{2}".format(API_VERSION.major, API_VERSION.minor, API_VERSION.patch)
API_MAJOR_VERSION = "{0}.{1}".format(API_VERSION.major, API_VERSION.minor)
|
[
"radu.cosnita@gmail.com"
] |
radu.cosnita@gmail.com
|
49f713c4ee1d37d24c760dd5a7d2afcca4e2a2f2
|
6359831db732f929409adbb8270092c7e9cca8d5
|
/Q046_Vertical_traversal_of_binary_trees.py
|
a8f43145ca2853b7a89f408b9807b11bd25a2276
|
[] |
no_license
|
latika18/interviewbit
|
11237219d982c98a22f0098be8248ef7a5b9246f
|
a065b19dc368136101dafbbbdab9b664fed0bf35
|
refs/heads/master
| 2020-03-15T15:20:30.002201
| 2018-08-22T07:39:21
| 2018-08-22T07:39:21
| 132,209,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
"""
Given a binary tree, print a vertical order traversal of it.
Example :
Given binary tree:
      6
     / \
    3   7
   / \   \
  2   5   9
returns
[
    [2],
    [3],
    [6, 5],
    [7],
    [9]
]
Note : If 2 Tree Nodes share the same vertical level then the one with lesser depth will come first.
Code:
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param A : root node of tree
    # @return a list of list of integers
    def verticalordertraversal(self, root):
        if not root:
            return []
        hashmap = {}           # horizontal distance -> node values, top to bottom
        visited = [(root, 0)]  # BFS queue of (node, horizontal distance)
        while visited:
            node, hd = visited.pop(0)
            hashmap.setdefault(hd, []).append(node.val)
            if node.left:
                visited.append((node.left, hd - 1))
            if node.right:
                visited.append((node.right, hd + 1))
        # BFS visits shallower nodes first, so within a vertical level the
        # node with lesser depth is appended first, as the problem requires.
        return [hashmap[hd] for hd in sorted(hashmap)]
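# --- Added usage sketch (not part of the original solution): building the
# example tree from the problem statement above.
#
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
#
# root = TreeNode(6)
# root.left, root.right = TreeNode(3), TreeNode(7)
# root.left.left, root.left.right = TreeNode(2), TreeNode(5)
# root.right.right = TreeNode(9)
# print(Solution().verticalordertraversal(root))
# # -> [[2], [3], [6, 5], [7], [9]]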
|
[
"noreply@github.com"
] |
latika18.noreply@github.com
|
4d062caff31b114960dd1f54eb12dceb00549788
|
df5ed643835e0759b326b9c3ad2f96a945b1519f
|
/Xcode.app/Contents/Developer/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/turtledemo/penrose.py
|
c913c6312d53a0f6dfd5900a4ee1c1972cf95f4c
|
[
"MIT"
] |
permissive
|
keith/Xcode.app-strings
|
8182a6b5272e5763111ddf376818aca277f113d3
|
c3c93e5b349425159172bb62e9929b701de26a87
|
refs/heads/main
| 2023-08-30T22:31:13.291293
| 2023-08-29T18:19:23
| 2023-08-29T18:19:23
| 75,589,712
| 91
| 28
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
#!/usr/bin/env python3
""" xturtle-example-suite:
xtx_kites_and_darts.py
Constructs two aperiodic penrose-tilings,
consisting of kites and darts, by the method
of inflation in six steps.
Starting points are the patterns "sun"
consisting of five kites and "star"
consisting of five darts.
For more information see:
http://en.wikipedia.org/wiki/Penrose_tiling
-------------------------------------------
"""
from turtle import *
from math import cos, pi
from time import perf_counter as clock, sleep
f = (5**0.5-1)/2.0 # (sqrt(5)-1)/2 -- golden ratio
d = 2 * cos(3*pi/10)
def kite(l):
fl = f * l
lt(36)
fd(l)
rt(108)
fd(fl)
rt(36)
fd(fl)
rt(108)
fd(l)
rt(144)
def dart(l):
fl = f * l
lt(36)
fd(l)
rt(144)
fd(fl)
lt(36)
fd(fl)
rt(144)
fd(l)
rt(144)
def inflatekite(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = True
return
fl = f * l
lt(36)
inflatedart(fl, n-1)
fd(l)
rt(144)
inflatekite(fl, n-1)
lt(18)
fd(l*d)
rt(162)
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(36)
def inflatedart(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = False
return
fl = f * l
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(54)
fd(l*d)
rt(126)
inflatedart(fl, n-1)
fd(l)
rt(144)
def draw(l, n, th=2):
clear()
l = l * f**n
shapesize(l/100.0, l/100.0, th)
for k in tiledict:
h, x, y = k
setpos(x, y)
setheading(h)
if tiledict[k]:
shape("kite")
color("black", (0, 0.75, 0))
else:
shape("dart")
color("black", (0.75, 0, 0))
stamp()
def sun(l, n):
for i in range(5):
inflatekite(l, n)
lt(72)
def star(l,n):
for i in range(5):
inflatedart(l, n)
lt(72)
def makeshapes():
tracer(0)
begin_poly()
kite(100)
end_poly()
register_shape("kite", get_poly())
begin_poly()
dart(100)
end_poly()
register_shape("dart", get_poly())
tracer(1)
def start():
reset()
ht()
pu()
makeshapes()
resizemode("user")
def test(l=200, n=4, fun=sun, startpos=(0,0), th=2):
global tiledict
goto(startpos)
setheading(0)
tiledict = {}
tracer(0)
fun(l, n)
draw(l, n, th)
tracer(1)
nk = len([x for x in tiledict if tiledict[x]])
nd = len([x for x in tiledict if not tiledict[x]])
print("%d kites and %d darts = %d pieces." % (nk, nd, nk+nd))
def demo(fun=sun):
start()
for i in range(8):
a = clock()
test(300, i, fun)
b = clock()
t = b - a
if t < 2:
sleep(2 - t)
def main():
#title("Penrose-tiling with kites and darts.")
mode("logo")
bgcolor(0.3, 0.3, 0)
demo(sun)
sleep(2)
demo(star)
pencolor("black")
goto(0,-200)
pencolor(0.7,0.7,1)
write("Please wait...",
align="center", font=('Arial Black', 36, 'bold'))
test(600, 8, startpos=(70, 117))
return "Done"
if __name__ == "__main__":
msg = main()
mainloop()
|
[
"keithbsmiley@gmail.com"
] |
keithbsmiley@gmail.com
|
994535b97312d7b201e3c3ece5776069e47d98fd
|
1269530d9534b563879d8e995fecf67196287719
|
/check_size_mlstm.py
|
40a5c8f3dcd4d109b5f841f02994af78f278d6a3
|
[] |
no_license
|
amirunpri2018/Keras-Multiplicative-LSTM
|
a7ff7eea2d9b8ba1ae15efa7759eb5510fe6fabe
|
3c89fed3ac45d84072bc2712a895e479b657e457
|
refs/heads/master
| 2020-04-08T03:37:47.274723
| 2017-11-04T03:04:22
| 2017-11-04T03:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
import multiplicative_lstm
from keras.layers import Input, LSTM
from keras.models import Model
ip = Input(shape=(1, 100))
lstm = LSTM(128)(ip)
mlstm = multiplicative_lstm.MultiplicativeLSTM(128)(ip)
lstm_model = Model(ip, lstm)
mlstm_model = Model(ip, mlstm)
lstm_model.summary()
print('\n' * 3)
mlstm_model.summary()
print('\n' * 3)
params_count_lstm = lstm_model.count_params()
params_count_mlstm = mlstm_model.count_params()
param_ratio = params_count_mlstm / float(params_count_lstm)
if abs(param_ratio - 1.25) > 0.01:
    print("Param count (mlstm) / Param count (lstm) = %0.2f, should be close to 1.25" % (param_ratio))
print("Size ratio of mLSTM to LSTM is %0.2f!" % (param_ratio))
|
[
"titu1994@gmail.com"
] |
titu1994@gmail.com
|
f69ee0ebd58b2cd9df04217cbdb83c8b95f62007
|
f6b5799c13fad2382d638a1208f4972ce818174a
|
/site/PROGRAMMERS/src/python/스킬테스트/level01/콜라츠추측.py
|
9da7b4527830f6a02e0a2ca40eba22dd9e88d4e1
|
[] |
no_license
|
JoonBeomLee/Algorithm_Python
|
6bf0cc29ffaf75156bfa44ea531c33b3d2b2a129
|
185fb39d535573c374f1d0d88f728f97086a4246
|
refs/heads/master
| 2023-06-11T10:27:10.228151
| 2021-07-05T14:59:40
| 2021-07-05T14:59:40
| 193,500,999
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
def solution(num):
    answer = 0
    while True:
        if num == 1:
            break
        if answer == 500:
            answer = -1
            break
        # even
        if num % 2 == 0:
            num = num // 2
        # odd
        else:
            num = 3 * num + 1
        answer += 1
    return answer
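# --- Added check (not in the original file): 6 -> 3 -> 10 -> 5 -> 16 -> 8
# -> 4 -> 2 -> 1 takes 8 steps, so solution(6) should return 8.
if __name__ == '__main__':
    assert solution(6) == 8
    assert solution(16) == 4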
|
[
"jbl9595@naver.com"
] |
jbl9595@naver.com
|
28623c7e7bcdf0aaaff6991949f05be9d2bc310d
|
e2992452a3c52f4cbbc64e1686128ad464b71d16
|
/weixinSource/weixinSource/pipelines.py
|
aeba7ce32ab3fe5a82d68511a812978bede5574e
|
[] |
no_license
|
MaGuiSen/studyScrapy
|
6b84605a15027ffc24501d690666f419ebb379fd
|
03604bafe19e55db12677a4af388c8a9198ca572
|
refs/heads/master
| 2021-01-17T04:30:23.858217
| 2017-08-30T01:50:08
| 2017-08-30T01:50:08
| 95,433,695
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WeixinsourcePipeline(object):
def process_item(self, item, spider):
return item
|
[
"1059876295@qq.com"
] |
1059876295@qq.com
|
0eec3d555e4db9f9548824002788e9b95e60b91e
|
dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c
|
/matrix/spiral_matrix_2.py
|
ed14ac09091d56c65841453b2f11ee3ea40c813a
|
[] |
no_license
|
salujaharkirat/ds-algo
|
ec22eaae81bdb78f2818248508325a536aedbb7b
|
819b5971826d97ec600b92776c5158518c9cbf22
|
refs/heads/master
| 2023-05-02T17:20:49.425484
| 2021-05-23T07:54:29
| 2021-05-23T07:54:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
"""
https://leetcode.com/problems/spiral-matrix-ii/
"""
from typing import List
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
res = []
for i in range(n):
res.append([0] * n)
left = 0
right = n - 1
top = 0
bottom = n - 1
direction = 0
cnt = 1
while left <= right and top <= bottom:
if direction == 0:
for i in range(left, right+1):
res[top][i] = cnt
cnt += 1
top += 1
direction = 1
elif direction == 1:
for i in range(top, bottom+1):
res[i][right] = cnt
cnt += 1
right -= 1
direction = 2
elif direction == 2:
for i in reversed(range(left, right+1)):
# res.append(cnt)
res[bottom][i] = cnt
cnt += 1
bottom -= 1
direction = 3
elif direction == 3:
for i in reversed(range(top, bottom+1)):
# res.append(cnt)
res[i][left] = cnt
cnt += 1
left += 1
direction = 0
return res
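# --- Added usage sketch (not part of the original solution):
# Solution().generateMatrix(3) fills the ring clockwise and returns
# [[1, 2, 3],
#  [8, 9, 4],
#  [7, 6, 5]]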
|
[
"saluja.harkirat@gmail.com"
] |
saluja.harkirat@gmail.com
|
d7cdd4c62c61f62e1d8c309bc87f77c4949eadd9
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sbss_1655+591/sdB_sbss_1655+591_coadd.py
|
f030d65a0471c8af9f04c2583c5c8ff5c0bbeec1
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[254.127958,59.079469], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sbss_1655+591/sdB_sbss_1655+591_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sbss_1655+591/sdB_sbss_1655+591_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
81d2acb91a751c6699bd377dc4694798e495f094
|
634514a9c10e32051964b179cc807d089d31124e
|
/S2l/Thesis_Ch3/Exp1_reach3dof/Scripts/plotter_episodes.py
|
c705777868308749f11d14705ce7c42a4bc061d3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
leopauly/Observation-Learning-Simulations
|
34009872a1f453ffc4ae7ddced7447a74ff704c4
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
refs/heads/master
| 2021-08-04T10:55:42.900015
| 2021-07-05T13:41:09
| 2021-07-05T13:41:09
| 129,761,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
#### For plotting reward values stored in files
import numpy as np
import matplotlib.pyplot as plt
import sys
run=sys.argv[1]
y = np.loadtxt('episode_reward_run_'+run+'.txt', unpack=True)
y_new=y[1:len(y)]
x=range(len(y_new))
print(x,y_new)
plt.figure(1)
plt.plot(x,y_new)
plt.title('Reward')
plt.xlabel('episodes')
plt.ylabel('reward per episode')
plt.show()
y_new=-np.array(y_new)
plt.figure(2)
plt.plot(x,y_new)
plt.title('Feature distance')
plt.xlabel('episodes')
plt.ylabel('feature distance per episode')
plt.show()
|
[
"meetleopauly@yahoo.com"
] |
meetleopauly@yahoo.com
|
32d769360c3b3706f42a2f42c8b12903939383f8
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/markov_chain_20200120225120.py
|
c8637afc95c9347859961865560d6e5a25020a0a
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 794
|
py
|
import sample
from clean_text import clean
# NOTE: the histogram class used below is assumed to come from a sibling
# module of this project; adjust the import to match the actual layout.
from dictogram import Dictogram
class Markov():
    def __init__(self, corpus):
        self.corpus = corpus
        self.states = {}
        self.chain()
    def chain(self):
        last_word = None
        for word in self.corpus:
            if last_word is not None:  # skip the very first word
                if last_word not in self.states:  # if we haven't seen this word before
                    self.states[last_word] = Dictogram()  # empty histogram as value
                self.states[last_word].add_count(word)  # add word to last word's histogram
            last_word = word  # set word as last_word
    def __str__(self):
        return str(self.states)
if __name__ == '__main__':
    source = 'one fish two fish red fish blue fish'
    print(Markov(source.split()))
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
61b0a836a83e88645081bc1ab3f28d2beac4fce3
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/confdb/syntax/protocols/lldp/hints.py
|
3b89367275377af955519b66880edf74dbe19045
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 839
|
py
|
# ----------------------------------------------------------------------
# ConfDB hints protocols lldp syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ...defs import DEF
from ...patterns import BOOL, IF_NAME
HINTS_PROTOCOLS_LLDP = DEF(
"lldp",
[
DEF("status", [DEF(BOOL, name="status", required=True, gen="make_global_lldp_status")]),
DEF(
"interface",
[
DEF(
IF_NAME,
[DEF("off", gen="make_lldp_interface_disable")],
multi=True,
name="interface",
)
],
),
],
)
|
[
"dv@nocproject.org"
] |
dv@nocproject.org
|
c3e795fbfe3826d2f5904f7e97ae0c1ae14fa894
|
3644db13925e6d518a9637edafa6247547ca90b4
|
/interprocedural_analyses/taint/test/integration/functions_as_locals.py
|
f4f5aa681cb6601056ece250cd511564f500a956
|
[
"MIT"
] |
permissive
|
luizribeiro/pyre-check
|
348699cecf82a5aa36f5e1301076cb006a2fb9f9
|
42d1fced8cbb94c4c9400d6fddd798e50d331ab9
|
refs/heads/master
| 2023-04-17T17:26:23.262598
| 2020-08-08T04:03:04
| 2020-08-08T04:03:35
| 285,969,507
| 0
| 0
|
MIT
| 2023-04-04T01:56:30
| 2020-08-08T04:01:31
|
OCaml
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
# flake8: noqa
from builtins import __test_sink, __test_source
def foo(arg):
__test_sink(arg)
def foo_as_local():
x = __test_source()
f = foo
foo(x)
f(x)
def local_tito(arg):
f = foo
f(arg)
class C:
def m(self, arg):
__test_sink(arg)
def local_function_with_method_sink(c: C):
f = c.m
x = __test_source()
c.m(x)
f(x)
def method_tito(c: C, arg):
f = c.m
f(arg)
def barA(arg1: str, arg2: str):
__test_sink(arg1)
def barB(arg1: str, arg2: int):
__test_sink(arg2)
def a_or_b():
if 1 > 2:
f = barA
else:
f = barB
f(__test_source(), 0)
f(0, __test_source())
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c6b23600f363b1173b40bde086cf7afccd9b839d
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/dQZmkrPaKdtSat5f9_6.py
|
326adc8b9d5d878706b0607ed434207c59a78551
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
def single_occurrence(txt):
txt = txt.upper()
Answer = ""
Counter = 0
Length = len(txt)
while (Counter < Length):
Item = txt[Counter]
Events = txt.count(Item)
if (Events == 1):
Answer = Item
return Answer
else:
Counter += 1
return Answer
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
079956603181043e047fcfcd8ae48b9209a73544
|
596e92d0d484b6e7eee6d322e72e52748fdeaa5d
|
/sportsdata/mlb_projections/models/mlb_projections_dfs_slate_game.py
|
4aadebaeb66acdeb4d93f89a1e1c5748361edf13
|
[] |
no_license
|
scottypate/sportsdata
|
f5f61ddc7eb482883f93737c6ce73dd814ed4336
|
a07955ab50bf4fff1ce114ed9895095ff770c473
|
refs/heads/main
| 2023-08-18T16:51:56.452678
| 2021-10-22T12:44:08
| 2021-10-22T12:44:08
| 420,062,350
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,117
|
py
|
# coding: utf-8
"""
MLB v3 Projections
MLB projections API. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MlbProjectionsDfsSlateGame(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'slate_game_id': 'int',
'slate_id': 'int',
'game_id': 'int',
'game': 'MlbProjectionsGame',
'operator_game_id': 'int',
'removed_by_operator': 'bool'
}
attribute_map = {
'slate_game_id': 'SlateGameID',
'slate_id': 'SlateID',
'game_id': 'GameID',
'game': 'Game',
'operator_game_id': 'OperatorGameID',
'removed_by_operator': 'RemovedByOperator'
}
def __init__(self, slate_game_id=None, slate_id=None, game_id=None, game=None, operator_game_id=None, removed_by_operator=None): # noqa: E501
"""MlbProjectionsDfsSlateGame - a model defined in Swagger""" # noqa: E501
self._slate_game_id = None
self._slate_id = None
self._game_id = None
self._game = None
self._operator_game_id = None
self._removed_by_operator = None
self.discriminator = None
if slate_game_id is not None:
self.slate_game_id = slate_game_id
if slate_id is not None:
self.slate_id = slate_id
if game_id is not None:
self.game_id = game_id
if game is not None:
self.game = game
if operator_game_id is not None:
self.operator_game_id = operator_game_id
if removed_by_operator is not None:
self.removed_by_operator = removed_by_operator
@property
def slate_game_id(self):
"""Gets the slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._slate_game_id
@slate_game_id.setter
def slate_game_id(self, slate_game_id):
"""Sets the slate_game_id of this MlbProjectionsDfsSlateGame.
:param slate_game_id: The slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._slate_game_id = slate_game_id
@property
def slate_id(self):
"""Gets the slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._slate_id
@slate_id.setter
def slate_id(self, slate_id):
"""Sets the slate_id of this MlbProjectionsDfsSlateGame.
:param slate_id: The slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._slate_id = slate_id
@property
def game_id(self):
"""Gets the game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._game_id
@game_id.setter
def game_id(self, game_id):
"""Sets the game_id of this MlbProjectionsDfsSlateGame.
:param game_id: The game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._game_id = game_id
@property
def game(self):
"""Gets the game of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The game of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: MlbProjectionsGame
"""
return self._game
@game.setter
def game(self, game):
"""Sets the game of this MlbProjectionsDfsSlateGame.
:param game: The game of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: MlbProjectionsGame
"""
self._game = game
@property
def operator_game_id(self):
"""Gets the operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._operator_game_id
@operator_game_id.setter
def operator_game_id(self, operator_game_id):
"""Sets the operator_game_id of this MlbProjectionsDfsSlateGame.
:param operator_game_id: The operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._operator_game_id = operator_game_id
@property
def removed_by_operator(self):
"""Gets the removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: bool
"""
return self._removed_by_operator
@removed_by_operator.setter
def removed_by_operator(self, removed_by_operator):
"""Sets the removed_by_operator of this MlbProjectionsDfsSlateGame.
:param removed_by_operator: The removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: bool
"""
self._removed_by_operator = removed_by_operator
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MlbProjectionsDfsSlateGame, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MlbProjectionsDfsSlateGame):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"scotty.pate@auth0.com"
] |
scotty.pate@auth0.com
|
318cd859b70a41e212785c1596ffdf88353bce76
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_7/snxkai001/util.py
|
217a94e3e61b1d0258092af7a9640f7e96345ae2
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
def create_grid (grid):
for u in range(4):
grid.append([])
for down in range(4):
grid[u].append(0)
def print_grid(grid):
print("+" + "-"*20 + "+")
    align = "{0:<5}"
    for row in range(4):
        print("|", end="")
        for col in range(4):
            if grid[row][col] != 0:
                print(align.format(grid[row][col]), end="")
            else:
                print(align.format(" "), end="")
print("|")
print("+" + "-"*20 + "+")
def check_lost(grid):
for kol in range(4):
for lef in range(4):
if grid[kol][lef]==0:
return False
else:
continue
for n in range(4):
for m in range(3):
if grid[m][n]==grid[m+1][n]:
return False
else:
continue
for i in range(4):
for j in range(3):
if grid[i][j]==grid[i][j+1]:
return False
else:
continue
return True
def check_won(grid):
for i in range(4):
for p in range(4):
if grid[i][p]>=32:
return True
else:
continue
return False
def grid_equal(grid1, grid2):
for i in range(4):
for j in range(4):
if grid1[i][j]==grid2[i][j]:
continue
else:
return False
return True
def copy_grid(grid):
list1=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
for col in range(4):
for row in range(4):
list1[col][row]=grid[col][row]
return list1
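if __name__ == '__main__':
    # Added demo (not part of the original assignment file): exercises the
    # helpers above on a fresh 4x4 grid.
    grid = []
    create_grid(grid)
    grid[0][0] = 2
    print_grid(grid)
    print('lost:', check_lost(grid), 'won:', check_won(grid))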
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
39e716c97c55b1ae0ce73788baea20aa77976d3b
|
9508879fcf1cff718f3fe80502baff8b82c04427
|
/data_structures_domain/linked_lists/print_in_reverse.py
|
9e70be3bfc61fc9bdc1e648101f1a043b9f0ec55
|
[] |
no_license
|
davidozhang/hackerrank
|
e37b4aace7d63c8be10b0d4d2bffb4d34d401d55
|
bdc40d6ff3e603949eb294bbc02a1e24a4ba5b80
|
refs/heads/master
| 2021-05-04T11:31:59.110118
| 2017-11-15T09:17:27
| 2017-11-15T09:17:27
| 47,906,672
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
"""
Print elements of a linked list in reverse order to standard output
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def ReversePrint(head):
if not head:
return
ReversePrint(head.next)
print head.data
'''
Cleaner implementation
October 1, 2016
'''
def ReversePrint(head):
if head is not None:
ReversePrint(head.next)
print head.data
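'''
Added usage sketch (not part of the original file), kept in the same
Python 2 style as the solutions above:
head = Node(3, Node(2, Node(1)))
ReversePrint(head)   # prints 1, then 2, then 3
'''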
|
[
"davzee@hotmail.com"
] |
davzee@hotmail.com
|
54601c3faba97921513238671d4defe422ee9d46
|
d3eb732ffd738d3a624196f0971e4c29f85f6673
|
/maptool.py
|
57b5b053df938d8e44ecddd90a5bd11d4c5471b6
|
[] |
no_license
|
kailIII/mgrs-tools
|
c44aae9542e9883e9e1a395217b468bea4fb0788
|
3ac612bdf980f2d61f27d417c709115890af415f
|
refs/heads/master
| 2021-01-15T16:57:14.768002
| 2015-04-01T12:15:10
| 2015-04-01T12:15:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
import mgrs
from qgis.core import *
from qgis.gui import *
from qgis.utils import iface
from PyQt4.QtCore import *
class MGRSMapTool(QgsMapTool):
ct = mgrs.MGRS()
epsg4326 = QgsCoordinateReferenceSystem("EPSG:4326")
def __init__(self, canvas):
QgsMapTool.__init__(self, canvas)
self.setCursor(Qt.CrossCursor)
def canvasMoveEvent(self, e):
pt = self.toMapCoordinates(e.pos())
canvas = iface.mapCanvas()
canvasCrs = canvas.mapRenderer().destinationCrs()
transform = QgsCoordinateTransform(canvasCrs, self.epsg4326)
pt4326 = transform.transform(pt.x(), pt.y())
try:
mgrsCoords = self.ct.toMGRS(pt4326.y(), pt4326.x())
iface.mainWindow().statusBar().showMessage("MGRS Coordinate: " + mgrsCoords)
except:
iface.mainWindow().statusBar().showMessage("")
|
[
"volayaf@gmail.com"
] |
volayaf@gmail.com
|
7ceceed258eb306cbc6fee57056ca756971ba8da
|
df1cb33bfe99a1e72cf75931749163b7c8731757
|
/stages/stage3.py
|
012d626c02d661dbc7a2f17848fc0e501c06bcb9
|
[] |
no_license
|
orf/wikilink_py
|
2d6ae9dd64264fdf17995980ed8a4a960c199c5b
|
6643397e220970a93dab1e50e120748bfdc3bf19
|
refs/heads/master
| 2021-01-22T11:55:16.906965
| 2014-01-08T20:49:38
| 2014-01-08T20:49:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,415
|
py
|
from lib.progress import run_with_progressbar
from lib.formatters.Neo4jFormatter import Neo4jFormatter
from lib.formatters.CSVFormatter import MultiCSVFormatter
import functools
import os
import logging
import sys
import itertools
import __pypy__
import json
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
STAGE3_TITLES_TO_ID = {}
STAGE3_ID_TO_DATA = {}
FLAG_REDIRECT = 1
FLAG_SEEN = 2
def handle_stage1_line(line):
    # There is one page in stage1.csv whose title is a unicode NEXT_LINE character (\x85).
# As such we have to encode each line individually.
# https://en.wikipedia.org/w/api.php?action=query&prop=info&pageids=28644448&inprop=url
page_id, page_title, is_redirect = unicode(line.strip("\n"), "utf-8").split("|")
flags = FLAG_REDIRECT if is_redirect == "1" else 0
STAGE3_TITLES_TO_ID[page_title] = int(page_id)
STAGE3_ID_TO_DATA[int(page_id)] = (page_title, flags)
#yield (page_title, flags), int(page_id)
def get_ids_from_titles(titles_list, get_none=False):
"""
I take a list of titles and return a list of integer ID's. If get_none is True then
the return list will contain None values where the title cannot be found.
"""
returner = []
for title in titles_list:
x = STAGE3_TITLES_TO_ID.get(title, 0)
        if x != 0 or get_none:
returner.append(x) # Keeping all elements uniform might increase performance
return returner
def get_page_data_from_id(page_id, update_seen=True):
"""
I take a page ID and I return a tuple containing the title, is_redirect flag and a value indicating if this
page ID has been queried before.
"""
p_data = STAGE3_ID_TO_DATA.get(page_id, None)
if p_data is None:
return None
if update_seen:
STAGE3_ID_TO_DATA[page_id] = (p_data[0], p_data[1] | FLAG_SEEN)
return p_data
def set_page_redirect(title, to):
"""
I replace a page title with the ID of the page it links to
"""
STAGE3_TITLES_TO_ID[title] = to
def delete_page(title, page_id):
"""
I take a page ID and/or I delete it from our registry
"""
if title:
del STAGE3_TITLES_TO_ID[title]
if page_id:
del STAGE3_ID_TO_DATA[page_id]
def split_page_info(line, update_seen=True, get_none=False, get_links=True):
"""
I take a line outputted from Stage2 and I return (the_id, page_links, page_info)
"""
line = line.rstrip("\n")
split_line = line.split("|")
page_id = int(split_line[0])
page_info = get_page_data_from_id(page_id, update_seen=update_seen)
if page_info is None:
return None, None, None
    # Using islice like this keeps memory down by avoiding creating another list; it also doesn't need a
    # len() call, so it might be faster.
page_links = itertools.islice(split_line, 1, sys.maxint)
return page_id, get_ids_from_titles(page_links, get_none) if get_links else page_links, page_info
def stage3_pre(line):
"""
We need to sort out redirects so they point to the correct pages. We do this by
loading stage2.csv which contains ID|link_title|link_title... and get the ID's of the links
"""
page_id, page_links, page_info = split_page_info(unicode(line, "utf-8"), update_seen=False, get_links=False)
if page_info and page_info[1] & FLAG_REDIRECT: # Are we a redirect?
page_links = get_ids_from_titles(page_links, True)
page_title = page_info[0]
if len(page_links) > 1 and page_links[0]:
# Point the redirect page to the ID of the page it redirects to
set_page_redirect(page_title, page_links[0])
delete_page(None, page_id)
else:
# The page we are redirecting to cannot be found, remove the redirect page.
delete_page(page_title, page_id)
def stage3(line, output_format="neo"):
"""
I combine the results from the previous stages into a single cohesive file
"""
global STAGE3_ROW_COUNTER
page_id, page_links, page_info = split_page_info(unicode(line.strip("\n"), "utf-8"), get_links=False)
if page_info is None: # Ignore redirects for now
return None
page_title, flags = page_info
#print "flags: %s" % flags
if not flags & FLAG_REDIRECT:
page_links = get_ids_from_titles(page_links, False)
if flags & FLAG_SEEN:
# Already visited this page before, output to an SQL file instead
if output_format == "neo":
return None, "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
else:
with open('stage3.sql', 'a') as fd:
fd.write("UPDATE pages SET links = uniq(array_cat(links, ARRAY[%s]::integer[])) WHERE id = %s;\n" %
(",".join(map(str, set(page_links))), page_id))
else:
# CSV output
# id, title, is_redirect, links_array
if output_format == "neo":
#return u"({id:%s, name:%s})" % (page_id, json.dumps(page_title).encode("unicode-escape"))
return ("%s\t%s\n" % (page_id, page_title)).encode("utf-8"),\
"%s\n" % "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
#return ((page_id, page_title),),
else:
return "%s|%s|%s|{%s}\n" % (page_id, page_title, is_redirect,
",".join(map(str, set(page_links))))
if __name__ == "__main__":
logger.info("Loading stage1.csv into memory")
with open("stage1.csv", 'rb', buffering=1024*1024) as csv_fd:
run_with_progressbar(csv_fd, None, handle_stage1_line, os.path.getsize("stage1.csv"))
logger.info("Loaded %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
with open("stage2.csv", "rb", buffering=1024*1024) as input_fd:
run_with_progressbar(input_fd, None, stage3_pre, os.path.getsize("stage2.csv"))
logger.info("Have %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
logger.info("Starting dump")
with open('stage2.csv', "rb", buffering=1024*1024*8) as input_fd: # , encoding="utf-8", buffering=1024*8
with open('stage3.nodes', mode="wb", buffering=1024*1024*8) as nodes_fd:
with open('stage3.links', mode="wb", buffering=1024*1024*20) as links_fd:
formatter = MultiCSVFormatter(((nodes_fd, ("id:int:node_id", "title:string")),
(links_fd, ("id:int:node_id", "id:int:node_id"))))
run_with_progressbar(input_fd, None,
functools.partial(stage3, output_format="neo"),
os.path.getsize("stage2.csv"),
formatter=formatter)
|
[
"tom@tomforb.es"
] |
tom@tomforb.es
|
92df4a82b4256ff8f683501f22e0c09dbea8b0c0
|
b89df6019163d7b18a8ecb4003939f6235b5de85
|
/mnist/cnn_mnist.py
|
0f8dd40e176c805f08e1a65e10cdad7e16b51923
|
[] |
no_license
|
liketheflower/tf_practise
|
fdd22b608ca7d513a4972497466e3fc7a12762b6
|
2725b52169b2f0044d20b3c33c86485336e65483
|
refs/heads/master
| 2020-03-19T23:21:16.467649
| 2018-06-19T03:56:07
| 2018-06-19T03:56:07
| 137,003,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,709
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images # Returns np.array
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images # Returns np.array
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
# Set up logging for predictions
# Log the values in the "Softmax" tensor with label "probabilities"
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True)
mnist_classifier.train(
input_fn=train_input_fn,
steps=20000,
hooks=[logging_hook])
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
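# A hedged sketch of running inference with the trained estimator (not part of
# the original tutorial flow; tensor names follow the model_fn above):
# pred_input_fn = tf.estimator.inputs.numpy_input_fn(
#     x={"x": eval_data[:4]}, num_epochs=1, shuffle=False)
# for p in mnist_classifier.predict(input_fn=pred_input_fn):
#     print(p["classes"], p["probabilities"].max())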
|
[
"jim.morris.shen@gmail.com"
] |
jim.morris.shen@gmail.com
|
d89b26a0c2aa42dccc501acbb07ac7e597b9047a
|
102b67d83e12219f3bf4bea6ed691ddd9c2e69f1
|
/ad/templatetags/ads.py
|
7e6251780e534773006f27332ae6205e14bdccc8
|
[
"BSD-3-Clause"
] |
permissive
|
nicksergeant/snipt-old
|
2cb6bec629d798dd83fc39f0105828f1fd40a51a
|
f2f1e9f183fb69bcc0fabbc25059bfd1c60527e2
|
refs/heads/master
| 2021-01-18T14:03:01.426851
| 2012-09-19T00:09:48
| 2012-09-19T00:09:48
| 865,573
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
from tagging.models import TaggedItem
from snipt.ad.models import Ad
from django import template
register = template.Library()
@register.simple_tag
def ad(tag):
try:
ads = TaggedItem.objects.get_by_model(Ad.objects.order_by('?'), tag)
ad = ads[0]
except:
ads = Ad.objects.order_by('?')
ad = ads[0]
tag = ''
return """
<h1 style="margin-bottom: 20px; padding-top: 15px;">A good %s read</h1>
<div class="amazon-book clearfix">
<div class="amazon-title">
<a href="%s" rel="nofollow" class="clearfix">
<img src="/media/%s" alt="%s" title="%s" />
%s
</a>
</div>
</div>
""" % (tag,
ad.url,
ad.image,
ad.title,
ad.title,
ad.title)
|
[
"nick@nicksergeant.com"
] |
nick@nicksergeant.com
|
974c0c7fd25b0de5202f8adde919a1f585b0a4ed
|
aa45f6f5106517c582b21691ce22ad808339ec64
|
/borax/calendars/birthday.py
|
aea5997b9e454ee9eaf8a2861a068b38780a781c
|
[
"MIT"
] |
permissive
|
kinegratii/borax
|
86b1a87c686f9b74db8d919afe30761497888368
|
06407958a6ba3115d783ed6457c2e7355a3f237c
|
refs/heads/master
| 2023-03-11T06:09:20.040607
| 2022-11-15T02:39:43
| 2022-11-15T02:39:43
| 126,959,349
| 67
| 8
|
MIT
| 2022-11-15T02:39:44
| 2018-03-27T09:07:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
from datetime import date
from .lunardate import LunarDate, LCalendars
def nominal_age(birthday, today=None):
birthday = LCalendars.cast_date(birthday, LunarDate)
if today:
today = LCalendars.cast_date(today, LunarDate)
else:
today = LunarDate.today()
return today.year - birthday.year + 1
def actual_age_solar(birthday, today=None):
"""See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return:
"""
birthday = LCalendars.cast_date(birthday, date)
if today:
today = LCalendars.cast_date(today, date)
else:
today = date.today()
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))
def actual_age_lunar(birthday, today=None):
birthday = LCalendars.cast_date(birthday, LunarDate)
if today:
today = LCalendars.cast_date(today, LunarDate)
else:
today = LunarDate.today()
return today.year - birthday.year - (
(today.month, today.leap, today.day) < (birthday.month, birthday.leap, birthday.day)
)
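# A minimal usage sketch (uses only the stdlib date imported above; the dates
# are made up for illustration):
if __name__ == '__main__':
    birth = date(1990, 5, 1)
    print(actual_age_solar(birth, today=date(2020, 4, 30)))  # 29: birthday not reached yet
    print(actual_age_solar(birth, today=date(2020, 5, 1)))   # 30: birthday reached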
|
[
"kinegratii@gmail.com"
] |
kinegratii@gmail.com
|
a0f1f2557839af7ed23dfb81c8ff5bea64a59bc4
|
e4c25590298b084e3fb44b0b325a05699fac4202
|
/Kattis/sevenwonders.py
|
5a96568a7cc25485bbe157259a725421d500474b
|
[] |
no_license
|
shakib609/competitive-programming
|
520028bd1147e7e43e708875b6390e1a7d65a94b
|
5090d5d3650b8055e16651ed9de5380cc7fdb7aa
|
refs/heads/master
| 2022-12-09T12:33:20.167332
| 2022-12-07T17:28:30
| 2022-12-07T17:28:30
| 67,289,210
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
s = input().strip()
t, c, g = [0, 0, 0]
for ch in s:
if ch == 'T':
t += 1
elif ch == 'C':
c += 1
else:
g += 1
result = t ** 2 + c ** 2 + g ** 2
result += min([t, c, g]) * 7
print(result)
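# Worked example of the scoring rule above: for input "TCGTTC" the counts are
# t=3, c=2, g=1, so result = 9 + 4 + 1 + 7*min(3, 2, 1) = 21.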
|
[
"shakib609@gmail.com"
] |
shakib609@gmail.com
|
52afe556959590049b64feb71a30c5fce7fedaf1
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/containerregistry/v20190501/get_webhook.py
|
7948e368ab3b2de549dbfecb516f227ee8cca61a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,711
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebhookResult',
'AwaitableGetWebhookResult',
'get_webhook',
'get_webhook_output',
]
@pulumi.output_type
class GetWebhookResult:
"""
An object that represents a webhook for a container registry.
"""
def __init__(__self__, actions=None, id=None, location=None, name=None, provisioning_state=None, scope=None, status=None, tags=None, type=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> Sequence[str]:
"""
The list of actions that trigger the webhook to post notifications.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the webhook at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
"""
The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the webhook at the time the operation was called.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebhookResult(GetWebhookResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebhookResult(
actions=self.actions,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
scope=self.scope,
status=self.status,
tags=self.tags,
type=self.type)
def get_webhook(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
webhook_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebhookResult:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['webhookName'] = webhook_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20190501:getWebhook', __args__, opts=opts, typ=GetWebhookResult).value
return AwaitableGetWebhookResult(
actions=__ret__.actions,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
scope=__ret__.scope,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_webhook)
def get_webhook_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
webhook_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebhookResult]:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
df9dd24400578916c3d14c13ccc9926eddfabb48
|
38eb57300418e6f10433630437388f779ce50e09
|
/cookie_and_session/app02_session/views.py
|
25a4bbc4abf9387fc8de2e70f90c22b5c03e8db7
|
[] |
no_license
|
SelfShadows/Django-Flask
|
f37839f763133f0d62bffad3128171c426a1c038
|
13e32d1c8aac1532b43323e1891c423fe78f2813
|
refs/heads/master
| 2021-01-04T12:31:18.018508
| 2020-02-14T16:29:27
| 2020-02-14T16:29:27
| 240,550,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
from django.shortcuts import render ,redirect
from functools import wraps
from django import views
# Tool provided by Django to turn a function decorator into a method decorator
from django.utils.decorators import method_decorator
from app02_session import models
def check_login(func):
    @wraps(func)  # preserve the wrapped function's metadata
    def inner(request, *args, **kwargs):
        # Get the session
        ret = request.session.get("is_login")
        # 1. Get the random string from the cookie
        # 2. Use the random string to fetch session_data from the database
        #    --> decrypt --> deserialize into a dict
        # 3. Look up the concrete value in that dict under the is_login key
        if ret == "1":
            # Already logged in: keep executing
            return func(request, *args, **kwargs)
        else:
            # Not logged in: redirect to the login page
            next_url = request.path_info
            return redirect("/app02/login/?next={}".format(next_url))
return inner
def login(request):
if request.method == "POST":
user = request.POST.get("user")
pwd = request.POST.get("pwd")
        # Extract the next parameter from the URL
        next_url = request.GET.get("next")
        # Delete all session rows whose expiry date is earlier than now
        request.session.clear_expired()
have_user = models.Person.objects.filter(username=user, password=pwd)
if have_user:
            # Login succeeded
            # Tell the browser to store a key-value pair
            if next_url:
                ret = redirect(next_url)
            else:
                ret = redirect("/app02/home/")
            # Set the session
            request.session["is_login"] = "1"
            request.session["user_id"] = have_user[0].id
            # Set the timeout
            request.session.set_expiry(5)  # expires after 5 seconds
return ret
return render(request, "app02/login.html")
# Logout view
def logout(request):
    # Delete only the session data:
    # request.session.delete()
    # Delete both the session data and the cookie value:
request.session.flush()
return redirect("/app02/login/")
@check_login
def home(request):
user_id = request.session.get("user_id")
user_obj = models.Person.objects.filter(id=user_id)
if user_obj:
return render(request, "app02/home.html", {"user_obj": user_obj[0]})
else:
return render(request, "app02/home.html", {"user_obj": "匿名用户"})
@check_login
def index(request):
return render(request, "app02/index.html")
class UserInfo(views.View):
    # Turn the function decorator into a method decorator
@method_decorator(check_login)
def get(self, request):
return render(request, "app02/userinfo.html")
|
[
"870670791@qq.com"
] |
870670791@qq.com
|
9d2cd1f61430081aa4a65d8e29b28e23f51b088f
|
85f6de6e3ef680cd717312233fd03c636c606550
|
/src/two/rolling_a_dice.py
|
faf4234c08ca6aa9dc9b3cb20192a6fdd631a5dc
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Guillermogsjc/dissecting-reinforcement-learning
|
f8956455ffda22445ecc11fc6938da40ed4948e2
|
8a2751efa6d4a733df81c272c503b8061c70c04f
|
refs/heads/master
| 2021-01-11T20:41:02.216522
| 2017-01-15T11:32:27
| 2017-01-15T11:32:27
| 79,168,192
| 1
| 0
| null | 2017-01-16T23:14:54
| 2017-01-16T23:14:53
| null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import numpy as np
#Throwing a die N times and estimating the expectation
dice = np.random.randint(low=1, high=7, size=3)
print("Expectation (3 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=10)
print("Expectation (10 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=100)
print("Expectation (100 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=1000)
print("Expectation (1000 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=100000)
print("Expectation (100000 times): " + str(np.mean(dice)))
|
[
"massimiliano.patacchiola@gmail.com"
] |
massimiliano.patacchiola@gmail.com
|
fac85c5c169eaf142355c0655ac86fcd5f74fc09
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/surrounded_20200617223518.py
|
233debe26db46593e2dfe08e99e70eb47ac5cf87
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
def surronded(board):
# dfs
# untouched
# in progress
# finished
rows = len(board)
if rows == 0:
return
cols = len(board[0])
if cols == 0:
return
state = [[0]* cols for _ in range(rows)]
def canReachOutside(x,y,pending):
        pending.append((x, y))
canReach = False
directions = [(1,0),(-1,0),(0,1),(0,-1)]
for dx,dy in directions:
nextX,nextY = dx+x,dy+y
if nextX < 0 or nextX >= rows or nextY < 0 or nextY >= cols:
canReach = True
continue
if board[nextX][nextY] == 'O' and state[nextX][nextY] == 0:
state[nextX][nextY] = 1
                canReach |= canReachOutside(nextX, nextY, pending)
return canReach
for x in range(rows):
for y in range(cols):
            if board[x][y] == 'O' and state[x][y] == 0:
pending = []
if canReachOutside(x,y,pending):
# process states to change from o to x
pass
else:
# regulary process states
pass
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d662e3cccc6393bf07124edfdf202bfc54925ebe
|
7cf29923d278c5b934a40de216ac606c25c8a5eb
|
/wheelcms_axle/translate.py
|
4f0f0c74ba26a7823c2018014ad16b58ddbffc3f
|
[
"BSD-2-Clause"
] |
permissive
|
wheelcms/wheelcms_axle
|
1df024f75d17544a575953359e3cc9a4ab56d93c
|
b5916b555f37b7baafdf08fd56b5b985688df9d0
|
refs/heads/master
| 2020-04-05T22:43:04.176353
| 2015-04-05T10:53:42
| 2015-04-05T10:53:42
| 7,800,085
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
from django.conf import settings
any_lang = ('any', 'Any')
def languages():
languages = tuple(settings.CONTENT_LANGUAGES)
if any_lang not in languages:
languages = languages + (any_lang, )
return languages
def fallback_languages(language):
""" given a language, provide a list of alternatives, prioritized """
langs = [language]
if language != any_lang[0]:
langs.append(any_lang[0])
return langs
def language_slug(slugs, slug, language):
"""
slugs is a mapping of lang->slug,
slug is a default slug,
Try to get the appropriate slug from the mapping first,
else use the provided slug. If neither are present, return
*any* slug from the mapping
(XXX we might try settings.LANGUAGE first)
"""
lslug = slugs.get(language, slug)
if lslug is None and language == any_lang[0]:
## Use fallback? XXX
return slugs.values()[0] # any
if lslug is None:
return slugs.values()[0] # any
## may still be None, let caller fail, for now
return lslug
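# Example of the fallback order (values are made up): with
# slugs = {'en': 'home', 'nl': 'thuis'}, language_slug(slugs, None, 'nl')
# returns 'thuis'; an unknown language returns the provided default slug, and
# when that is None too, an arbitrary stored slug is returned.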
|
[
"github@in.m3r.nl"
] |
github@in.m3r.nl
|
fb48fd9656915149d8133355706be99ed2db0469
|
a31de016611f3b4efc7a576e7113cad1a738419b
|
/_string_monster2.py
|
ba71783722b858478094721a871a759c7c6dd5c1
|
[] |
no_license
|
Ing-Josef-Klotzner/python
|
9d4044d632672fff966b28ab80e1ef77763c78f5
|
3913729d7d6e1b7ac72b46db7b06ca0c58c8a608
|
refs/heads/master
| 2022-12-09T01:40:52.275592
| 2022-12-01T22:46:43
| 2022-12-01T22:46:43
| 189,040,355
| 0
| 0
| null | 2022-12-01T19:52:37
| 2019-05-28T14:05:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
#!/usr/bin/python3
from sys import stdin
def match (ssof, ss):
if ss == "": return True
#print (ssof, ss, end = " ")
for st in ssof:
if ss.startswith (st):
return match (ssof - {st}, ss [len (st):])
return False
# this works with testcases, because strings are included
# in order in sleepy string (hackerearth testcases)
# fails for sample test case where sleepy string chars are scrambled
def main ():
read = stdin.readline
t = int (read ())
for t_ in range (t):
n = int (read ())
sof = [] # list of strings on floor
lns = [] # list of the string lengths
for n_ in range (n):
s = read ().rstrip ()
sof.append (s)
lns.append (len (s))
ss = read ().rstrip () # sleepy string
lnss = len (ss)
mnl = min (lns)
mxl = max (lns)
justone = 0
allother_max = 0
for n_ in range (n):
if lns [n_] == mnl: justone += 1
elif lns [n_] == mxl: allother_max += 1
if lnss < mnl or lnss > mnl and lnss < 2 * mnl or mnl == mxl and lnss % mnl or justone == 1 and allother_max == n - 1 and lnss % mxl not in {0, mnl}:
print ("NO")
continue
ssof = set (sof)
print ("YES" if match (ssof, ss) else "NO")
if __name__ == "__main__": main ()
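# Example of the recursive cover check above (inputs are made up):
# match({"ab", "c"}, "abc") -> True, since "ab" + "c" builds the sleepy string;
# match({"ab", "c"}, "acb") -> False, since no ordering of the pieces fits.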
|
[
"noreply@github.com"
] |
Ing-Josef-Klotzner.noreply@github.com
|
1f3f8ad62b3bff4ac0821b0fc51593df8ce0d5ce
|
c61c9bedba1968bfaf571ac3996b696fc35890a6
|
/Chapter12/has_match.py
|
00b6ca1068d542e225e1be731b69d6152b593ec3
|
[] |
no_license
|
ArunRamachandran/ThinkPython-Solutions
|
497b3dbdeba1c64924fe1d9aa24204a9ca552c5b
|
1a0872efd169e5d39b25134960168e3f09ffdc99
|
refs/heads/master
| 2020-04-01T10:23:20.255132
| 2014-11-07T17:04:52
| 2014-11-07T17:04:52
| 25,806,318
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# has_match takes two sequences, t1 and t2, and returns True, if there is
# an index i such that t1[i] == t2[i]
def has_match(t1,t2):
for x,y in zip(t1,t2):
if x == y:
return True
    return False
t1 = "banana"
t2 = "sequence"
print "Given sequences are : "
print t1
print t2
case = has_match(t1,t2)
if case == True:
print "Yeah..!! Two sequences have a matching index "
if case == False:
print "Nope... It doesn't have a matching index !! "
|
[
"arunkramachandran92@gmail.com"
] |
arunkramachandran92@gmail.com
|
8f18a7a3cb0b452be92e2c21ca740144639a7e69
|
7e4a1838dbcbe0526f20b4b49f88a3f213dbc712
|
/npcaller/fasta.py
|
7d1d78befe1990ff329540e7f2e2e5f87acb256e
|
[
"MIT"
] |
permissive
|
grst/nanopore_pkg
|
c5c8ee940ddd9218c08846ba5e5884c697914ca6
|
e13ccfae0be79f23ae3270b09744726504b0e58f
|
refs/heads/master
| 2023-04-02T14:38:52.410352
| 2020-11-06T19:34:37
| 2020-11-06T19:34:37
| 48,172,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
"""
Since skbio and Biopython are overkill and slightly too complicated most of the time,
I came up with this really simple fasta-io class.
"""
from itertools import groupby
class FastaReader(object):
def __init__(self, file):
if not hasattr(file, 'read'):
self.file = open(file, 'r')
else:
self.file = file
def get_entries(self):
"""
Get the next Entry from the fasta file.
Returns: Generator, which yields (header, sequence) tuples
"""
for isheader, group in groupby(self.file, lambda line: line[0] == ">"):
if isheader:
header = next(group)[1:]
else:
seq = "".join(line.strip() for line in group)
yield header, seq
def close(self):
self.file.close()
class FastaWriter(object):
"""
Very simple fasta file format writer.
"""
SPLIT = 80
def __init__(self, file):
if not hasattr(file, 'write'):
self.file = open(file, 'w')
else:
self.file = file
def write_entry(self, header, sequence):
"""
Write Entry to File
Args:
header: >sequence_header
sequence: ACTGATT...
"""
sequence = [sequence[i:i+self.SPLIT] for i in range(0, len(sequence), self.SPLIT)]
self.file.write(">{0}\n".format(header))
for s in sequence:
self.file.write(s + "\n")
def flush(self):
self.file.flush()
def close(self):
self.file.close()
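# A minimal round-trip sketch (the file name is hypothetical):
if __name__ == '__main__':
    writer = FastaWriter('demo.fa')
    writer.write_entry('seq1 demo', 'ACGT' * 30)  # wrapped at SPLIT=80 chars
    writer.close()
    reader = FastaReader('demo.fa')
    for header, seq in reader.get_entries():
        print(header.strip(), len(seq))  # 'seq1 demo' 120
    reader.close()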
|
[
"mail@gregor-sturm.de"
] |
mail@gregor-sturm.de
|
fc77466e30f68146a40c8d3ba3b858f15859ddb5
|
19ddab74600f71700a6b693281d0180d5271f295
|
/程序员面试金典/03_03_堆盘子.py
|
2f96f3b2e8fb699bf5461a949729ba6f932d252c
|
[] |
no_license
|
zhulf0804/Coding.Python
|
4d55a430da1a8077c81feba65c13ac654aaf094a
|
46ab03e23d15ebd5434ef4dd5ae99130000b00a5
|
refs/heads/master
| 2022-09-14T18:40:59.880941
| 2022-08-20T08:25:51
| 2022-08-20T08:25:51
| 213,113,482
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
class StackOfPlates:
def __init__(self, cap: int):
self.stack = []
self.cap = cap
def push(self, val: int) -> None:
if self.cap == 0:
return
if len(self.stack) == 0 or len(self.stack[-1]) == self.cap:
self.stack.append([])
self.stack[-1].append(val)
def pop(self) -> int:
if self.cap == 0 or len(self.stack) == 0:
return -1
val = self.stack[-1].pop()
if len(self.stack[-1]) == 0:
self.stack = self.stack[:-1]
return val
def popAt(self, index: int) -> int:
if self.cap == 0 or index >= len(self.stack):
return -1
val = self.stack[index].pop()
if len(self.stack[index]) == 0:
self.stack = self.stack[:index] + self.stack[index+1:]
return val
# Your StackOfPlates object will be instantiated and called as such:
# obj = StackOfPlates(cap)
# obj.push(val)
# param_2 = obj.pop()
# param_3 = obj.popAt(index)
|
[
"zhulf0804@gmail.com"
] |
zhulf0804@gmail.com
|
c4fd6afe113c170e2b3985c756cac05390668ae8
|
e04dbc32247accf073e3089ed4013427ad182c7c
|
/hhkb2020/C TLE.py
|
61c4d78700c4375a274fc85a2aa4fa2d73278a89
|
[] |
no_license
|
twobooks/atcoder_training
|
9deb237aed7d9de573c1134a858e96243fb73ca0
|
aa81799ec87cc9c9d76de85c55e99ad5fa7676b5
|
refs/heads/master
| 2021-10-28T06:33:19.459975
| 2021-10-20T14:16:57
| 2021-10-20T14:16:57
| 233,233,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import numpy as np # np.lcm(),np.gcd()
N = int(input())
arrP = np.array(input().split(),dtype=np.int64)
arrAll = np.arange(200000+1,dtype=np.int64)
mask = np.ones(200000+1,dtype=np.int64) == 1
for p in arrP:
mask[p] = False
print(arrAll[mask][0])
|
[
"twobookscom@gmail.com"
] |
twobookscom@gmail.com
|
27b8f49cb7a0e85b1fe35959e45a5d9c84dcb57b
|
dfb53581b4e6dbdc8e3789ea2678de1e1c4b5962
|
/Django/mydjango01/news/views.py
|
21a263f4be374c6a40d7fe19b8fd65329d2cf18d
|
[] |
no_license
|
biabulinxi/Python-ML-DL
|
7eff6d6898d72f00575045c5aa2acac45b4b0b82
|
217d594a3c0cba1e52550f74d100cc5023fb415b
|
refs/heads/master
| 2020-06-01T09:13:17.314121
| 2019-06-08T03:59:36
| 2019-06-08T03:59:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("这是news的首页")
|
[
"biabu1208@163.com"
] |
biabu1208@163.com
|
99b1f62912fb80c7e719697e2f9075d4fd505216
|
15b12d69ac3123d1562986970ce01d7a47d171de
|
/typings/nltk/translate/__init__.pyi
|
79712704c982cb5c2d56cec50d1fde99fb9fb8ad
|
[
"Apache-2.0"
] |
permissive
|
simplymanas/python-learning
|
9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
refs/heads/master
| 2021-07-11T06:40:24.803589
| 2021-06-20T12:06:02
| 2021-06-20T12:06:02
| 241,769,614
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
pyi
|
"""
This type stub file was generated by pyright.
"""
from nltk.translate.api import AlignedSent, Alignment, PhraseTable
from nltk.translate.ibm_model import IBMModel
from nltk.translate.ibm1 import IBMModel1
from nltk.translate.ibm2 import IBMModel2
from nltk.translate.ibm3 import IBMModel3
from nltk.translate.ibm4 import IBMModel4
from nltk.translate.ibm5 import IBMModel5
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.ribes_score import sentence_ribes as ribes
from nltk.translate.meteor_score import meteor_score as meteor
from nltk.translate.metrics import alignment_error_rate
from nltk.translate.stack_decoder import StackDecoder
"""
Experimental features for machine translation.
These interfaces are prone to change.
"""
|
[
"manas.dash@tesco.com"
] |
manas.dash@tesco.com
|
ee75934b54a7c419ea4df630c94ae680bfee4f92
|
ba0e07b34def26c37ee22b9dac1714867f001fa5
|
/azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/error_detail.py
|
08890398d70c2163092510b29f7f60ffe5e56300
|
[
"MIT"
] |
permissive
|
CharaD7/azure-sdk-for-python
|
b11a08ac7d24a22a808a18203072b4c7bd264dfa
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
refs/heads/master
| 2023-05-12T12:34:26.172873
| 2016-10-26T21:35:20
| 2016-10-26T21:35:20
| 72,448,760
| 1
| 0
|
MIT
| 2023-05-04T17:15:01
| 2016-10-31T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ErrorDetail(Model):
"""ErrorDetail.
:param code:
:type code: str
:param message:
:type message: str
:param target:
:type target: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, code=None, message=None, target=None):
self.code = code
self.message = message
self.target = target
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
33161c34e78739d53ded91e468cf82f429dfef1d
|
b170d37a81c09fd0dbb0edf3cff6296084b32af9
|
/cexbot/command_utils.py
|
7d0382b5e4f8d343853e41df961287aa984532fe
|
[
"MIT"
] |
permissive
|
metaperl/cexbot
|
8e17a7d5063a82675e002d926324e3c4a6eb6745
|
0dd0b60415afd9c1feb959186d32b1a683887975
|
refs/heads/master
| 2020-12-29T01:11:50.768031
| 2013-12-10T17:13:18
| 2013-12-10T17:13:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,754
|
py
|
#!/usr/bin/env python
""" cexbot - command_utils.py
Default command line utilities to run cexbot
"""
import os, sys, logging
import cexbot, config, parser, db, cexapi, updater, timer, cex
def main(argv=[]):
args = parser.get_parser()
verbose = 1
if args.verbose:
verbose = 2
if args.debug:
verbose = 3
if verbose>2:
log_level=logging.DEBUG
elif verbose==2:
log_level=logging.INFO
elif verbose==1:
log_level=logging.WARNING
elif verbose<1:
log_level=logging.ERROR
logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s: %(message)s")
if args.command == 'version':
print cexbot.get_version()
return True
# make sure this is always above command parsing
# print config
config.first_run()
if verbose == 3:
print args
if args.command == 'config':
if args.list:
return config.list()
elif args.edit:
return config.edit_config()
elif args.testauth:
return config.test_auth()
elif args.name and args.value:
v = config.set(args.name, args.value)
return config.cprint(args.name)
elif args.name:
return config.cprint(args.name)
logging.error('Invalid config option')
return 1
elif args.command == 'update':
return updater.check_update()
# not implemented
elif args.command == 'cleardata':
return config.clear_userdata()
ac = cexapi.CexAPI(config.get('cex.username'), config.get('cex.apikey'), config.get('cex.secret'))
dbi = db.DbManager()
  cx = cex.CexMethods(ac, dbi)  # assuming CexMethods lives in the imported cex module
if args.command == 'balance':
print "Balance: %s BTC" % ac.get_balance()
return True
elif args.command == 'initdb':
return dbi.initdb()
elif args.command == 'getmarket':
return ac.get_market()
elif args.command == 'getprice':
return ac.get_market_quote()
elif args.command == 'order':
amount = args.amount
price = args.price
r = ac.place_order(amount, price)
logging.info("Ordered: %s" % r)
elif args.command == 'updatequotes':
logging.info('Running updatequotes')
ticker_timer = timer.ReqTimer(2, cx.update_ticker)
ticker_timer.start()
elif args.command == 'buybalance':
logging.info('Running buybalance')
balance_timer = timer.ReqTimer(5, ac.buy_balance)
balance_timer.start()
# @TODO __import__
# if args.task in cexbot.tasks:
# cexbot.tasks[args.task]()
def cl_error(msg=""):
print >> sys.stderr, msg
def run_cl(argv=[]):
try:
raise SystemExit(main(sys.argv))
except KeyboardInterrupt:
cl_error('Interrupted.')
raise SystemExit(-1)
def run_gui(argv=[]):
print "GUI coming soon."
# return None
try:
import cexbot.gui
cexbot.gui.main()
except Exception, e:
print "Error: %s" % str(e)
|
[
"nikcub@gmail.com"
] |
nikcub@gmail.com
|
20faeb3af99098aeae7f42e863b981e32e75deb0
|
f8a053f287c66652adffd15624c85dcc0850d898
|
/setup.py
|
424d2c9837ce0ca5390c3445ddf06d2283a94b46
|
[
"MIT"
] |
permissive
|
heyongwei/zvt
|
cce9e9bac78c6acc5e73b517f80d1fa464342817
|
051106955a6a01707847ee56a447e2502a25ff46
|
refs/heads/master
| 2023-04-23T16:36:58.631045
| 2021-05-16T16:01:18
| 2021-05-16T16:01:18
| 363,716,402
| 0
| 0
|
MIT
| 2021-05-16T16:01:19
| 2021-05-02T17:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
#!/usr/bin/env python
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
try:
# for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
install_reqs = parse_requirements("requirements.txt", session=False)
try:
requirements = [str(ir.req) for ir in install_reqs]
except:
requirements = [str(ir.requirement) for ir in install_reqs]
setup(
name='zvt',
version='0.9.3',
    description='unified, modular quant framework for human beings',
long_description=long_description,
url='https://github.com/zvtvz/zvt',
author='foolcage',
author_email='5533061@qq.com',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
keywords='quant stock finance fintech big-data zvt technical-analysis trading-platform pandas fundamental-analysis',
packages=find_packages(include=['zvt.*', 'zvt']),
python_requires='>=3.5, <4',
include_package_data=True,
install_requires=requirements,
project_urls={ # Optional
'Bug Reports': 'https://github.com/zvtvz/zvt/issues',
'Funding': 'https://www.foolcage.com/zvt',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/zvtvz/zvt',
},
long_description_content_type="text/markdown",
entry_points={
'console_scripts': [
'zvt = zvt.main:main',
'zvt_plugin = zvt.plugin:main',
'zvt_export = zvt.plugin:export',
],
},
)
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
0a261a997e8b133dd2f20809de2b05a9df10aa1a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03041/s690420831.py
|
d69751f59907935676518728b9785bda095c49de
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
n, k = map(int, input().split())
s = str(input())
ans = ''
if s[k-1] == 'A':
ans = s[:k-1] + 'a' + s[k:]
print(ans)
exit()
elif s[k-1] == 'B':
ans = s[:k-1] + 'b' + s[k:]
print(ans)
exit()
elif s[k-1] == 'C':
ans = s[:k-1] + 'c' + s[k:]
print(ans)
exit()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
feb5b5b9942b836a874b3a07264b9012e4b7df0b
|
3f9bec3201cc255c5ad6023cc746488306224015
|
/Chapter 13/Example_13-2.py
|
08ddcf523baaba0c0e1dc8735da55bee0e9ae257
|
[] |
no_license
|
afettouhi/FluentPython-py37
|
64927a908c5804d8970ea3f4b667c109c5867a6a
|
a14a721d738b8908f9e8e78552d70fbb2d6dd74f
|
refs/heads/master
| 2022-06-14T18:26:47.456090
| 2020-05-08T04:13:51
| 2020-05-08T04:13:51
| 259,222,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import decimal
ctx = decimal.getcontext()
ctx.prec = 40
one_third = decimal.Decimal('1') / decimal.Decimal('3')
one_third
one_third == +one_third
ctx.prec = 28
one_third == +one_third
+one_third
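# Why the equality flips: unary + reapplies the *current* context, so after
# ctx.prec is lowered back to 28, +one_third rounds the stored 40-digit value
# down to 28 digits and no longer compares equal to one_third.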
|
[
"A.Fettouhi@gmail.com"
] |
A.Fettouhi@gmail.com
|
ea566c781d0e6f7ed3612211e0138868a141630c
|
780af071416ece1e1f6ead426e95155c3de209e9
|
/notebooks/rv/__init__.py
|
5918d5994e584d77b4e31b849f510e080fa8203b
|
[] |
no_license
|
o-smirnov/docker-notebook
|
b2afd38cf16a1db9d3049c4ce79f7bc61c6183fb
|
9cdb1f3fbaaca8edb94d9706a1e62410942a2f1a
|
refs/heads/master
| 2021-01-22T17:14:03.346539
| 2015-05-07T12:31:01
| 2015-05-07T12:31:01
| 35,032,895
| 0
| 0
| null | 2015-05-04T12:15:27
| 2015-05-04T12:15:27
| null |
UTF-8
|
Python
| false
| false
| 7,750
|
py
|
import os, time, math, astropy, pyfits, traceback, fnmatch
from pandas import DataFrame, Series
import IPython.display
from IPython.display import Image, HTML, display
from rv.FITSFile import FITSFile
from rv.ImageFile import ImageFile
import matplotlib.pyplot as plt
NOTEBOOK_DIR = os.environ.get('RVNB_NOTEBOOK_DIR', '/notebooks')
RESULTDIR = os.environ.get('RVNB_DATA_DIR', '/notebooks/data')
ORIGINAL_RESULTDIR = os.environ.get('RVNB_ORIGINAL_DIR', '/notebooks/data')
WIDTH = None # globally fix a plot width (inches)
MINCOL = 2 # default min # of columns to display in thumbnail view
MAXCOL = 4 # default max # of columns to display in thumbnail view
MAXWIDTH = 16 # default width of thumbnail view (inches)
DPI = 80 # screen DPI
TIMEFORMAT = "%H:%M:%S %b %d"
astropy.log.setLevel('ERROR')
import os, time, math, astropy, pyfits, traceback, fnmatch
from pandas import DataFrame, Series
import IPython.display
from IPython.display import Image, HTML, display
import matplotlib.pyplot as plt
from rv.File import DataFile
from rv.Render import renderTitle,renderTable
class FileList(list):
_sort_attributes=dict(x="ext",n="basename",s="size",t="mtime")
def __init__(self, files=[], extcol=True, thumbs=None, title="", sort="xnt"):
list.__init__(self, files)
self._extcol = extcol
self._thumbs = thumbs
self._title = title
if sort:
self.sort(sort)
def sort(self, opt="xnt"):
"""Sort the filelist by name, eXtension, Time, Size, optionally Reverse"""
opt = opt.lower()
# build up order of comparison
cmpattr = []
for attr in opt:
if attr in self._sort_attributes:
cmpattr.append(self._sort_attributes[attr])
def compare(a, b, attrs=cmpattr):
for attr in attrs:
result = cmp(getattr(a,attr),getattr(b,attr))
if result:
return result
return 0
list.sort(self, cmp=compare, reverse='r' in opt)
self._init_df()
return self
def _init_df(self):
if self._extcol:
df_files = [(f.basename, f.ext, f.size, f.mtime_str) for f in self]
self._df = DataFrame(df_files,
columns=('name', 'ext', 'size',
'modified')) if df_files else None
else:
df_files = [(f.name, f.size, f.mtime_str) for f in self]
self._df = DataFrame(
df_files,
columns=('name', 'size', 'modified')) if df_files else None
def _repr_html_(self,ncol=1):
html = renderTitle(self._title)
if self._extcol:
labels = "name", "ext", "size", "modified"
data = [ (df.basename, df.ext, df.size_str, df.mtime_str) for df in self ]
links = [ (df.fullpath, df.fullpath, None, None) for df in self ]
else:
labels = "name", "size", "modified"
data = [ (df.basename, df.size_str, df.mtime_str) for df in self ]
links = [ (df.fullpath, None, None) for df in self ]
html += renderTable(data,labels,links=links,ncol=ncol)
return html
def show(self,ncol=1):
return IPython.display.display(HTML(self._repr_html_(ncol=ncol)))
def show_all(self):
for f in self:
f.show()
def __call__(self, pattern):
files = [f for f in self if fnmatch.fnmatch(f.name, pattern)]
return FileList(files,
extcol=self._extcol,
thumbs=self._thumbs,
title=os.path.join(self._title, pattern))
def thumbs(self, **kw):
kw['title'] = self._title
return self._thumbs(self, **kw) if self._thumbs else None
def __getslice__(self, *slc):
return FileList(list.__getslice__(self, *slc),
extcol=self._extcol,
thumbs=self._thumbs,
title="%s[%s]"%(self._title,":".join(map(str,slc))))
class DataDir(object):
"""This class represents a directory in the data folder"""
def __init__(self, name, files=[], root=""):
self.fullpath = name
if root and name.startswith(root):
name = name[len(root):]
if name.startswith("/"):
name = name[1:]
name = name or "."
self.name = self.path = name
self.mtime = os.path.getmtime(self.fullpath)
files = [ f for f in files if not f.startswith('.') ]
# our title, in HTML
self._title = os.path.join(ORIGINAL_RESULTDIR, self.path
if self.path is not "." else "")
# make list of DataFiles and sort by time
self.files = FileList([ DataFile(os.path.join(self.fullpath, f),
root=root) for f in files],
title=self._title)
# make separate lists of fits files and image files
self.fits = FileList([ f for f in self.files
if type(f) is FITSFile],
extcol=False,
thumbs=FITSFile._show_thumbs,
title="FITS files, " + self._title);
self.images = FileList([ f for f in self.files
if type(f) is ImageFile],
extcol=False,
thumbs=ImageFile._show_thumbs,
title="Images, " + self._title)
def sort(self, opt):
for f in self.files, self.fits, self.images:
f.sort(opt)
return self
def show(self):
return IPython.display.display(self)
def _repr_html_(self):
return renderTitle(self._title) + self.files._repr_html_()
class DirList(list):
def __init__(self, rootfolder=None, pattern="*", scan=True, title=None):
self._root = rootfolder = rootfolder or RESULTDIR
self._title = title or ORIGINAL_RESULTDIR
if scan:
for dir_, _, files in os.walk(rootfolder):
basename = os.path.basename(dir_)
if fnmatch.fnmatch(basename, pattern) and not basename.startswith("."):
self.append(DataDir(dir_, files, root=rootfolder))
self._sort()
def _sort(self):
self.sort(cmp=lambda x, y: cmp(x.name, y.name))
def _repr_html_(self):
html = renderTitle(self._title)
dirlist = []
for dir_ in self:
nfits = len(dir_.fits)
nimg = len(dir_.images)
nother = len(dir_.files) - nfits - nimg
dirlist.append(
(dir_.name, nfits, nimg, nother, time.strftime(TIMEFORMAT,time.localtime(dir_.mtime))))
html += renderTable(dirlist,
labels=("name", "# FITS", "# img", "# others", "modified"))
return html
def show(self):
return IPython.display.display(self)
def __call__(self, pattern):
return DirList(self._root, pattern,
title=os.path.join(self._title, pattern))
def __getslice__(self, *slc):
newlist = DirList(self._root, scan=False,
title="%s[%s]"%(self._title,":".join(map(str,slc))))
newlist += list.__getslice__(self, *slc)
newlist._sort()
return newlist
# def scandirs (datafolder=DATAFOLDER):
# """Scans all directories under datafolder and populates the DIRS list"""
# global DIRS;
# DIRS = DirList(datafolder);
# for name,ds in sorted(all_dirs):
# print "Contents of",name
# display(d)
|
[
"osmirnov@gmail.com"
] |
osmirnov@gmail.com
|
99cbf86713b07499e57c02d95ba061f54909e2b4
|
0aa150f1bfe3fdbdeaaeeaef5754c3e90378e935
|
/yearapp/migrations/0034_auto_20191008_0609.py
|
75095d6fe84241d240057f54d63809fb82a11f8f
|
[] |
no_license
|
arshpreetsingh12/yearbook
|
6232eba52330b36a7404317985aea4482befd101
|
dac303e3cc448985256b44baae6e9baa4c8d8292
|
refs/heads/master
| 2020-08-07T19:57:00.281613
| 2019-10-11T13:41:49
| 2019-10-11T13:41:49
| 213,571,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# Generated by Django 2.2.5 on 2019-10-08 06:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yearapp', '0033_sale'),
]
operations = [
migrations.AlterField(
model_name='invitation',
name='address',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='invitation',
name='name_of_venue',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='sale',
name='description',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
[
"director@avioxtechnologies.com"
] |
director@avioxtechnologies.com
|
ef94b32dec93fe156549f2e821e7e2798f65812c
|
5b0aebb53c33124b87c8655a5923858d6a2a5bc7
|
/bm_preproc.py
|
266dc2e049dc3c7569d58d10d38f24412cdec468
|
[] |
no_license
|
corylstewart/DNA-Class
|
440e8c0304ea568347d2dad77424ee77a74f9e01
|
5706b95181ef7dd73a6a9d97cc879a50663ca60a
|
refs/heads/master
| 2021-01-10T13:18:07.538528
| 2016-03-29T18:50:26
| 2016-03-29T18:50:26
| 55,001,838
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,110
|
py
|
"""bm_preproc.py: Boyer-Moore preprocessing."""
__author__ = "Ben Langmead"
def z_array(s):
""" Use Z algorithm (Gusfield theorem 1.4.1) to preprocess s """
assert len(s) > 1
z = [len(s)] + [0] * (len(s)-1)
# Initial comparison of s[1:] with prefix
for i in range(1, len(s)):
if s[i] == s[i-1]:
z[1] += 1
else:
break
r, l = 0, 0
if z[1] > 0:
r, l = z[1], 1
for k in range(2, len(s)):
assert z[k] == 0
if k > r:
# Case 1
for i in range(k, len(s)):
if s[i] == s[i-k]:
z[k] += 1
else:
break
r, l = k + z[k] - 1, k
else:
# Case 2
# Calculate length of beta
nbeta = r - k + 1
zkp = z[k - l]
if nbeta > zkp:
# Case 2a: zkp wins
z[k] = zkp
else:
# Case 2b: Compare characters just past r
nmatch = 0
for i in range(r+1, len(s)):
if s[i] == s[i - k]:
nmatch += 1
else:
break
l, r = k, r + nmatch
z[k] = r - k + 1
return z
def n_array(s):
""" Compile the N array (Gusfield theorem 2.2.2) from the Z array """
return z_array(s[::-1])[::-1]
def big_l_prime_array(p, n):
""" Compile L' array (Gusfield theorem 2.2.2) using p and N array.
L'[i] = largest index j less than n such that N[j] = |P[i:]| """
lp = [0] * len(p)
for j in range(len(p)-1):
i = len(p) - n[j]
if i < len(p):
lp[i] = j + 1
return lp
def big_l_array(p, lp):
""" Compile L array (Gusfield theorem 2.2.2) using p and L' array.
L[i] = largest index j less than n such that N[j] >= |P[i:]| """
l = [0] * len(p)
l[1] = lp[1]
for i in range(2, len(p)):
l[i] = max(l[i-1], lp[i])
return l
def small_l_prime_array(n):
""" Compile lp' array (Gusfield theorem 2.2.4) using N array. """
small_lp = [0] * len(n)
for i in range(len(n)):
if n[i] == i+1: # prefix matching a suffix
small_lp[len(n)-i-1] = i+1
for i in range(len(n)-2, -1, -1): # "smear" them out to the left
if small_lp[i] == 0:
small_lp[i] = small_lp[i+1]
return small_lp
def good_suffix_table(p):
""" Return tables needed to apply good suffix rule. """
n = n_array(p)
lp = big_l_prime_array(p, n)
return lp, big_l_array(p, lp), small_l_prime_array(n)
def good_suffix_mismatch(i, big_l_prime, small_l_prime):
""" Given a mismatch at offset i, and given L/L' and l' arrays,
return amount to shift as determined by good suffix rule. """
length = len(big_l_prime)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if big_l_prime[i] > 0:
return length - big_l_prime[i]
return length - small_l_prime[i]
def good_suffix_match(small_l_prime):
""" Given a full match of P to T, return amount to shift as
determined by good suffix rule. """
return len(small_l_prime) - small_l_prime[1]
def dense_bad_char_tab(p, amap):
""" Given pattern string and list with ordered alphabet characters, create
and return a dense bad character table. Table is indexed by offset
then by character. """
tab = []
nxt = [0] * len(amap)
for i in range(0, len(p)):
c = p[i]
assert c in amap
tab.append(nxt[:])
nxt[amap[c]] = i+1
return tab
class BoyerMoore(object):
""" Encapsulates pattern and associated Boyer-Moore preprocessing. """
def __init__(self, p, alphabet='ACGT'):
# Create map from alphabet characters to integers
self.amap = {alphabet[i]: i for i in range(len(alphabet))}
# Make bad character rule table
self.bad_char = dense_bad_char_tab(p, self.amap)
# Create good suffix rule table
_, self.big_l, self.small_l_prime = good_suffix_table(p)
def bad_character_rule(self, i, c):
""" Return # skips given by bad character rule at offset i """
assert c in self.amap
assert i < len(self.bad_char)
ci = self.amap[c]
return i - (self.bad_char[i][ci]-1)
def good_suffix_rule(self, i):
""" Given a mismatch at offset i, return amount to shift
as determined by (weak) good suffix rule. """
length = len(self.big_l)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if self.big_l[i] > 0:
return length - self.big_l[i]
return length - self.small_l_prime[i]
def match_skip(self):
""" Return amount to shift in case where P matches T """
return len(self.small_l_prime) - self.small_l_prime[1]
def naive_find_matches_with_counter(p, t):
matches = list()
total_comps = 0
for i in xrange(len(t)-len(p)+1):
matched = True
for j in range(len(p)):
total_comps += 1
if p[j] != t[i+j]:
matched = False
break
if matched:
matches.append(i)
return (total_comps, matches)
def boyer_moore_with_counter(p, p_bm, t):
""" Do Boyer-Moore matching. p=pattern, t=text,
p_bm=BoyerMoore object for p """
i = 0
total_comps = 0
while i < len(t) - len(p) + 1:
total_comps += 1
shift = 1
mismatched = False
for j in range(len(p)-1, -1, -1):
if p[j] != t[i+j]:
skip_bc = p_bm.bad_character_rule(j, t[i+j])
skip_gs = p_bm.good_suffix_rule(j)
shift = max(shift, skip_bc, skip_gs)
mismatched = True
break
if not mismatched:
skip_gs = p_bm.match_skip()
shift = max(shift, skip_gs)
i += shift
return total_comps
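# A minimal usage sketch of the matchers above (pattern and text are made up):
if __name__ == '__main__':
    p, t = 'GGTAG', 'TTATAGGTAGCCGGTAG'
    p_bm = BoyerMoore(p, alphabet='ACGT')
    print(boyer_moore_with_counter(p, p_bm, t))   # total character comparisons
    print(naive_find_matches_with_counter(p, t))  # (comparisons, match offsets)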
|
[
"corylstewart@gmail.com"
] |
corylstewart@gmail.com
|
3b33c6da73e70bcb25b56b4fd175de4ac366f2a8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9S8qp4XKG2qwQMdrb_2.py
|
07908c297beae33944959e2c40e6e492d0f35bf6
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
"""
Write a function that returns the number of ways a person can climb **n
stairs** , where the person may only climb **1** or **2** steps at a time.
To illustrate, if **n = 4** there are **5** ways to climb:
[1, 1, 1, 1]
[2, 1, 1]
[1, 2, 1]
[1, 1, 2]
[2, 2]
### Examples
ways_to_climb(1) ➞ 1
ways_to_climb(2) ➞ 2
ways_to_climb(5) ➞ 8
### Notes
A staircase of height `0` should return `1`.
"""
def ways_to_climb(n):
r=(1+5**.5)/2
return round((r**(n+1)-(1-r)**(n+1))/(5**.5))
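# The closed form above is Binet's formula: ways_to_climb(n) equals the
# Fibonacci number F(n+1). An equivalent iterative sketch:
def ways_to_climb_iter(n):
    a, b = 1, 1  # ways to climb heights 0 and 1
    for _ in range(n):
        a, b = b, a + b
    return a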
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
9fe4cb94c81a6b0a10f86ec898adfb99833b6625
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_nicking.py
|
8ade774452ec36eabf9b8b12da80103b68a5a982
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from xai.brain.wordbase.verbs._nick import _NICK
# class header
class _NICKING(_NICK, ):
def __init__(self,):
_NICK.__init__(self)
self.name = "NICKING"
self.specie = 'verbs'
self.basic = "nick"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
44e5115d831d8f11ee4ec8b575906d3138700fbf
|
348aeccddd5fdb48fb91a63d170b7f0453f70e36
|
/libcloud/utils/files.py
|
201e94a4e3a873553fc3a035aa2b8953785c0c0e
|
[
"Apache-2.0"
] |
permissive
|
lelou6666/libcloud
|
4eb08e236cb9f4b787fa73ce963347f708faf092
|
bff26fe27fdd53979e32e08038ecd2fc108b6083
|
refs/heads/trunk
| 2021-01-14T14:02:16.661579
| 2013-10-28T11:18:08
| 2013-10-28T11:18:08
| 55,902,523
| 0
| 0
| null | 2016-04-10T14:08:20
| 2016-04-10T14:08:20
| null |
UTF-8
|
Python
| false
| false
| 3,437
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mimetypes
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
CHUNK_SIZE = 8096
if PY3:
from io import FileIO as file
def read_in_chunks(iterator, chunk_size=None, fill_size=False):
"""
Return a generator which yields data in chunks.
:type iterator: :class:`object` which implements iterator interface.
    :param iterator: An object which implements an iterator interface
or a File like object with read method.
:type chunk_size: ``int``
:param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
:type fill_size: ``bool``
:param fill_size: If True, make sure chunks are chunk_size in length
(except for last chunk).
TODO: At some point in the future we could use byte arrays here if version
>= Python 3. This should speed things up a bit and reduce memory usage.
"""
chunk_size = chunk_size or CHUNK_SIZE
if isinstance(iterator, (file, httplib.HTTPResponse)):
get_data = iterator.read
args = (chunk_size, )
else:
get_data = next
args = (iterator, )
data = b('')
empty = False
while not empty or len(data) > 0:
if not empty:
try:
chunk = b(get_data(*args))
if len(chunk) > 0:
data += chunk
else:
empty = True
except StopIteration:
empty = True
if len(data) == 0:
raise StopIteration
if fill_size:
if empty or len(data) >= chunk_size:
yield data[:chunk_size]
data = data[chunk_size:]
else:
yield data
data = b('')
def exhaust_iterator(iterator):
"""
Exhaust an iterator and return all data returned by it.
:type iterator: :class:`object` which implements iterator interface.
    :param iterator: An object which implements an iterator interface
                     or a File like object with read method.
    :rtype: ``str``
    :return: Data returned by the iterator.
"""
data = b('')
try:
chunk = b(next(iterator))
except StopIteration:
chunk = b('')
while len(chunk) > 0:
data += chunk
try:
chunk = b(next(iterator))
except StopIteration:
chunk = b('')
return data
def guess_file_mime_type(file_path):
filename = os.path.basename(file_path)
(mimetype, encoding) = mimetypes.guess_type(filename)
return mimetype, encoding
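# A minimal usage sketch of read_in_chunks with fill_size (data is made up):
# list(read_in_chunks(iter([b('abc'), b('defg'), b('h')]), chunk_size=4,
#                     fill_size=True)) -> [b'abcd', b'efgh']
# Chunks are re-filled to chunk_size, with only the last one allowed shorter.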
|
[
"tomaz@apache.org"
] |
tomaz@apache.org
|
cead28e09d8898e94fd635d1ede4ab5cabf171fe
|
16b77438b7a7923a391a12f1f4bc12b49429bb73
|
/src/PIPE/PIPE.py
|
afa369355271987d911ce5454c61b803916fa8aa
|
[] |
no_license
|
OpenJ92/zebra
|
eb582c36fd7110ccf5866eb34418ff9e725efd5d
|
2d3d3d42bb0461901f2418069a55e47cf8450c50
|
refs/heads/master
| 2020-11-29T14:29:37.279589
| 2020-01-18T19:54:07
| 2020-01-18T19:54:07
| 230,138,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
from src.NODE.NODE import NODE
class PIPE(object):
def __init__(self, form):
self._name, self._kwargs = *form.keys(), *form.values()
        self.__gen_nodes__()
self._transformed = self.__execute__({'Data1':1, 'Data2':1})
def __gen_nodes__(self):
self._nodes = [NODE(kw) for kw in self._kwargs]
self._nodes = {f"{self._name}_{node._name}": node \
for node in self._nodes}
def __execute__(self, Xs):
node = self._nodes[f"{self._name}_HEAD"]
while True:
print(Xs, node._name)
Xs = { \
name: \
(node._map._apply_(data) if name in node._on else data)\
for name, data in Xs.items() \
}
if "TAIL" in node._name:
return Xs
node = self._nodes[f"{self._name}_{next(node)}"]
|
[
"jacob.vartuli.92@gmail.com"
] |
jacob.vartuli.92@gmail.com
|
19b365204ddcf74e34ab42a5f2b0d756622c9ad5
|
ca55dcaa64ea9db4068e13091321cfebecc0ff41
|
/codeUp/codeUpBasic/1990.py
|
bca5b69987f830843fdbdeecd27fbb8549319697
|
[] |
no_license
|
gomtinQQ/algorithm-python
|
8fb8343594b945099ae2a4dfa794ecb47e54ab0b
|
751562922b66e335f621d366bb73dacdc7125140
|
refs/heads/master
| 2022-12-07T23:05:44.535593
| 2020-08-21T12:29:58
| 2020-08-21T12:29:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
'''
1990 : Determine a multiple of 3
Given a natural number n, determine whether or not it is a multiple of 3.
Print 1 if it is a multiple of 3, otherwise print 0.
'''
n = int(input())
if(n%3==0):
print(1)
else:
print(0)
|
[
"minhyeonlee1@gmail.com"
] |
minhyeonlee1@gmail.com
|
0f035ba1c524afe06432726820c34655340ac8c6
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GL/ARB/texture_storage_multisample.py
|
fdcdbc160823f7a5f0c538918cf1a7c652b4e9a0
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
# End users want this...
from OpenGL.raw.GL import _errors
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
_EXTENSION_NAME = 'GL_ARB_texture_storage_multisample'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GL, 'GL_ARB_texture_storage_multisample',
error_checker=_errors._error_checker)
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations): pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage3DMultisample(target,samples,internalformat,width,height,depth,fixedsamplelocations):pass
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
8a6874d0099dce3f2d73698422596393937926c4
|
1422a57e98aba02321b772d72f8f0ada6d8b8cba
|
/mm/models/shared/augmentation.py
|
91ccf3fae4c30c7c4b6af2cc19bd690100302532
|
[
"MIT"
] |
permissive
|
JonasRSV/Friday
|
e1908a411aa133bc5bd2f383b0a995f7e028092d
|
f959eff95ba7b11525f97099c8f5ea0e325face7
|
refs/heads/main
| 2023-05-15T03:33:21.542621
| 2021-06-12T10:34:50
| 2021-06-12T10:34:50
| 315,309,991
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from typing import List
import tensorflow as tf
import numpy as np
import models.shared.augmentations as a
import random
def create_audio_augmentations(aug: List[a.Augmentation], p: np.ndarray):
if len(aug) != len(p):
raise ValueError(f"Length of augmentations must match distribution {len(aug)} != {len(p)}")
def audio_augmentations(audio: np.ndarray, sample_rate: int):
for aug_to_apply, with_prob in zip(aug, p):
if np.random.rand() < with_prob:
audio = aug_to_apply.apply(audio, sample_rate)
return audio
return audio_augmentations
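# Hedged usage sketch: `a.GaussianNoise` is a hypothetical Augmentation
# subclass, assumed only to illustrate how `aug` pairs with `p`:
#   augment = create_audio_augmentations([a.GaussianNoise()], p=np.array([0.5]))
#   audio = augment(np.zeros(16000, dtype=np.float32), sample_rate=16000)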
|
[
"jonas@valfridsson.net"
] |
jonas@valfridsson.net
|
d613832fb1e4fbf8daf1f43cb77c47396088f146
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_181/ch4_2020_03_05_16_07_05_989464.py
|
dff125cf8d4a74499e2b22478368603f7e78b838
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
def classifica_idade(i):
    if i < 12:
        return 'crianca'
    elif i < 18:
        return 'adolescente'
    else:
        return 'adulto'
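# Examples (added for illustration):
#   classifica_idade(8)  -> 'crianca'
#   classifica_idade(15) -> 'adolescente'
#   classifica_idade(30) -> 'adulto'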
|
[
"you@example.com"
] |
you@example.com
|
83b9b89602f94805f1ff6283f7237c42100ead2a
|
f5a7de717f41f8379ccdee7d06de838fdf1d0a0b
|
/soloperformance-api/apps/catalog/management/commands/exercises.py
|
b73d1df31fb2d914106dd6d80bd4253425dbe55c
|
[] |
no_license
|
jimmy818/mexico-angular
|
977e4d1d0cab2ff8c10c9892d9c72ca2f4f9ac49
|
005ed3729b807d77a8fd97a3b5469a42ceefdaad
|
refs/heads/main
| 2023-08-10T21:37:53.614298
| 2021-05-11T19:04:29
| 2021-05-11T19:04:29
| 366,485,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from django.core.management.base import BaseCommand, CommandError
import requests
import xlrd
from apps.catalog import utils
class Command(BaseCommand):
help = 'Add exercises'
def handle(self, *args, **options):
r = requests.get('https://d2femlmiaazi1b.cloudfront.net/media/excel/DB_Drills.xlsx')
with open('/tmp/excel.xlsx', 'wb') as f:
f.write(r.content)
path = '/tmp/excel.xlsx'
book = xlrd.open_workbook(path)
# sheets = book.sheet_names()
sheet_0 = book.sheet_by_index(0) # Open the first tab
        # this range covers the exercise rows; data starts after the first 4 rows
        for row_index in range(1012):
            if row_index > 3:
excercice = None
for col_index in range(sheet_0.ncols):
item = sheet_0.cell(rowx=row_index,colx=col_index).value
                    if excercice is None:
excercice = item
excercice_item = utils.get_or_add_excercice(excercice)
else:
                        if item is not None and item != '':
utils.add_sub_excercice(excercice_item,sheet_0.cell(rowx=3,colx=col_index).value)
print(excercice)
print(sheet_0.cell(rowx=3,colx=col_index).value)
self.stdout.write(self.style.SUCCESS('Successfully.....'))
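# Hedged usage note: as a standard Django management command, this file is
# invoked as
#   python manage.py exercises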
|
[
"45069768+itsrocketfuel@users.noreply.github.com"
] |
45069768+itsrocketfuel@users.noreply.github.com
|
84555327ae07d2945fac7b3d7ca618e1946fb291
|
e56214188faae8ebfb36a463e34fc8324935b3c2
|
/intersight/models/workflow_default_value_ref.py
|
18613e62146e7f7c285e489454fb63c30fab824b
|
[
"Apache-2.0"
] |
permissive
|
CiscoUcs/intersight-python
|
866d6c63e0cb8c33440771efd93541d679bb1ecc
|
a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4
|
refs/heads/master
| 2021-11-07T12:54:41.888973
| 2021-10-25T16:15:50
| 2021-10-25T16:15:50
| 115,440,875
| 25
| 18
|
Apache-2.0
| 2020-03-02T16:19:49
| 2017-12-26T17:14:03
|
Python
|
UTF-8
|
Python
| false
| false
| 5,734
|
py
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkflowDefaultValueRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
WorkflowDefaultValueRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this WorkflowDefaultValueRef.
The Object Type of the referenced REST resource.
:return: The object_type of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this WorkflowDefaultValueRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this WorkflowDefaultValueRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this WorkflowDefaultValueRef.
The Moid of the referenced REST resource.
:return: The moid of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this WorkflowDefaultValueRef.
The Moid of the referenced REST resource.
:param moid: The moid of this WorkflowDefaultValueRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this WorkflowDefaultValueRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this WorkflowDefaultValueRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this WorkflowDefaultValueRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, WorkflowDefaultValueRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
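# Illustrative usage (added sketch; the field values below are placeholders):
if __name__ == '__main__':
    ref = WorkflowDefaultValueRef(object_type='workflow.DefaultValue', moid='000000000000000000000000')
    print(ref.to_dict())  # {'object_type': ..., 'moid': ..., 'selector': None}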
|
[
"ucs-build@github.com"
] |
ucs-build@github.com
|
1727d04b8a7d1014b6e1d7a1ae539f023ea9f601
|
1713334f9b68255f9adab70175c21f399d0460f3
|
/python/125_Valid_Palindrome.py
|
4d198f026b9d9fad4550fee87f5e98972fb8c355
|
[
"MIT"
] |
permissive
|
coy0725/leetcode
|
0a798b7adafe80f726e51c06c34835c4aa51b563
|
743a0bfa22402ec39858dc9c4c7dc531f825b953
|
refs/heads/master
| 2020-05-21T18:25:09.683714
| 2019-05-11T13:00:40
| 2019-05-11T13:00:40
| 186,132,894
| 2
| 0
|
MIT
| 2019-05-11T12:55:22
| 2019-05-11T12:55:21
| null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
alnum_s = [t.lower() for t in s if t.isalnum()]
ls = len(alnum_s)
if ls <= 1:
return True
        mid = ls // 2  # floor division, so range() receives an int
for i in range(mid):
if alnum_s[i] != alnum_s[ls - 1 - i]:
return False
return True
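# Illustrative checks (added sketch):
if __name__ == '__main__':
    sol = Solution()
    print(sol.isPalindrome("A man, a plan, a canal: Panama"))  # True
    print(sol.isPalindrome("race a car"))                      # False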
|
[
"qiyuangong@gmail.com"
] |
qiyuangong@gmail.com
|
d9e06504505b6a186387d2ff84264d0ecf0308fb
|
83d657c787529f01a8ecc8a874421738a7eecec7
|
/Paths/Harmonise Curve to Line.py
|
753600a50daceb8ddc9121810ba918269ff339b9
|
[
"Apache-2.0"
] |
permissive
|
BurgAndOeden/Glyphs-Scripts
|
e31b5164b491dfe0cd2d57f6cf1422c4aadda104
|
f0195d6b8f0a6c055e4e44d5ef41ba48bdd1e3a6
|
refs/heads/master
| 2020-09-16T08:01:06.345898
| 2019-11-24T00:15:44
| 2019-11-24T00:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
#MenuTitle: Harmonise Curve to Line
# -*- coding: utf-8 -*-
__doc__="""
Maximises opposing handles and reduces adjacent handles of line segments.
"""
from Foundation import NSPoint
def intersectionWithNSPoints( pointA, pointB, pointC, pointD ):
"""
Returns an NSPoint of the intersection AB with CD.
Or False if there is no intersection
"""
try:
x1, y1 = pointA.x, pointA.y
x2, y2 = pointB.x, pointB.y
x3, y3 = pointC.x, pointC.y
x4, y4 = pointD.x, pointD.y
try:
slope12 = ( float(y2) - float(y1) ) / ( float(x2) - float(x1) )
except:
# division by zero if vertical
slope12 = None
try:
slope34 = ( float(y4) - float(y3) ) / ( float(x4) - float(x3) )
except:
# division by zero if vertical
slope34 = None
if slope12 == slope34:
# parallel, no intersection
return None
elif slope12 is None:
# first line is vertical
x = x1
y = slope34 * ( x - x3 ) + y3
elif slope34 is None:
# second line is vertical
x = x3
y = slope12 * ( x - x1 ) + y1
else:
# both lines have an angle
x = ( slope12 * x1 - y1 - slope34 * x3 + y3 ) / ( slope12 - slope34 )
y = slope12 * ( x - x1 ) + y1
intersectionPoint = NSPoint( x, y )
if bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointB, pointA ) and bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointC, pointD ):
if pointIsBetweenOtherPoints( intersectionPoint, pointB, pointA ) or pointIsBetweenOtherPoints( intersectionPoint, pointC, pointD ):
return None
return intersectionPoint
else:
return None
	except Exception as e:
		print(str(e))
		import traceback
		print(traceback.format_exc())
		return None
def pointDistance( P1, P2 ):
"""Calculates the distance between P1 and P2."""
x1, y1 = P1.x, P1.y
x2, y2 = P2.x, P2.y
dist = ( ( float(x2) - float(x1) ) ** 2 + ( float(y2) - float(y1) ) **2 ) ** 0.5
return dist
def bezier( x1, y1, x2,y2, x3,y3, x4,y4, t ):
x = x1*(1-t)**3 + x2*3*t*(1-t)**2 + x3*3*t**2*(1-t) + x4*t**3
y = y1*(1-t)**3 + y2*3*t*(1-t)**2 + y3*3*t**2*(1-t) + y4*t**3
return x, y
def bothPointsAreOnSameSideOfOrigin( pointA, pointB, pointOrigin ):
returnValue = True
xDiff = (pointA.x-pointOrigin.x) * (pointB.x-pointOrigin.x)
yDiff = (pointA.y-pointOrigin.y) * (pointB.y-pointOrigin.y)
if xDiff <= 0.0 and yDiff <= 0.0:
returnValue = False
return returnValue
def pointIsBetweenOtherPoints( thisPoint, otherPointA, otherPointB ):
	returnValue = False
	xDiffAB = otherPointB.x - otherPointA.x
	yDiffAB = otherPointB.y - otherPointA.y
	xDiffAP = thisPoint.x - otherPointA.x
	yDiffAP = thisPoint.y - otherPointA.y
	xDiffFactor = divideAndTolerateZero( xDiffAP, xDiffAB )
	yDiffFactor = divideAndTolerateZero( yDiffAP, yDiffAB )
	if xDiffFactor:
		if 0.0 <= xDiffFactor <= 1.0:
			returnValue = True
	if yDiffFactor:
		if 0.0 <= yDiffFactor <= 1.0:
			returnValue = True
	return returnValue
def divideAndTolerateZero( dividend, divisor ):
if float(divisor) == 0.0:
return None
else:
return dividend/divisor
def handleLength(a,b,intersection):
return pointDistance(a,b)/pointDistance(a,intersection)
def moveHandle(a,b,intersection,bPercentage):
x = a.x + (intersection.x-a.x) * bPercentage
y = a.y + (intersection.y-a.y) * bPercentage
return NSPoint(x,y)
Font = Glyphs.font
if len(Font.selectedLayers) > 1:
selectionCounts = False
elif not Font.selectedLayers[0].selection:
selectionCounts = False
else:
selectionCounts = True
for selectedLayer in Font.selectedLayers:
selectedGlyph = selectedLayer.parent
selectedGlyph.beginUndo()
# put original state in background:
selectedLayer.contentToBackgroundCheckSelection_keepOldBackground_(False,False)
	for path in selectedLayer.paths:
		processedHandles = []
		for n in path.nodes:
			if (n.selected or not selectionCounts) and n.type == OFFCURVE:
# determine the segment:
if n.prevNode.type == OFFCURVE:
a = n.prevNode.prevNode
b = n.prevNode
c = n
d = n.nextNode
else:
a = n.prevNode
b = n
c = n.nextNode
d = n.nextNode.nextNode
				if a not in processedHandles and b not in processedHandles:
# intersection of the magic triangle:
intersection = intersectionWithNSPoints( a.position, b.position, c.position, d.position )
if intersection:
# calculate percentages:
bLength = handleLength(a,b,intersection)
cLength = handleLength(d,c,intersection)
shortLength = (abs(bLength) + abs(cLength) - 1.0) - (1.0-abs(bLength))*(1.0-abs(cLength))
if d.nextNode.type == LINE and a.prevNode.type != LINE and d.connection == GSSMOOTH:
# max handle:
b.position = intersection
# reduced handle:
c.position = moveHandle(d,c,intersection,shortLength)
elif a.prevNode.type == LINE and d.nextNode.type != LINE and a.connection == GSSMOOTH:
# max handle:
c.position = intersection
# reduced handle:
b.position = moveHandle(a,b,intersection,shortLength)
# mark handles as processed:
processedHandles.append(a)
processedHandles.append(b)
selectedGlyph.endUndo()
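# Note: this script is meant to run inside the Glyphs.app macro environment,
# which provides the Glyphs, OFFCURVE, LINE and GSSMOOTH globals used above.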
|
[
"res@glyphsapp.com"
] |
res@glyphsapp.com
|
998dbc4a900cf93aa3ee0d2e520aed575aca4de5
|
02ad25c4ac78a98b5493a2aa7f744a77f381aaae
|
/dashboard_app/migrations/0010_auto_20201211_0846.py
|
2168834a1f6db118e06a45e41521adce387ce856
|
[] |
no_license
|
cavidanhasanli/Havayi
|
1f85d0d7608c964b0ddc80e3b526b32cdb81e8bf
|
bd30c9e3e700c7381b5961b5051cbcb398adc449
|
refs/heads/main
| 2023-02-03T09:25:03.866784
| 2020-12-22T18:09:07
| 2020-12-22T18:09:07
| 316,319,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Generated by Django 3.1.3 on 2020-12-11 08:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0009_auto_20201211_0839'),
]
operations = [
migrations.DeleteModel(
name='CreditTypeInterest',
),
migrations.AddField(
model_name='banklist',
name='credit_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dashboard_app.creditfields'),
),
migrations.AddField(
model_name='banklist',
name='interest',
field=models.FloatField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0.1), django.core.validators.MaxValueValidator(100)]),
),
]
|
[
"cavidan.hasanli@mail.ru"
] |
cavidan.hasanli@mail.ru
|
d4e96ddfa8c091f87bd220375da45cf8ce6295f4
|
679ce4b323f79b2425976201324c6c1f88b95199
|
/Python/Stanley Cup/csv_parser.py
|
53294c7db661e390948575da2be855cee905e598
|
[] |
no_license
|
abriggs914/Coding_Practice
|
ff690fb5f145a11f4da144f3882b37f473b10450
|
3afd7c59e0d90f0ef5f6203853e69f853312019b
|
refs/heads/master
| 2023-08-31T04:04:58.048554
| 2023-08-29T13:23:29
| 2023-08-29T13:23:29
| 161,865,421
| 0
| 1
| null | 2022-10-27T08:35:29
| 2018-12-15T03:20:14
|
Python
|
UTF-8
|
Python
| false
| false
| 7,689
|
py
|
import csv
from utility import *
file_name = "past winners.csv"
# skip 2005 back fill
with open(file_name) as csv_file:
lines = csv.DictReader(csv_file)
data_by_year = {}
header = lines.fieldnames
print("header", header)
last = None
for i, line in enumerate(lines):
if last is not None:
if any([val is None or val == "" for val in line.values()]):
#print("missing values, check last:", last)
if line["Year"] == "2005":
continue
for last_key, curr_key in zip(last, line):
last_val = last[last_key]
curr_val = line[curr_key]
if curr_val is None or curr_val == "":
line[curr_key] = last_val
line["Winning Team"] = line["Winning Team"].split("(")[0].strip()
line["Losing Team"] = line["Losing Team"].split("(")[0].strip()
print(dict_print(line))
data_by_year[str(line["Year"])] = line
if 0 < i:
last = line
data_by_year = {k:v for k, v in data_by_year.items() if "1995" <= k}
print(dict_print(data_by_year, "data_by_year"))
data_by_team = {}
data_by_coach = {}
first_year = None
last_year = None
for key, val in data_by_year.items():
year = int(key)
if first_year is None:
first_year = year
if last_year is None or year > last_year:
last_year = year
w_team = val["Winning Team"]
l_team = val["Losing Team"]
if w_team not in data_by_team:
data_by_team[w_team] = {"WYear": [], "LYear": [], "appearances": 0}
if l_team not in data_by_team:
data_by_team[l_team] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_team[w_team]["WYear"].append(key)
data_by_team[l_team]["LYear"].append(key)
data_by_team[w_team]["appearances"] += 1
data_by_team[l_team]["appearances"] += 1
data_by_team[w_team]["W% (per appearance)"] = len(data_by_team[w_team]["WYear"]) / data_by_team[w_team]["appearances"]
data_by_team[l_team]["W% (per appearance)"] = len(data_by_team[l_team]["WYear"]) / data_by_team[l_team]["appearances"]
data_by_team[l_team]["L% (per appearance)"] = len(data_by_team[l_team]["LYear"]) / data_by_team[l_team]["appearances"]
data_by_team[w_team]["L% (per appearance)"] = len(data_by_team[w_team]["LYear"]) / data_by_team[w_team]["appearances"]
w_coach = val["WCoach"]
l_coach = val["LCoach"]
if w_coach not in data_by_coach:
data_by_coach[w_coach] = {"WYear": [], "LYear": [], "appearances": 0}
if l_coach not in data_by_coach:
data_by_coach[l_coach] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_coach[w_coach]["WYear"].append(key)
data_by_coach[l_coach]["LYear"].append(key)
data_by_coach[w_coach]["appearances"] += 1
data_by_coach[l_coach]["appearances"] += 1
data_by_coach[w_coach]["W% (per appearance)"] = percent(len(data_by_coach[w_coach]["WYear"]) / data_by_coach[w_coach]["appearances"])
data_by_coach[l_coach]["W% (per appearance)"] = percent(len(data_by_coach[l_coach]["WYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[l_coach]["L% (per appearance)"] = percent(len(data_by_coach[l_coach]["LYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[w_coach]["L% (per appearance)"] = percent(len(data_by_coach[w_coach]["LYear"]) / data_by_coach[w_coach]["appearances"])
teams_list = list(data_by_team.keys())
teams_list.sort()
for team in data_by_team:
w_list = data_by_team[team]["WYear"]
l_list = data_by_team[team]["LYear"]
data_by_team[team]["Appearance % ({} to {})".format(first_year, last_year)] = percent((len(w_list) + len(l_list)) / (last_year - first_year))
data_by_team[team]["Appearance W% ({} to {})".format(first_year, last_year)] = percent(len(w_list) / (last_year - first_year))
data_by_team[team]["Appearance L% ({} to {})".format(first_year, last_year)] = percent(len(l_list) / (last_year - first_year))
#data_by_team[team]["won_against"] = []
#data_by_team[team]["lost_against"] = []
greatest_rival = None
most_lost_to = None
most_won_against = None
for team_b in teams_list:
# if team != team_b:
if team_b not in data_by_team[team]:
data_by_team[team][team_b] = {"won_against": [], "lost_against": []}
for year in data_by_team[team]["WYear"]:
if data_by_year[year]["Losing Team"] == team_b:
data_by_team[team][team_b]["won_against"].append(year)
for year in data_by_team[team]["LYear"]:
if data_by_year[year]["Winning Team"] == team_b:
data_by_team[team][team_b]["lost_against"].append(year)
if greatest_rival is None:
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) > len(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) == len(greatest_rival[1]):
if data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]) > max(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
if most_lost_to is None:
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) > len(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) == len(most_lost_to[1]):
if data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["lost_against"]) > max(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
if most_won_against is None:
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) > len(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) == len(most_won_against[1]):
if data_by_team[team][team_b]["won_against"]:
if max(data_by_team[team][team_b]["won_against"]) > max(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
data_by_team[team]["greatest_rival"] = greatest_rival
if most_lost_to[1]:
data_by_team[team]["most_lost_to"] = most_lost_to
if most_won_against[1]:
data_by_team[team]["most_won_against"] = most_won_against
print(dict_print(data_by_team, "Data By Team"))
print("parsed teams:\n", "\n".join(teams_list))
for coach in data_by_coach:
w_list = data_by_coach[coach]["WYear"]
l_list = data_by_coach[coach]["LYear"]
data_by_coach[coach]["Appearance % ({} to {})".format(first_year, last_year)] = (len(w_list) + len(l_list)) / (last_year - first_year)
data_by_coach[coach]["Appearance W% ({} to {})".format(first_year, last_year)] = len(w_list) / (last_year - first_year)
data_by_coach[coach]["Appearance L% ({} to {})".format(first_year, last_year)] = len(l_list) / (last_year - first_year)
print(dict_print(data_by_coach, "Data By Team"))
coaches_list = list(data_by_coach.keys())
coaches_list.sort()
print("parsed coaches:\n", "\n".join(coaches_list))
# count # time each team / coach has won.
# count # time each team met and won/lost against each other team.
# count # GWG -> period, timeOfPeriod
|
[
"abriggs1@unb.ca"
] |
abriggs1@unb.ca
|
4eb48a87e664b4cabd5416d2d6729ed9a88b43a1
|
49cd9ba075ed2ab6b267f6e012bfb03267b7bc08
|
/project_42_formsModelpagination/app42/forms.py
|
99db23b3c75ea231d95bd12b4e9224ed18e651db
|
[] |
no_license
|
Satputev/DjangoApps
|
4d47a76f20815b2b1313e8b3e3c61b5406f5da60
|
c6fb5e9fa131f07d1f5920e98699f9daaa49d424
|
refs/heads/master
| 2023-02-14T00:42:36.037749
| 2020-12-24T07:39:54
| 2020-12-24T07:39:54
| 323,857,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from django import forms
from app42.models import ProductsModel
from django.forms import ValidationError
class ProductForm(forms.ModelForm):
    class Meta:
        model = ProductsModel
        fields = '__all__'
        exclude = ('pid',)
        labels = {'pname': 'Product Name', 'pprice': 'Product Price', 'pimg': 'Product Image'}
def clean_pprice(self):
price=self.cleaned_data['pprice']
if price < 1:
raise ValidationError('price should be greater than "0"')
else:
return price
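# Hedged usage sketch (requires a configured Django project; the data keys
# follow the ProductsModel fields referenced above):
#   form = ProductForm(data={'pname': 'Pen', 'pprice': 0})
#   form.is_valid()         # False: clean_pprice rejects prices below 1
#   form.errors['pprice']   # ['price should be greater than "0"']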
|
[
"satputevishal8@gmail.com"
] |
satputevishal8@gmail.com
|
694b8b138f3b4862d4b35953cdb3675a91e2a179
|
fd25231975acd147e04dc3ed3627c92cb1a4f86c
|
/FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/spatial/tests/test_distance.py
|
c0b831a2879fa2a21e753350d7b7edefe48591cf
|
[] |
no_license
|
sumitkutty/Flight-Price-Prediction
|
832a2802a3367e655b46d3b44f073d917abd2320
|
d974a8b75fbcbfa42f11703602af3e45a3f08b3c
|
refs/heads/master
| 2022-12-25T07:13:06.375888
| 2020-10-08T18:46:44
| 2020-10-08T18:46:44
| 302,366,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a99a0a8e8696f85040b18a7b95996060265dec4c0607ab9bc90551e2f2dc9bd2
size 81424
|
[
"sumitkutty37@gmail.com"
] |
sumitkutty37@gmail.com
|
035f453b189a37c9677088804e6c18447aabdbbe
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetCode/733 Flood Fill.py
|
4350e4e56af74a61b1f948707760e1b580de0573
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
#!/usr/bin/python3
"""
An image is represented by a 2-D array of integers, each integer representing
the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of
the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels
connected 4-directionally to the starting pixel of the same color as the
starting pixel, plus any pixels connected 4-directionally to those pixels (also
with the same color as the starting pixel), and so on. Replace the color of all
of the aforementioned pixels with the newColor.
At the end, return the modified image.
Example 1:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels
connected
by a path of the same color as the starting pixel are colored with the new
color.
Note the bottom corner is not colored 2, because it is not 4-directionally
connected
to the starting pixel.
Note:
The length of image and image[0] will be in the range [1, 50].
The given starting pixel will satisfy 0 <= sr < image.length and 0 <= sc <
image[0].length.
The value of each color in image[i][j] and newColor will be an integer in
[0, 65535].
"""
from typing import List
dirs = ((-1, 0), (1, 0), (0, -1), (0, 1))
class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
"""
dfs fill
mistake: corner case image == new color
"""
cur_color = image[sr][sc]
if cur_color == newColor:
return image
self.dfs(image, sr, sc, cur_color, newColor)
return image
def dfs(self, image, i, j, cur_color, new_color):
image[i][j] = new_color
m, n = len(image), len(image[0])
for di, dj in dirs:
I = i + di
J = j + dj
if 0 <= I < m and 0 <= J < n and image[I][J] == cur_color:
self.dfs(image, I, J, cur_color, new_color)
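# Illustrative run of Example 1 from the docstring (added sketch):
if __name__ == "__main__":
    image = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
    print(Solution().floodFill(image, sr=1, sc=1, newColor=2))
    # -> [[2, 2, 2], [2, 2, 0], [2, 0, 1]]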
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
7d375196ec6a89c43b9391ff60129464324ce322
|
f4fdb0c1213bbb403b87c2dbbde390918ac08861
|
/convert_uk_decl_num3.py
|
accb16c1dd9181350a97f4be6023784d4fd9b64a
|
[] |
no_license
|
benwing2/RuNounChanges
|
0d5076e576237f10b50049ed52b91f96c95cca95
|
048dfed5abe09b8d5629c5772292027ce0a170f2
|
refs/heads/master
| 2023-09-03T22:48:06.972127
| 2023-09-03T06:27:56
| 2023-09-03T06:27:56
| 41,480,942
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pywikibot, re, sys, argparse
import blib
from blib import getparam, rmparam, tname, pname, msg, errandmsg, site
def process_text_on_page(index, pagetitle, text):
global args
def pagemsg(txt):
msg("Page %s %s: %s" % (index, pagetitle, txt))
notes = []
parsed = blib.parse_text(text)
for t in parsed.filter_templates():
tn = tname(t)
origt = str(t)
def getp(param):
return getparam(t, param)
if tn == "uk-decl-num3":
def clean_part(part):
return blib.remove_links(part).replace(" ", "").strip()
acc = clean_part(getp("4"))
if "," in acc:
nom = clean_part(getp("1"))
gen = clean_part(getp("2"))
dat = clean_part(getp("3"))
ins = clean_part(getp("5"))
loc = clean_part(getp("6"))
acc_parts = acc.split(",")
if len(acc_parts) == 2:
acc_in, acc_an = acc_parts
for param in t.params:
pn = pname(param)
pv = str(param.value)
if not re.search("^[1-6]$", pn):
pagemsg("WARNING: Unrecognized param: %s=%s" % (pn, pv))
return
del t.params[:]
blib.set_template_name(t, "uk-adecl-manual")
t.add("special", "plonly\n", preserve_spacing=False)
t.add("nom_p", nom + "\n", preserve_spacing=False)
t.add("gen_p", gen + "\n", preserve_spacing=False)
t.add("dat_p", dat + "\n", preserve_spacing=False)
t.add("acc_p_in", acc_in + "\n", preserve_spacing=False)
t.add("acc_p_an", "%s,%s\n" % (acc_in, acc_an), preserve_spacing=False)
t.add("ins_p", ins + "\n", preserve_spacing=False)
t.add("loc_p", loc + "\n", preserve_spacing=False)
notes.append("replace {{uk-decl-num3}} with {{uk-adecl-manual}}")
pagemsg("Replaced %s with %s" % (origt, str(t)))
return str(parsed), notes
parser = blib.create_argparser("Convert {{uk-decl-num3}} to {{uk-adecl-manual}}", include_pagefile=True, include_stdin=True)
args = parser.parse_args()
start, end = blib.parse_start_end(args.start, args.end)
blib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,
default_refs=["Template:uk-decl-num3"])
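# Hedged invocation sketch: the exact flags come from blib.create_argparser
# and are assumed, but a minimal run over the default refs would look like
#   python convert_uk_decl_num3.py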
|
[
"ben@benwing.com"
] |
ben@benwing.com
|
fbfa4af6739e251fef1d94b0ce852a6cb2c6cca3
|
c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec
|
/ostPython4/context_mgr.py
|
5d67ab14436a6f258a36aef585b8624eba812c9d
|
[] |
no_license
|
deepbsd/OST_Python
|
836d4fae3d98661a60334f66af5ba3255a0cda5c
|
b32f83aa1b705a5ad384b73c618f04f7d2622753
|
refs/heads/master
| 2023-02-14T17:17:28.186060
| 2023-01-31T02:09:05
| 2023-01-31T02:09:05
| 49,534,454
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
#!/usr/bin/env python3
#
#
# context_mgr.py
#
# Lesson 14: Context Managers
#
# by David S. Jackson
# 8/17/15
#
# OST Python4: Advanced Python
# for Pat Barton, Instructor
#
"""
Project:
Write a context manager class that suppresses any ValueError
exceptions that occur in the controlled suite, but allows any
other exception to be raised in the surrounding context.
"""
class ctx_mgr:
def __init__(self, raising=True):
self.raising = raising
def __enter__(self):
cm = object()
return cm
def __exit__(self, exc_type, exc_val, exc_tb):
"Self.raising can be overridden, so I reset it excplicitly."
self.raising = True
if exc_type == ValueError:
return self.raising
elif exc_type:
raise
if __name__ == "__main__":
with ctx_mgr(raising=True) as cm:
print('To create ValueError, enter a float or string.')
num = int(input("Enter a number: "))
print('To create an IndexError, enter an int greater than 4.')
myindex = int(input('lst1 = [1,2,3,4,5]. What index is number 4? '))
lst1 = [1,2,3,4,5]
print("The value you selected is: ", lst1[myindex])
print("Divide by zero!", 3/0)
|
[
"deepbsd@yahoo.com"
] |
deepbsd@yahoo.com
|
49de7e6ce41f348e586e2eefc9b9a5e0127f92ad
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03574/s538402697.py
|
a100b6d62d5fdc1b9953e127ac04d0761a0d8b81
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
h,w=map(int,input().split())
s=["."*(w+2)]
for i in range(h):
s.append("."+input()+".")
s.append("."*(w+2))
dx=[-1,0,1,1,1,0,-1,-1]
dy=[1,1,1,0,-1,-1,-1,0]
ans=[]
for i in range(1,h+1):
wp=""
for j in range(1,w+1):
if s[i][j]=="#":
wp+="#"
continue
count=0
for k in range(8):
if s[i+dy[k]][j+dx[k]]=="#":
count+=1
wp+=str(count)
ans.append(wp)
print(*ans,sep="\n")
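# Example session (Minesweeper-style neighbour counts; added for clarity):
#   input:  3 3        output:  111
#           ...                 1#1
#           .#.                 111
#           ...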
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6099e986b2054b690030adc9e7e17a767ae0e2b4
|
c6fa248ec5a7e3c67afac98e365cac850c511473
|
/generative_adversarial_networks/code/chapter_08/04_train_discriminator.py
|
c79e832de127b1bae5f94a1889e27d01ecef99ac
|
[] |
no_license
|
shenjnigxing/deep-learning-material
|
44830e07cc2a5bd47b07ca903c1f2b65beef22bb
|
24dfee3b9fe1a40303cb2dfe256028d35113babf
|
refs/heads/master
| 2022-12-23T10:08:05.881432
| 2020-09-16T02:24:38
| 2020-09-16T02:24:38
| 295,900,907
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
# example of training the discriminator model on real and random cifar10 images
from numpy import ones
from numpy import zeros
from numpy.random import rand
from numpy.random import randint
from keras.datasets.cifar10 import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LeakyReLU
# define the standalone discriminator model
def define_discriminator(in_shape=(32,32,3)):
model = Sequential()
# normal
model.add(Conv2D(64, (3,3), padding='same', input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
# compile model
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# load and prepare cifar10 training images
def load_real_samples():
# load cifar10 dataset
(trainX, _), (_, _) = load_data()
# convert from unsigned ints to floats
X = trainX.astype('float32')
# scale from [0,255] to [-1,1]
X = (X - 127.5) / 127.5
return X
# select real samples
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = ones((n_samples, 1))
return X, y
# generate n fake samples with class labels
def generate_fake_samples(n_samples):
# generate uniform random numbers in [0,1]
X = rand(32 * 32 * 3 * n_samples)
# update to have the range [-1, 1]
X = -1 + X * 2
# reshape into a batch of color images
X = X.reshape((n_samples, 32, 32, 3))
# generate 'fake' class labels (0)
y = zeros((n_samples, 1))
return X, y
# train the discriminator model
def train_discriminator(model, dataset, n_iter=20, n_batch=128):
half_batch = int(n_batch / 2)
# manually enumerate epochs
for i in range(n_iter):
# get randomly selected 'real' samples
X_real, y_real = generate_real_samples(dataset, half_batch)
# update discriminator on real samples
_, real_acc = model.train_on_batch(X_real, y_real)
# generate 'fake' examples
X_fake, y_fake = generate_fake_samples(half_batch)
# update discriminator on fake samples
_, fake_acc = model.train_on_batch(X_fake, y_fake)
# summarize performance
print('>%d real=%.0f%% fake=%.0f%%' % (i+1, real_acc*100, fake_acc*100))
# define the discriminator model
model = define_discriminator()
# load image data
dataset = load_real_samples()
# fit the model
train_discriminator(model, dataset)
|
[
"Shenjx161212@gmail.com"
] |
Shenjx161212@gmail.com
|
4cd79181130987db75faf7e250e83b9863e339bb
|
5d6dd782e0b29817b3c27d5d6984909152813444
|
/dbbase/urls.py
|
3d183271c6790a11b27359533230ad4817dbcaab
|
[] |
no_license
|
smartslee/hospacc
|
387d8a7e42e068080738e365045a23d6d8a1f222
|
5bd42a9e729f3c90ff4b87185167f64fe79aac01
|
refs/heads/master
| 2020-04-01T12:59:50.743213
| 2019-10-07T08:13:41
| 2019-10-07T08:13:41
| 153,232,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from django.urls import path
from . import views
from .views import (HdbUpdateView,IndexView, SearchFormView,HdbCreateView,HdbDeleteView, HdbprintView)
urlpatterns = [
path('list/', views.index, name ='list'),
# url(r'^dbedit/', views.hospdb_list, name ='edit'),
path('input/', views.inputdb, name ='inputdbn'),
path('', views.homep, name ='home'),
path('dblistView/', views.IndexView.as_view(), name ='indexview'),
path('<int:pk>/', views.HdbdetailView.as_view(), name="detail"),
path('print(<int:pk>)/', views.HdbprintView.as_view(), name="print"),
path('hdb/add/', views.HdbCreateView.as_view(), name="hdb_add"),
path('update/<int:pk>/', HdbUpdateView.as_view(), name='update'),
path('delete/<int:pk>/', HdbDeleteView.as_view(), name='delete'),
#url(r'^list$',ProductListView.as_view(), name="ProductListView"),
# url(r'^list/(?P<pk>\d+)/$',ProductDetailView.as_view(), name="ProductDetailview"),
path('search',SearchFormView.as_view(),name='search'),
path('login/', views.signin, name='login'),
path('logout/', views.logout, name='logout'),
]
|
[
"you@example.com"
] |
you@example.com
|