blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82271a49c22deb170f63fd3232c33d3a7f82602e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyDunderSlotsInspection/inheritedClassAttrAssignmentAndOwnWithAttrAndInheritedSlots.py
|
307acdbdb1c8afd488293f4deb2a1b2e092d9960
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
class B(object):
attr = 'baz'
__slots__ = ['f', 'b']
class C(B):
__slots__ = ['attr', 'bar']
C.attr = 'spam'
print(C.attr)
c = C()
<warning descr="'C' object attribute 'attr' is read-only">c.attr</warning> = 'spam'
print(c.attr)
|
[
"Semyon.Proshev@jetbrains.com"
] |
Semyon.Proshev@jetbrains.com
|
93f3d82a3dbde659163043e13cd766201e977797
|
6b05bddf2e294c8e1b39846aecadfa06b4ff805d
|
/test/test_v1_guest_agent_ping.py
|
b5518c61004a78ef0ce9d3cb39339b04acf71066
|
[
"Apache-2.0"
] |
permissive
|
kubevirt/client-python
|
5ca82fe55d48c07f62796d2bed3605a7c189922c
|
235fe17f58d41165010be7e4122cb67bdc866fe7
|
refs/heads/master
| 2023-09-03T12:25:27.272479
| 2023-08-17T00:33:31
| 2023-08-17T00:33:31
| 105,017,761
| 29
| 25
|
Apache-2.0
| 2022-10-20T13:52:10
| 2017-09-27T12:51:32
|
Python
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_guest_agent_ping import V1GuestAgentPing
class TestV1GuestAgentPing(unittest.TestCase):
    """ V1GuestAgentPing unit test stubs """

    def setUp(self):
        # Generated stub: no fixtures are needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1GuestAgentPing(self):
        """
        Test V1GuestAgentPing
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubevirt.models.v1_guest_agent_ping.V1GuestAgentPing()
        pass
if __name__ == '__main__':
unittest.main()
|
[
"kubevirt-bot"
] |
kubevirt-bot
|
2ff528d76ec3c032363cc59f587b0f6da4f410dc
|
6e373b40393fb56be4437c37b9bfd218841333a8
|
/Level_3/Lecture_21/enroll/models.py
|
82f2f10c875633e48efc381b7887773f0c960169
|
[] |
no_license
|
mahto4you/Django-Framework
|
6e56ac21fc76b6d0352f004a5969f9d4331defe4
|
ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b
|
refs/heads/master
| 2023-01-22T01:39:21.734613
| 2020-12-04T03:01:17
| 2020-12-04T03:01:17
| 318,383,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Basic enrollment user record (name, email, password)."""
    # NOTE(review): password is stored in a plain CharField — no hashing is
    # visible in this model; confirm it is handled at the form/view layer.
    name = models.CharField(max_length=70)
    email = models.EmailField(max_length=100)
    password =models.CharField(max_length=100)
|
[
"mahto4you@gmail.com"
] |
mahto4you@gmail.com
|
a84829ae8a55aa1d175e4dcacd447f99e538bea7
|
49201afc8c3515d9f5cb569f45cd34ba291e84ca
|
/autobahntestsuite/autobahntestsuite/caseset.py
|
2611fd0aadbb7fe5e8808a6db96dedfd3862fc7f
|
[
"Apache-2.0"
] |
permissive
|
crossbario/autobahn-testsuite
|
2f3fe9a46a806550dddb23ed7bc98a94c47d5bd8
|
09cfbf74b0c8e335c6fc7df88e5c88349ca66879
|
refs/heads/master
| 2023-09-06T01:16:06.357182
| 2022-11-02T18:00:25
| 2022-11-02T18:00:25
| 3,762,517
| 718
| 74
|
Apache-2.0
| 2022-01-26T11:07:29
| 2012-03-19T09:59:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,570
|
py
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("CaseSet",)
import re
class CaseSet:
    """A named collection of test case classes.

    Provides translations between case classes (``Case1_2_3``), dotted case
    ids (``'1.2.3'``) and id tuples (``(1, 2, 3)``), plus resolution of
    wildcard case patterns and per-agent exclusions from a test spec dict.
    """

    def __init__(self, CaseSetName, CaseBaseName, Cases, CaseCategories, CaseSubCategories):
        """
        :param CaseSetName: Display name of this case set.
        :param CaseBaseName: Common class-name prefix, e.g. ``'Case'`` for ``Case1_2_3``.
        :param Cases: List of case classes named ``<CaseBaseName><i>_<j>_<k>``.
        :param CaseCategories: Mapping of category ids to descriptions.
        :param CaseSubCategories: Mapping of subcategory ids to descriptions.
        """
        self.CaseSetName = CaseSetName
        self.CaseBaseName = CaseBaseName
        self.Cases = Cases
        self.CaseCategories = CaseCategories
        self.CaseSubCategories = CaseSubCategories

        ## Index:
        ## "1.2.3" => Index (1-based) of Case1_2_3 in Cases
        ##
        self.CasesIndices = {}
        i = 1
        for c in self.Cases:
            self.CasesIndices[self.caseClasstoId(c)] = i
            i += 1

        ## Index:
        ## "1.2.3" => Case1_2_3
        ##
        self.CasesById = {}
        for c in self.Cases:
            self.CasesById[self.caseClasstoId(c)] = c

    def caseClasstoId(self, klass):
        """
        Class1_2_3 => '1.2.3'
        """
        l = len(self.CaseBaseName)
        return '.'.join(klass.__name__[l:].split("_"))

    def caseClasstoIdTuple(self, klass):
        """
        Class1_2_3 => (1, 2, 3)
        """
        l = len(self.CaseBaseName)
        return tuple([int(x) for x in klass.__name__[l:].split("_")])

    def caseIdtoIdTuple(self, id):
        """
        '1.2.3' => (1, 2, 3)
        """
        return tuple([int(x) for x in id.split('.')])

    def caseIdTupletoId(self, idt):
        """
        (1, 2, 3) => '1.2.3'
        """
        return '.'.join([str(x) for x in list(idt)])

    def caseClassToPrettyDescription(self, klass):
        """
        Truncates the rest of the description after the first HTML tag
        and coalesces whitespace.
        """
        return ' '.join(klass.DESCRIPTION.split('<')[0].split())

    def resolveCasePatternList(self, patterns):
        """
        Return list of test cases that match against a list of case patterns.
        Patterns without '*' are passed through verbatim; wildcard patterns
        are expanded against the known case ids in numeric (tuple) order.
        """
        specCases = []
        for c in patterns:
            if c.find('*') >= 0:
                # Translate the glob-ish pattern to a regex: '.' is literal,
                # '*' matches anything. Raw string avoids invalid-escape warnings.
                s = c.replace('.', r'\.').replace('*', '.*')
                p = re.compile(s)
                t = []
                for x in self.CasesIndices.keys():
                    if p.match(x):
                        t.append(self.caseIdtoIdTuple(x))
                for h in sorted(t):
                    specCases.append(self.caseIdTupletoId(h))
            else:
                specCases.append(c)
        return specCases

    def parseSpecCases(self, spec):
        """
        Return list of test cases that match against case patterns, minus exclude patterns.
        """
        specCases = self.resolveCasePatternList(spec["cases"])

        # dict.has_key() was removed in Python 3; 'in' works on both 2 and 3.
        if "exclude-cases" in spec:
            excludeCases = self.resolveCasePatternList(spec["exclude-cases"])
        else:
            excludeCases = []

        # Sort numerically by id tuple, then convert back to dotted ids.
        c = list(set(specCases) - set(excludeCases))
        cases = [self.caseIdTupletoId(y) for y in sorted([self.caseIdtoIdTuple(x) for x in c])]
        return cases

    def parseExcludeAgentCases(self, spec):
        """
        Parses "exclude-agent-cases" from the spec into a list of pairs
        of agent pattern and case pattern list.
        """
        if "exclude-agent-cases" in spec:
            ee = spec["exclude-agent-cases"]
            pats1 = []
            for e in ee:
                # Anchored agent-name regex; '*' is the only wildcard.
                s1 = "^" + e.replace('.', r'\.').replace('*', '.*') + "$"
                p1 = re.compile(s1)
                pats2 = []
                for z in ee[e]:
                    s2 = "^" + z.replace('.', r'\.').replace('*', '.*') + "$"
                    p2 = re.compile(s2)
                    pats2.append(p2)
                pats1.append((p1, pats2))
            return pats1
        else:
            return []

    def checkAgentCaseExclude(self, patterns, agent, case):
        """
        Check if we should exclude a specific case for given agent.
        """
        for p in patterns:
            if p[0].match(agent):
                for pp in p[1]:
                    if pp.match(case):
                        return True
        return False

    def getCasesByAgent(self, spec):
        """Return per-testee case classes to run, honoring agent exclusions.

        Result is a list of dicts with 'name', 'url', 'auth' and 'cases' keys;
        testees with no remaining cases are omitted.
        """
        caseIds = self.parseSpecCases(spec)
        epats = self.parseExcludeAgentCases(spec)
        res = []
        for server in spec['testees']:
            agent = server['name']
            res2 = []
            for caseId in caseIds:
                if not self.checkAgentCaseExclude(epats, agent, caseId):
                    res2.append(self.CasesById[caseId])
            if len(res2) > 0:
                o = {}
                o['name'] = str(server['name'])
                o['url'] = str(server['url'])
                o['auth'] = server.get('auth', None)
                o['cases'] = res2
                res.append(o)
        return res

    def generateCasesByTestee(self, spec):
        """Return a dict mapping each testee name to its (non-excluded) case classes."""
        caseIds = self.parseSpecCases(spec)
        epats = self.parseExcludeAgentCases(spec)
        res = {}
        for obj in spec['testees']:
            testee = obj['name']
            res[testee] = []
            for caseId in caseIds:
                if not self.checkAgentCaseExclude(epats, testee, caseId):
                    res[testee].append(self.CasesById[caseId])
        return res
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
3c37470e6687cc51f01b3bfb39c7f931f854f693
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/Gio/SocketServiceClass.py
|
8c18c95238ae487ac715dd801bd46c959b88b0ce
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 5,419
|
py
|
# encoding: utf-8
# module gi.repository.Gio
# from /usr/lib64/girepository-1.0/Gio-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class SocketServiceClass(__gi.Struct):
"""
:Constructors:
::
SocketServiceClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
incoming = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved4 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved5 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_g_reserved6 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(SocketServiceClass), '__module__': 'gi.repository.Gio', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'SocketServiceClass' objects>, '__weakref__': <attribute '__weakref__' of 'SocketServiceClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f4b87fc8810>, 'incoming': <property object at 0x7f4b87fc8900>, '_g_reserved1': <property object at 0x7f4b87fc89f0>, '_g_reserved2': <property object at 0x7f4b87fc8ae0>, '_g_reserved3': <property object at 0x7f4b87fc8bd0>, '_g_reserved4': <property object at 0x7f4b87fc8cc0>, '_g_reserved5': <property object at 0x7f4b87fc8db0>, '_g_reserved6': <property object at 0x7f4b87fc8ea0>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(SocketServiceClass)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
816ae873b0b90fcf3321f06f6a70489ed6eaeaa6
|
c07380914a44df334194f234c33858f357365c19
|
/ENV/lib/python2.7/site-packages/theano/tensor/tests/test_gc.py
|
d1304de7e268985aa6ba0543e87bf76860c9f26b
|
[] |
no_license
|
damianpolan/Music-Genre-Classification
|
318952ae7de5d0b0bdf5676e28071c7b38d0e1c5
|
acd723ae1432ce798866ebb97ef3c484db37e971
|
refs/heads/master
| 2022-12-24T09:23:55.514337
| 2016-03-22T14:49:28
| 2016-03-22T14:49:28
| 42,965,899
| 4
| 4
| null | 2022-12-12T20:26:24
| 2015-09-22T23:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
import cPickle
import sys
import numpy
import theano
from theano import tensor as T
import time
def test_no_reuse():
    """A compiled function must raise TypeError when called with a wrong
    dtype, rather than silently coercing/reusing input storage."""
    x = T.lvector()
    y = T.lvector()
    f = theano.function([x, y], x + y)

    #provide both inputs in the first call
    f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))

    try:
        # second call passes float64 where int64 vectors are expected
        f(numpy.ones(10))
    except TypeError:
        return
    assert not 'should not get here'
def test_gc_never_pickles_temporaries():
    """Pickled size of a compiled function must not grow after calling it:
    temporaries created at run time (with or without garbage collection in
    the linker) must never end up in the serialized function."""
    x = T.dvector()
    #print >> sys.stderr, 'BUILDING GRAPH'
    # Build a small expression chain r = r + r/10 on top of x.
    for i in xrange(2):  # TODO: 30 causes like LONG compilation due to MERGE
        if i:
            r = r + r/10
        else:
            r = x
    optimizer = None
    optimizer = 'fast_run'
    for f_linker, g_linker in [
            (theano.PerformLinker(allow_gc=True),
             theano.PerformLinker(allow_gc=False)),
            (theano.OpWiseCLinker(allow_gc=True),
             theano.OpWiseCLinker(allow_gc=False))]:
        #f_linker has garbage collection
        #g_linker has no garbage collection
        #print >> sys.stderr, 'COMPILING'
        f = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
                                                     linker=f_linker))
        g = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
                                                     linker=g_linker))
        len_pre_f = len(cPickle.dumps(f))
        len_pre_g = len(cPickle.dumps(g))

        # We can't compare the content or the length of the string
        # between f and g. 2 reason, we store some timming information
        # in float. They won't be the same each time. Different float
        # can have different lenght when printed.

        def a(fn):
            return len(cPickle.dumps(fn.maker))
        assert a(f) == a(f)  # some sanity checks on the pickling mechanism
        assert a(g) == a(g)  # some sanity checks on the pickling mechanism

        def b(fn):
            return len(
                cPickle.dumps(
                    theano.compile.function_module._pickle_Function(
                        fn)))
        assert b(f) == b(f)  # some sanity checks on the pickling mechanism

        def c(fn):
            return len(cPickle.dumps(fn))
        assert c(f) == c(f)  # some sanity checks on the pickling mechanism
        assert c(g) == c(g)  # some sanity checks on the pickling mechanism

        # now run the function once to create temporaries within the no-gc
        # linker
        f(numpy.ones(100, dtype='float64'))
        g(numpy.ones(100, dtype='float64'))

        # serialize the functions again
        post_f = cPickle.dumps(f)
        post_g = cPickle.dumps(g)
        len_post_f = len(post_f)
        len_post_g = len(post_g)

        # assert that f() didn't cause the function to grow
        # allow_gc should leave the function un-changed by calling
        assert len_pre_f == len_post_f

        # assert that g() didn't cause g to grow because temporaries
        # that weren't collected shouldn't be pickled anyway
        # Allow for a couple of bytes of difference, since timing info,
        # for instance, can be represented as text of varying size.
        assert abs(len_post_f - len_post_g) < 16, (
            f_linker, len_post_f, len_post_g)
def test_merge_opt_runtime():
    """In the original merge optimization, the following graph took
    like caused the MERGE optimizer to exhibit really bad performance
    (quadratic? exponential?)

    Ironically, there is actually no merging to do in this graph.
    """
    x = T.dvector()
    # Build a 50-deep chain r = r + r/10; every node is distinct, so the
    # merge optimizer has nothing to merge — only its traversal cost matters.
    for i in xrange(50):
        if i:
            r = r + r/10
        else:
            r = x
    t = time.time()
    f = theano.function([x], r, mode='FAST_COMPILE')
    # FAST_RUN does in-place optimizer which requires a lot of
    # toposorting, which is actually pretty slow at the moment. This
    # test was designed to test MergeOptimizer... so I'm leaving
    # toposort optimizations for a later date.
    dt = time.time() - t

    # it should never take longer than 5 seconds to compile this graph
    assert dt < 5.0
|
[
"damian.polan@gmail.com"
] |
damian.polan@gmail.com
|
a2c2e07a8afdcf2c8f91018caceb18c216081b48
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/kivy/examples/canvas/fbo_canvas.py
|
dd06928bdb98fedb7c9f34cb533e75a733227641
|
[
"MIT"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0cc3f5272ba46eb262d440a5c297b24905c455a2aa930e0baaa5f7f37b3486e6
size 2544
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
3a89353fe1bf9bc2c3a18a54b8aa626d89c3dc77
|
15978aacf0e44a890e36ff94c305aca5a056e5e8
|
/13day/10-有返回的装饰器和通用的装饰器.py
|
49f8d4065d8cba8ccf18b0da1614f1193e0a14d8
|
[] |
no_license
|
ittoyou/1805_python_2
|
ffbe613d893208b2454ef4f25cc2b8a9951ff047
|
1d6331a83598863042912bb26205d34417abed73
|
refs/heads/master
| 2020-03-24T13:58:12.276827
| 2018-07-27T07:58:57
| 2018-07-27T07:58:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
def w1(fun):
    """Decorator: announce a login verification before delegating to *fun*.

    The wrapped function's arguments and return value pass through unchanged.
    """
    def inner(*args, **kwargs):
        print("验证登录")
        result = fun(*args, **kwargs)
        return result
    return inner
# Demo: decorated function with arguments and a return value.
@w1
def play(a,b):
    print("------------%s-----%s----------"%(a,b))
    return "hehe"

ret = play("1","2")
print(ret)

# Demo: decorated function with no arguments and no return value.
@w1
def play1():
    print("哈哈哈")

play1()

# Demo: decorated function with a single argument.
@w1
def play2(a):
    print("哈哈哈2%s"%a)

play2("嘎嘎")

# Demo: decorated function that only returns a value.
@w1
def play3():
    return "hahah3"

ret = play3()
print(ret)
|
[
"qingyuan@geekniu.com"
] |
qingyuan@geekniu.com
|
3679dbbc8bc44685045edec9a6d71a1e00d53833
|
45ee96b582d7b3e045819db510088d2cb640dfde
|
/BOJ/Previous/Implementation/완전제곱수.py
|
e78f92dafcc73f0e1bfc49baa5f3d15bd4298468
|
[] |
no_license
|
tom9744/Algorithms
|
e54b649014f3b478bfbc7a0f9e8e56ad5dbc1304
|
4496b1c992ab4322289e5a200567f3df00478917
|
refs/heads/master
| 2023-05-06T00:59:12.767655
| 2021-05-26T16:26:50
| 2021-05-26T16:26:50
| 330,401,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# 1977 : Perfect squares (BOJ problem 1977, "완전제곱수")
import math

# Read the inclusive range [M, N], one bound per line of stdin.
M = int(input())
N = int(input())

perfect_square_numbers = []

# Collect every perfect square in [M, N]. math.sqrt(...).is_integer()
# assumes the inputs are small enough for float sqrt to be exact
# (true for typical BOJ constraints — TODO confirm the problem bounds).
for number in range(M, N + 1):
    if math.sqrt(number).is_integer():
        perfect_square_numbers.append(number)

# Print -1 when the range holds no perfect square; otherwise print
# their sum followed by the smallest one.
if len(perfect_square_numbers) == 0:
    print(-1)
else:
    print(sum(perfect_square_numbers))
    print(perfect_square_numbers[0])
|
[
"tom9744@gmail.com"
] |
tom9744@gmail.com
|
f2257a66a17f8b82a0bb0a42260b553d534f2889
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616203432.py
|
d8f31778d62ce6a0b3ed7002c575bb9870500ea2
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each field is a digit string between 0 and 255
def valid(str):
    """Return True if *str* is a valid dotted-quad IPv4 address.

    Rules: exactly four dot-separated fields, each a decimal number in
    [0, 255], digits only, with no leading zeros ("01" is invalid, "0" is
    fine).

    Note: the parameter name shadows the builtin ``str``; kept for
    backward compatibility with existing callers.
    """
    address = str.split(".")
    if len(address) != 4:
        return False
    for a in address:
        # digits only — rejects '', '-1', '+1', '1e2', whitespace, etc.
        if not a.isdigit():
            return False
        # leading zero is invalid, but a bare "0" field is allowed
        if len(a) > 1 and a[0] == "0":
            return False
        if int(a) > 255:
            return False
    return True

print(valid("172.16.254.01"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
f695b79388c07e89cfa05c0175e698eadc9d3daa
|
8523daaf19e0250962b454d9c4f87f4c7d71ab9d
|
/models.py
|
d70630bbfa64fe60497c69c7bc15cf28c945160d
|
[] |
no_license
|
madtyn/cris
|
ad2fd35a05efb6829e96bd1aa39c86a0efa8102f
|
a45410e6a67f589ac7d392bebc1ee9725ff4cd1b
|
refs/heads/master
| 2020-11-30T17:42:09.675319
| 2020-01-19T10:36:32
| 2020-01-19T10:36:32
| 230,450,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,617
|
py
|
import datetime as dt
from enum import Enum
from collections import namedtuple
from indexes import FIRST_MONTH_COL, COLS_PER_MONTH
StudentMonth = namedtuple('StudentMonth', ['quota', 'number', 'processed'])
class Months(Enum):
    """School-year months in academic order (October through September).

    Each member's Enum value is the spreadsheet column index of that month's
    first ("quota") cell, derived from FIRST_MONTH_COL / COLS_PER_MONTH and
    the member's declaration position; the next two columns hold the receipt
    number and the processed marker.
    """
    OCTOBER = ('Octubre', 10)
    NOVEMBER = ('Noviembre', 11)
    DECEMBER = ('Diciembre', 12)
    JANUARY = ('Enero', 1)
    FEBRUARY = ('Febrero', 2)
    MARCH = ('Marzo', 3)
    APRIL = ('Abril', 4)
    MAY = ('Mayo', 5)
    JUNE = ('Junio', 6)
    JULY = ('Julio', 7)
    AUGUST = ('Agosto', 8)
    SEPTEMBER = ('Septiembre', 9)

    def __new__(cls, *args, **kwargs):
        # len(cls.__members__) counts members created so far, so each month
        # is assigned the next COLS_PER_MONTH-wide column slot in order.
        idx = FIRST_MONTH_COL + (len(cls.__members__) * COLS_PER_MONTH)
        obj = object.__new__(cls)
        obj._value_ = idx
        obj.quota_idx = idx            # column of the monthly quota cell
        obj.number_idx = idx + 1       # column of the receipt number cell
        obj.processed_idx = idx + 2    # column of the processed marker cell
        obj.trans = args[0]            # Spanish display name
        obj.ordinal = args[1]          # calendar month number (1-12)
        return obj

    @classmethod
    def get_month(cls, ordinal):
        # Spanish name for a calendar month number; implicitly returns None
        # when no member matches the given ordinal.
        for m in cls:
            if ordinal == m.ordinal:
                return f'{m!s}'

    def get_student_month(self, row):
        # Extract this month's three cells from a spreadsheet *row* sequence.
        return StudentMonth(row[self.quota_idx], row[self.number_idx], row[self.processed_idx])

    def __ge__(self, other):
        # Comparisons order by column index, i.e. academic-year order
        # (October first), not calendar order.
        if self.__class__ is other.__class__:
            return self.value >= other.value
        return NotImplemented

    def __gt__(self, other):
        if self.__class__ is other.__class__:
            return self.value > other.value
        return NotImplemented

    def __le__(self, other):
        if self.__class__ is other.__class__:
            return self.value <= other.value
        return NotImplemented

    def __lt__(self, other):
        if self.__class__ is other.__class__:
            return self.value < other.value
        return NotImplemented

    def __str__(self):
        return self.trans
class CommonInfo(object):
    """Receipt data shared across all students of one activity."""

    def __init__(self, teacher, nif, school_year, activity):
        self.teacher = teacher            # teacher's full name
        self.nif = nif                    # teacher's tax id (NIF)
        self.school_year = school_year    # e.g. '2019/2020' — presumably; confirm format with callers
        self.activity = activity          # name of the taught activity
class Receipt(object):
    """Builds the text lines of one student's monthly payment receipt.

    ``header()``, ``body()`` and ``sign()`` each produce an iterable of
    template lines with their placeholders filled in from the receipt data.
    """

    header_tag = [
        "Nombre del escolar: {student}",
        "Número de recibo: {number}",
        "Precio mensualidad: {quota}",
    ]
    body_tag = [
        "{teacher}, con NIF {nif}, ha recibido de los responsables del alumno / a anteriormente citado las",
        "cantidades que se desglosan en este recibo en concepto de pago de la actividad \"{activity}\",",
        "realizada durante el curso {school_year}",
    ]
    sign_tag = ["A Coruña, {day} de {month} del {year}", ]

    def __init__(self, info, student, student_month):
        self.info = info
        self.student = student
        self.number = student_month.number
        self.quota = student_month.quota

    def header(self):
        # Student identification block.
        values = {
            'student': self.student,
            'number': self.number,
            'quota': self.quota,
        }
        return (line.format(**values) for line in self.header_tag)

    def body(self):
        # Legal body naming the teacher, activity and school year.
        values = {
            'teacher': self.info.teacher,
            'nif': self.info.nif,
            'activity': self.info.activity,
            'school_year': self.info.school_year,
        }
        return (line.format(**values) for line in self.body_tag)

    def sign(self):
        # Place/date signature line; month name rendered in Spanish.
        today = dt.datetime.today()
        values = {
            'day': today.day,
            'month': Months.get_month(today.month),
            'year': today.year,
        }
        return (line.format(**values) for line in self.sign_tag)
if __name__ == '__main__':
print()
print()
print()
|
[
"madtyn@gmail.com"
] |
madtyn@gmail.com
|
249d0fc847698e8656f69bffdac9648ab002c339
|
45614a944ffbdb75a0bef955582a722da5ce7492
|
/python/selenium/delta_time.py
|
f3a2a4edc43929e36dcdc6408809e7ed0457801f
|
[] |
no_license
|
wccgoog/pass
|
1c8ab5393547634a27c7543556a75dec771a9e3d
|
0ec01536ae10b3d99707002c0e726072acb50231
|
refs/heads/2
| 2023-01-15T13:27:26.312648
| 2019-10-23T09:30:45
| 2019-10-23T09:30:45
| 122,595,075
| 0
| 2
| null | 2023-01-07T10:42:38
| 2018-02-23T08:38:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
# -*- coding: utf-8 -*-
import datetime,time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def write_delta_time(n):
    """Log in to the target web app via Selenium and measure, *n* times,
    how long a list page takes to become ready, appending each duration to
    a desktop text file.

    Requires Chrome and network access to 192.168.0.138:9998.
    """
    driver=webdriver.Chrome()
    driver.get('http://192.168.0.138:9998')
    driver.maximize_window()
    WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.ID,'account')))
    driver.find_element_by_id('account').send_keys('5815') # account number
    driver.find_element_by_id('password').send_keys('WW5815') # password
    start=driver.find_element_by_css_selector('div.handler.handler_bg')
    action=ActionChains(driver)
    action.drag_and_drop_by_offset(start,250,0)
    action.perform() # drag the slider captcha to pass verification
    driver.find_element_by_id('loginbutton').click()
    WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.ID,"tabwindow_0")))
    # Open in append mode so repeated runs accumulate in one file.
    f=open('C:/Users/Administrator/Desktop/time.txt','a')
    for i in range(n):
        # Close any open tabs, then navigate the menu to the page under test.
        for tab in driver.find_elements_by_css_selector('div.tab_close'):
            tab.click()
        driver.find_element_by_xpath("//ul[@id='jMenu']/li/a/span").click()
        driver.find_element_by_css_selector("li.jmenu-level-0 > ul > li > a > span").click()
        # Time from menu click until the target cell appears inside the frame.
        time_start=datetime.datetime.now()
        WebDriverWait(driver,30).until(EC.frame_to_be_available_and_switch_to_it(0))
        time.sleep(1) # without this pause the following wait intermittently errors
        WebDriverWait(driver,15).until(EC.presence_of_all_elements_located((By.XPATH,"//div[@id='ListTable']/div[5]/div/div[5]/div[8]")))
        time_end=datetime.datetime.now()
        time_total=time_end-time_start
        f.write(str(time_total)+'\n')
        driver.switch_to.default_content()
    f.close()

if __name__=='__main__':
    n=input('输入希望运行的次数: ')
    write_delta_time(int(n))
|
[
"wcc3@sina.com"
] |
wcc3@sina.com
|
11d91f7682d807291ec8c6d20fa64f3166ad3a77
|
f682c74fb65f0d951821b77bf96cee28d00ae3dd
|
/博物馆网数据采集子系统/展览爬取/广东省博物馆展览爬取.py
|
a267ae318e81038908bb00ebc4349ddfeb6944bd
|
[] |
no_license
|
1806-1/Software-engineering
|
7e5add7b40d123dca0daa39d83a8fc4c16f8cb0d
|
0a75ed857410bb8e1f882bd8e49504c43590ffd8
|
refs/heads/main
| 2023-05-13T00:07:58.579811
| 2021-06-06T08:09:41
| 2021-06-06T08:09:41
| 354,178,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,947
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 16 09:21:11 2021
@author: lenovo
"""
import requests
import pandas as pd
import csv
from bs4 import BeautifulSoup
hdrs = {'User-Agent':'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'}
# 博物馆活动列表页网址
url = "http://www.gdmuseum.com/"
r = requests.get(url, headers = hdrs)
soup = BeautifulSoup(r.content.decode('utf8', 'ignore'), 'lxml')
# class_ ='maintxt' 活动列表正文部分 根据网页tag修改
div_list = soup.find_all('div', class_ ='tz_r_first')
#查找下一级网址 即各个活动详情页的网址
anchors = soup.findAll('a')
links = [] #存取各个活动详情页网址
for tag in soup.find_all('ul', class_='xwdt'):
anchors = tag.findAll('a')
#print("anchors",anchors)
for a in anchors:
links.append(a['href'])
#print(links)
#从活动列表页爬取活动名称
TitleList = []# 存取活动名称 这个网址爬出来后十个字符刚好是活动时间
k = 0
for tag in soup.find_all('ul', class_='xwdt'):
k = k+1
title = tag.get_text()
TitleList.append(title)
#print(TitleList)
#
IntroList = []#存取简介(爬取结束后存的是大段文字,后面根据句号只取第一句上传数据库)
ImgList = []# 存取图片地址(爬取结束后与最终写入csv的Photolist一致,直接复制)
for kk in links:#遍历详情页链接
Detailurl = kk
Detailr = requests.get(Detailurl, headers = hdrs)
Detailsoup = BeautifulSoup(Detailr.content.decode('utf8', 'ignore'), 'lxml')
for tag in Detailsoup.findAll('div', class_ = 'yknr_mav'):#详情页活动介绍正文
img_link = tag.findAll('img') #查找所有img字段
print(img_link)
for a in img_link:#遍历img字段
ImgList.append("http://www.gdmuseum.com/" + a['href'])#网页给的img链接没有"http://www.sxhm.com/"自己加上
print("http://www.gdmuseum.com/" + a['href'])
break#只取第一张图片
i = 0#计数
for tag in Detailsoup.select('p', calss_ = 'content'):#<p class="MsoNormal">字段是文字介绍
i = i+1
if(i <= 2):#前两个是时间和杂项不需要用, 第三个才是介绍第一句,存入Introlist
continue
Introduce = tag.get_text()
# print(Introduce)
if(len(Introduce) > 5):#大于5个字的保存并且结束(即只保存第一句)
IntroList.append(Introduce)
break
else:
continue#可能是空格,太短的不保存
# print(IntroList)
# =============================================================================
# 爬取完成
# 开始数据格式处理
# =============================================================================
#最终写入csv的list
Name_Act_List = [] # 活动名
Time_Act_List = [] # 活动时间
Intro_Act_List = [] # 活动简介
Photo_Act_List = [] # 活动图片链接
newTitleList = TitleList[0].split('\n')#之前得到的titlelist是一整句,list中只有一个元素,各活动用‘\n'分割 通过这个语句从每个\n分开成新的元素
print(newTitleList)
for name in newTitleList:
lenth = len(name)
if(lenth < 2):#split可能截取出空格作为一个元素 太短的跳过
continue
Time = name[lenth-10:]#取后十个字符,刚好是时间
# if(len(Time) == 10):
# Time_Act_List.append(Time)
Time_Act_List.append(Time)
Title = name[:lenth-10]#后十个之外的是活动名
Name_Act_List.append(Title)
print(Time_Act_List)
print(Name_Act_List)
for intro in IntroList:
lenth = len(intro)
a = intro.find('。')#找第一个句号的位置
intro = intro[:a+1]#取第一个句号之前的作为简介
out = "".join(intro.split())#去掉’\x0xa‘等格式控制符只提取文本
Intro_Act_List.append(out)
print(out)
print(Intro_Act_List)
Photo_Act_List = ImgList
help_x_list = []
Museum_list = []
for i in range(0, len(Name_Act_List)):
help_x_list.append(str(i))
Museum_list.append("广东省博物馆
")
# =============================================================================
# 开始向CSV中写数据
# =============================================================================
dataframe = pd.DataFrame({
'博物馆名称':Museum_list,
'活动名字':Name_Act_List,
'活动时间':Time_Act_List,
'活动介绍':Intro_Act_List,
'活动图片地址':Photo_Act_List
})
dataframe.to_csv(r"广东省博物馆活动.csv",sep=',')
|
[
"noreply@github.com"
] |
1806-1.noreply@github.com
|
7ffa82f194c3ea745e4353afbfb80085484f5606
|
dd256415176fc8ab4b63ce06d616c153dffb729f
|
/aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/23-Jul-2019/method_examples/class_methods_2.py
|
05e1064eb9949454b9956604a1def6df3fba359e
|
[] |
no_license
|
adityapatel329/python_works
|
6d9c6b4a64cccbe2717231a7cfd07cb350553df3
|
6cb8b2e7f691401b1d2b980f6d1def848b0a71eb
|
refs/heads/master
| 2020-07-24T17:15:39.839826
| 2019-09-12T07:53:28
| 2019-09-12T07:53:28
| 207,993,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
class DecoratorExample:
    """Demonstrates the @classmethod and @staticmethod decorators."""

    def __init__(self):
        print('Hello, world')

    @classmethod
    def example_function(cls):
        """Class method: receives the class itself as *cls*."""
        print("In a class method ")
        # A classmethod can call other methods through cls.
        cls.some_other_function()

    @staticmethod
    def some_other_function():
        """Static method: no implicit first argument."""
        print('Hello')


# Instantiation prints the greeting; the call exercises both methods.
de = DecoratorExample()
de.example_function()
|
[
"aditya.patel@1rivet.local"
] |
aditya.patel@1rivet.local
|
722e532abb9d183c9faeb239a798949f7cbb32e0
|
a75b7fd002a9f8b4823dcc9cd6c2c5291ea31fe8
|
/ir_datasets/datasets/wikir.py
|
cfa056b832e3aa089533038d543bd5ee028d47f4
|
[
"Apache-2.0"
] |
permissive
|
FRD898/ir_datasets
|
3edadc3859eb3c3c7a3f7c33c14aebe709aad2f2
|
e4bfec64d41cc09c84315f675f2af768ea26f5b4
|
refs/heads/master
| 2023-06-16T10:32:12.367257
| 2021-07-18T10:41:20
| 2021-07-18T10:41:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import ZipExtractCache, DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import CsvQueries, CsvDocs, TrecQrels, TrecScoredDocs
# Registry key and download-config section name for this dataset.
NAME = 'wikir'
# Module-level logging helper provided by ir_datasets.
_logger = ir_datasets.log.easy()
# Meaning of each relevance level in the WikIR qrels files.
QRELS_DEFS = {
    2: "Query is the article title",
    1: "There is a link to the article with the query as its title in the first sentence",
    0: "Otherwise",
}
class File:
    """A file located by glob pattern inside an extracted download.

    `dlc` is any object exposing a ``path()`` method returning the root
    directory; `relative_path` is a glob pattern resolved under that root.
    """

    def __init__(self, dlc, relative_path):
        self.dlc = dlc
        self.relative_path = relative_path

    def path(self):
        """Return the first filesystem match for the pattern as a string."""
        root = Path(self.dlc.path())
        match = next(root.glob(self.relative_path))
        return str(match)

    @contextlib.contextmanager
    def stream(self):
        """Yield the resolved file opened for binary reading."""
        handle = open(self.path(), 'rb')
        try:
            yield handle
        finally:
            handle.close()
def _init():
    """Build and register all WikIR dataset variants.

    Returns (base, subsets): the documentation-only base dataset and a dict
    mapping subset ids (e.g. 'en1k', 'en1k/test') to Dataset objects.
    """
    base_path = ir_datasets.util.home_path()/NAME
    dlc = DownloadConfig.context(NAME, base_path)
    documentation = YamlDocumentation(f'docs/{NAME}.yaml')
    subsets = {}
    # (source tag, expected document count) per WikIR release; the tag's
    # first two characters are the language code.
    sources = [
        ('en1k', 369721),
        ('en59k', 2454785),
        ('fr14k', 736616),
        ('es13k', 645901),
        ('it16k', 503012),
    ]
    for source, count_hint in sources:
        # Each source ships as one zip archive, extracted under base_path.
        source_dlc = ZipExtractCache(dlc[source], base_path/source)
        docs = CsvDocs(File(source_dlc, "*/documents.csv"), namespace=source, lang=source[:2], count_hint=count_hint)
        subsets[source] = Dataset(docs, documentation(source))
        # Every source provides the same three splits with queries, qrels,
        # and a BM25 baseline run; the docs collection is shared.
        for split in ['training', 'validation', 'test']:
            subsets[f'{source}/{split}'] = Dataset(
                docs,
                CsvQueries(File(source_dlc, f"*/{split}/queries.csv"), lang=source[:2]),
                TrecQrels(File(source_dlc, f"*/{split}/qrels"), qrels_defs=QRELS_DEFS),
                TrecScoredDocs(File(source_dlc, f"*/{split}/BM25.res")),
                documentation(f'{source}/{split}')
            )
    base = Dataset(documentation('_'))
    # Register 'wikir' plus one entry per subset ('wikir/<subset>').
    ir_datasets.registry.register(NAME, base)
    for s in sorted(subsets):
        ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
    return base, subsets
# Registration happens at import time.
collection, subsets = _init()
|
[
"sean.macavaney@gmail.com"
] |
sean.macavaney@gmail.com
|
93d973806b72476402c087079c684e78920c1e44
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/letters.py
|
4d6a222a879f80298b4d6ad5f5d5743deb44e15d
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint collecting all routes mounted under the /letters prefix.
controller = Blueprint('letters', __name__, url_prefix='/letters')
# NOTE(review): the route handler below is scaffolding left commented out
# (pending "TODO 2"); kept as-is.
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
71a1d35afe3081aaa5e44192447c7494b4a5050e
|
0a2cc497665f2a14460577f129405f6e4f793791
|
/sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/models/_container_registry_enums.py
|
8ca5cfea37c17dd1bd1b22ec0ca9d9f1a79ba8bd
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
hivyas/azure-sdk-for-python
|
112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b
|
8b3258fa45f5dc25236c22ad950e48aa4e1c181c
|
refs/heads/master
| 2023-06-17T12:01:26.392186
| 2021-05-18T19:56:01
| 2021-05-18T19:56:01
| 313,761,277
| 1
| 1
|
MIT
| 2020-12-02T17:48:22
| 2020-11-17T22:42:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,910
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.4.1, generator: @autorest/python@5.6.4)
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ArtifactArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """CPU architectures an artifact may target; name lookup is case-insensitive."""

    I386 = "386"
    AMD64 = "amd64"
    ARM = "arm"
    ARM64 = "arm64"
    MIPS = "mips"
    MIPS_LE = "mipsle"
    MIPS64 = "mips64"
    MIPS64_LE = "mips64le"
    PPC64 = "ppc64"
    PPC64_LE = "ppc64le"
    RISC_V64 = "riscv64"
    S390_X = "s390x"
    WASM = "wasm"
class ArtifactOperatingSystem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Operating systems an artifact may target; name lookup is case-insensitive."""

    AIX = "aix"
    ANDROID = "android"
    DARWIN = "darwin"
    DRAGONFLY = "dragonfly"
    FREE_BSD = "freebsd"
    ILLUMOS = "illumos"
    I_OS = "ios"
    JS = "js"
    LINUX = "linux"
    NET_BSD = "netbsd"
    OPEN_BSD = "openbsd"
    PLAN9 = "plan9"
    SOLARIS = "solaris"
    WINDOWS = "windows"
class ManifestOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort options for ordering manifests in a collection.

    Values are the literal orderby strings (e.g. "timedesc").
    """

    #: Do not provide an orderby value in the request.
    NONE = "none"
    #: Order manifests by LastUpdatedOn field, from most recently updated to least recently updated.
    LAST_UPDATED_ON_DESCENDING = "timedesc"
    #: Order manifest by LastUpdatedOn field, from least recently updated to most recently updated.
    LAST_UPDATED_ON_ASCENDING = "timeasc"
class TagOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort options for ordering tags in a collection."""

    #: Do not provide an orderby value in the request.
    NONE = "none"
    #: Order tags by LastUpdatedOn field, from most recently updated to least recently updated.
    LAST_UPDATED_ON_DESCENDING = "timedesc"
    #: Order tags by LastUpdatedOn field, from least recently updated to most recently updated.
    LAST_UPDATED_ON_ASCENDING = "timeasc"
class TokenGrantType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Grant type for token requests: refresh_token or password."""

    REFRESH_TOKEN = "refresh_token"
    PASSWORD = "password"
|
[
"noreply@github.com"
] |
hivyas.noreply@github.com
|
c9a499e0b0d202e5ea52f5ef6a9c4580d811345f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_45/72.py
|
8cf783ea15df40bf45a0fc4e0429b4f48fca706b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,556
|
py
|
#!/usr/bin/env python
#
# jam.py
#
# Copyright 2009 Denis <denis@denis-desktop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#import re
import itertools
# NOTE: Python 2 source (xrange, print statement below).
# Module-level state shared between main() and solve():
#   tor  - list parsed from the current case's second input line
#   p, q - the two header integers of the current case (q is unused)
tor = []
p = 0
q = 0
def solve(left, right):
    """Greedy cost of releasing the remaining cells within [left, right].

    Always releases the cell closest to the middle of the interval, then
    recurses on both halves.  Mutates the global `tor` (removes each released
    cell).  NOTE(review): superseded by force() in main(); kept for reference.
    """
    global tor
    if not tor:
        return 0
    if left > right:
        return 0
    i = 0
    middle = float(right + left)/float(2)
    '''
    goon = True
    l = len(tor)
    while goon:
        if i >= l:
            goon = False
            i -= 1
        if tor[i] > middle:
            goon = False
        i += 1
    i -= 1
    if i > 0 and abs(middle - tor[i-1]) <= abs(middle - tor[i]) and tor[i-1] >= left:
        i -= 1
    '''
    # `min` shadows the builtin here; tracks the candidate closest to middle.
    min = {'diff': 99999, 'pos': -1}
    for i in xrange(0,len(tor)):
        newdiff = abs(middle-tor[i])
        if newdiff < min['diff']:
            min['diff'] = newdiff
            min['pos'] = i
    released = tor[min['pos']]
    if released < left or released > right:
        return 0
    #print left,' ',middle,' ',right
    #print 'of',tor,'choose',released
    del tor[min['pos']]
    # Each release costs the current interval width; recurse on both halves.
    answer = right-left
    answer += solve(left, released-1)
    answer += solve(released+1, right)
    return answer
def force(to, left, right):
    """Brute force: try every in-range cell of `to` as the first release.

    Releasing a cell costs (right - left); the interval then splits at the
    released cell and both halves are solved recursively.  Returns the
    minimum total cost (99999 acts as an +infinity sentinel).
    """
    aaa = 99999
    if not to:
        return 0
    if left == right:
        return 0
    i = 0
    #print 'Got',to,left,right
    l = len(to)
    # Skip candidates below the interval's left edge.
    while i < l and to[i] < left:
        i += 1
    #print 'Skipped to',i,'(',to[i],')'
    while i < l and to[i] <= right:
        answer = right-left
        # Cells left of the released one belong to the left sub-interval ...
        if i > 0:
            answer += force(to[:i], left, to[i]-1)
        # ... and the rest to the right sub-interval.
        if i < l:
            answer += force(to[i+1:], to[i]+1, right)
        aaa = min(aaa, answer)
        i += 1
    return aaa
def main():
    """Read cases from C-small-attempt5.in and print one answer per case."""
    global tor, p, q
    with open("C-small-attempt5.in") as f:
        n = f.readline()
        n = int(n)
        for case in xrange(1, n+1):
            # p = interval upper bound; q = count of entries (unused).
            p, q = map(int, f.readline().strip().split(' '))
            tor = map(int, f.readline().strip().split(' '))
            #answer = solve(1, p)
            answer = force(tor, 1, p)
            print "Case #%d: %d" % (case, answer)
    return 0
if __name__ == '__main__': main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
137982ad4fabf053ac21d39abd872101e3ece56c
|
b124d99a5d7a139d31405aefdbfed09f6eb3d55b
|
/beebcn/spiders/beebcn.py
|
0ee6535f8b6015585dac04bef036e47860cb503b
|
[] |
no_license
|
daniel-kanchev/beebcn
|
26efaab276e525b919b4fbeb06251a2850573de4
|
d7e8142b41501b2586e0f8e01f8a690355701268
|
refs/heads/main
| 2023-04-04T05:08:37.732275
| 2021-04-15T11:13:28
| 2021-04-15T11:13:28
| 358,230,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from beebcn.items import Article
import requests
import json
import re
class beebcnSpider(scrapy.Spider):
    """Spider for bank-news articles on beeb.com.cn.

    The article list is not in the start page's HTML; parse() fetches it
    directly from a JSON endpoint with `requests`.
    """
    name = 'beebcn'
    start_urls = ['http://www.beeb.com.cn/#/home/banknews']
    def parse(self, response):
        # NOTE(review): the long MmEwMD query parameter looks like a
        # session/anti-bot token and may expire — confirm before relying on it.
        json_response = json.loads(requests.get(
            "http://www.beeb.com.cn/beebPortal/data/content/banknews.json?MmEwMD=5RroZJL4EsQSA_im0lwzRvTmJYy8PJ4cOClXNiNribCHRHjumBO3uBMMxoJzIJ3r62_9HrN9.tr70HIghQ5aKUXz1cuP4ESFycL1xKjK_Na4.JFV_a8PKOxBOF0DcMGoWbpFpqiVpl2aZy2VGwcostDBYt9hUkpu3u7a7ICHNf_K32mxnn0_.wxIMLtrYIf7PM3bZt993kiMI8Nyen.9unNqhUhblx0ILi5cJrPveYNJPVtvuppJobjGdG6nFKcBtQ_nFPjWN0kounYjSEQWn0O.t.BuCKWKbuGZkMNlyziFmT02JgsR0BLc4tfTEvv36").text)
        articles = json_response["articleList"]
        for article in articles:
            item = ItemLoader(Article())
            item.default_output_processor = TakeFirst()
            title = article["title"]
            date = article["createTime"]
            # Strip HTML tags from the article body.
            p = re.compile(r'<.*?>')
            content = p.sub('', article["content"])
            item.add_value('title', title)
            item.add_value('date', date)
            item.add_value('content', content)
            yield item.load_item()
|
[
"daniel.kanchev@adata.pro"
] |
daniel.kanchev@adata.pro
|
4d4c3f3cfe74ab3c276e571e2d0a0a5b1a44d225
|
0e3a9758175f37e4d702ff6ccd6d2ee2e91f727f
|
/deepiu/util/input_flags.py
|
dc4a03cedbc127ecd0561e778b87fc24b49be5f1
|
[] |
no_license
|
hitfad/hasky
|
94d7248f21a1ec557a838b77987e34b77fb9a0c7
|
c1d2d640643037c62d64890c40de36ba516eb167
|
refs/heads/master
| 2021-01-20T22:55:36.778378
| 2017-08-29T13:23:50
| 2017-08-29T13:23:50
| 101,830,092
| 1
| 0
| null | 2017-08-30T02:48:35
| 2017-08-30T02:48:35
| null |
UTF-8
|
Python
| false
| false
| 5,849
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file input_flags.py
# \author chenghuige
# \date 2016-12-25 00:17:18.268341
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
#--------- read data
flags.DEFINE_integer('batch_size', 32, 'Batch size. default as im2text default')
flags.DEFINE_integer('eval_batch_size', 100, 'Batch size.')
flags.DEFINE_integer('fixed_eval_batch_size', 30, """must >= num_fixed_evaluate_examples
if == real dataset len then fix sequence show
if not == can be show different fixed each time
usefull if you want only show see 2 and
be different each time
if you want see 2 by 2 seq
then num_fixed_evaluate_example = 2
fixed_eval_batch_size = 2
""")
flags.DEFINE_integer('num_fixed_evaluate_examples', 30, '')
flags.DEFINE_integer('num_evaluate_examples', 1, '')
flags.DEFINE_integer('num_threads', 12, """threads for reading input tfrecords,
setting to 1 may be faster but less randomness
""")
flags.DEFINE_boolean('shuffle_files', True, '')
flags.DEFINE_boolean('batch_join', True, '')
flags.DEFINE_boolean('shuffle_batch', True, '')
flags.DEFINE_boolean('shuffle_then_decode', True,
""" actually this is decided by is_sequence_example..
if is_sequence_example then False, if just example not sequence then True since is sparse
TODO remove this
""")
flags.DEFINE_boolean('is_sequence_example', False, '')
flags.DEFINE_string('buckets', '', 'empty meaning not use, other wise looks like 5,10,15,30')
flags.DEFINE_boolean('dynamic_batch_length', True,
"""very important False means all batch same size!
otherwise use dynamic batch size
Now only not sequence_example data will support dyanmic_batch_length=False
Also for cnn you might need to set to False to make all equal length batch used
""")
flags.DEFINE_integer('num_negs', 1, '0 means no neg')
flags.DEFINE_boolean('feed_dict', False, 'depreciated, too complex, just prepare your data at first for simple')
#---------- input dirs
#@TODO will not use input pattern but use dir since hdfs now can not support glob well
flags.DEFINE_string('train_input', '/tmp/train/train_*', 'must provide')
flags.DEFINE_string('valid_input', '', 'if empty will train only')
flags.DEFINE_string('fixed_valid_input', '', 'if empty wil not eval fixed images')
flags.DEFINE_string('num_records_file', '', '')
flags.DEFINE_integer('min_records', 12, '')
flags.DEFINE_integer('num_records', 0, 'if not 0, will check equal')
#---------- input reader
flags.DEFINE_integer('min_after_dequeue', 0, """by deafualt will be 500,
set to large number for production training
for better randomness""")
flags.DEFINE_integer('num_prefetch_batches', 0, '')
#----------eval
flags.DEFINE_boolean('legacy_rnn_decoder', False, '')
flags.DEFINE_boolean('experiment_rnn_decoder', False, '')
flags.DEFINE_boolean('show_eval', True, '')
flags.DEFINE_boolean('eval_shuffle_files', True, '')
flags.DEFINE_boolean('eval_fix_random', True, '')
flags.DEFINE_integer('eval_seed', 1024, '')
flags.DEFINE_integer('seed', 1024, '')
flags.DEFINE_boolean('fix_sequence', False, '')
#----------strategy
flags.DEFINE_string('seg_method', 'default', '')
flags.DEFINE_boolean('feed_single', False, '')
flags.DEFINE_boolean('gen_predict', True, '')
flags.DEFINE_string('decode_name', 'text', '')
flags.DEFINE_string('decode_str_name', 'text_str', '')
#--------for image caption TODO move to image_caption/input.py ?
flags.DEFINE_boolean('pre_calc_image_feature', True, '')
flags.DEFINE_boolean('distort_image', False, '')
flags.DEFINE_string('image_model_name', 'InceptionV3', '')
flags.DEFINE_integer('image_width', 299, 'default width of inception v3')
flags.DEFINE_integer('image_height', 299, 'default height of inception v3')
flags.DEFINE_string('image_checkpoint_file', '/home/gezi/data/inceptionv3/inception_v3.ckpt', '')
#---in melt.apps.image_processing.py
#flags.DEFINE_string('image_model_name', 'InceptionV3', '')
flags.DEFINE_string('one_image', '/home/gezi/data/flickr/flickr30k-images/1000092795.jpg', '')
flags.DEFINE_string('image_feature_name', 'image_feature', '')
#---------negative smapling
flags.DEFINE_boolean('neg_left', False, 'ltext or image')
flags.DEFINE_boolean('neg_right', True, 'rtext or text')
#---------discriminant trainer
flags.DEFINE_string('activation', 'relu',
"""relu/tanh/sigmoid seems sigmoid will not work here not convergent
and relu slightly better than tanh and convrgence speed faster""")
flags.DEFINE_boolean('bias', False, 'wether to use bias. Not using bias can speedup a bit')
flags.DEFINE_boolean('elementwise_predict', False, '')
flags.DEFINE_float('keep_prob', 1., 'or 0.9 0.8 0.5')
flags.DEFINE_float('dropout', 0., 'or 0.9 0.8 0.5')
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
047d839364b362aa6a76bfe9643bcb4b78963590
|
ab1c920583995f372748ff69d38a823edd9a06af
|
/shultais_courses/dictionaries/intro_to_dictionaries/digits_rus_eng.py
|
8c5217d96a9dfe04a252496ac2455eacff1ddcc8
|
[] |
no_license
|
adyadyat/pyprojects
|
5e15f4e33892f9581b8ebe518b82806f0cd019dc
|
c8f79c4249c22eb9e3e19998d5b504153faae31f
|
refs/heads/master
| 2022-11-12T16:59:17.482303
| 2020-07-04T09:08:18
| 2020-07-04T09:08:18
| 265,461,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
import sys
# CLI: python program.py <digit> <lang>   e.g. "python program.py 4 ru".
# Digit to look up (dict keys are ints) and target language code ('ru'/'en').
key = int(sys.argv[1])
value = sys.argv[2]
# Number names keyed by digit, with one nested dict per language.
digits = {
    1: {"ru": "один", "en": "one"},
    2: {"ru": "два", "en": "two"},
    3: {"ru": "три", "en": "three"},
    4: {"ru": "четыре", "en": "four"},
    5: {"ru": "пять", "en": "five"},
    6: {"ru": "шесть", "en": "six"},
    7: {"ru": "семь", "en": "seven"},
    8: {"ru": "восемь", "en": "eight"},
    9: {"ru": "девять", "en": "nine"},
    0: {"ru": "ноль", "en": "zero"}
}
print(digits[key][value])
"""
ЧИСЛА НА РУССКОМ И АНГЛИЙСКОМ
Ниже в редакторе находится словарь digits,
который содержит набор чисел и их названия
на русском и английском языках.
Обратите внимание,
что ключами словаря выступают целые числа (так тоже можно),
а значениями вложенные словари.
Напишите программу,
которая принимает из аргументов командной строки два параметра:
цифру и язык, а затем выводит название цифры на этом языке.
Учитывайте, что если ключ словаря задан числом,
то при доступе по ключу,
в квадратных скобках нужно также указывать число.
Пример использования:
> python program.py 4 ru
> четыре
"""
|
[
"omorbekov.a@gmail.com"
] |
omorbekov.a@gmail.com
|
cf53ef5ed08b07917f1bafebfd98837aa6df5e39
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/site-packages/django/contrib/auth/migrations/0004_alter_user_username_opts.py
|
8f8441f88f5e0f3b2074e39c01c7ef863cb3c28a
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947
| 2020-10-07T15:26:32
| 2020-10-07T15:26:32
| 290,163,043
| 0
| 1
| null | 2020-08-25T09:15:40
| 2020-08-25T08:47:36
|
C#
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:68dd281e8039ab66caa4937c4a723c4fd18db5304accb445a332fceed361f3f2
size 785
|
[
"aqp1234@naver.com"
] |
aqp1234@naver.com
|
4dafd2675375326d00071f92b91080bea9677ef3
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/t001481.py
|
4bca6b1596cd106695153b484bdcabd65c9b8121
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447
| 2023-09-01T09:09:56
| 2023-09-01T09:09:56
| 148,560,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# For each test case: read an integer and print its binary representation
# with the trailing run of 1-bits cleared (numbers ending in 0 are unchanged).
for T in range(int(input())):
    b = list(bin(int(input())).replace("0b", ""))
    if b[-1] == "1":
        # Walk from the least-significant bit, zeroing 1s until the first 0.
        for i in range(len(b) - 1, -1, -1):
            if b[i] == "1":
                b[i] = "0"
            else:
                break
    print("".join(b))
|
[
"feiyanshiren@163.com"
] |
feiyanshiren@163.com
|
6f3f18539c8923851681793d40f4dcb3f50d3d64
|
60d2212eb2e287a0795d58c7f16165fd5315c441
|
/app01/migrations/0001_initial.py
|
3a9dc3818912587831f59c416cdcc28191857ff6
|
[] |
no_license
|
zhouf1234/mysite2
|
29145ceb470273f39fc11dd91945203db7fe0238
|
63747c789d39cf752f2b80509d8e3db9145b3492
|
refs/heads/master
| 2020-05-05T03:11:31.696639
| 2019-04-05T10:41:42
| 2019-04-05T10:41:42
| 179,663,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Generated by Django 2.1.2 on 2018-11-01 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for app01.

    Creates the `User` table: explicit auto-increment primary key plus a
    32-character `name` field.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=32)),
            ],
        ),
    ]
|
[
"="
] |
=
|
6409ffff6a083b3c48d050cf0b0da4cd4e24c754
|
98811c0c149c1873c12322f20345dab1488a1870
|
/nnet/hue/split_data.py
|
421bc33d6fd8d8aa179440fa714ee4c730371b24
|
[] |
no_license
|
mverleg/kaggle_otto
|
682d5f83a070b7e88054401e6fba221d8e1b6227
|
b23beb58a1a0652e9eb98f5db31eae52303b6f85
|
refs/heads/main
| 2021-01-17T08:54:39.096781
| 2016-04-12T09:25:26
| 2016-04-12T09:25:26
| 37,781,556
| 0
| 1
| null | 2016-04-12T09:25:27
| 2015-06-20T18:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
def split_data(data, labels, test_frac=0.1):
    """Split (data, labels) into train and test parts.

    The first ``int(len(labels) * test_frac)`` rows become the test set and
    the remainder the training set; rows are taken in order (no shuffling),
    so shuffle beforehand if needed.

    Returns (train, train_labels, test, test_labels).
    """
    cut = int(len(labels) * test_frac)
    return (
        data[cut:, :],
        labels[cut:],
        data[:cut, :],
        labels[:cut],
    )
|
[
"mark@rafiki"
] |
mark@rafiki
|
c477ff81c9b1feba08d0ef6621a1c2c2e4a1acac
|
b5c5c27d71348937322b77b24fe9e581cdd3a6c4
|
/tests/pyutils/test_is_invalid.py
|
d39c12e2c935eb05fc776988bbe838d3d98d9059
|
[
"MIT"
] |
permissive
|
dfee/graphql-core-next
|
92bc6b4e5a39bd43def8397bbb2d5b924d5436d9
|
1ada7146bd0510171ae931b68f6c77dbdf5d5c63
|
refs/heads/master
| 2020-03-27T10:30:43.486607
| 2018-08-30T20:26:42
| 2018-08-30T20:26:42
| 146,425,198
| 0
| 0
|
MIT
| 2018-08-28T09:40:09
| 2018-08-28T09:40:09
| null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
from math import inf, nan
from graphql.error import INVALID
from graphql.pyutils import is_invalid
def describe_is_invalid():
    """pytest-describe test group for graphql.pyutils.is_invalid.

    Only INVALID and nan count as invalid; None, falsy/truthy values, and
    infinities do not.
    """
    def null_is_not_invalid():
        assert is_invalid(None) is False
    def falsy_objects_are_not_invalid():
        assert is_invalid('') is False
        assert is_invalid(0) is False
        assert is_invalid([]) is False
        assert is_invalid({}) is False
    def truthy_objects_are_not_invalid():
        assert is_invalid('str') is False
        assert is_invalid(1) is False
        assert is_invalid([0]) is False
        assert is_invalid({None: None}) is False
    def inf_is_not_invalid():
        assert is_invalid(inf) is False
        assert is_invalid(-inf) is False
    def undefined_is_invalid():
        assert is_invalid(INVALID) is True
    def nan_is_invalid():
        assert is_invalid(nan) is True
|
[
"cito@online.de"
] |
cito@online.de
|
5628b540ad53bf7290b179cb3f6de1f245706da2
|
bd3528cc321dc37f8c47ac63e57561fd6432c7cc
|
/transformer/tensor2tensor/models/xception.py
|
2452a7d4ff23d06b687e61f5eea6106e13c22930
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
oskopek/cil
|
92bbf52f130a1ed89bbe93b74eef74027bb2b37e
|
4c1fd464b5af52aff7a0509f56e21a2671fb8ce8
|
refs/heads/master
| 2023-04-15T10:23:57.056162
| 2021-01-31T14:51:51
| 2021-01-31T14:51:51
| 139,629,560
| 2
| 5
|
MIT
| 2023-03-24T22:34:39
| 2018-07-03T19:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def residual_block(x, hparams):
  """A stack of convolution blocks with residual connection."""
  # Square kernel from hparams; three identical (dilation, kernel) specs.
  k = (hparams.kernel_height, hparams.kernel_width)
  dilations_and_kernels = [((1, 1), k) for _ in range(3)]
  y = common_layers.subseparable_conv_block(
      x,
      hparams.hidden_size,
      dilations_and_kernels,
      padding="SAME",
      separability=0,
      name="residual_block")
  # Residual add followed by layer norm.
  x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
  # TF1-style dropout: second argument is keep_prob (1 - dropout rate).
  return tf.nn.dropout(x, 1.0 - hparams.dropout)
def xception_internal(inputs, hparams):
  """Xception body: optional entry flow, residual stack, exit flow."""
  with tf.variable_scope("xception"):
    cur = inputs
    # NOTE(review): assumes a static spatial dim at index 1; a None dim
    # would fail this comparison — confirm inputs always have static shape.
    if cur.get_shape().as_list()[1] > 200:
      # Large image, Xception entry flow
      cur = xception_entry(cur, hparams.hidden_size)
    else:
      # Small image, conv
      cur = common_layers.conv_block(
          cur,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True,
          name="small_image_conv")
    # Stack of residual blocks, one variable scope per layer.
    for i in range(hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % i):
        cur = residual_block(cur, hparams)
    return xception_exit(cur)
def xception_entry(inputs, hidden_dim):
  """Xception entry flow: stem convs plus three downsampling resblocks."""
  with tf.variable_scope("xception_entry"):
    def xnet_resblock(x, filters, res_relu, name):
      """Resblock: separable convs + max-pool, added to a strided 1x1 conv."""
      with tf.variable_scope(name):
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
        # Shortcut branch downsamples with a strided 1x1 conv to match.
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 2),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")
    tf.summary.image("inputs", inputs, max_outputs=2)
    # Stem: strided 3x3 conv to 32 channels, then 3x3 conv to 64.
    x = common_layers.conv_block(
        inputs,
        32, [((1, 1), (3, 3))],
        first_relu=False,
        padding="SAME",
        strides=(2, 2),
        force2d=True,
        name="conv0")
    x = common_layers.conv_block(
        x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1")
    # Channel counts are capped at hidden_dim.
    x = xnet_resblock(x, min(128, hidden_dim), True, "block0")
    x = xnet_resblock(x, min(256, hidden_dim), False, "block1")
    return xnet_resblock(x, hidden_dim, False, "block2")
def xception_exit(inputs):
  """Xception exit flow.

  Reshapes `inputs` to a square spatial layout when needed, then applies a
  strided downsampling conv block followed by ReLU.

  Args:
    inputs: 4-D feature tensor [batch, height, width, depth].

  Returns:
    The activated, downsampled tensor.

  Raises:
    ValueError: if static, non-square spatial dims cannot be folded into a
      square.
  """
  with tf.variable_scope("xception_exit"):
    x = inputs
    x_shape = x.get_shape().as_list()
    # BUG FIX: x_depth was previously assigned only inside the dynamic-shape
    # branch, so the static non-square branch raised NameError; hoist it.
    x_depth = x_shape[3]
    if x_shape[1] is None or x_shape[2] is None:
      # Dynamic spatial dims: compute a square side length at runtime.
      length_float = tf.to_float(tf.shape(x)[1])
      length_float *= tf.to_float(tf.shape(x)[2])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    elif x_shape[1] != x_shape[2]:
      # Static but non-square: fold H*W into a square if it is a perfect square.
      spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
      if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
        raise ValueError("Assumed inputs were square-able but they were "
                         "not. Shape: %s" % x_shape)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
    return tf.nn.relu(x)
@registry.register_model
class Xception(t2t_model.T2TModel):
  """T2T model wrapper delegating to the functional Xception body above."""

  def body(self, features):
    return xception_internal(features["inputs"], self._hparams)
@registry.register_hparams
def xception_base():
  """Set of hyperparameters."""
  hparams = common_hparams.basic_params1()
  hparams.batch_size = 128
  hparams.hidden_size = 768
  hparams.dropout = 0.2
  hparams.symbol_dropout = 0.2
  hparams.label_smoothing = 0.1
  hparams.clip_grad_norm = 2.0
  hparams.num_hidden_layers = 8
  hparams.kernel_height = 3
  hparams.kernel_width = 3
  # Exponential learning-rate decay with warmup.
  hparams.learning_rate_decay_scheme = "exp"
  hparams.learning_rate = 0.05
  hparams.learning_rate_warmup_steps = 3000
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 3.0
  hparams.num_sampled_classes = 0
  hparams.sampling_method = "argmax"
  # Adam settings.
  hparams.optimizer_adam_epsilon = 1e-6
  hparams.optimizer_adam_beta1 = 0.85
  hparams.optimizer_adam_beta2 = 0.997
  return hparams
@registry.register_hparams
def xception_tiny():
  """Tiny variant of xception_base for quick tests/debugging."""
  hparams = xception_base()
  hparams.batch_size = 2
  hparams.hidden_size = 64
  hparams.num_hidden_layers = 2
  hparams.learning_rate_decay_scheme = "none"
  return hparams
@registry.register_hparams
def xception_tiny_tpu():
  """Tiny variant tuned for TPU (TrueAdam optimizer)."""
  hparams = xception_base()
  hparams.batch_size = 2
  hparams.num_hidden_layers = 2
  hparams.hidden_size = 128
  hparams.optimizer = "TrueAdam"
  return hparams
|
[
"lukas.jendele@gmail.com"
] |
lukas.jendele@gmail.com
|
2f816bb890383cc7f178bf5be4d2290e2fbdfa61
|
4e81512b34223788559ea1c84acb2ef0aa4d899d
|
/booktracker/settings.py
|
6831cbc70ea526b979e29eb6fc1e105511cae832
|
[] |
no_license
|
arsummers/book-tracker-django
|
75a2e559c5dd05be67287a40514533a699889368
|
012fa821288ee99f45665e017bc8b7ab4db54a1f
|
refs/heads/master
| 2022-12-11T16:28:27.393199
| 2019-10-15T21:28:37
| 2019-10-15T21:28:37
| 209,672,800
| 0
| 0
| null | 2022-12-08T06:38:29
| 2019-09-20T00:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,428
|
py
|
"""
Django settings for booktracker project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control; load it from an env var
# before deploying.
SECRET_KEY = 'v9j%)jsi$x1sp8oqfgln@m0a^1*0%z&4defyjpd#0ld@=^5vdx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'books.apps.BooksConfig',
]
# NOTE(review): WhiteNoise is usually placed directly AFTER
# SecurityMiddleware, not before it — verify the ordering.
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'booktracker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'booktracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Applies Heroku-specific overrides (database, static files) on deploy.
django_heroku.settings(locals())
|
[
"aliyasummers1@gmail.com"
] |
aliyasummers1@gmail.com
|
5748492e1ac68fbb9456a149c63bf5d73cb70cb7
|
1edb8304c6429729ffc2bab8a13f4123e19d2b32
|
/azure-export/settings.py
|
c35f8f6b2df73b7a2759d73c4be11b093fe95853
|
[] |
no_license
|
watchdogpolska/docker-images
|
d8292fc03df806f5be3a976cf87272f2d46e0b13
|
7a569e1d0cef4a4f57517daeac0456a59a25d021
|
refs/heads/master
| 2021-09-22T00:26:11.317526
| 2021-08-14T02:40:43
| 2021-08-14T02:41:33
| 157,301,522
| 0
| 4
| null | 2021-07-15T23:54:21
| 2018-11-13T01:26:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
import os
import dataset
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.resource import ResourceManagementClient
from msrestazure.azure_active_directory import ServicePrincipalCredentials
# $ az ad sp create-for-rbac --name "MY-PRINCIPAL-NAME2" --password "XXX" --verbose
# $ az role assignment create --assignee {app_id} --role Reader
# Service-principal identifiers for the Azure management plane.
# NOTE(review): the tenant/app/subscription fallbacks below look like real
# identifiers committed to source; they should live in configuration only.
tenant_id = os.environ.get('AZURE_TENANT_ID', '7dbd59e5-e4d9-499b-b5cb-005289cc158a')
app_id = os.environ.get('AZURE_APP_ID', 'bfeb6f69-5a18-4d0c-a669-2e7eb3798fdd')
# No fallback for the secret: a missing env var fails fast with KeyError.
password = os.environ['AZURE_APP_PASSWORD']
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', 'efeb9457-bf38-460f-a1e5-bb5ecc817987')
credentials = ServicePrincipalCredentials(
    client_id=app_id,
    secret=password,
    tenant=tenant_id
)
# Both management clients share the same credentials and subscription.
storage_client = StorageManagementClient(
    credentials,
    subscription_id
)
resource_client = ResourceManagementClient(
    credentials,
    subscription_id
)
# Export target; falls back to a throwaway in-memory SQLite DB when unset.
db = dataset.connect(os.environ.get('DATABASE_URL', 'sqlite:///:memory:'))
|
[
"naczelnik@jawnosc.tk"
] |
naczelnik@jawnosc.tk
|
410f343e06b5a2e46e0ac58189f5fc2337669859
|
15a992391375efd487b6442daf4e9dd963167379
|
/monai/networks/nets/__init__.py
|
cd9329f61baf93158a6a3aa20992150937c07ed3
|
[
"Apache-2.0"
] |
permissive
|
Bala93/MONAI
|
b0e68e1b513adcd20eab5158d4a0e5c56347a2cd
|
e0a7eff5066da307a73df9145077f6f1fec7a514
|
refs/heads/master
| 2022-08-22T18:01:25.892982
| 2022-08-12T18:13:53
| 2022-08-12T18:13:53
| 259,398,958
| 2
| 0
| null | 2020-04-27T17:09:12
| 2020-04-27T17:09:11
| null |
UTF-8
|
Python
| false
| false
| 2,805
|
py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ahnet import AHnet, Ahnet, AHNet
from .attentionunet import AttentionUnet
from .autoencoder import AutoEncoder
from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet
from .classifier import Classifier, Critic, Discriminator
from .densenet import (
DenseNet,
Densenet,
DenseNet121,
Densenet121,
DenseNet169,
Densenet169,
DenseNet201,
Densenet201,
DenseNet264,
Densenet264,
densenet121,
densenet169,
densenet201,
densenet264,
)
from .dints import DiNTS, TopologyConstruction, TopologyInstance, TopologySearch
from .dynunet import DynUNet, DynUnet, Dynunet
from .efficientnet import (
BlockArgs,
EfficientNet,
EfficientNetBN,
EfficientNetBNFeatures,
drop_connect,
get_efficientnet_image_size,
)
from .flexible_unet import FlexibleUNet
from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet
from .generator import Generator
from .highresnet import HighResBlock, HighResNet
from .hovernet import Hovernet, HoVernet, HoVerNet, HoverNet
from .milmodel import MILModel
from .netadapter import NetAdapter
from .regressor import Regressor
from .regunet import GlobalNet, LocalNet, RegUNet
from .resnet import ResNet, resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200
from .segresnet import SegResNet, SegResNetVAE
from .senet import (
SENet,
SEnet,
Senet,
SENet154,
SEnet154,
Senet154,
SEResNet50,
SEresnet50,
Seresnet50,
SEResNet101,
SEresnet101,
Seresnet101,
SEResNet152,
SEresnet152,
Seresnet152,
SEResNext50,
SEResNeXt50,
SEresnext50,
Seresnext50,
SEResNext101,
SEResNeXt101,
SEresnext101,
Seresnext101,
senet154,
seresnet50,
seresnet101,
seresnet152,
seresnext50,
seresnext101,
)
from .swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR
from .torchvision_fc import TorchVisionFCModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
from .unet import UNet, Unet
from .unetr import UNETR
from .varautoencoder import VarAutoEncoder
from .vit import ViT
from .vitautoenc import ViTAutoEnc
from .vnet import VNet
|
[
"noreply@github.com"
] |
Bala93.noreply@github.com
|
2b3c16897a3b35cc9e66306da93eacb32c23e5ef
|
0d5de943909877c01b485d8a918d8bef0cf9e196
|
/plugins/CompleteLikeEclipse/scribes/edit/complete_like_eclipse/__init__.py
|
1bf51577114ffc6ae996e45832814ec68d606743
|
[
"MIT"
] |
permissive
|
baverman/scribes-goodies
|
31e2017d81f04cc01e9738e96ceb19f872a3d280
|
f6ebfe62e5103d5337929648109b4e610950bced
|
refs/heads/master
| 2021-01-21T10:13:08.397980
| 2013-09-25T16:33:05
| 2013-09-25T16:33:05
| 854,207
| 2
| 1
| null | 2013-09-25T16:33:05
| 2010-08-22T03:12:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,949
|
py
|
from gettext import gettext as _
from string import whitespace
from scribes.helpers import TriggerManager, Trigger, connect_external, connect_all
from signals import Signals
from IndexerProcessManager import Manager as IndexerProcessManager
from DictionaryManager import Manager as DictionaryManager
from ProcessCommunicator import Communicator as ProcessCommunicator
from TextExtractor import Extractor as TextExtractor
from BufferMonitor import Monitor as BufferMonitor
# Editor trigger bound to Alt+/ that invokes Plugin.cycle below.
# NOTE(review): "completition" is a typo, but it is a user-visible runtime
# string (shown in the editor UI), so it is left unchanged here.
trigger = Trigger('complete-word', '<alt>slash',
    'Eclipse like word completition', 'Text Operations')
class Plugin(object):
    """Eclipse-style word completion for the Scribes editor.

    Pressing the trigger repeatedly cycles through words (collected from the
    buffer by the indexer subsystem) that share the prefix under the cursor.
    """

    def __init__(self, editor):
        self.editor = editor
        self.signals = Signals()
        self.triggers = TriggerManager(editor)
        # Wires the @trigger / @Signals / @connect_external decorated methods.
        connect_all(self, self.signals, self.triggers, textbuffer=self.editor.textbuffer)
        self.block_word_reset = False
        # words: mapping of word -> occurrence count, pushed by the indexer.
        self.words = None
        # start_word/start_offset remember where a completion cycle began.
        self.start_word = None
        self.start_offset = None
        self.indexer = IndexerProcessManager(self.signals.sender, editor)
        self.dictionary_manager = DictionaryManager(self.signals.sender, editor)
        self.communicator = ProcessCommunicator(self.signals.sender, editor)
        self.extractor = TextExtractor(self.signals.sender, editor)
        self.buffer_monitor = BufferMonitor(self.signals.sender, editor)

    def unload(self):
        """Tear down the plugin by broadcasting the destroy signal."""
        self.signals.destroy.emit()
        return False

    def is_valid_character(self, c):
        """Return True if *c* can be part of a completable word."""
        if c in whitespace:
            return False
        return c.isalpha() or c.isdigit() or (c in ("-", "_"))

    def backward_to_word_begin(self, iterator):
        """Move *iterator* back to the first character of the current word."""
        if iterator.starts_line(): return iterator
        iterator.backward_char()
        while self.is_valid_character(iterator.get_char()):
            iterator.backward_char()
            if iterator.starts_line(): return iterator
        # Overshot by one: step forward onto the word's first character.
        iterator.forward_char()
        return iterator

    def forward_to_word_end(self, iterator):
        """Move *iterator* just past the last character of the current word."""
        if iterator.ends_line(): return iterator
        if not self.is_valid_character(iterator.get_char()): return iterator
        while self.is_valid_character(iterator.get_char()):
            iterator.forward_char()
            if iterator.ends_line(): return iterator
        return iterator

    def get_word_before_cursor(self):
        """Return (word, start_iter) for the word ending at the cursor.

        Returns (None, None) when the cursor is not directly after a word.
        """
        iterator = self.editor.cursor.copy()
        # If the cursor is in front of a valid character we ignore
        # word completion.
        if self.is_valid_character(iterator.get_char()):
            return None, None
        if iterator.starts_line():
            return None, None
        iterator.backward_char()
        if not self.is_valid_character(iterator.get_char()):
            return None, None
        start = self.backward_to_word_begin(iterator.copy())
        end = self.forward_to_word_end(iterator.copy())
        word = self.editor.textbuffer.get_text(start, end).strip()
        return word, start

    def get_matches(self, string):
        """Return indexed words starting with *string*, most frequent first."""
        if not self.words:
            return None
        result = []
        # Python 2 dict iteration (this plugin targets a Py2 runtime).
        for word, count in self.words.iteritems():
            if word != string and word.startswith(string):
                result.append((word.encode('utf8'), count))
        result.sort(key=lambda r: r[1], reverse=True)
        return [r[0] for r in result]

    @trigger
    def cycle(self, *args):
        """Replace the word before the cursor with the next candidate match."""
        word_to_complete, start = self.get_word_before_cursor()
        if not word_to_complete:
            return False
        # A new cycle starts when the position moved since the last trigger.
        if not self.start_word or self.start_offset != start.get_offset():
            self.start_word = word_to_complete
            self.start_offset = start.get_offset()
        matches = self.get_matches(self.start_word)
        if matches:
            idx = 0
            try:
                idx = matches.index(word_to_complete)
                idx = (idx + 1) % len(matches)
            except ValueError:
                pass
            if matches[idx] == word_to_complete:
                self.editor.update_message(_("Word completed already"), "yes", 1)
                return False
            # Block our own 'changed' handler while we edit the buffer.
            # NOTE(review): buffer_changed_handler is not assigned anywhere in
            # this class; presumably created by connect_all/connect_external —
            # verify against those helpers.
            self.buffer_changed_handler.block()
            end = self.editor.cursor.copy()
            self.editor.textbuffer.delete(start, end)
            self.editor.textbuffer.insert(start, matches[idx])
            self.editor.response()
            self.buffer_changed_handler.unblock()
        else:
            self.editor.update_message(_("No word to complete"), "no", 1)
        return False

    @Signals.dictionary
    def word_list_updated(self, sender, words):
        """Receive the refreshed word-frequency dictionary from the indexer."""
        self.words = words
        return False

    @connect_external('textbuffer', 'changed')
    def buffer_changed(self, *args):
        """Reset the completion cycle after any buffer edit."""
        self.start_word = None
        # NOTE(review): this writes start_iter, but the attribute read by
        # cycle() is start_offset — looks like a typo; confirm intent.
        self.start_iter = None
        return False
|
[
"bobrov@vl.ru"
] |
bobrov@vl.ru
|
d8e032b3398ca8b4d5089d70996f8278fc086e9d
|
123cf58c5dc4800d5d50fd2934cc63be1080e093
|
/models/string_cluster_model/encoder_network.py
|
33989eff917210d26d9f229c5dc93a45db8912b7
|
[] |
no_license
|
nitishgupta/char-encode-decode
|
dd303a9aa77a3af9000e275bcb86abb18d0b7d84
|
eb4bbb8be701c3cbb4476a779094c45458a1daef
|
refs/heads/master
| 2021-04-30T23:25:49.895472
| 2017-09-03T06:37:55
| 2017-09-03T06:37:55
| 66,794,519
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
import time
import tensorflow as tf
import numpy as np
from models.base import Model
class EncoderModel(Model):
    """Unsupervised Clustering using Discrete-State VAE.

    Builds a multi-layer LSTM encoder (TensorFlow 1.x graph-mode API) over
    embedded character sequences and exposes the last relevant output per
    sequence as `self.encoder_last_output`.
    """

    def __init__(self, num_layers, batch_size, h_dim, input_batch, input_lengths,
                 char_embeddings, scope_name, dropout_keep_prob=1.0):
        self.num_layers = num_layers  # Num of layers in the encoder and decoder network
        # Size of hidden layers in the encoder and decoder networks. This will also
        # be the dimensionality in which each string is represented when encoding
        self.h_dim = h_dim
        self.batch_size = batch_size
        with tf.variable_scope(scope_name) as scope:
            encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(h_dim, state_is_tuple=True)
            # Dropout is applied to the inputs only (output_keep_prob=1.0).
            encoder_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
                cell=encoder_cell,
                input_keep_prob=dropout_keep_prob,
                output_keep_prob=1.0)
            self.encoder_network = tf.nn.rnn_cell.MultiRNNCell(
                [encoder_dropout_cell] * self.num_layers, state_is_tuple=True)
            # [batch_size, decoder_max_length, embed_dim]
            self.embedded_encoder_sequences = tf.nn.embedding_lookup(char_embeddings,
                                                                     input_batch)
            self.encoder_outputs, self.encoder_states = tf.nn.dynamic_rnn(
                cell=self.encoder_network, inputs=self.embedded_encoder_sequences,
                sequence_length=input_lengths, dtype=tf.float32)
            # To get the last output of the encoder_network: reverse each
            # sequence (respecting its true length) so the last valid step
            # becomes position 0, then slice that first position out.
            reverse_output = tf.reverse_sequence(input=self.encoder_outputs,
                                                 seq_lengths=tf.to_int64(input_lengths),
                                                 seq_dim=1,
                                                 batch_dim=0)
            en_last_output = tf.slice(input_=reverse_output,
                                      begin=[0, 0, 0],
                                      size=[self.batch_size, 1, -1])
            # [batch_size, h_dim]
            self.encoder_last_output = tf.reshape(en_last_output,
                                                  shape=[self.batch_size, -1],
                                                  name="encoder_last_output")
|
[
"gnnitish@gmail.com"
] |
gnnitish@gmail.com
|
5a4102380ceda801c33ba27df61c91998ba24ab0
|
c4943748c504f26e197ce391c747bb5a4c146be2
|
/trade_data_get/future_daily_point_data.py
|
242777b92ad8080f11fc2e523a24c024c3dba7a1
|
[] |
no_license
|
NewLanded/security_data_store
|
88919c233d6bd22b20d0d9918c8e2ffcafc33c3e
|
d23c68777e6ecb0641cb5c6f7061b1c11d208886
|
refs/heads/master
| 2021-07-21T12:55:47.650454
| 2021-06-30T07:32:00
| 2021-06-30T07:32:00
| 133,665,767
| 1
| 0
| null | 2018-05-16T13:03:35
| 2018-05-16T12:56:05
| null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
import datetime
import time
import tushare as ts
from conf import PRO_KEY
from util_base.date_util import convert_datetime_to_str, convert_str_to_datetime, get_date_range
from util_base.db_util import engine
from util_base.db_util import store_failed_message
from util_data.date import Date
ts.set_token(PRO_KEY)
pro = ts.pro_api()
def get_all_future_daily_point_data(data_date_str):
    """Fetch one trading day's futures daily bars from the tushare pro API.

    Sleeps two seconds before and after the request to stay under the
    API's rate limit.
    """
    time.sleep(2)
    daily_points = pro.fut_daily(trade_date=data_date_str)
    time.sleep(2)
    return daily_points
def store_future_daily_point_data(future_daily_point_data):
    """Append one day's futures bars (a pandas DataFrame) to the warehouse."""
    # Stamp each row with the load time so reruns can be distinguished.
    future_daily_point_data["update_date"] = datetime.datetime.now()
    # trade_date is presumably a date string from tushare; the project helper
    # converts it to a datetime — TODO confirm the expected format.
    future_daily_point_data["trade_date"] = future_daily_point_data["trade_date"].apply(convert_str_to_datetime)
    # Append-only load; duplicates from reruns are not deduplicated here.
    future_daily_point_data.to_sql("future_daily_point_data", engine, index=False, if_exists="append")
def start(date_now=None):
    """Load one day's futures daily data; defaults to today.

    Non-workdays are skipped. Failures are recorded via
    store_failed_message instead of propagating.
    """
    if date_now is None:
        date_now = datetime.datetime.now()
    # Truncate to midnight so the workday lookup keys on the date alone.
    date_now = datetime.datetime(date_now.year, date_now.month, date_now.day)
    if not Date().is_workday(date_now):
        return
    try:
        daily_points = get_all_future_daily_point_data(convert_datetime_to_str(date_now))
        store_future_daily_point_data(daily_points)
    except Exception as e:
        store_failed_message("", "future_daily_point_data", str(e), date_now)
if __name__ == "__main__":
    pass
    # Backfill: iterate every calendar day in the range; start() itself
    # skips non-workdays, so no filtering is needed here.
    for date_now in get_date_range(datetime.datetime(2015, 1, 1), datetime.datetime(2021, 6, 18)):
        print(date_now)
        start(date_now)
    # start(datetime.datetime(2020, 5, 19))
    # all_future_daily_point_data = pro.daily(trade_date="20181008")
    pass
|
[
"l1141041@163.com"
] |
l1141041@163.com
|
658188357a420a967626a633ab73119b6a6a95f5
|
f89b26d9c53b1d5cc6b14d7f20c57772c98fb53d
|
/plus minus.py
|
3620c88e652db9cf30d344d0e8462e9fc3708813
|
[] |
no_license
|
Mityun/Analitic_of_my_book
|
9be73824b0d218f87619e938ef0b0ceeb57e1310
|
dd9842925205b3ec55179ae00df798031dcf8c26
|
refs/heads/main
| 2023-08-14T10:41:33.105877
| 2021-10-10T07:32:23
| 2021-10-10T07:32:23
| 326,292,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
def calculate(q, w, e):
    """Apply binary operator *e* ('+', '-', '*', '/') to operands *q*, *w*.

    Mirrors the original script's contract exactly: returns the arithmetic
    result for a known operator with a non-zero second operand, and the
    sentinel 888888 for an unknown operator or when w == 0 (the original
    rejected w == 0 for every operator, not only division).
    """
    if e == "-" and w != 0:
        return q - w
    elif e == "+" and w != 0:
        return q + w
    elif e == "*" and w != 0:
        return q * w
    elif e == "/" and w != 0:
        return q / w
    # Unknown operator, or w == 0: the script's error sentinel.
    return 888888


if __name__ == "__main__":
    # Guarding the I/O lets the module be imported without blocking on stdin.
    q = float(input())
    w = float(input())
    e = input()
    print(calculate(q, w, e))
|
[
"you@example.com"
] |
you@example.com
|
c74942de61e4a32ff2a0a0be62da3f16bf3c27a3
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/BuildLinks1.10/test_input/CJ_16_2/16_2_1_anthrocoder_digits.py
|
595c608a01ecda0f5fcd93bfb768e0ff0aab1314
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 944
|
py
|
import sys


def decode_phone_number(note):
    """Reconstruct the digit string from a jumbled spelling of digit words.

    Each digit's English word has a letter that pins down its count:
    Z->0, W->2, U->4, X->6, G->8 occur in exactly one word; the remaining
    digits are recovered by subtracting the counts of already-resolved
    words' shared letters. Returns the digits in ascending order.
    """
    zeros = note.count("Z")
    twos = note.count("W")
    fours = note.count("U")
    sixes = note.count("X")
    eights = note.count("G")
    # 'O' appears in ONE, TWO, FOUR and ZERO.
    ones = note.count("O") - twos - fours - zeros
    # 'H' appears in THREE and EIGHT.
    threes = note.count("H") - eights
    # 'F' appears in FIVE and FOUR.
    fives = note.count("F") - fours
    # 'V' appears in SEVEN and FIVE.
    sevens = note.count("V") - fives
    # 'I' appears in NINE, FIVE, SIX and EIGHT.
    nines = note.count("I") - fives - sixes - eights
    return (("0" * zeros) + ("1" * ones) + ("2" * twos) + ("3" * threes)
            + ("4" * fours) + ("5" * fives) + ("6" * sixes)
            + ("7" * sevens) + ("8" * eights) + ("9" * nines))


if __name__ == "__main__":
    # Guarded so importing the module does not touch the filesystem.
    try:
        f = open(sys.argv[1])
        out = open(sys.argv[1].rpartition("\\")[2] + ".out", 'w')
        numTests = int(f.readline())
        for i in range(0, numTests):
            note = f.readline()
            out.write("Case #" + str(i + 1) + ": " + decode_phone_number(note) + "\n")
    except IOError as e:
        # Bug fix: the original printed the undefined name 'err', which
        # raised NameError instead of reporting the I/O problem.
        print('Error:', e)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
189234dba477920e20978a90104fe63bbe85f33a
|
ccce57307a499b49b14c8b16706166b08df1c5c1
|
/database.py
|
e454dfecf3c38702f9373d274b585f469e9ff64e
|
[
"MIT"
] |
permissive
|
simrit1/CubeTimer
|
6ea1ca4549865317c947a3a91d3a57f1786f198c
|
b226ae875cde35fb573c618d70a408421e0e9f07
|
refs/heads/master
| 2023-07-01T20:38:20.983300
| 2021-07-18T02:04:33
| 2021-07-18T02:04:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,306
|
py
|
import sqlite3
from CubeUtilities import Time, MultiPhaseTime
class Database:
    """Thin sqlite3 wrapper for the cube timer's 'times' or 'settings' table.

    Each instance owns one connection to *db_dir* and operates on exactly one
    of the two known tables, selected by *table_name*.
    """

    def __init__(self, table_name, db_dir):
        self.database_path = db_dir
        self.table_name = table_name
        # Set to True once close_connection() has been called.
        self.closed = False
        try:
            self.conn = sqlite3.connect(self.database_path)
        except sqlite3.Error:
            raise Exception(f"'{self.database_path}' doesn't exist.")
        self.cursor = self.conn.cursor()
        # Ensure the selected table exists before any other operation.
        self.create_table()

    def create_table(self):
        """
        Attempts to create the table; returns False if it already exists
        (sqlite3.OperationalError) and raises ValueError for an unknown
        table name.
        :returns: bool
        """
        with self.conn:
            if self.table_name == "times":
                try:
                    self.cursor.execute("""CREATE TABLE times ( time float, scramble text, date text, DNF integer, multiphase text )""")
                except sqlite3.OperationalError:
                    return False
            elif self.table_name == "settings":
                try:
                    self.cursor.execute("""CREATE TABLE settings ( inspection integer, display_time integer, scramble_len integer, multiphase integer, puzzle_type text )""")
                except sqlite3.OperationalError:
                    return False
            else:
                raise ValueError(f"Invalid table name, couldn't create table with name '{self.table_name}'")
        return True

    def insert_record(self, record):
        """
        Adds a new record to the database.

        NOTE(review): despite the ':returns: bool' below, every path returns
        None; callers should not rely on the return value.
        :param record: Time, MultiPhaseTime, dict
        :returns: bool
        """
        if self.table_name == "settings":
            with self.conn:
                self.cursor.execute("INSERT INTO settings VALUES (:inspection, :display_time, :scramble_len, :multiphase, :puzzle_type)", record)
        elif self.table_name == "times" and isinstance(record, MultiPhaseTime):
            # Phase times are flattened to a comma-separated string column.
            with self.conn:
                times = record.get_times()
                for index in range(len(times)):
                    times[index] = str(times[index])
                times = ", ".join(times)
                with self.conn:
                    self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)", (record.time, record.scramble, record.date, int(record.DNF), times))
        elif self.table_name == "times" and isinstance(record, Time):
            # NOTE(review): leftover debug print.
            print ("saving")
            with self.conn:
                self.cursor.execute("INSERT INTO times VALUES (?, ?, ?, ?, ?)",
                                    (record.time, record.scramble, record.date, int(record.DNF), ""))

    def delete_record(self, oid=None):
        """
        Deletes the record with the oid provided, if oid is None, and the table name is settings
        then all records in the database are deleted.
        :param oid: int, None
        :param bool
        """
        if self.table_name == "settings":
            self.delete_all_records()
            return True
        elif self.table_name == "times" and oid is not None:
            with self.conn:
                self.cursor.execute("DELETE FROM times WHERE oid = :oid",
                                    {"oid": oid})
            # Reclaim file space after the delete.
            self.cursor.execute("VACUUM")
            return True
        return False

    def update_record(self, record_attr, new_value, identifier):
        """
        Updates a record in the database with the attribute record_attr, to new_value.
        Identifier can be an oid, or a dictionary with a seperate record attribute along with it's known value
        :param record_attr: str
        :param new_value: str, int
        :param identifier: int, dict
        :returns: bool
        """
        if self.table_name == "times":
            with self.conn:
                try:
                    self.cursor.execute(f"UPDATE times SET {record_attr}=:new_value WHERE oid=:oid", {"oid": identifier, "new_value": str(new_value)})
                except sqlite3.Error as e:
                    return False
            return True
        elif self.table_name == "settings":
            with self.conn:
                try:
                    # identifier maps a known column name to its known value.
                    known_attr, known_val = list(identifier.keys())[0], identifier.get(list(identifier.keys())[0])
                    try:
                        known_val = int(known_val)
                    except ValueError:
                        pass
                    self.cursor.execute(f"UPDATE settings SET {record_attr}=:new_value WHERE {known_attr}=:known_val",
                                        {"new_value": str(new_value), "known_val": known_val})
                except sqlite3.Error:
                    return False
                except (AttributeError, TypeError):
                    raise Exception("identifier argument must be a dictionary with a key of a seperate record attribute, and it's value is the record attributes known value. Ex: identifier={'puzzle_type': '3x3'}")
            return True
        return False

    def get_record(self, oid=None):
        """
        Gets the record with the specified oid, if no oid is specified,
        then all records are returned
        :param oid: int, None
        :return: list[record_tuple]
        """
        if self.table_name == "settings":
            return self.get_all_records()
        self.cursor.execute("SELECT * FROM times WHERE oid=:oid", {"oid": oid})
        return self.cursor.fetchall()

    def get_all_records(self):
        """
        Gets every record in the database; returns [] on any sqlite error.
        :returns: list[record_tuple]
        """
        with self.conn:
            try:
                self.cursor.execute(f"SELECT * FROM {self.table_name}")
            except sqlite3.Error:
                return []
            return self.cursor.fetchall()

    def delete_all_records(self):
        """
        Deletes every record in the database, then recreates the empty table.
        :returns: bool
        """
        with self.conn:
            try:
                self.cursor.execute(f"DELETE FROM {self.table_name}")
                self.create_table()
            except sqlite3.Error:
                return False
            else:
                self.cursor.execute("VACUUM")
                return True

    def close_connection(self):
        """
        Closes the conection to the database:
        :returns: None
        """
        self.conn.close()
        self.closed = True
|
[
"noreply@github.com"
] |
simrit1.noreply@github.com
|
3a9baf4f9122069e89d3d3e9c447adba687d8866
|
7942342d457276bb266228d0236af647b3d55477
|
/django/contrib/gis/gdal/geomtype.pyi
|
4d825dbc2a0344758cb103a9b71335753e67e32a
|
[
"MIT"
] |
permissive
|
AsymmetricVentures/mypy-django
|
847c4e521ce4dec9a10a1574f9c32b234dafd00b
|
f6e489f5cf5672ecede323132665ccc6306f50b8
|
refs/heads/master
| 2020-06-30T01:53:44.434394
| 2016-12-22T22:45:50
| 2016-12-22T22:45:50
| 74,397,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
pyi
|
# Stubs for django.contrib.gis.gdal.geomtype (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
# Type stub only (.pyi): declares the interface of OGRGeomType, no runtime logic.
class OGRGeomType:
    wkb25bit = ... # type: int
    num = ... # type: Any
    def __init__(self, type_input) -> None: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    @property
    def name(self): ...
    @property
    def django(self): ...
    def to_multi(self): ...
|
[
"reames@asymmetricventures.com"
] |
reames@asymmetricventures.com
|
5f20947d37c40b225caf658aa24de35a3409eda0
|
1e9ad304868c2bda918c19eba3d7b122bac3923b
|
/kubernetes/client/models/v1_scale_spec.py
|
4cbe43889993ed0f39cd92d9f358c3267a860626
|
[
"Apache-2.0"
] |
permissive
|
pineking/client-python
|
c77e5bd3d476ac852e6dffa96056008baa0f597f
|
74a64d7325518f4298600d4bb300f92843c29347
|
refs/heads/master
| 2021-01-22T22:16:27.368406
| 2017-03-15T08:21:21
| 2017-03-15T08:21:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, replicas=None):
        """
        V1ScaleSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'replicas': 'int'
        }

        self.attribute_map = {
            'replicas': 'replicas'
        }

        self._replicas = replicas

    @property
    def replicas(self):
        """
        Gets the replicas of this V1ScaleSpec.
        desired number of instances for the scaled object.

        :return: The replicas of this V1ScaleSpec.
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1ScaleSpec.
        desired number of instances for the scaled object.

        :param replicas: The replicas of this V1ScaleSpec.
        :type: int
        """
        self._replicas = replicas

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Plain Python 3 dict iteration; drops the six.iteritems dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: guard the type first — the original unconditionally read
        # other.__dict__ and raised AttributeError when compared against
        # objects without one (e.g. ints, None).
        if not isinstance(other, V1ScaleSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
b3d3277c535eaa6f706a071f5b547c8b412419d8
|
c1d03f41b6c80ef1e0a42b1bb710ba90d680e4c2
|
/tests/unit/test_xmlgen.py
|
f4224d6cded6be0fe94660d2a9f52f5f3283b56e
|
[
"BSD-3-Clause"
] |
permissive
|
boxingbeetle/softfab
|
4f96fc389dec5cd3dc987a427c2f491a19cbbef4
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
refs/heads/master
| 2021-06-22T15:42:38.857018
| 2020-11-23T22:53:21
| 2020-11-23T22:53:21
| 169,245,088
| 20
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,294
|
py
|
# SPDX-License-Identifier: BSD-3-Clause
"""Test XML generation module."""
from pytest import raises
from softfab.xmlgen import parseHTML, xhtml
# Test text inside the <script> XHTML element:
def testScriptNoEscape():
    """Check that no escaping is performed when it is not necessary."""
    # '>' is harmless in script content, so the text passes through verbatim.
    text = 'if (a > b) return c[3];'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">{text}</script>'
    )
def testScriptCDATA():
    """Check that a CDATA block is used when necessary."""
    # '<' must not appear raw in XML element content.
    text = 'if (a < b) return c[3];'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</script>'
    )
    # '&' likewise forces CDATA wrapping.
    text = 'if (a = b) return c & 3;'
    assert xhtml.script[text].flattenXML() == (
        f'<script xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</script>'
    )
def testScriptCDATAEnd():
    """Check that a CDATA block is not closed too early."""
    text = 'var f = x[y[i]]>0 && z<0;'
    #                    ^^^-- CDATA end marker
    # The ']]>' inside the text must be escaped ('\\>' in the output) so it
    # cannot terminate the generated CDATA section prematurely.
    assert xhtml.script[text].flattenXML() == (
        '<script xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/var f = x[y[i]]\\>0 && z<0;/*]]>*/'
        '</script>'
    )
def testScriptTagEnd():
    """Check that a <script> tag is not closed too early."""
    # A literal '</script>' inside the content must be escaped so an HTML
    # parser does not treat it as the end of the element.
    text = 'var s = "</script>";'
    assert xhtml.script[text].flattenXML() == (
        '<script xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/var s = "<\\/script>";/*]]>*/'
        '</script>'
    )
# Test text inside the <style> XHTML element.
# Since <script> is handled in the same way, we test fewer scenarios here.
def testStyleNoEscape():
    """Check that no escaping is performed when it is not necessary."""
    # '>' and '#' are fine in CSS; the text should round-trip unchanged.
    text = '.nav > a[href] { color: #FFC000 }'
    assert xhtml.style[text].flattenXML() == (
        f'<style xmlns="http://www.w3.org/1999/xhtml">{text}</style>'
    )
def testStyleCDATA():
    """Check that a CDATA block is used when necessary."""
    # The '&' in the CSS comment forces CDATA wrapping.
    text = 'book.c /* K&R */'
    assert xhtml.style[text].flattenXML() == (
        f'<style xmlns="http://www.w3.org/1999/xhtml">'
        f'/*<![CDATA[*/{text}/*]]>*/'
        f'</style>'
    )
def testStyleTagEnd():
    """Check that a <style> tag is not closed too early."""
    text = '@import url(more.css); /* </StyLe */'
    # HTML tags are case-insensitive: ^^^^^
    # so even the mixed-case '</StyLe' must be escaped in the output.
    assert xhtml.style[text].flattenXML() == (
        '<style xmlns="http://www.w3.org/1999/xhtml">'
        '/*<![CDATA[*/@import url(more.css); /* <\\/StyLe *//*]]>*/'
        '</style>'
    )
# Test parsing of HTML fragments:
def testBasic():
    """Check whether basic functionality works."""
    # parseHTML should attach the XHTML namespace to the parsed element.
    parsed = parseHTML('<h1>Hello!</h1>')
    assert parsed.flattenXML() == (
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
    )
def testMultiTopLevel():
    """Check whether we can handle multiple top-level tags."""
    # Each top-level element gets its own namespace declaration.
    parsed = parseHTML('<h1>Hello!</h1><h1>Goodbye!</h1>')
    assert parsed.flattenXML() == (
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Hello!</h1>'
        '<h1 xmlns="http://www.w3.org/1999/xhtml">Goodbye!</h1>'
    )
def testNested():
    """Check handling of nested content."""
    # Nested tags inherit the namespace and need no repeated declaration.
    parsed = parseHTML('<p>Text with <i>nested</i> tags.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'Text with <i>nested</i> tags.'
        '</p>'
    )
def testVoid():
    """Check handling of void elements."""
    # <br/> must stay self-closing in the XHTML serialization.
    parsed = parseHTML('<p>Text with<br/>a void element.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'Text with<br/>a void element.'
        '</p>'
    )
def testIgnorePI():
    """Check parsing of processing instruction with no handlers."""
    # Without a piHandler, the PI is silently dropped from the output.
    parsed = parseHTML('<p>A processing <?jump> instruction.</p>')
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing instruction.'
        '</p>'
    )
def testRaisePI():
    """Check propagation of handler exceptions."""
    # An exception raised inside the PI handler must surface to the caller.
    def handler(name, arg):
        raise KeyError(f'unknown PI: {name}')
    with raises(KeyError):
        parseHTML(
            '<p>A processing <?jump> instruction.</p>',
            piHandler=handler
        )
def testNoArgPI():
    """Check parsing of processing instruction with no arguments."""
    def handler(name, arg):
        # A PI without arguments yields an empty-string arg, not None.
        assert name == 'jump'
        assert arg == ''
        return xhtml.br
    parsed = parseHTML(
        '<p>A processing <?jump> instruction.</p>',
        piHandler=handler
    )
    # The handler's return value is spliced into the document.
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing <br/> instruction.'
        '</p>'
    )
def testArgPI():
    """Check parsing of processing instruction with an argument."""
    def handler(name, arg):
        # Everything after the PI name is passed through as the argument.
        assert name == 'jump'
        return xhtml.span[arg]
    parsed = parseHTML(
        '<p>A processing <?jump a little higher> instruction.</p>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing <span>a little higher</span> instruction.'
        '</p>'
    )
def testIgnoreXMLDecl():
    """Check parsing of XML declaration."""
    def handler(name, arg):
        # The '<?xml ...?>' declaration must not reach the PI handler.
        assert False
    parsed = parseHTML(
        '<?xml version="1.0" encoding="UTF-8" ?>'
        '<html><body><p>XHTML document.</p></body></html>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<html xmlns="http://www.w3.org/1999/xhtml">'
        '<body><p>XHTML document.</p></body>'
        '</html>'
    )
def testIgnoreXMLSyntax():
    """Check parsing of a PI using XML syntax (question mark at end)."""
    def handler(name, arg):
        # The trailing '?' of '<?jump lazy fox?>' must not be part of arg.
        assert name == 'jump'
        return arg.upper()
    parsed = parseHTML(
        '<p>A processing <?jump lazy fox?> instruction.</p>',
        piHandler=handler
    )
    assert parsed.flattenXML() == (
        '<p xmlns="http://www.w3.org/1999/xhtml">'
        'A processing LAZY FOX instruction.'
        '</p>'
    )
|
[
"maarten@boxingbeetle.com"
] |
maarten@boxingbeetle.com
|
9074795f04fffda1859ceabffe3265b9dad61ac4
|
c7cba1dad777f461ea546d0437528c985be3c051
|
/client.py
|
559f6546c5344baecc2df329d11dee988617cc63
|
[
"MIT"
] |
permissive
|
elliotthwang/NLU
|
000127b561c5b99340b04bf78aa65ff6ea28c79a
|
0e6a96e4c2f363beb4241b4371244a5229e72811
|
refs/heads/master
| 2022-01-12T06:51:00.036787
| 2018-10-07T21:56:15
| 2018-10-07T21:56:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
############################################################################################
#
# The MIT License (MIT)
#
# GeniSys NLU Engine API Client
# Copyright (C) 2018 Adam Milton-Barker (AdamMiltonBarker.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Title: GeniSys NLU Engine API Client
# Description: API client for communicating with the GeniSys AI NLU API endpoint
# Configuration: required/confs.json
# Last Modified: 2018-09-08
#
# Example Usage:
#
# $ python3 client.py CLASSIFY 1 "Do you know what I am saying?"
#
############################################################################################
import sys, time, string, requests, json
from tools.Helpers import Helpers
from tools.Logging import Logging
class Client():
    """API client for the GeniSys NLU inference endpoint.

    Loads configuration via Helpers, opens a per-run log file and builds
    the inference URL for the given user ID.
    """
    def __init__(self, user):
        # user: ID appended to the inference endpoint URL path.
        self.Helpers = Helpers()
        self.Logging = Logging()
        # assumes confs has AI.Logs and AI.FQDN keys -- matches required/confs.json per header
        self._confs = self.Helpers.loadConfigs()
        # Client log file under the configured AI log directory.
        self.LogFile = self.Logging.setLogFile(self._confs["AI"]["Logs"]+"Client/")
        # Inference endpoint: {FQDN}/communicate/infer/{user}
        self.apiUrl = self._confs["AI"]["FQDN"] + "/communicate/infer/"+user
        self.headers = {"content-type": 'application/json'}
        self.Logging.logMessage(
            self.LogFile,
            "CLIENT",
            "INFO",
            "GeniSys AI Client Ready")
if __name__ == "__main__":
    # CLI usage (from file header): python3 client.py CLASSIFY <user_id> "<query text>"
    if sys.argv[1] == "CLASSIFY":
        # NOTE(review): this rebinds the name Client from the class to an
        # instance; harmless in this one-shot script but confusing.
        Client = Client(sys.argv[2])
        data = {"query": str(sys.argv[3])}
        Client.Logging.logMessage(
            Client.LogFile,
            "CLIENT",
            "INFO",
            "Sending string for classification...")
        # POST the query as JSON to the inference endpoint.
        response = requests.post(
            Client.apiUrl,
            data=json.dumps(data),
            headers=Client.headers)
        # Logs the Response object's repr, not its body.
        Client.Logging.logMessage(
            Client.LogFile,
            "CLIENT",
            "OK",
            "Response: "+str(response))
|
[
"adammiltonbarker@eu.techbubbletechnologies.com"
] |
adammiltonbarker@eu.techbubbletechnologies.com
|
ca8705cc1f1359d399708435066d644118c8025c
|
eba283c7b7d07c9ff15abee322da8fea460ea6be
|
/__init__.py
|
a81e1e6e897d836c409125c7fc0208faa64f920a
|
[] |
no_license
|
ROB-Seismology/layeredbasemap
|
5bfa3daad9b2e47a1fea35c652309541ac88ac23
|
122464656d5534798c4bba38cdda2638e7d8948f
|
refs/heads/master
| 2021-01-20T17:33:02.596090
| 2020-12-16T10:30:54
| 2020-12-16T10:30:54
| 90,877,746
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
"""
layeredbasemap
Module to create maps with Basemap using the GIS layer philosophy,
where each layer is defined by a dataset and style.
Author: Kris Vanneste, Royal Observatory of Belgium
"""
from __future__ import absolute_import, division, print_function, unicode_literals
## Make relative imports work in Python 3
import importlib
## Reloading mechanism
try:
reloading
except NameError:
## Module is imported for the first time
reloading = False
else:
## Module is reloaded
reloading = True
try:
## Python 3
from importlib import reload
except ImportError:
## Python 2
pass
## Test GDAL environment
import os
#gdal_keys = ["GDAL_DATA", "GDAL_DRIVER_PATH"]
gdal_keys = ["GDAL_DATA"]
for key in gdal_keys:
if not key in os.environ.keys():
print("Warning: %s environment variable not set. This may cause errors" % key)
elif not os.path.exists(os.environ[key]):
print("Warning: %s points to non-existing directory %s" % (key, os.environ[key]))
## Import submodules
## styles
if not reloading:
styles = importlib.import_module('.styles', package=__name__)
else:
reload(styles)
from .styles import *
## data_types
if not reloading:
data_types = importlib.import_module('.data_types', package=__name__)
else:
reload(data_types)
from .data_types import *
## cm
if not reloading:
cm = importlib.import_module('.cm', package=__name__)
else:
reload(cm)
## layered_basemap
if not reloading:
layered_basemap = importlib.import_module('.layered_basemap', package=__name__)
else:
reload(layered_basemap)
from .layered_basemap import *
|
[
"kris.vanneste@oma.be"
] |
kris.vanneste@oma.be
|
b20d17916565894c0ad9d4c6695c25d8b0ded9b1
|
5b5d46b4a47ab365688af03afdbec24e885a2c90
|
/21/21.py
|
19a6901a33b382a6d732eace82edb63fc3f53e03
|
[] |
no_license
|
CA2528357431/python-base--Data-Structures
|
e9e24717ae016c4ca4a15805f261fd48f377ac6b
|
dccbcb27d82f2264947458686900addf2b83faad
|
refs/heads/main
| 2023-07-04T08:32:52.551200
| 2021-07-30T16:21:31
| 2021-07-30T16:21:31
| 386,671,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
# 二叉树
# 27非递归遍历
class tree:
    """Binary tree node.

    `root` holds the key used for ordering, `data` an optional payload,
    and `left`/`right` the child subtrees (or None).
    """

    def __init__(self, root, left=None, right=None):
        self.nodes = []
        self.root = root
        self.left = left
        self.right = right
        self.data = None

    @property
    def lisp(self):
        """Nested-list (lisp-style) representation: [root, left, right]."""
        rep = [self.root, None, None]
        if self.left is not None:
            rep[1] = self.left.lisp
        if self.right is not None:
            rep[2] = self.right.lisp
        return rep

    def __str__(self):
        return str(self.lisp)

    # Three depth-first traversals; each is just a different placement of
    # the root relative to the left and right subtree traversals.

    def first(self):
        """Pre-order traversal: root, left subtree, right subtree."""
        order = [self.root]
        for child in (self.left, self.right):
            if child is not None:
                order.extend(child.first())
        return order

    def middle(self):
        """In-order traversal: left subtree, root, right subtree."""
        left_part = self.left.middle() if self.left is not None else []
        right_part = self.right.middle() if self.right is not None else []
        return left_part + [self.root] + right_part

    def last(self):
        """Post-order traversal: left subtree, right subtree, root."""
        order = []
        for child in (self.left, self.right):
            if child is not None:
                order.extend(child.last())
        order.append(self.root)
        return order

    def layer(self):
        """Breadth-first (level-order) traversal."""
        order = []
        pending = [self]
        # Nodes of the same level sit next to each other in the queue.
        while pending:
            node = pending.pop(0)
            order.append(node.root)
            if node.left is not None:
                pending.append(node.left)
            if node.right is not None:
                pending.append(node.right)
        return order
# Build a sample tree and print its four traversals.
left_sub = tree(3, tree(1), tree(2))
right_sub = tree(10, tree(6), tree(4))
sample = tree(13, left_sub, right_sub)
print(sample.first())
print(sample.middle())
print(sample.last())
print(sample.layer())
|
[
"2528357431@QQ.com"
] |
2528357431@QQ.com
|
5fdd01c76510a26587a3b1a59f24fc573d6df8f5
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/7dbcaa7c22297fe1b303/snippet.py
|
2378306d1ede8dd7979bb02a73d1b3106a44283a
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,969
|
py
|
#!/usr/bin/env python
"""
Pandoc filter to parse CriticMarkup into Spans for
Insertion and Deletion. The Docx writer will convert
these into Tracked Changes.
A comment immediately after a change will be parsed
for "author: The Author" and "date: 12-21-12", which
will be inserted into the Span as appropriate.
"""
from pandocfilters import Span, Str, RawInline, walk, attributes, stringify
import re
import sys
import json
# Regex matching any single CriticMarkup delimiter: the fused
# "change-close + comment-open" form xx}{>>, an opening {xx, a closing xx},
# or the substitution arrow ~>.
regexes = {
    'all': re.compile(r"([-+=~]{2}\}\{>>|\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
    # 'all': re.compile(r"(\{[-+~>=]{2}|[-+=~<]{2}\}|~>)"),
}

def parseMarks (key, value, format, meta):
    """Pandoc filter, pass 1: split Str nodes on CriticMarkup delimiters.

    Each delimiter becomes a RawInline with format 'critic'; the text after
    the delimiter is walked recursively so several marks in one Str are all
    split out.
    """
    if key == 'Str':
        if regexes['all'].search(value):
            # split(..., 1) -> [text-before, delimiter, text-after]
            items = regexes['all'].split(value, 1)
            result = [
                Str(items[0]),
                RawInline('critic', items[1])]
            result.extend(walk([Str(items[2])], parseMarks, format, meta))
            return result

# Opening delimiter -> span class for each CriticMarkup construct.
spanstart = {
    '{++' : 'insertion',
    '{--' : 'deletion',
    '{==' : 'hilite',
    '{>>' : 'comment',
    '{~~' : 'subdelete'
}
# Span class -> plain closing delimiter (substitutions and comments close
# through other paths in spanify).
spanend = {
    'insertion' : '++}',
    'deletion' : '--}',
    'hilite' : '==}',
    # 'comment' : '<<}',
}
# Span class -> closing delimiter fused with an immediately-following
# comment opener, e.g. "++}{>>".
spancomment = {
    'insertion' : '++}{>>',
    'deletion' : '--}{>>',
    'hilite' : '==}{>>',
    'subadd' : '~~}{>>',
}

def makeSpan (contents, classes = "", author = "", date = ""):
    """Wrap `contents` in a pandoc Span carrying classes/author/date attrs."""
    attrs = {'classes' : classes.split(), 'author' : author, 'date' : date}
    return Span (attributes(attrs), contents)
def findAuthor (comment):
    """Extract an author marked as "author: Name" or "@Name"; "" if absent."""
    match = re.search(r"(author:|@)\s*([\w\s]+)", comment)
    return match.group(2) if match else ""
def findDate (comment):
    """Extract a date marked as "date: <token>"; "" if absent."""
    match = re.search(r"date:\s*(\S+)", comment)
    return match.group(1) if match else ""
# Parser state shared across spanify() calls -- pandoc's walk invokes the
# filter once per node, so the state machine must live at module level.
inspan = False      # True while inside an open CriticMarkup construct
spantype = None     # class of the open construct ('insertion', 'comment', ...)
lasttype = None     # declared global in spanify but never used
spancontents = []   # inline nodes collected inside the open construct
priorspan = []      # finished sub-spans waiting for an attached comment

def spanify (key, value, format, meta):
    """Pandoc filter, pass 2: fold 'critic' RawInlines into attributed Spans.

    A state machine over the inline stream: an opening delimiter starts
    collecting nodes into spancontents; the matching close emits a Span.
    Substitutions ({~~a~>b~~}) emit a deletion Span plus an insertion Span.
    A comment fused to a change ("xx}{>>") is parsed for author/date, which
    are attached to the preceding span(s); the comment text itself is kept
    as an HTML comment.
    """
    global inspan
    global spantype
    global lasttype
    global spancontents
    global priorspan
    if inspan:
        # pass
        if key == 'RawInline' and value[0] == 'critic':
            if value[1] == spanend.get(spantype, ""):
                # Plain close: emit the span and re-walk it for nested marks.
                newspan = makeSpan(spancontents, spantype)
                inspan = False
                spantype = None
                spancontents = []
                return walk([newspan], spanify, format, meta)
            elif spantype == 'subdelete' and value[1] == '~>':
                # Substitution arrow: stash deleted half, start collecting
                # the inserted half.
                priorspan.append({'type': 'deletion', 'contents': spancontents})
                spancontents = []
                spantype = 'subadd'
                return []
            elif spantype == 'subadd' and value[1] == '~~}':
                # Substitution close: emit deletion + insertion spans.
                delspan = makeSpan(priorspan[0]['contents'], 'deletion')
                addspan = makeSpan(spancontents, 'insertion')
                inspan = False
                spantype = None
                priorspan = []
                spancontents = []
                return walk([delspan, addspan], spanify, format, meta)
            elif value[1] == spancomment.get(spantype, ""):
                # Change closed by a fused comment opener: stash the change,
                # switch to collecting the comment text.
                thistype = spantype
                if thistype == 'subadd': thistype = 'insertion'
                priorspan.append({'type': thistype, 'contents': spancontents})
                spancontents = []
                spantype = 'comment'
                return []
            elif value[1] == '<<}' and spantype == 'comment':
                # Comment close: parse author/date and attach them to every
                # stashed span; keep the comment itself as raw HTML.
                commentstring = stringify(spancontents)
                result = []
                # if len(priorspan) > 0:
                author = findAuthor(commentstring)
                date = findDate(commentstring)
                for item in priorspan:
                    result.append(makeSpan(item['contents'], item['type'], author, date))
                comment = "<!-- %s -->" % commentstring
                result.append(RawInline('html', comment))
                priorspan = []
                spancontents = []
                spantype = None
                inspan = False
                return walk(result, spanify, format, meta)
            else:
                # Unrelated delimiter while open: treat it as content.
                spancontents.append({'t': key, 'c': value})
                return []
        else:
            # Ordinary inline node inside an open construct: collect it.
            spancontents.append({'t': key, 'c': value})
            return []
    else:
        if key == 'RawInline' and value[0] == 'critic':
            thetype = spanstart.get(value[1], "")
            if thetype:
                # Opening delimiter: start a new construct.
                spantype = thetype
                inspan = True
                spancontents = []
                return []
            else:
                #this is a user error, do not parse
                pass
        else:
            pass
if __name__ == "__main__":
    # Standard pandoc filter protocol: JSON AST on stdin, target format as
    # argv[1], transformed JSON AST on stdout.
    doc = json.loads(sys.stdin.read())
    if len(sys.argv) > 1:
        format = sys.argv[1]
    else:
        format = ""
    meta = doc[0]['unMeta']
    # Pass 1: split Str nodes on CriticMarkup delimiters.
    parsed = walk(doc, parseMarks, format, meta)
    # Pass 2: fold delimited runs into attributed Spans.
    altered = walk(parsed, spanify, format, meta)
    json.dump(altered, sys.stdout)
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
4f388037513dc7157edd78c95a929b1b7d5c1ed8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/6/usersdata/131/2399/submittedfiles/investimento.py
|
b734cf524df522049516f8e80f2ef98958d66a91
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
#COMECE SEU CODIGO AQUI
#ENTRADA
# float() makes this work on Python 3 too, where input() returns a string
# (the original relied on Python 2's eval-style input()).
a = float(input('digite seu saldo 2016: '))
#proscessamento
# Each year's balance adds 4.5% interest to the previous year's balance.
b = float(a*0.045 + a)
c = float(b*0.045 + b)
d = float(c*0.045 + c)
e = float(d*0.045 + d)
f = float(e*0.045 + e)
g = float(f*0.045 + f)  # fixed: was `flaot(...)`, a NameError at runtime
h = float(g*0.045 + g)
i = float(h*0.045 + h)
j = float(i*0.045 + i)
k = float(j*0.045 + j)
#saida
print('seu saldo em 2017 %.2f' %(b))
print('seu saldo em 2018 %.2f' %(c))
print('seu saldo em 2019 %.2f' %(d))
print('seu saldo em 2020 %.2f' %(e))
print('seu saldo em 2021 %.2f' %(f))
print('seu saldo em 2022 %.2f' %(g))
print('seu saldo em 2023 %.2f' %(h))
print('seu saldo em 2024 %.2f' %(i))
print('seu saldo em 2025 %.2f' %(j))
print('seu saldo em 2026 %.2f' %(k))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3c327c89f0de7bec82025164c968faf2df12d343
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4191/codes/1716_2497.py
|
08cd8e7141262da535ce7f98751d1c4b82b7ce4d
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Instituto de Computacao - UFAM
# Lab 04 - Ex 04
# 20 / 06 / 2016
# Compound a starting amount at a fixed 4% yearly rate for `tempo` years.
qi = float(input("Quantia inicial: "))
tempo = int(input("Tempo de investimento: "))
juros = 4.0   # yearly interest rate, in percent
saldo = qi # accumulator variable (running balance)
# Initial value of the counter variable
t = 0
rend=0
# Balance update: one year of interest per iteration
while(t<tempo):
    rend = saldo * (juros/100)
    saldo = saldo + rend
    t =t+1
print(round(saldo, 2))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
e9e6b193ada49c07eeba439047839ed6c513a166
|
7a31597f1359be11d2cc05d8107963f3dbe9e204
|
/Image_recognition/utils/model_dict.py
|
c4a7e7c37d00cc8aae670e50e091d41bc1d6d1b9
|
[] |
no_license
|
LIMr1209/machine-learn
|
9aac2b51a928a864ac3cf82368b3fe9694644cb2
|
56453dce6ae8ba5e7298dab99d5e6a6d114e4860
|
refs/heads/master
| 2022-07-12T14:17:07.536535
| 2021-12-20T06:57:54
| 2021-12-20T06:57:54
| 163,064,915
| 5
| 2
| null | 2020-08-31T03:09:10
| 2018-12-25T08:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
import torch as t
def save_oplaus():
    """Re-save an EfficientNet checkpoint keeping only its 'state_dict' entry.

    Loads '../checkpoint/EfficientNet.pth.tar' and writes a new file at
    '/opt/checkpoint/EfficientNet.pth' containing just the weights dict,
    dropping whatever else the full training checkpoint carries.
    """
    state_dict = {}
    checkpoint = t.load('../checkpoint/EfficientNet.pth.tar')
    state_dict['state_dict'] = checkpoint['state_dict']
    t.save(state_dict, '/opt/checkpoint/EfficientNet.pth')


# Runs on import; both paths above are hard-coded.
save_oplaus()
|
[
"aaa1058169464@126.com"
] |
aaa1058169464@126.com
|
c5023ecc348a5f6d754ae717b924597515d9e466
|
c24fa89450cccb48fcd481c3cfa475ee0e412e09
|
/PythonTools/accToMatAcc.py
|
9b41f081bd69b214a00fd824ead8d6cca2702378
|
[] |
no_license
|
PhoenixYanrongLi/CareEcoSystem_ServerCodeNew
|
e95d1c552cdcc70aac09482dfda63e253e01fcb0
|
b627484694863c425483a04391eedc2ec2ec1098
|
refs/heads/master
| 2021-01-01T04:34:51.858543
| 2016-04-14T17:57:30
| 2016-04-14T17:57:30
| 56,258,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
__author__ = 'Brad'
import csv
import datetime
import scipy.io
import numpy
def writeFile(filename):
    """Convert a space-delimited accelerometer log into a MATLAB .mat file.

    Each input row is: timestamp x y z azimuth pitch roll, with the
    timestamp formatted as %y-%m-%dT%H:%M:%S.%f. Writes
    '<filename>_AccelerometerData.mat' containing:
      - 'AccData':     (n, 3) array of x/y/z samples
      - 'UnixTime_ms': (n, 1) array of epoch milliseconds
    """
    formatStr = "%y-%m-%dT%H:%M:%S.%f"
    # Hoisted out of the loop: the epoch reference never changes.
    epoch = datetime.datetime.utcfromtimestamp(0)
    times = []
    samples = []
    # Collect rows in plain lists and convert once at the end -- the
    # original vstack-per-row loop was quadratic. (Also removed an
    # unclosed duplicate open() of the input file and a debug print.)
    with open(filename, 'r') as f:
        reader = csv.reader(f, delimiter=" ")
        for time, x, y, z, azimuth, pitch, roll in reader:
            timeC = datetime.datetime.strptime(time, formatStr)
            delta = (timeC - epoch).total_seconds() * 1000
            times.append([delta])
            samples.append([float(x), float(y), float(z)])
    timeAr = numpy.array(times)
    accAr = numpy.array(samples)
    writeDict = {'AccData': accAr, 'UnixTime_ms': timeAr}
    scipy.io.savemat(filename + '_AccelerometerData.mat', writeDict)
filename='99000213875160_20141113-193740_MM_ACC_1103.txt'
writeFile(filename)
|
[
"phl_416cat@sjtu.edu.cn"
] |
phl_416cat@sjtu.edu.cn
|
d81d21379e5af810c27b2b1d3e4c8f32d8faec6d
|
9d454ae0d5dd1d7e96e904ced80ca502019bb659
|
/198_rob.py
|
9c17186c04b2ad05f74577de361aeef0ece28d64
|
[] |
no_license
|
zzz686970/leetcode-2018
|
dad2c3db3b6360662a90ea709e58d7facec5c797
|
16e4343922041929bc3021e152093425066620bb
|
refs/heads/master
| 2021-08-18T08:11:10.153394
| 2021-07-22T15:58:52
| 2021-07-22T15:58:52
| 135,581,395
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
def rob(nums):
    """House Robber: maximum sum of non-adjacent elements of nums."""
    # prev / best: optimal totals up to two houses back and one house back.
    prev, best = 0, 0
    for amount in nums:
        # Either take this house (amount + prev) or keep the previous best.
        prev, best = best, max(amount + prev, best)
    return best


assert 4 == rob([2, 1, 1, 2])
|
[
"1564256031@qq.com"
] |
1564256031@qq.com
|
1eb4ea943bb10ccda036a8f2bbafcef91c5855ed
|
efd6a277c2d5bffdfba6ccb4d5efd555e652d29e
|
/chap2/2.12.py
|
f427f3f0b66eeba917d8798655d76ae107eb82bf
|
[] |
no_license
|
CavalcanteLucas/cookbook
|
dd57583c8b5271879bb086783c12795d1c0a7ee8
|
09ac71e291571e3add8d23d79b1684b356702a40
|
refs/heads/master
| 2020-03-25T03:09:39.608599
| 2019-09-13T04:43:23
| 2019-09-13T04:43:23
| 143,325,952
| 0
| 0
| null | 2020-09-25T05:46:30
| 2018-08-02T17:32:08
|
Python
|
UTF-8
|
Python
| false
| false
| 885
|
py
|
# Sanitizing and Cleaning Up Text
# (REPL-style transcript; bare expressions display intermediate values.)
s = 'pýtĥöñ\fis\tawesome\r\n'
s
# str.translate with a {codepoint: replacement} map: tab and form feed
# become spaces, carriage return maps to None and is deleted.
remap = {
    ord('\t') : ' ',
    ord('\f') : ' ',
    ord('\r') : None # Deleted
}
a = s.translate(remap)
a
import unicodedata
import sys
sys.maxunicode
# Map every Unicode combining character -> None; after NFD decomposition
# separates base characters from their marks, translating with this table
# strips the accents.
cmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode) if unicodedata.combining(chr(c)))
b = unicodedata.normalize('NFD', a)
b
b.translate(cmb_chrs)
# Map every Unicode decimal digit (category 'Nd') to its ASCII equivalent.
digitmap = { c: ord('0') + unicodedata.digit(chr(c))
        for c in range(sys.maxunicode)
        if unicodedata.category(chr(c)) == 'Nd'}
len(digitmap)
# Arabic digits
x = '\u0661\u0662\u0663'
x
x.translate(digitmap)
a
b = unicodedata.normalize('NFD', a)
b
# Drop any remaining non-ASCII by round-tripping through ASCII, ignoring errors.
b.encode('ascii', 'ignore').decode('ascii')
# Discussion
# on text processing; the simpler, the faster.
def clean_space(s):
    """Drop carriage returns and turn tabs/form feeds into spaces."""
    # One C-level pass with str.translate instead of chained replace() calls.
    return s.translate({ord('\r'): None, ord('\t'): ' ', ord('\f'): ' '})
|
[
"thesupervisar@gmail.com"
] |
thesupervisar@gmail.com
|
435ce25fccf4bd20dbf5ae423dd02ada727c70e2
|
b07ea8c5a075e3c7e7a0f9aca6bec73a22cdb7df
|
/PART 1/ch03/10_is_anagram_using_ord.py
|
469076cacf26d0facbbfc5e8a9ede66cabd8f11c
|
[] |
no_license
|
jaeehooon/data_structure_and_algorithm_python
|
bb721bdbcff1804c04b944b4a01ed6be93124462
|
6d07438bfaaa1ec5283cb350ef4904eb94826c48
|
refs/heads/master
| 2023-02-21T10:08:20.765399
| 2021-01-22T13:37:11
| 2021-01-22T13:37:11
| 323,367,191
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# 3.4.2 애너그램 (2)
"""
ord() 함수는 인수가 유니코드 객체일 때, 문자의 유니코드를 나타내는 정수를 반환
인수가 8바이트 문자열인 경우 바이트 값을 반환함
문자열에서 모든 문자의 ord() 함수 결과를 더했을 때 그 결과가 같으면 두 문자열은 애너그램
"""
import string
def hash_func(astring):
    """Sum the ord() values of the non-whitespace characters of astring.

    :param astring: string to reduce
    :return: integer sum of code points, whitespace characters excluded
    """
    return sum(ord(ch) for ch in astring if ch not in string.whitespace)
def find_anagram_hash_function(word1, word2):
    """True when both words have the same character-code sum (anagram heuristic)."""
    first, second = hash_func(word1), hash_func(word2)
    return first == second
def test_find_anagram_hash_function():
    """Smoke test: equal sums for a permutation, different for a substitution."""
    word1 = "buffy"
    word2 = "bffyu"
    word3 = "bffya"
    assert(find_anagram_hash_function(word1, word2) is True)
    assert(find_anagram_hash_function(word1, word3) is False)
    # Message is Korean for "test passed!" -- kept verbatim (runtime output).
    print("테스트 통과!")


if __name__ == '__main__':
    test_find_anagram_hash_function()
|
[
"qlenfr0922@gmail.com"
] |
qlenfr0922@gmail.com
|
abec0a4a92dc068a00f9f27d0c21709406b6641f
|
e47b87905872d92458512b0eda435f53f90b19cf
|
/movies/migrations/0003_alter_movie_author.py
|
f15bf19bee735f007ed42db65755c2622c2f495c
|
[] |
no_license
|
ephremworkeye/drf_demo
|
e08e2f2049b427497bad815e51247e27784b1f29
|
9f5ce84edd7841fd0456107d99485d2af44e1c49
|
refs/heads/master
| 2023-07-31T16:24:12.400218
| 2021-09-25T05:56:05
| 2021-09-25T05:56:05
| 409,107,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# Generated by Django 3.2.7 on 2021-09-23 00:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-declare movie.author as a FK to the swappable user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('movies', '0002_alter_movie_author'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='author',
            # CASCADE: deleting a user also deletes that user's movies.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"ephremworkeye@gmail.com"
] |
ephremworkeye@gmail.com
|
9ff318e046b87d76579e6d5b06d8f22e909203d4
|
1b596568ef6ced06173e60c71f01141682329ac4
|
/version-example
|
0c6ba046e0d538c2d3d1a402526ebec6ad7fb3c5
|
[] |
no_license
|
pfuntner/gists
|
4eb1847ef22d3d9cb1e17e870a8434c376c4dbfc
|
3322c922bd43480b4cc2759b1c31e5c76668c7ef
|
refs/heads/master
| 2020-04-17T08:40:29.444378
| 2019-01-18T16:23:49
| 2019-01-18T16:23:49
| 166,421,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,957
|
#! /usr/bin/env python
import os
import re
import sys
import logging
import argparse
import datetime
import subprocess
def run(cmd):
  """Run a command and return (rc, stdout, stderr).

  `cmd` may be a string (split on whitespace) or an argv list. rc is None
  when the command could not be started, and is also reset to None when it
  exited 0 without producing stdout (treated as "no useful result").
  """
  (rc, stdout, stderr) = (None, '', '')
  if isinstance(cmd, basestring):  # Python 2 only: basestring does not exist on 3
    cmd = cmd.split()
  try:
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  except Exception as e:
    # Startup failure (e.g. command not found) is logged and swallowed.
    log.debug('Ignoring `{e!s}` from {cmd}'.format(**locals()))
  else:
    (stdout, stderr) = p.communicate()
    rc = p.wait()
  log.debug('{cmd}: {rc}, {stdout!r}, {stderr!r}'.format(**locals()))
  if (rc == 0) and (not stdout):
    rc = None
  return (rc, stdout, stderr)
def get_version():
  """Build a version string for this script.

  Prefers git metadata gathered from the script's own directory (short
  commit, branch, commit date, fetch remotes); falls back to the script
  file's mtime when any piece of git information is unavailable.
  """
  git_used = False
  ret = '?'
  dir = os.path.dirname(sys.argv[0])
  base = os.path.basename(sys.argv[0])
  cwd = os.getcwd()
  try:
    os.chdir(dir)
  except:
    pass
  else:
    (rc, stdout, stderr) = run(['git', 'log', '-1', base])
    # Expected `git log -1` output shape:
    """
    commit {SHA1}
    Author: {FIRST_NAME} {LAST_NAME} <{EMAIL_ADDRESS}>
    Date: Wed Jan 16 09:32:03 2019 -0500
    .
    .
    .
    """
    match = re.search(r'^commit\s+(\S+).*\nDate:\s+(([A-Z][a-z]{2} ){2}[ 0123]\d (\d{2}:){2}\d{2} \d{4})', stdout, re.DOTALL)
    log.debug('`git log -1` search groups: {groups}'.format(groups=match.groups() if match else None))
    if match:
      commit = match.group(1)[:6]  # abbreviated SHA
      # Group 2 stops at the year, so no timezone offset reaches strptime.
      timestamp = datetime.datetime.strptime(match.group(2), '%a %b %d %H:%M:%S %Y')
      log.debug('timestamp: {timestamp!s}'.format(**locals()))
      (rc, stdout, stderr) = run('git branch')
      # The current branch is the line marked with '*'.
      match = re.search(r'\*\s(\S+)', stdout, re.DOTALL)
      log.debug('`git branch` search groups: {groups}'.format(groups=match.groups() if match else None))
      if match:
        branch = match.group(1)
        (rc, stdout, stderr) = run('git remote -v')
        # Expected `git remote -v` output shape:
        """
        origin https://github.com/pfuntner/gists.git (fetch)
        """
        hits = list(re.finditer(r'(\S+)\s(https?://\S+)\s\(fetch\)', stdout))
        log.debug('`git remote -v` hits: {hits}'.format(hits=[hit.groups() for hit in hits]))
        if hits:
          hits = ['{name}:{url}'.format(name=hit.group(1), url=hit.group(2)) for hit in hits]
          ret = '{commit}, {branch}, {timestamp!s}, {hits}'.format(**locals())
          git_used = True
    os.chdir(cwd)
  if not git_used:
    # Fallback: modification time of the script file itself.
    ret = str(datetime.datetime.fromtimestamp(os.path.getmtime(sys.argv[0])))
  return ret
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
log.setLevel(logging.WARNING)
parser = argparse.ArgumentParser(description='Example of doing a nifty --version')
parser.add_argument('-v', '--verbose', dest='verbose', action='count', help='Print more messages')
# argparse's 'version' action prints the string and exits immediately.
parser.add_argument('--version', action='version', version=get_version(), help='See wonderful version information')
args = parser.parse_args()
# Each -v lowers the threshold by 10: WARNING -> INFO -> DEBUG.
log.setLevel(logging.WARNING - (args.verbose or 0) * 10)
# print repr(get_version())
|
[
"jpfuntne@cisco.com"
] |
jpfuntne@cisco.com
|
|
d944222d39aa2c0f4eb6c53856e08e6f051fae7a
|
df541a802b2dfa89d3aab14af627358dc7c76e6e
|
/接口自动化/Frame5/httpUnittest.py
|
21a2012f8446211b06c3e9b5b336e248861a73a5
|
[] |
no_license
|
gupan2018/PyAutomation
|
de966aff91f750c7207c9d3f3dfb488698492342
|
230aebe3eca5799c621673afb647d35a175c74f1
|
refs/heads/master
| 2021-09-07T19:44:20.710574
| 2017-12-22T15:58:23
| 2017-12-22T15:58:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
__author__ = 'Administrator'
import unittest
import mysql.connector
class Http_Unittest(unittest.TestCase):
    """Data-driven test case for the register API; results are stored in MySQL.

    Each instance wraps one test-case row; `test_method` names the TestCase
    method to run (here always 'test_register').
    """

    def __init__(self, test_case_id, test_method, http_method, http, test_url, test_data, cousor):
        # `http` is a request helper exposing get_req/post_req;
        # `cousor` [sic] is a MySQL cursor used to persist results.
        super(Http_Unittest,self).__init__(test_method)
        self.test_case_id = test_case_id
        self.test_method = test_method
        self.http = http
        self.test_url = test_url
        self.test_data = test_data
        self.http_method = http_method
        self.mobilephone = test_data["mobilephone"]
        self.regname = test_data["regname"]
        self.cursor = cousor

    def test_register(self):
        # NOTE(review): when http_method is neither GET nor POST, `response`
        # stays unbound and the insert below raises NameError.
        if self.http_method == "GET":
            response = self.http.get_req(self.test_url, self.test_data)
        elif self.http_method == "POST":
            response = self.http.post_req(self.test_url, self.test_data)
        else:
            print("error in class Http_Unittest")
        try:
            # Store the run result in the database (parameterized insert).
            sql_insert = 'INSERT INTO test_result ' \
                         '(case_id, http_method, request_name, request_url, mobilephone, regname, test_method, test_desc, status, code, msg) ' \
                         'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
            insert_data = (self.test_case_id, self.http_method ,'register',self.test_url, self.mobilephone, self.regname, self.test_method, "测试注册接口",response["status"], response["code"], response["msg"])
            self.cursor.execute(sql_insert, insert_data)
            self.cursor.execute("commit")
        except mysql.connector.Error as e:
            print(e)
            self.cursor.execute("rollback")
        # NOTE(review): catching AssertionError and printing it means this
        # test never actually fails under the unittest runner.
        try:
            self.assertEqual(response["code"], "10001", "register请求失败")
        except AssertionError as e:
            print(str(e))
        #pass
#下面是测试代码
'''
path_http = "http.conf"
http = HttpRequest(path_http)
test_Demo = Http_Unittest("test_register", "GET", http)
test_Demo.test_register()'''
|
[
"610077670@qq.com"
] |
610077670@qq.com
|
518bc4aa64f4e5aac711a4ed163b4a5f8f2a09f8
|
0cf269af0e6f8266c26b3bc68e57368e8c3d9edb
|
/src/outpost/django/thesis/migrations/0002_discipline_doctoralschool_thesis.py
|
4dd83c63267a93ce7139bdb7ee8f8290691ea608
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
medunigraz/outpost.django.thesis
|
c1518aa516d2177b0cacf381432bcdde41f2b5e1
|
1f0dbaa6edb6d91216d9bd97c79ee8b3bbc153cc
|
refs/heads/master
| 2021-09-25T16:47:59.469921
| 2020-08-04T19:16:07
| 2020-08-04T19:16:07
| 184,580,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-08 09:47
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: register unmanaged models over existing thesis tables.

    All three models use managed=False with explicit db_table names, so
    Django records their state only and does not create or alter tables.
    """

    initial = True

    dependencies = [("thesis", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="Discipline",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("name", models.CharField(blank=True, max_length=256, null=True)),
                ("number", models.CharField(blank=True, max_length=256, null=True)),
                ("thesistype", models.CharField(blank=True, max_length=256, null=True)),
            ],
            options={"db_table": "thesis_discipline", "managed": False},
        ),
        migrations.CreateModel(
            name="DoctoralSchool",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("name", models.CharField(blank=True, max_length=256, null=True)),
                (
                    "emails",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.EmailField(
                            blank=True, max_length=254, null=True
                        ),
                        size=None,
                    ),
                ),
            ],
            options={"db_table": "thesis_doctoralschool", "managed": False},
        ),
        migrations.CreateModel(
            name="Thesis",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("topic", models.CharField(blank=True, max_length=256, null=True)),
                ("created", models.DateTimeField(blank=True, null=True)),
                ("description", models.TextField(blank=True, null=True)),
                ("prerequisites", models.TextField(blank=True, null=True)),
                ("processstart", models.DateTimeField(blank=True, null=True)),
                ("goals", models.TextField(blank=True, null=True)),
                ("hypothesis", models.TextField(blank=True, null=True)),
                ("methods", models.TextField(blank=True, null=True)),
                ("schedule", models.TextField(blank=True, null=True)),
                (
                    "milestones",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.TextField(blank=True, null=True), size=None
                    ),
                ),
            ],
            options={
                "db_table": "thesis_thesis",
                # Custom view permission (predates Django's built-in one).
                "permissions": (("view_thesis", "View thesis"),),
                "managed": False,
            },
        ),
    ]
|
[
"michael@fladi.at"
] |
michael@fladi.at
|
0b697bf8ee814996d74fb061231aeabb70a184c9
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/surface/compute/ssl_policies/describe.py
|
0546d3f6604bd3a747040e4520dae448783faf92
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe SSL policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.ssl_policies import ssl_policies_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.ssl_policies import flags
_SSL_POLICY_ARG = flags.GetSslPolicyArgument()
class Describe(base.DescribeCommand):
  """Describe a Google Compute Engine ssl policy.

  *{command}* is used to display all data associated with a Google Compute
  Engine SSL policy in a project.

  An SSL policy specifies the server-side support for SSL features. An SSL
  policy can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects
  connections between clients and the HTTPS or SSL proxy load balancer. SSL
  policies do not affect the connection between the load balancers and the
  backends.
  """

  @staticmethod
  def Args(parser):
    # Registers the SSL-policy positional/flags on the command's parser.
    _SSL_POLICY_ARG.AddArgument(parser, operation_type='describe')

  def Run(self, args):
    """Issues the request to describe a SSL policy."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    helper = ssl_policies_utils.SslPolicyHelper(holder)
    # Resolve the policy reference from args, prompting per the default
    # scope lister when the scope is ambiguous.
    ref = _SSL_POLICY_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
    return helper.Describe(ref)
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
065afde0ad990602c145f176bbbaf950115db7e7
|
4d03e487b60afc85d1f3372fe43f2a7b081f0e41
|
/file_list/thumbnail_cache.py
|
88b1134b24f906a6286c8193055960e31d5d945b
|
[] |
no_license
|
hal1932/Explorer
|
d051bd0bb09b0952bad35deeeec0d4ad00947666
|
869ce3323aee499048f98f33910fc05126947942
|
refs/heads/master
| 2021-01-19T13:27:22.485124
| 2017-04-18T14:03:17
| 2017-04-18T14:03:17
| 82,392,096
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
# encoding: utf-8
from lib import *
import cv2
import os
import threading
import Queue
class ThumbnailCache(QObject):
    """In-memory thumbnail cache keyed by file path, with optional async loading.

    In async mode a daemon worker thread decodes queued image files with
    OpenCV; the decoded RGB array is converted into a QPixmap lazily on the
    first get_cached_pixmap() call for that path.
    """

    # Emitted by the worker thread after each queued image finishes loading.
    load_item_async = Signal()

    def __init__(self, enable_load_async=False):
        super(ThumbnailCache, self).__init__()
        # path -> QPixmap, or (async mode) a not-yet-converted numpy image.
        self.__items_dic = {}
        if enable_load_async:
            self.__load_queue = Queue.Queue()      # pending (path, QSize) requests
            self.__items_lock = threading.Lock()   # guards __items_dic
            self.__load_thread = threading.Thread(target=self.__load_async_impl)
            self.__load_thread.daemon = True       # don't block interpreter exit
            self.__load_thread.start()
        self.__enable_async = enable_load_async

    def get_cached_pixmap(self, path):
        """Return the cached QPixmap for `path`, or None if not loaded yet."""
        if self.__enable_async:
            with self.__items_lock:
                if path not in self.__items_dic:
                    return None
                image = self.__items_dic[path]
            if isinstance(image, QPixmap):
                return image
            # First access to an async-loaded entry: wrap the raw numpy
            # image (RGB, from the worker) in a QImage/QPixmap and cache it.
            height, width, dim = image.shape
            image = QImage(
                image.data,
                width, height, dim * width,
                QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(image)
            with self.__items_lock:
                self.__items_dic[path] = pixmap
            return pixmap
        else:
            if path not in self.__items_dic:
                return None
            return self.__items_dic[path]

    def load(self, path, size):
        """Synchronously load a thumbnail for `path`, scaled to fit `size`."""
        if self.__enable_async:
            raise ValueError('load_sync is not enabled')
        if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
            pixmap = QPixmap(path)
            pixmap_size = qt.fitting_scale_down(size, pixmap.size())
            pixmap = pixmap.scaled(pixmap_size)
        else:
            # Non-image files get their file-type icon instead.
            icon = qt.get_file_icon(path)
            size = icon.actualSize(size)
            pixmap = icon.pixmap(size)
        self.__items_dic[path] = pixmap
        return pixmap

    def load_async(self, path, size):
        """Queue `path` for background decoding (images) or cache its icon."""
        if not self.__enable_async:
            raise ValueError('load_async is not enabled')
        if os.path.splitext(path)[1].lower() in ThumbnailCache.__image_exts:
            self.__load_queue.put((path, size))
        else:
            # Icons are cheap; fetch them on the calling thread directly.
            icon = qt.get_file_icon(path)
            size = icon.actualSize(size)
            pixmap = icon.pixmap(size)
            with self.__items_lock:
                self.__items_dic[path] = pixmap

    def __load_async_impl(self):
        # Worker loop: decode queued images with OpenCV, store the RGB
        # array in the cache, and signal the GUI after each item.
        while True:
            path, size = self.__load_queue.get()
            image = cv2.imread(path)
            height, width = image.shape[:2]
            if width != size.width() or height != size.height():
                size = qt.fitting_scale_down(size, QSize(width, height))
                image = cv2.resize(image, (size.width(), size.height()))
            # OpenCV decodes BGR; convert so QImage.Format_RGB888 is correct.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            with self.__items_lock:
                self.__items_dic[path] = image
            self.__load_queue.task_done()
            self.load_item_async.emit()
            print(path)

    __initialized = False            # unused in the visible code
    __directory_thumbnail = None     # unused in the visible code
    __image_exts = (u'.png', u'.jpg', u'.jpeg', u'.gif', u'.bmp')
|
[
"yu.arai.19@gmail.com"
] |
yu.arai.19@gmail.com
|
a666762fd34411a901f443d2ec06dd10658e150c
|
e787a46d354e3bf9666cb0d8b0c7d5f8ed0a8169
|
/ccdproc/tests/make_mef.py
|
a871eaab6869b53192f486e57ffb6a99680fc3eb
|
[] |
permissive
|
astropy/ccdproc
|
25270fec41e64e635f7f22bcf340b2dee9ef88ac
|
5af6ee5eee16a99591dd9fcbe81735e70c1cc681
|
refs/heads/main
| 2023-09-01T11:48:06.969582
| 2023-06-08T18:01:43
| 2023-06-08T18:01:43
| 13,384,007
| 81
| 88
|
BSD-3-Clause
| 2023-06-08T18:01:45
| 2013-10-07T13:05:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from astropy.io import fits
from astropy.nddata import CCDData
from ccdproc import flat_correct
def make_sample_mef(science_name, flat_name, size=10, dtype='float32'):
"""
Make a multi-extension FITS image with random data
and a MEF flat.
Parameters
----------
science_name : str
Name of the science image created by this function.
flat_name : str
Name of the flat image created by this function.
size : int, optional
Size of each dimension of the image; images created are square.
dtype : str or numpy dtype, optional
dtype of the generated images.
"""
with NumpyRNGContext(1234):
number_of_image_extensions = 3
science_image = [fits.PrimaryHDU()]
flat_image = [fits.PrimaryHDU()]
for _ in range(number_of_image_extensions):
# Simulate a cloudy night, average pixel
# value of 100 with a read_noise of 1 electron.
data = np.random.normal(100., 1.0, [size, size]).astype(dtype)
hdu = fits.ImageHDU(data=data)
# Make a header that is at least somewhat realistic
hdu.header['unit'] = 'electron'
hdu.header['object'] = 'clouds'
hdu.header['exptime'] = 30.0
hdu.header['date-obs'] = '1928-07-23T21:03:27'
hdu.header['filter'] = 'B'
hdu.header['imagetyp'] = 'LIGHT'
science_image.append(hdu)
# Make a perfect flat
flat = np.ones_like(data, dtype=dtype)
flat_hdu = fits.ImageHDU(data=flat)
flat_hdu.header['unit'] = 'electron'
flat_hdu.header['filter'] = 'B'
flat_hdu.header['imagetyp'] = 'FLAT'
flat_hdu.header['date-obs'] = '1928-07-23T21:03:27'
flat_image.append(flat_hdu)
science_image = fits.HDUList(science_image)
science_image.writeto(science_name)
flat_image = fits.HDUList(flat_image)
flat_image.writeto(flat_name)
if __name__ == '__main__':
make_sample_mef('data/science-mef.fits', 'data/flat-mef.fits')
|
[
"mattwcraig@gmail.com"
] |
mattwcraig@gmail.com
|
c81f32fd9551171eca3f5765147895606e3573ff
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2759/60610/245707.py
|
0d1aca295915e41bef3bdf5a5262c94f0f29f52f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
num=input();
for i in range(num):
string=raw_input();
numList=string.split();
count=0
for j in range(int(numList[0]),int(numList[1])+1):
a=int(numList[2]);
b=int(numList[3]);
if (j%a==0) | (j%b==0):
count+=1;
print(count);
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
842240a63093b1ea755d9ef1824ad3d6792f4177
|
9e658976a6fdfbe031fc3452c69243dc66359f6a
|
/pythonExercise/four.py
|
b4fadd64058df0da705a77f23dd57f2e54e2cff1
|
[] |
no_license
|
zyyxydwl/Python-Learning
|
b2ed0f80121b284e5fb65cc212ccb84a0eb14cb6
|
6a5d36aa8805da647229fa747fa96452638d830e
|
refs/heads/master
| 2018-10-04T23:42:21.076668
| 2018-06-08T03:19:33
| 2018-06-08T03:19:33
| 107,348,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time :2017/12/2 9:57
#@Author :zhouyuyao
#@File :four.py
# 题目:输入某年某月某日,判断这一天是这一年的第几天?
# 程序分析:以3月5日为例,应该先把前两个月的加起来,然后再加上5天即本年的第几天,特殊情况,闰年且输入月份大于2时需考虑多加一天:
# 程序源代码:
# 实例(题目:输入某年某月某日,判断这一天是这一年的第几天?
# 程序分析:以3月5日为例,应该先把前两个月的加起来,然后再加上5天即本年的第几天,特殊情况,闰年且输入月份大于2时需考虑多加一天:
year = int(input('year:\n'))
month = int(input('month:\n'))
day = int(input('day:\n'))
months = (0,31,59,90,120,151,181,212,243,273,304,334)
if 0 < month <= 12:
sum = months[month - 1]
else:
print('data error')
sum += day
leap = 0
if (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):
leap = 1
if (leap == 1) and (month > 2):
sum += 1
print('it is the %dth day.' % sum)
|
[
"1330101907@qq.com"
] |
1330101907@qq.com
|
5215a084044fb39cce1d96120767a0cf0684d3fe
|
72fd9d49d89a9fc23ca896154fa54cba836c41ca
|
/tasks.py
|
0ea3f55768a7233a886cb6707e616c923561b8c6
|
[
"MIT"
] |
permissive
|
envobe/pydash
|
15066046fbc07458c29b6b33b1489aaadda5d074
|
6c0f778f6a2535397706aab68636485702ff3565
|
refs/heads/master
| 2023-01-05T18:14:09.923169
| 2020-10-29T02:16:34
| 2020-10-29T02:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,593
|
py
|
"""
This module provides the CLI interface for invoke tasks.
All tasks can be executed from this file's directory using:
$ inv <task>
Where <task> is a function defined below with the @task decorator.
"""
from __future__ import print_function
from functools import partial
from invoke import Exit, UnexpectedExit, run as _run, task
PACKAGE_SOURCE = "src/pydash"
TEST_TARGETS = "{} tests".format(PACKAGE_SOURCE)
LINT_TARGETS = "{} tasks.py".format(PACKAGE_SOURCE)
EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit)
# Set pyt=True to enable colored output when available.
run = partial(_run, pty=True)
@task
def black(ctx, quiet=False):
"""Autoformat code using black."""
run("black {}".format(LINT_TARGETS), hide=quiet)
@task
def isort(ctx, quiet=False):
"""Autoformat Python imports."""
run("isort {}".format(LINT_TARGETS), hide=quiet)
@task
def docformatter(ctx):
"""Autoformat docstrings using docformatter."""
run(
"docformatter -r {} "
"--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100".format(
LINT_TARGETS
)
)
@task
def fmt(ctx):
"""Autoformat code and docstrings."""
print("Running docformatter")
docformatter(ctx)
print("Running isort")
isort(ctx, quiet=True)
print("Running black")
black(ctx, quiet=True)
@task
def flake8(ctx):
"""Check code for PEP8 violations using flake8."""
run("flake8 --format=pylint {}".format(LINT_TARGETS))
@task
def pylint(ctx):
"""Check code for static errors using pylint."""
run("pylint {}".format(LINT_TARGETS))
@task
def lint(ctx):
"""Run linters."""
linters = {"flake8": flake8, "pylint": pylint}
failures = []
for name, linter in linters.items():
print("Running {}".format(name))
try:
linter(ctx)
except EXIT_EXCEPTIONS:
failures.append(name)
result = "FAILED"
else:
result = "PASSED"
print("{}\n".format(result))
if failures:
failed = ", ".join(failures)
raise Exit("ERROR: Linters that failed: {}".format(failed))
@task(help={"args": "Override default pytest arguments"})
def unit(ctx, args="--cov={} {}".format(PACKAGE_SOURCE, TEST_TARGETS)):
"""Run unit tests using pytest."""
run("pytest {}".format(args))
@task
def test(ctx):
"""Run linters and tests."""
print("Building package")
build(ctx)
print("Building docs")
docs(ctx)
print("Running unit tests")
unit(ctx)
@task
def docs(ctx, serve=False, bind="127.0.0.1", port=8000):
"""Build docs."""
run("rm -rf docs/_build")
run("sphinx-build -q -W -b html docs docs/_build/html")
if serve:
print(
"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...".format(
bind=bind, port=port
)
)
run(
"python -m http.server -b {bind} --directory docs/_build/html {port}".format(
bind=bind, port=port
),
hide=True,
)
@task
def build(ctx):
"""Build Python package."""
run("rm -rf dist build docs/_build")
run("python setup.py -q sdist bdist_wheel")
@task
def clean(ctx):
"""Remove temporary files related to development."""
run("find . -type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete")
run("rm -rf .tox .coverage .cache .pytest_cache **/.egg* **/*.egg* dist build")
@task(pre=[build])
def release(ctx):
"""Release Python package."""
run("twine upload dist/*")
|
[
"dgilland@gmail.com"
] |
dgilland@gmail.com
|
979286ffb46a102ab49df74f8383e498329ab818
|
e5eec1428da1d24d3e9b86f5723c51cd2ca636cd
|
/dynamic_programming/백준/가장큰정사각형_백준.py
|
4db92f7d4eee1a5199ea97cc10a52e85fa483fca
|
[] |
no_license
|
jamwomsoo/Algorithm_prac
|
3c36c381f59277721517d331a8f1640399d80c1d
|
8393f3cc2f950214c47f3cf0b2c1271791f115d0
|
refs/heads/master
| 2023-06-09T06:49:14.739255
| 2021-06-18T06:41:01
| 2021-06-18T06:41:01
| 325,227,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
# 백준 DP 난이도 골드 5
# 전형적인 dp문제
# dp[i][j]는 위, 왼쪽, 대각선 위 중 작은 것중에 하나를 자신과 더한 값
# -> 정사각형이라면 변의 길이가 모두 같아야하므로
# 1 1 1 1 1 1
# 1 1 1 -> 1 2 2
# 1 1 1 1 2 3
n, m = map(int, input().split())
arr = []
dp = [[0]*(m+1) for _ in range(n+1)]
for i in range(n):
arr.append(list(map(int, input())))
for j in range(m):
dp[i+1][j+1] = arr[i][j]
for i in range(n+1):
for j in range(m+1):
if dp[i][j] != 0:
dp[i][j] += min(dp[i-1][j-1], dp[i][j-1], dp[i-1][j])
res = 0
for row in dp:
res = max(res, max(row))
print(res**2)
|
[
"41579282+jamwomsoo@users.noreply.github.com"
] |
41579282+jamwomsoo@users.noreply.github.com
|
c5c561e0a70c1027a7c149cd7ffb4e4f5bb38d0f
|
9a9f31265c65bec0060271cd337580e7b4f3a7e9
|
/project/pokupka.py
|
81819a11dce3c4e08f65498d21c98238d72d5f98
|
[] |
no_license
|
AnatolyDomrachev/1kurs
|
efaabde4852172b61d3584237611fe19b9faa462
|
84ed0dceb670ec64c958bf1901636a02baf8f533
|
refs/heads/master
| 2023-02-19T21:42:53.286190
| 2021-01-19T07:41:15
| 2021-01-19T07:41:15
| 292,637,199
| 0
| 1
| null | 2020-09-16T02:29:14
| 2020-09-03T17:32:29
|
Python
|
UTF-8
|
Python
| false
| false
| 586
|
py
|
import magazin
import etc
magazin = magazin.Magazin('magazin.conf')
korzina = []
net_v_magazine = []
def pokupka(spisok):
for slovar in spisok:
est_v_mag = 'No'
for tovar in magazin.tovary:
if slovar['name'] == tovar['name']:
kupil = etc.beru(slovar, tovar)
korzina.append(kupil)
est_v_mag = 'Yes'
if est_v_mag == 'No':
print(slovar," нет в магазине")
print("Купили: ",korzina)
print()
print("Осталось: ",magazin.tovary)
|
[
"you@example.com"
] |
you@example.com
|
070d2ffacad8dbdcc16c98b9921ba3c9c2b5c0ca
|
3a21eac318260972a0f50aa6517bebd62d9634f3
|
/minimarket/settings.py
|
a6e40467c5437d3caa279c03850dc038c10d6db9
|
[] |
no_license
|
alviandk/ahp
|
adaf735c2ad14cfffee41eca37df5ff2452e8812
|
60764c12bb30cd134bbce53d62cda835503191d2
|
refs/heads/master
| 2016-09-05T19:16:02.907235
| 2015-03-27T09:42:43
| 2015-03-27T09:42:43
| 32,963,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
"""
Django settings for minimarket project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4z+05z^%!e=1p&*2uyz^_tel^5($l##z8f80t^@=60%*4z$#4m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ahp_aps',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'minimarket.urls'
WSGI_APPLICATION = 'minimarket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ahp',
'USER': 'root',
'PASSWORD':'',
'HOST':'localhost'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
STATIC_PATH,
)
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
[
"alviandk@gmail.com"
] |
alviandk@gmail.com
|
3d5664d5e503269e5557e6b98623f3cb0c80edbc
|
e211000d0d843fd944266892f49a7649c7e8918d
|
/abc/065/python/code_c.py
|
fc52911095e41bda42258728f4b59ac2a5a9d1b0
|
[] |
no_license
|
habroptilus/atcoder-src
|
63dfa16c6d4b80d1e36618377d3201888183281f
|
4cd54202037996b3f4a4442b1bd19d42d8a46db1
|
refs/heads/master
| 2020-04-26T07:14:38.322156
| 2019-06-08T14:44:26
| 2019-06-08T14:44:26
| 173,388,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import math
N, M = map(int, input().split())
if abs(N - M) == 1:
print(math.factorial(N) * math.factorial(M) % (10**9 + 7))
elif N == M:
print(math.factorial(N) * math.factorial(M) * 2 % (10**9 + 7))
else:
print(0)
|
[
"x7deed53bc9k8@softbank.ne.jp"
] |
x7deed53bc9k8@softbank.ne.jp
|
84e8e2a34adc392dbabc3541f6defc2c829bdb23
|
a40f749cb8e876f49890ab8fbbbbf2c07a0dd210
|
/examples/ad_manager/v201902/adjustment_service/update_traffic_adjustments.py
|
60a54bd299660da60a8ece16a64cfb2643030b0a
|
[
"Apache-2.0"
] |
permissive
|
ale180192/googleads-python-lib
|
77afff4c352ac3f342fc8b3922ec08873d6da5be
|
783a2d40a49956fb16ed73280708f6f9e322aa09
|
refs/heads/master
| 2020-08-10T15:20:06.051974
| 2019-10-11T07:06:58
| 2019-10-11T07:06:58
| 214,367,074
| 0
| 0
|
Apache-2.0
| 2019-10-11T07:04:21
| 2019-10-11T07:04:20
| null |
UTF-8
|
Python
| false
| false
| 3,009
|
py
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a historical adjustment of 110% for New Years Day traffic.
"""
from __future__ import print_function
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
ADJUSTMENT_ID = 'INSERT_ADJUSTMENT_ID_HERE'
def main(client, adjustment_id):
# Initialize the adjustment service.
adjustment_service = client.GetService('AdjustmentService', version='v201902')
# Create a statement to select a single traffic forecast adjustment by id.
statement = (
ad_manager.StatementBuilder(
version='v201902').Where('id = :id').WithBindVariable(
'id', adjustment_id))
# Get the forecast traffic adjustment.
response = adjustment_service.getTrafficAdjustmentsByStatement(
statement.ToStatement())
# Create a new historical adjustment segment for New Year's Day.
this_new_years = datetime.date(datetime.date.today().year, 12, 31)
next_new_years = datetime.date(datetime.date.today().year + 1, 12, 31)
new_years_segment = {
'basisType': 'HISTORICAL',
'historicalAdjustment': {
'targetDateRange': {
'startDate': next_new_years,
'endDate': next_new_years
},
'referenceDateRange': {
'startDate': this_new_years,
'endDate': this_new_years
},
'milliPercentMultiplier': 110000
}
}
if 'results' in response and len(response['results']):
# Update each local traffic adjustment.
updated_adjustments = []
for adjustment in response['results']:
adjustment['forecastAdjustmentSegments'].append(new_years_segment)
updated_adjustments.append(adjustment)
# Update traffic adjustments remotely.
adjustments = adjustment_service.updateTrafficAdjustments(
updated_adjustments)
# Display the results.
if adjustments:
for adjustment in adjustments:
print('Traffic forecast adjustment with id %d and %d segments was '
'created.' % (adjustment['id'],
len(adjustment['forecastAdjustmentSegments'])))
else:
print('No traffic adjustments were updated.')
else:
print('No traffic adjustments found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ADJUSTMENT_ID)
|
[
"davidwihl@users.noreply.github.com"
] |
davidwihl@users.noreply.github.com
|
2ffdda96a873aba49978b503a61bf9f7d102c380
|
aabe7008e0eb77617f1a76cddb98e4b17fd5ce27
|
/nni/algorithms/compression/v2/pytorch/base/pruner.py
|
730b9c749493d56b5adb0b6fab1fccd139408f77
|
[
"MIT"
] |
permissive
|
penghouwen/nni
|
a09a374a81be46fe246c425275585d5fe79404af
|
2e6a2fd2df0d5700cb028b25156bb535a3fc227a
|
refs/heads/master
| 2021-12-21T14:02:32.228973
| 2021-12-13T16:54:39
| 2021-12-13T16:54:39
| 435,926,123
| 1
| 0
|
MIT
| 2021-12-07T15:09:36
| 2021-12-07T15:09:35
| null |
UTF-8
|
Python
| false
| false
| 6,529
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
from .compressor import Compressor, LayerInfo
_logger = logging.getLogger(__name__)
__all__ = ['Pruner']
class PrunerModuleWrapper(Module):
def __init__(self, module: Module, module_name: str, config: Dict, pruner: Compressor):
"""
Wrap a module to enable data parallel, forward method customization and buffer registeration.
Parameters
----------
module
The module user wants to compress.
config
The configurations that users specify for compression.
module_name
The name of the module to compress, wrapper module shares same name.
pruner
The pruner used to calculate mask.
"""
super().__init__()
# origin layer information
self.module = module
self.name = module_name
# config and pruner
self.config = config
self.pruner = pruner
# register buffer for mask
self.register_buffer("weight_mask", torch.ones(self.module.weight.shape))
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.register_buffer("bias_mask", torch.ones(self.module.bias.shape))
else:
self.register_buffer("bias_mask", None)
def forward(self, *inputs):
# apply mask to weight, bias
self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)
if hasattr(self.module, 'bias') and self.module.bias is not None:
self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)
return self.module(*inputs)
class Pruner(Compressor):
"""
The abstract class for pruning algorithm. Inherit this class and implement the `_reset_tools` to customize a pruner.
"""
def reset(self, model: Optional[Module] = None, config_list: Optional[List[Dict]] = None):
super().reset(model=model, config_list=config_list)
def _wrap_modules(self, layer: LayerInfo, config: Dict):
"""
Create a wrapper module to replace the original one.
Parameters
----------
layer
The layer to instrument the mask.
config
The configuration for generating the mask.
"""
_logger.debug("Module detected to compress : %s.", layer.name)
wrapper = PrunerModuleWrapper(layer.module, layer.name, config, self)
assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
# move newly registered buffers to the same device of weight
wrapper.to(layer.module.weight.device)
return wrapper
def load_masks(self, masks: Dict[str, Dict[str, Tensor]]):
"""
Load an exist masks on the wrapper. You can train the model with an exist masks after load the masks.
Parameters
----------
masks
The masks dict with format {'op_name': {'weight': mask, 'bias': mask}}.
"""
wrappers = self.get_modules_wrapper()
for name, layer_mask in masks.items():
assert name in wrappers, '{} is not in wrappers of this pruner, can not apply the mask.'.format(name)
if layer_mask.get('weight') is not None:
assert hasattr(wrappers[name], 'weight_mask'), 'There is no attribute weight_mask in wrapper.'
setattr(wrappers[name], 'weight_mask', layer_mask.get('weight'))
if layer_mask.get('bias') is not None:
assert hasattr(wrappers[name], 'bias_mask'), 'There is no attribute bias_mask in wrapper.'
setattr(wrappers[name], 'bias_mask', layer_mask.get('bias'))
def compress(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]]]:
"""
Returns
-------
Tuple[Module, Dict]
Return the wrapped model and mask.
"""
return self.bound_model, {}
# NOTE: need refactor dim with supporting list
def show_pruned_weights(self, dim: int = 0):
"""
Log the simulated prune sparsity.
Parameters
----------
dim
The pruned dim.
"""
for _, wrapper in self.get_modules_wrapper().items():
weight_mask = wrapper.weight_mask
mask_size = weight_mask.size()
if len(mask_size) == 1:
index = torch.nonzero(weight_mask.abs() != 0, as_tuple=False).tolist()
else:
sum_idx = list(range(len(mask_size)))
sum_idx.remove(dim)
index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0, as_tuple=False).tolist()
_logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}')
def export_model(self, model_path: str, mask_path: Optional[str] = None):
"""
Export pruned model weights, masks and onnx model(optional)
Parameters
----------
model_path
Path to save pruned model state_dict. The weight and bias have already multiplied the masks.
mask_path
Path to save mask dict.
"""
assert self.bound_model is not None, 'The bound model reference has been cleared.'
assert model_path is not None, 'model_path must be specified.'
mask_dict = {}
self._unwrap_model()
for name, wrapper in self.get_modules_wrapper().items():
weight_mask = wrapper.weight_mask
bias_mask = wrapper.bias_mask
if weight_mask is not None:
mask_sum = weight_mask.sum().item()
mask_num = weight_mask.numel()
_logger.debug('Layer: %s Sparsity: %.4f', name, 1 - mask_sum / mask_num)
wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask)
if bias_mask is not None:
wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask)
# save mask to dict
mask_dict[name] = {"weight": weight_mask, "bias": bias_mask}
torch.save(self.bound_model.state_dict(), model_path)
_logger.info('Model state_dict saved to %s', model_path)
if mask_path is not None:
torch.save(mask_dict, mask_path)
_logger.info('Mask dict saved to %s', mask_path)
self._wrap_model()
|
[
"noreply@github.com"
] |
penghouwen.noreply@github.com
|
55303a17c04c8a0efbd951d112b3225f0d9cb8b7
|
48983b88ebd7a81bfeba7abd6f45d6462adc0385
|
/MOG/50.py
|
4f0d7d569452389c938806754ec6d5d1f0269de2
|
[] |
no_license
|
lozdan/oj
|
c6366f450bb6fed5afbaa5573c7091adffb4fa4f
|
79007879c5a3976da1e4713947312508adef2e89
|
refs/heads/master
| 2018-09-24T01:29:49.447076
| 2018-06-19T14:33:37
| 2018-06-19T14:33:37
| 109,335,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# author: Daniel Lozano
# source: MatcomOnlineGrader (MOG) ( http://matcomgrader.com )
# problem name: El Numero Decodificado
# problem url: http://matcomgrader.com/problem/50/el-numero-decodificado/
n = int(input())
count = 1
def digits_sum(num):
add = 0
while num != 0:
add += num % 10
num = num // 10
return add
while count != digits_sum(n - count):
count += 1
print(n - count)
|
[
"lozanodaniel02@gmail.com"
] |
lozanodaniel02@gmail.com
|
5399e23352d99fa49189fb77253df88e8639566e
|
eb82022c0cfc7c8747661cff9624ad2099fa1c3f
|
/dev_accounting_report/report/sales_delivery_out_rekap_xls.py
|
195552276cf5b3294ff059aa939ef9c184ff83a4
|
[] |
no_license
|
dadysuarsa/Odoo
|
8d026a066c390cc8f72805d2672212e61260c1cb
|
c9becd0c192fa239520ad3e1a11d81f70832eddf
|
refs/heads/master
| 2023-03-11T06:02:06.011575
| 2021-02-26T02:17:37
| 2021-02-26T02:17:37
| 276,346,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,299
|
py
|
import time
import xlwt, operator
from odoo.report import report_sxw
from report_engine_xls import report_xls
from odoo.tools.translate import _
from datetime import datetime
import pytz
class ReportStatus(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(ReportStatus, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'cr': cr,
'uid': uid,
'time': time,
})
_xs = report_xls.xls_styles
style_title = xlwt.easyxf(_xs['xls_title'])
style_blue = xlwt.easyxf(_xs['wrap'] + _xs['bold'] + _xs['fill_blue'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_blue_center = xlwt.easyxf(_xs['bold'] + _xs['fill_blue'] + _xs['center'] + _xs['borders_all'])
style_blue_center.alignment.middle = 1
style_yellow = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_yellow_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'], num_format_str=report_xls.decimal_format)
style_yellow_percent = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'], num_format_str=report_xls.percentage_format)
style_normal_bold = xlwt.easyxf(_xs['bold'] + _xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.decimal_format)
style_normal_date = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.date_format)
style_normal_center = xlwt.easyxf(_xs['wrap'] + _xs['top'] + _xs['center'] + _xs['borders_all'])
style_normal_italic = xlwt.easyxf(_xs['italic'] + _xs['borders_all'])
style_normal_percent = xlwt.easyxf(_xs['borders_all'], num_format_str=report_xls.percentage_format)
columns = [
['Tanggal Kirim', 13],
['No SJ/DO', 15],
['Satuan', 8],
['QTY Kirim', 17],
['Buyer', 45],
['No SC', 15],
['No Invoice', 15],
['Tgl Invoice', 12],
['Mata Uang', 10],
['Qty Invoice', 17],
['PPN VALAS', 17],
['PPN IDR', 17],
['DPP VALAS', 17],
['DPP IDR', 17],
['TOTAL VALAS', 17],
['TOTAL IDR', 17],
]
class sales_delivery_out_rekap_xls(report_xls):
def generate_xls_report(self, parser, _xs, data, obj, wb):
# import ipdb;ipdb.set_trace()
ws = wb.add_sheet(('Rekap Sales Detail Delivery'))
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
ws.set_horz_split_pos(7)
ws.write_merge(0, 0, 0, 5, 'REKAP SALES DELIVERY', style_title)
ws.write_merge(1, 1, 0, 3, (('Downloaded Date : %s') %(datetime.strptime(str(datetime.now(pytz.timezone('Asia/Jakarta')))[:18], "%Y-%m-%d %H:%M:%S").strftime("%d-%m-%Y %H:%M:%S"))), style_normal_date)
ws.write_merge(2, 2, 0, 3, 'Tanggal', style_blue_center)
ws.write_merge(2, 2, 4, 4, 'Divisi', style_blue_center)
ws.row(3).height_mismatch = True
ws.row(3).height = 20 * 28
ws.write_merge(3, 3, 0, 3, data['date_from'] + ' - ' + data['date_to'], style_normal_center)
ws.write_merge(3, 3, 4, 4, data['divisi'], style_normal_center)
ws.write_merge(5, 5, 0, 4, 'Delivery', style_blue_center)
ws.write_merge(5, 5, 5, 15, 'SO & Invoice', style_blue_center)
c_hdr_cell_style = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_right = xlwt.easyxf(_xs['bold'] + _xs['fill'] + _xs['borders_all'] + _xs['right'],
num_format_str=report_xls.decimal_format)
c_cell_style = xlwt.easyxf(_xs['borders_all'],
num_format_str=report_xls.decimal_format)
c_hdr_cell_style_grey = xlwt.easyxf(_xs['bold'] + _xs['fill_grey'] + _xs['borders_all'],
num_format_str=report_xls.decimal_format)
row_count = 6
col_count = 0
for column in columns:
ws.col(col_count).width = 256 * column[1]
ws.write(row_count, col_count, column[0], c_hdr_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_start = row_count
for lines in data['csv']:
for line in lines:
ws.write(row_count, col_count, line, c_cell_style)
col_count += 1
row_count += 1
col_count = 0
row_count += 1
ws.write_merge(row_count, row_count, 6, 8, 'GRAND TOTAL', c_hdr_cell_style_grey)
col_count = 9
while col_count <= 15:
sum_cell_start = xlwt.Utils.rowcol_to_cell(row_start, col_count)
sum_cell_end = xlwt.Utils.rowcol_to_cell(row_count - 2, col_count)
ws.write(row_count, col_count, xlwt.Formula('sum(' + sum_cell_start + ':' + sum_cell_end + ')'), c_hdr_cell_style_grey)
col_count += 1
pass
sales_delivery_out_rekap_xls('report.sales.delivery.out.rekap.xls','stock.picking','addons/dev_accounting_report/report/report_excel.mako', parser=ReportStatus, header=False)
|
[
"dads02_zetti@yahoo.com"
] |
dads02_zetti@yahoo.com
|
71240c639014721fc67dd2c7ff9f05d6c32de443
|
095a1c126ffaf703d923431ce5279a0dac384740
|
/timecard/views/auth_views.py
|
f01ca3273a967bcb926fb3d487993405f8ebdcb9
|
[] |
no_license
|
patpio/timecard
|
8bc5c6dbfc3877157dc8bfca7f9f5debd1e7b486
|
f4a2f2db69410a2b98d9815fbac5048ba8c47126
|
refs/heads/master
| 2023-03-22T15:51:06.658738
| 2021-01-12T22:42:44
| 2021-01-12T22:42:44
| 321,773,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,926
|
py
|
from flask import Blueprint, render_template, url_for, flash, request, abort
from flask_login import login_user, logout_user, login_required, current_user
from werkzeug.utils import redirect
from timecard import db
from ..models import User
from ..forms import SignUpForm, LoginForm
bp_auth = Blueprint('auth', __name__, url_prefix='/auth')
@bp_auth.route('/signup', methods=['GET', 'POST'])
@login_required
def signup():
if current_user != User.query.filter_by(username='admin').first():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('main.home'))
return render_template('signup.html', form=form)
@bp_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_username(form.username.data)
if user is not None and user.check_password(form.password.data):
login_user(user, form.remember_me.data)
flash(f'Logged in successfully as {user.username}', 'success')
return redirect(request.args.get('next') or url_for('main.home'))
return render_template('login.html', form=form)
@bp_auth.route('/logout', methods=['GET'])
def logout():
logout_user()
flash('Logged out successfully.', 'success')
return redirect(url_for('main.home'))
@bp_auth.route('/admin', methods=['GET', 'POST'])
def admin():
if User.query.all():
abort(403)
form = SignUpForm()
if form.validate_on_submit():
user = User(username='admin', email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
return render_template('signup.html', form=form)
|
[
"pat_pi@yahoo.com"
] |
pat_pi@yahoo.com
|
9f64b6fde8ce5918644f9e426104b18db422e7c5
|
f881c10e0d654da82218403dbd2adbdc606dc455
|
/apps/user_login/models.py
|
96fff17dd9d564cfa7fed5ca4f762658b6b74462
|
[] |
no_license
|
alialwahish/restfull_users
|
1732dceeddf4367d678ff6cdf2668dbc95463182
|
24d00811b2b46b33e5cf5c311367bd153344dc70
|
refs/heads/master
| 2020-03-17T15:37:13.562082
| 2018-05-16T20:05:21
| 2018-05-16T20:05:21
| 133,717,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from __future__ import unicode_literals
from django.db import models
class dojo(models.Model):
name=models.CharField(max_length=255)
city=models.CharField(max_length=255)
state=models.CharField(max_length=2)
class ninjas(models.Model):
    """A ninja (student) belonging to exactly one dojo."""
    first_name=models.CharField(max_length=255)
    last_name=models.CharField(max_length=255)
    # BUG FIX: on_delete must be one of Django's deletion handlers (a
    # callable such as models.CASCADE); the previous value `True` raises
    # TypeError as soon as a related dojo row is deleted.  CASCADE deletes
    # a dojo's ninjas along with it.
    dojo = models.ForeignKey(dojo, on_delete=models.CASCADE, related_name="ninjas")
|
[
"bayati.ali@icloud.com"
] |
bayati.ali@icloud.com
|
6e5bfeee02160589220079caf6d6e3e3b76ab585
|
629090051b975b5814b4b48e2cb2c784fa6705e4
|
/pgsmo/objects/sequence/sequence.py
|
58b4198fa17dee038f943fed6dd518f8db8054e6
|
[
"MIT"
] |
permissive
|
microsoft/pgtoolsservice
|
3d3597821c7cae1d216436d4f8143929e2c8a82a
|
24a048226f7f30c775bbcbab462d499a465be5da
|
refs/heads/master
| 2023-08-28T12:55:47.817628
| 2023-08-25T22:47:53
| 2023-08-25T22:47:53
| 80,681,087
| 68
| 35
|
NOASSERTION
| 2023-09-13T21:46:55
| 2017-02-02T01:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Optional, List, Dict
from smo.common.node_object import NodeObject, NodeLazyPropertyCollection, NodeCollection
from smo.common.scripting_mixins import ScriptableCreate, ScriptableDelete, ScriptableUpdate
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class Sequence(NodeObject, ScriptableCreate, ScriptableDelete, ScriptableUpdate):
    """A PostgreSQL sequence object, with CREATE/DROP/ALTER scripting support.

    Properties are fetched lazily from the server the first time one is read,
    via the templated ``properties.sql`` query.
    """
    TEMPLATE_ROOT = templating.get_template_root(__file__, 'templates')
    MACRO_ROOT = templating.get_template_root(__file__, 'macros')
    GLOBAL_MACRO_ROOT = templating.get_template_root(__file__, '../global_macros')

    @classmethod
    def _from_node_query(cls, server: 's.Server', parent: NodeObject, **kwargs) -> 'Sequence':
        """
        Creates a Sequence object from the result of a sequence node query
        :param server: Server that owns the sequence
        :param parent: Parent object of the sequence
        :param kwargs: Row from a sequence node query
        Kwargs:
            oid int: Object ID of the sequence
            name str: Name of the sequence
            schema str: Name of the schema that contains the sequence
            schemaoid int: OID of the containing schema
            is_system bool: Whether the sequence is a system object
        :return: A Sequence instance
        """
        seq = cls(server, parent, kwargs['name'])
        seq._oid = kwargs['oid']
        seq._schema = kwargs['schema']
        seq._scid = kwargs['schemaoid']
        seq._is_system = kwargs['is_system']
        return seq

    def __init__(self, server: 's.Server', parent: NodeObject, name: str):
        """Initialize the sequence and register its lazy property collection.

        NOTE(review): NodeObject's state is initialized by hand here rather
        than via super().__init__ -- presumably to substitute the
        sequence-specific property generator; confirm against NodeObject.
        """
        self._server = server
        self._parent: Optional['NodeObject'] = parent
        self._name: str = name
        self._oid: Optional[int] = None
        self._is_system: bool = False
        self._child_collections: Dict[str, NodeCollection] = {}
        self._property_collections: List[NodeLazyPropertyCollection] = []
        # Use _column_property_generator instead of _property_generator
        self._full_properties: NodeLazyPropertyCollection = self._register_property_collection(self._sequence_property_generator)
        ScriptableCreate.__init__(self, self._template_root(server), self._macro_root(), server.version)
        ScriptableDelete.__init__(self, self._template_root(server), self._macro_root(), server.version)
        ScriptableUpdate.__init__(self, self._template_root(server), self._macro_root(), server.version)
        self._schema: Optional[str] = None
        self._scid: Optional[int] = None
        self._def: Optional[dict] = None

    def _sequence_property_generator(self):
        """Fetch this sequence's full property row from the server.

        Renders the version-appropriate properties.sql template and returns
        the first result row (or None when the query matches nothing).
        """
        template_root = self._template_root(self._server)
        # Setup the parameters for the query
        template_vars = self.template_vars
        # Render and execute the template
        sql = templating.render_template(
            templating.get_template_path(template_root, 'properties.sql', self._server.version),
            self._macro_root(),
            **template_vars
        )
        cols, rows = self._server.connection.execute_dict(sql)
        if len(rows) > 0:
            return rows[0]

    # PROPERTIES ###########################################################
    @property
    def schema(self):
        # Name of the schema that contains this sequence.
        return self._schema

    @property
    def scid(self):
        # OID of the containing schema.
        return self._scid

    # -FULL OBJECT PROPERTIES ##############################################
    # Each of these reads lazily from the server-fetched property row,
    # returning '' when the row lacks the key.
    @property
    def cycled(self):
        return self._full_properties.get("cycled", "")

    @property
    def increment(self):
        return self._full_properties.get("increment", "")

    @property
    def start(self):
        return self._full_properties.get("start", "")

    @property
    def current_value(self):
        return self._full_properties.get("current_value", "")

    @property
    def minimum(self):
        return self._full_properties.get("minimum", "")

    @property
    def maximum(self):
        return self._full_properties.get("maximum", "")

    @property
    def cache(self):
        return self._full_properties.get("cache", "")

    @property
    def cascade(self):
        return self._full_properties.get("cascade", "")

    @property
    def seqowner(self):
        return self._full_properties.get("seqowner", "")

    @property
    def comment(self):
        return self._full_properties.get("comment", "")

    # IMPLEMENTATION DETAILS ###############################################
    @classmethod
    def _macro_root(cls) -> List[str]:
        return [cls.MACRO_ROOT, cls.GLOBAL_MACRO_ROOT]

    @classmethod
    def _template_root(cls, server: 's.Server') -> str:
        return cls.TEMPLATE_ROOT

    # HELPER METHODS ##################################################################
    def _create_query_data(self):
        """ Gives the data object for create query """
        return {"data": {
            "schema": self.schema,
            "name": self.name,
            "cycled": self.cycled,
            "increment": self.increment,
            "start": self.start,
            "current_value": self.current_value,
            "minimum": self.minimum,
            "maximum": self.maximum,
            "cache": self.cache
        }}

    def _update_query_data(self):
        """ Gives the data object for update query """
        # "data" carries the desired state; "o_data" the original identifiers
        # the ALTER template diffs against.
        return {
            "data": {
                "schema": self.schema,
                "name": self.name,
                "cycled": self.cycled,
                "increment": self.increment,
                "start": self.start,
                "current_value": self.current_value,
                "minimum": self.minimum,
                "maximum": self.maximum,
                "cache": self.cache
            },
            "o_data": {
                "schema": self.schema,
                "name": self.name,
                "seqowner": self.seqowner,
                "comment": self.comment
            }
        }

    def _delete_query_data(self):
        """ Gives the data object for delete query """
        return {
            "data": {
                "schema": self.schema,
                "name": self.name,
                "cycled": self.cycled,
                "increment": self.increment,
                "start": self.start,
                "current_value": self.current_value,
                "minimum": self.minimum,
                "maximum": self.maximum,
                "cache": self.cache
            },
            "cascade": self.cascade
        }
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
32a9080820f79c628edcd8a11fb345d860e9800a
|
28b1ed1359bd9539f9a15b64663652ec4eb3f284
|
/Week_12/matplotlib_example.py
|
301f43223dac2683ae8891d160b23ec806636397
|
[] |
no_license
|
achapkowski/Python_for_GIS_and_RS
|
5fb68cbe1d46f28487e2a41099cf42b942587afa
|
9b5d8da6b7bdbbfaa2f45b20d8704c317a86e785
|
refs/heads/master
| 2021-01-20T02:12:01.785780
| 2017-04-24T22:44:08
| 2017-04-24T22:44:08
| 89,385,947
| 1
| 0
| null | 2017-04-25T17:02:35
| 2017-04-25T17:02:35
| null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
import xlrd

# Reads three columns (frequency, GRR, TOA) from a fixed Excel workbook,
# then demonstrates matplotlib plotting; ends by showing a histogram of
# 100k normally distributed samples.
file_and_path = r"C:\Users\greg6750\Documents\IPython Notebooks\Python_for_GIS_and_RS\Week_12\SENZA_0_SUNAA_0_CORN.xlsx"
print("Reading Workbook")
workbook = xlrd.open_workbook(file_and_path)
worksheet = workbook.sheet_by_index(0)
freq = []
g = []
t = []
print("Creating Arrays")
for row in range(worksheet.nrows):
    if row>0:  # skip the header row
        #Frequency
        freq_cell = worksheet.cell(row,0)
        freq.append(freq_cell.value)
        GRR_cell = worksheet.cell(row,8)
        g.append(GRR_cell.value)
        TOA_cell = worksheet.cell(row,14)
        t.append(TOA_cell.value)
#For plotting, import matplotlib
from matplotlib import pyplot as plt
#import matplotlib.pyplot as plt
##Basic single plot
#plt.plot(freq, g)
#plt.show()
####Multiple plots
##plt.subplot(211)
###plt.figure(1)
##plt.plot(freq, g, 'b-o')
##plt.subplot(2, 1, 2)
##plt.plot(freq, t, 'r-o')
##plt.show()
##Typing numpy and matplotlib together
import numpy as np
gaussian = np.random.normal(0, 1, 100000)
plt.hist(gaussian, bins=100)
#print "Mean: %f Standard Deviation: %f" % (gaussian.mean(), gaussian.std())
plt.show()
|
[
"gbrunner@esri.com"
] |
gbrunner@esri.com
|
de63f5be05fb160c05847158071ed0f615ee7519
|
5922398212b6e113f416a54d37c2765d7d119bb0
|
/python/Binary Tree Serialization.py
|
50267542ef2fe76b1e4ff14b7fd8af5aabe2c3f3
|
[] |
no_license
|
CrazyCoder4Carrot/lintcode
|
e777f73e1fdfe3b8abc9dbfc07d26602bf614151
|
33dcd7f0e2d9bee58840a3370837cb2db82de1eb
|
refs/heads/master
| 2021-01-09T20:38:59.813198
| 2017-01-16T22:34:26
| 2017-01-16T22:34:26
| 60,287,619
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """Level-order (BFS) binary-tree serializer/deserializer.

    The wire format is a list of node values in breadth-first order where a
    missing child is recorded as the string '#'; trailing '#' markers are
    trimmed from the encoded form.
    """

    def serialize(self, root):
        """Encode the tree rooted at `root` as a level-order list.

        Returns [] for an empty tree.
        """
        if not root:
            return []
        encoded = []
        level = [root]
        while level:
            next_level = []
            for node in level:
                if node is None:
                    encoded.append('#')
                else:
                    encoded.append(node.val)
                    next_level.append(node.left)
                    next_level.append(node.right)
            level = next_level
        # Strip the run of trailing '#' placeholders; the root value at
        # index 0 is never '#', so something always remains.
        while encoded and encoded[-1] == '#':
            encoded.pop()
        return encoded

    def deserialize(self, data):
        """Rebuild a tree from a list produced by serialize().

        Returns None for empty input.  Entries past the end of the list, or
        equal to '#', become absent children.
        """
        if not data:
            return None

        def child_at(idx):
            # TreeNode for data[idx], or None when out of range / placeholder.
            if idx < len(data) and data[idx] != "#":
                return TreeNode(data[idx])
            return None

        root = TreeNode(data[0])
        level = [root]
        pos = 1
        while level:
            next_level = []
            for node in level:
                node.left = child_at(pos)
                pos += 1
                if node.left:
                    next_level.append(node.left)
                node.right = child_at(pos)
                pos += 1
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return root
|
[
"liuzhenbang1988@gmail.com"
] |
liuzhenbang1988@gmail.com
|
6dd8a41262ec87d8286028a969c7d6f182b407b1
|
68a52ad1df836c9f6d922515b2f896b6928ce6a0
|
/SafetyProductionSystem/weekworktask/migrations/0005_auto_20190225_1120.py
|
48e63ae27db7629622745b675ecbae9443f84283
|
[] |
no_license
|
Chuazhen0/SafetyProductionSystem
|
1141f845e04b032ff2a230c8def26066f061600c
|
442d5df3818d43aebb9830f2456c73018aae2acf
|
refs/heads/master
| 2020-05-20T12:47:46.365020
| 2019-05-08T09:56:01
| 2019-05-08T09:56:01
| 185,579,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
# Generated by Django 2.0.5 on 2019-02-25 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relaxes several weekworktask fields to
    null=True and adjusts verbose names.

    NOTE(review): on_delete=models.SET('systemsettings.MyUser') sets the FK
    to that literal string on delete, which is unlikely to be intended
    (SET_NULL or SET(<callable>) is typical) -- kept as-is because migration
    files must match their historical state.
    """

    dependencies = [
        ('weekworktask', '0004_auto_20190214_1558'),
    ]

    operations = [
        migrations.AlterField(
            model_name='weekworktask',
            name='created_at',
            field=models.DateTimeField(null=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=models.SET('systemsettings.MyUser'), related_name='周期检测任务创建人', to='systemsettings.MyUser', verbose_name='创建人'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='last_updated_at',
            field=models.DateTimeField(null=True, verbose_name='最后更新时间'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='last_updated_by',
            field=models.ForeignKey(null=True, on_delete=models.SET('systemsettings.MyUser'), related_name='周期检测任务最后更新人', to='systemsettings.MyUser', verbose_name='最后更新人'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='number',
            field=models.CharField(max_length=30, null=True, verbose_name='周期检测任务编码'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='task_start_time',
            field=models.DateField(null=True, verbose_name='计划开始时间'),
        ),
        migrations.AlterField(
            model_name='weekworktask',
            name='time_limit',
            field=models.CharField(max_length=10, null=True, verbose_name='完成时限'),
        ),
    ]
|
[
"Caohuazhenrn@163.com"
] |
Caohuazhenrn@163.com
|
0343fcf7a0ecf13d0cf6070e68aaf6fa43ea727c
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/stringMethods_20200707100259.py
|
8206fc5fa46489851f957ce0776ea9caca48fe98
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
def array(arr):
    """Print each element of `arr` in order.

    BUG FIX: the previous version contained an incomplete assignment
    (`b =`), which made the whole module a SyntaxError; the dangling
    statement and the unused `newArr` accumulator are removed.
    """
    for item in arr:
        print(item)


array(["[6,7,5]", "[1,8]"])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
2f8083908138a26a1c74293a8d1ff16a6f17f9a0
|
59b0e278e6b60582e5ff70be604fa8e955b9c697
|
/samples/demo_03.py
|
089d8b235b42b04de576b81be9d94cf2fe34bf85
|
[] |
no_license
|
qq329999897/P3P4_API_LineTestFrame
|
0a18b52feb37df301f1eb7a60a7a096ecd6709f9
|
71de1fc23dc976c5965865f4eb79dd78559c531d
|
refs/heads/master
| 2023-01-05T14:48:35.546705
| 2020-11-01T01:52:59
| 2020-11-01T01:52:59
| 308,985,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# @author: liusir
# @file: demo_03.py
# @time: 2020/10/18 10:30 AM
#
# Demo of a daily time-rotating log handler: logs three messages with
# 4-second pauses so consecutive records land in the same rotation window.
import logging
import time
from logging import handlers

logger = logging.getLogger('newdream')
logger.setLevel( logging.DEBUG )
formatter = logging.Formatter('%(asctime)s - %(message)s')
# Rotate test.log once per day, keeping at most 5 old files.
th = handlers.TimedRotatingFileHandler("test.log",when='D',interval=1,backupCount=5)
th.setFormatter( formatter )
th.setLevel( logging.DEBUG )
th.suffix = "%Y_%m_%d_%H_%M_%S.log"  # filename suffix pattern for rotated log files
logger.addHandler( th )
logger.info('hello1')
time.sleep(4)
logger.warning('hello2')
time.sleep(4)
logger.error('hello3')
|
[
"329999897@qq.com"
] |
329999897@qq.com
|
ddf082a606438d2b7b4eaa1c225de04615338997
|
4d99350a527a88110b7bdc7d6766fc32cf66f211
|
/OpenGLCffi/GLES3/EXT/AMD/performance_monitor.py
|
0ebc960e38c441205a44853cabf3e8f2a8205694
|
[
"MIT"
] |
permissive
|
cydenix/OpenGLCffi
|
e790ef67c2f6c9877badd5c38b7d58961c8739cd
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
refs/heads/master
| 2021-01-11T07:31:10.591188
| 2017-04-17T11:04:55
| 2017-04-17T11:04:55
| 80,312,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
from OpenGLCffi.GLES3 import params
# cffi stubs for the AMD_performance_monitor GLES3 extension; the @params
# decorator binds each Python signature to the native entry point, so the
# bodies are intentionally empty.
@params(api='gles3', prms=['numGroups', 'groupsSize', 'groups'])
def glGetPerfMonitorGroupsAMD(numGroups, groupsSize, groups):
	pass

@params(api='gles3', prms=['group', 'numCounters', 'maxActiveCounters', 'counterSize', 'counters'])
def glGetPerfMonitorCountersAMD(group, numCounters, maxActiveCounters, counterSize, counters):
	pass

@params(api='gles3', prms=['group', 'bufSize', 'length', 'groupString'])
def glGetPerfMonitorGroupStringAMD(group, bufSize, length, groupString):
	pass

@params(api='gles3', prms=['group', 'counter', 'bufSize', 'length', 'counterString'])
def glGetPerfMonitorCounterStringAMD(group, counter, bufSize, length, counterString):
	pass

# NOTE(review): prms lists 'data' but the signature omits it -- presumably
# an output parameter filled in by the decorator; confirm against @params.
@params(api='gles3', prms=['group', 'counter', 'pname', 'data'])
def glGetPerfMonitorCounterInfoAMD(group, counter, pname):
	pass

@params(api='gles3', prms=['n', 'monitors'])
def glGenPerfMonitorsAMD(n, monitors):
	pass

@params(api='gles3', prms=['n', 'monitors'])
def glDeletePerfMonitorsAMD(n, monitors):
	pass

@params(api='gles3', prms=['monitor', 'enable', 'group', 'numCounters', 'counterList'])
def glSelectPerfMonitorCountersAMD(monitor, enable, group, numCounters, counterList):
	pass

@params(api='gles3', prms=['monitor'])
def glBeginPerfMonitorAMD(monitor):
	pass

@params(api='gles3', prms=['monitor'])
def glEndPerfMonitorAMD(monitor):
	pass

# NOTE(review): as above, 'data' appears in prms but not in the signature.
@params(api='gles3', prms=['monitor', 'pname', 'dataSize', 'data', 'bytesWritten'])
def glGetPerfMonitorCounterDataAMD(monitor, pname, dataSize, bytesWritten):
	pass
|
[
"cdenizol@gmail.com"
] |
cdenizol@gmail.com
|
410b9989382a4f8aa1248d40affccc169854c326
|
c8705e8f8797ebdd6c76e8aa697d7ed9da46f3c3
|
/colorpicker/models.py
|
177cab327cb231f450d3e90323b1f4c21f356eb4
|
[] |
no_license
|
zokis/django-colorpicker
|
1cedcb511011c504165a687c19848140f3656562
|
f391341969a29e7de1dc1887ef9e9fadd8669216
|
refs/heads/master
| 2020-12-30T18:29:58.990911
| 2013-02-15T13:30:54
| 2013-02-15T13:30:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
# -*- coding: utf-8 -*-
from django.core.validators import ValidationError
from django.db.models import CharField
from widgets import ColorPickerWidget
from forms import ColorField as ColorFormField
from utils import (is_valid_alpha_hex, is_valid_hex, is_valid_rgb,
is_valid_rgba, rgba_to_alpha_hex, rgb_to_hex, hex_to_rgb)
FORMAT_RGB = 'rgb'
FORMAT_HEX = 'hex'
FORMAT_RGBA = 'rgba'
FORMAT_HEXA = 'hexa'
# All supported color formats.  BUG FIX: the tuple previously listed
# FORMAT_RGB twice and omitted FORMAT_RGBA (a copy/paste slip).
FORMATS = (FORMAT_RGB, FORMAT_HEX, FORMAT_RGBA, FORMAT_HEXA)
class ColorField(CharField):
    """CharField that stores a color in RGB, RGBA, #hex or #hexa notation."""

    def __init__(self, format='hex', *args, **kwargs):
        # 25 chars is enough for the longest form, e.g. 'rgba(255,255,255,0.99)'.
        kwargs['max_length'] = 25
        self.format = format
        super(ColorField, self).__init__(*args, **kwargs)

    def formfield(self, *args, **kwargs):
        """Render with the color-picker widget and matching form field."""
        kwargs['widget'] = ColorPickerWidget(format=self.format)
        kwargs['form_class'] = ColorFormField
        return super(ColorField, self).formfield(*args, **kwargs)

    def clean(self, value, model_instance):
        '''
        Validates colors in the RGB, RGBA, #RRGGBB and #RRGGBBAA formats.
        '''
        import re
        invalid = 'Cor %s inválida' % self.format.upper()
        value = value.replace(' ', '')
        if self.format == FORMAT_RGB:
            regex = re.compile("rgb\(\d{1,3},\d{1,3},\d{1,3}\)",
                               re.IGNORECASE | re.UNICODE)
            is_valid = is_valid_rgb
        elif self.format == FORMAT_RGBA:
            regex = re.compile("rgba\((?P<r>\d{1,3}),(?P<g>\d{1,3}),(?P<b>\d{1,3}),(?P<a>(0\.\d+)|\d)\)",
                               re.IGNORECASE | re.UNICODE)
            is_valid = is_valid_rgba
        # BUG FIX: this branch previously compared the *builtin* `format`
        # function against FORMAT_HEXA (`elif format == FORMAT_HEXA:`), so it
        # was unreachable and alpha-hex values fell through to the plain hex
        # validator.
        elif self.format == FORMAT_HEXA:
            regex = re.compile("#([A-Fa-f\d]{8}|[A-Fa-f\d]{6}|[A-Fa-f\d]{3})",
                               re.IGNORECASE | re.UNICODE)
            is_valid = is_valid_alpha_hex
        else:
            regex = re.compile("#([A-Fa-f\d]{8}|[A-Fa-f\d]{6}|[A-Fa-f\d]{3})",
                               re.IGNORECASE | re.UNICODE)
            is_valid = is_valid_hex
        # Exactly one match of the pattern, and the format-specific
        # validator must also accept the value.
        if len(regex.findall(value)) != 1:
            raise ValidationError(invalid)
        if not is_valid(value):
            raise ValidationError(invalid)
        return super(ColorField, self).clean(value, model_instance)
# Teach South (the legacy Django migration tool) how to introspect the
# custom field; skip silently when South is not installed.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^colorpicker\.models\.ColorField"])
except ImportError:
    pass
|
[
"marcelo.zokis@gmail.com"
] |
marcelo.zokis@gmail.com
|
10bb32abb023447157a766575d4476a86ed88ecf
|
3863c069014bccc095e66d956af7900249ebf784
|
/ir/bm25_ranker.py
|
e7fec5bef3cb810a935fcf5ddbd098b108bb84e2
|
[] |
no_license
|
asvishen/Factoid-Question-Answering
|
28403c3ef60b36b44e6efe3efdad74524a32a200
|
0516aebf5f80c6cfa51475ae2c32dea0ef325719
|
refs/heads/master
| 2020-12-03T02:01:29.614281
| 2013-05-31T20:57:18
| 2013-05-31T20:57:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,504
|
py
|
# -*- coding: utf-8 -*-
'''
Rank candidate texts by their similarity to the query.
@author: gavin hackeling
'''
from __future__ import division
from nltk import word_tokenize
from math import log
from collections import defaultdict
class BM25_calc:
    """Okapi BM25 scorer: ranks a document collection against one query."""

    def __init__(self, query, c):
        # Standard BM25 free parameters.
        self.k1 = 1.2
        self.b = 0.75
        self.query = [term.lower() for term in query]
        self.original_collection = c
        lowered = [doc.lower() for doc in c]
        self.collection = [word_tokenize(doc) for doc in lowered]
        self.avg_len = sum(len(doc) for doc in self.collection) / len(c)
        self.freq_counter = defaultdict(int)

    def get_num_docs_containing(self, token):
        """Number of documents in the collection that contain `token`."""
        return sum(1 for doc in self.collection if token in doc)

    def get_tf(self, token, document):
        """Term frequency of `token` within `document` (a token list)."""
        tally = defaultdict(int)
        for term in document:
            tally[term] += 1
        return tally[token]

    def get_idf(self, token):
        """Smoothed inverse document frequency, floored at 0.5."""
        total = len(self.collection)
        containing = self.get_num_docs_containing(token)
        idf = log((total - containing + 0.5) / (containing + 0.5))
        return max(.5, idf)

    def score(self, document):
        """BM25 relevance of `document` against the stored query.

        Negative per-term contributions are clamped to zero.
        """
        relevance = 0
        for token in self.query:
            tf = self.get_tf(token, document)
            idf = self.get_idf(token)
            length_norm = 1 - self.b + self.b * (len(document) / self.avg_len)
            contribution = idf * (tf * (self.k1 + 1) / (tf + self.k1 * length_norm))
            relevance += max(contribution, 0)
        return relevance

    def rank(self):
        """Return the original documents ordered by descending BM25 score."""
        scored = [(self.score(doc), doc, idx)
                  for idx, doc in enumerate(self.collection)]
        scored.sort(key=lambda entry: entry[0], reverse=True)
        return [self.original_collection[entry[2]] for entry in scored]
return originals
if __name__ == '__main__':
    # Smoke test: tokenize a question and rank a small sentence collection
    # by BM25 relevance (requires nltk's tokenizer data to be installed).
    query = 'did the Ravens win the Super Bowl?'
    query = word_tokenize(query)
    collection = [
        'The Baltimore Ravens would later win Super Bowl XLVII in 2013 against the San Francisco 49ers.',
        "Ray Lewis was a member of both Ravens' Super Bowl wins.",
        '75 Jonathan Ogden elected in 2013 played for Ravens 1996–2007 won Super Bowl XXXV Retired numbers.',
        'The Ravens officially have no retired numbers.',
        "Michael Crabtree never had a chance to make a catch in the end zone on what turned out to be the San Francisco 49ers' last play of Super Bowl XLVII a 3431 loss to ",
        'Ravens quarterback Trent Dilfer and wide receiver ',
        ' The Ravens became the third wildcard team to win the Super Bowl.',
        'The Oakland Raiders did it in 1981 and ',
        'The Baltimore Ravens have appeared in two Super Bowls and won both of them.',
        'Here are the results victories in bold Super Bowl XXXV 12801 Baltimore 34 New ',
        'the and'
    ]
    #collection = [
    #'The Oakland Raiders did it in 1981 and ',
    #]
    bm25_calc = BM25_calc(query, collection)
    ranked = bm25_calc.rank()
|
[
"gavinhackeling@gmail.com"
] |
gavinhackeling@gmail.com
|
c6a6239b372a7a6543add1f815a61de4f4418db6
|
060b39ef80a00090732b5362427b1f96eda81d09
|
/DataProcess/run_feature_extraction.py
|
2560d4c88bb1ace582393d880fb054727ddd45c1
|
[] |
no_license
|
hphp/Kaggle
|
73a7fd4b19b4f1cf6616f72a4309d4769c8a8535
|
b27422f8b913c47f484e3abebb1f7aaf6607c6a4
|
refs/heads/master
| 2016-09-09T22:36:21.945873
| 2013-12-10T04:05:00
| 2013-12-10T04:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
#!/usr/bin/python
'''
written by hp_carrot
2013-11-26
add resized_pixel_fe()
'''
import os
def convolution_feature_extraction():
    """Run feature_extraction.py over the test set in 1000-image batches.

    Python 2 module (print statements).  Note the hard-coded start at batch
    200 -- presumably resuming a previously interrupted run.
    """
    piclist = os.listdir("/home/hphp/Documents/data/Kaggle/DogVsCatData/test1/")
    t_range = len(piclist)
    period = 1000
    total = int(t_range/period)
    print total
    for rr in range(200,total):
        start = rr * 1000
        end = min((rr+1)*1000,t_range)
        cmd = "python feature_extraction.py " + str(start) + " " + str(end)
        os.system(cmd)
def color_HSV_feature_extraction(DataHome,img_data_dir,data_csv_file):
    """Extract HSV color features for every image in 1000-image batches.

    Spawns DogVsCat_get_hsv_feature.py once per batch via os.system.
    """
    piclist = os.listdir(DataHome + img_data_dir)
    t_range = len(piclist)
    period = 1000
    total = int(t_range/period) + 1
    print total
    for rr in range(total):
        start = rr * 1000
        end = min((rr+1)*1000,t_range)
        if start >= end :
            break
        cmd = "python DogVsCat_get_hsv_feature.py " + str(start) + " " + str(end) + " " + img_data_dir + " " + data_csv_file
        print cmd
        os.system(cmd)
def resized_pixel_fe(DataHome, src_img_route, train_feature_filename, valid_feature_filename):
    """Extract resized-pixel features in 1000-image batches.

    Spawns DogVsCat_patchtrain_feature.py once per batch; near-duplicate of
    the other *_feature_extraction helpers (see g_resized_pixel_fe for the
    generalized version).
    """
    piclist = os.listdir(DataHome + src_img_route)
    t_range = len(piclist)
    period = 1000
    total = int(t_range/period) + 1
    print total
    for rr in range(total):
        start = rr * 1000
        end = min((rr+1)*1000,t_range)
        if start >= end :
            break
        cmd = "python DogVsCat_patchtrain_feature.py " + DataHome + " " + src_img_route + " " + train_feature_filename + " " + valid_feature_filename + " " + str(start) + " " + str(end)
        print cmd
        os.system(cmd)
def g_resized_pixel_fe(cmd_part1, t_range, period):
    """Generic batch runner: append 'start end' to cmd_part1 per batch.

    Covers t_range items in windows of `period`, invoking the command once
    per window via os.system.
    """
    total = int(t_range/period) + 1
    print total
    for rr in range(total):
        start = rr * period
        end = min((rr+1)*period, t_range)
        if start >= end :
            break
        cmd = cmd_part1 + " " + str(start) + " " + str(end)
        print cmd
        os.system(cmd)
# Entry point: run pixel feature extraction over the CIFAR-10 training set
# in 1000-image batches.  Older DogVsCat invocations kept below, commented.
piclist = os.listdir("/home/hphp/Documents/data/Kaggle/CIFAR-10/train/")
t_range = len(piclist)
g_resized_pixel_fe("python feature_extraction_pixel_frm_img.py /home/hphp/Documents/data/Kaggle/CIFAR-10/ train/ train_feature_pixel_v.csv 32 32", t_range, 1000)
#DogVsCat_DataHome = "/home/hphp/Documents/data/Kaggle/DogVsCatData/"
#resized_pixel_fe(DogVsCat_DataHome, "train/", DogVsCat_DataHome+"DogVsCat_train_feature_1w.csv", DogVsCat_DataHome+"DogVsCat_valid_feature_1w.csv")
#color_HSV_feature_extraction(DogVsCat_DataHome,"test1/","test.csv")
#color_HSV_feature_extraction(DogVsCat_DataHome,"train/","train.csv")
|
[
"hphpcarrot@gmail.com"
] |
hphpcarrot@gmail.com
|
e55b5369d0cbee68194ee983acf794ce6412cbd6
|
d8a511b5b871740c13e41079657421ad08e26978
|
/wagtailmedia/signal_handlers.py
|
80f6db0c29f016c9c6676cd1ea900192b6a38555
|
[
"BSD-3-Clause"
] |
permissive
|
evanwinter/wagtailmedia
|
0be38630e9f1375506ba3a5b6b10eee72247dcd8
|
e5cc000d6741f78ee44834c1469b64da40a164ed
|
refs/heads/master
| 2020-04-14T18:29:34.129054
| 2019-01-03T20:44:32
| 2019-01-03T20:44:32
| 161,226,229
| 0
| 1
|
NOASSERTION
| 2018-12-10T19:30:39
| 2018-12-10T19:30:39
| null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_delete, pre_save
from wagtailmedia.models import get_media_model
def post_delete_file_cleanup(instance, **kwargs):
    """Delete the media file from storage once the DB delete commits.

    Deferred via transaction.on_commit so a rolled-back delete does not
    lose the file.
    """
    # Pass false so FileField doesn't save the model.
    transaction.on_commit(lambda: instance.file.delete(False))
def register_signal_handlers():
    """Connect the post-delete cleanup to the configured Media model."""
    Media = get_media_model()
    post_delete.connect(post_delete_file_cleanup, sender=Media)
|
[
"dan.s.graham@gmail.com"
] |
dan.s.graham@gmail.com
|
aad9d93b67d623651393d22114af6f64db39f48d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/202.py
|
24acce18318d21c58ecc931e58583447ad9cae57
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
from bisect import bisect_left
from copy import copy
def get_results(N, K):
    """Return (deceitful-war points, war points) for Naomi's blocks N vs Ken's K.

    Python 2 code (xrange).  Sorts both lists in place.  The first count is
    the greedy lower-bound matching (two-pointer over sorted lists); the
    second simulates honest play, with Ken answering each of Naomi's blocks
    using bisect to pick his smallest winning block.
    """
    N.sort()
    K.sort()
    L = len(N)
    dwar_points = 0
    ken_start = 0
    for i in xrange(L):
        if N[i] > K[ken_start]:
            dwar_points += 1
            ken_start += 1
    war_points = 0
    # Walk Naomi's blocks from largest to smallest, removing the pair
    # played each round (mutates N and K).
    for i in xrange(len(N)-1,-1,-1):
        ken_pos = bisect_left(K, N[i])
        if ken_pos == len(K):
            # Ken has no heavier block; he sacrifices his lightest.
            ken_choice = 0
        else:
            ken_choice = ken_pos
        if N[i] > K[ken_choice]:
            war_points += 1
        del N[i]
        del K[ken_choice]
    return (dwar_points, war_points)
def solve(in_name, out_name):
    """Read a Code Jam input file and write one 'Case #i: d w' line per case.

    Each case is three lines: the count, Naomi's weights, Ken's weights.
    """
    fin = open(in_name, 'r')
    L = fin.readlines()
    fin.close()
    T = int(L[0])
    k = 1
    res = []
    for i in xrange(T):
        n = int(L[k])
        N = map(float, L[k+1].strip().split())
        K = map(float, L[k+2].strip().split())
        k += 3
        results = get_results(N, K)
        res.append('Case #' + str(i+1) + ': ' + str(results[0]) + ' ' + str(results[1]) + '\n')
    fout = open(out_name, 'w')
    fout.writelines(res)
    fout.close()
    return
# Run against the large input; smaller datasets kept commented for reruns.
#solve('D-test.in', 'D-test.out')
#solve('D-small-attempt0.in', 'D-small-attempt0.out')
solve('D-large.in', 'D-large.out')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
c163e2cb577dfcda6f3358d435861abcf43a11e1
|
ab32e6384b7c679a327a4bf1df6dd24c058b78a5
|
/cms/base.py
|
f994beec56933f28b7297703bb7637ad770aaac1
|
[] |
no_license
|
webmaxdev0110/digi-django
|
ad2497791d6d3b6aa74eb697dd7eef324ebb5846
|
4cd52c07bb64e9d9381a957323d277489a02181a
|
refs/heads/master
| 2020-03-23T13:37:12.600565
| 2017-07-10T10:23:15
| 2017-07-10T10:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
"""
Our base representation for different types of page models.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms.admin import item_editor
from feincms.models import create_base_model
class SimplePageManager(models.Manager):
    """Manager adding a published() shortcut for SimplePage subclasses."""

    def published(self):
        """
        Filter by pages that are marked as active/published.
        """
        return self.filter(published=True)


# Alias: simple pages are edited with FeinCMS's standard item editor.
SimplePageAdmin = item_editor.ItemEditor
class SimplePage(create_base_model(inherit_from=models.Model)):
    """
    A simple wrapper on the feincms base model with some common fields
    set for use in implemented types.
    """
    # Whether the page is live; SimplePageManager.published() filters on it.
    published = models.BooleanField(_('published'), default=False)
    title = models.CharField(_('title'), max_length=100,
        help_text=_('This is used for the generated navigation too.'))

    class Meta(object):
        abstract = True
        verbose_name = _('simple page')
        verbose_name_plural = _('simple pages')

    objects = SimplePageManager()

    def __str__(self):
        return self.title
|
[
"webmax0110.dev@gmail.com"
] |
webmax0110.dev@gmail.com
|
109022396ac7b45bbcd47850815b3f7da8cc38d3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/1519.py
|
648f239ba0d124b8971fef4c06e15947f1995be6
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
from bisect import insort, bisect_left, bisect_right
def palin(x):
    """Return True when x's decimal representation is a palindrome."""
    digits = str(x)
    return digits == digits[::-1]


# Sorted list of "fair and square" numbers, filled in by gen().
arr = []


def gen(N):
    """Populate `arr` with every square <= N whose root and square are palindromes."""
    limit = int(N ** .5)
    for base in range(1, limit + 1):
        square = base * base
        if palin(base) and palin(square) and 1 <= square <= N:
            insort(arr, square)


def solve(A, B):
    """Count precomputed fair-and-square numbers inside [A, B]."""
    return bisect_right(arr, B) - bisect_left(arr, A)
if __name__ == '__main__':
    # Python 2 entry point (raw_input / print statement): precompute all
    # fair-and-square numbers up to 10^14, then answer each [A, B] query.
    gen(10**14)
    T = int(raw_input())
    for case in range(1,T+1):
        A, B = map(int, raw_input().split())
        print "Case #{}: {}".format(case, solve(A, B))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
11071e8ceadb00b104d22972d509236a54c3253f
|
e839a2fdd40effd2cea9c8bbea1629e7a5b453dc
|
/appinit_backend/app/lib/files/remove.py
|
da38d92a6b1876c25ec6ed96778bacaa81dab65b
|
[
"MIT"
] |
permissive
|
lost-osiris/webplatform-backend
|
bfb48979fabd0d04104b3b07bd9b7cad2d6cfce6
|
8b1b7c94dbc5314450fbe75b8ca4625d39608d4a
|
refs/heads/master
| 2021-07-06T00:12:32.257988
| 2019-08-21T08:45:21
| 2019-08-21T08:45:21
| 177,480,021
| 0
| 0
|
MIT
| 2020-09-04T09:08:18
| 2019-03-24T23:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 601
|
py
|
from lib.imports.default import *
import lib.files.meta as meta
action = "remove"
def call(**kwargs):
    """Remove one file ("id") or a list of files ("ids") from GridFS.

    A file is only deleted when its metadata reports it is not attached to
    anything.  Always returns True.
    """
    if "id" in kwargs:
        file_obj = meta.call(id=kwargs['id'])
        if not file_obj['isAttached']:
            __remove(kwargs['id'])
    elif "ids" in kwargs and isinstance(kwargs["ids"], list):
        for fid in kwargs["ids"]:
            file_obj = meta.call(id=fid)
            # BUG FIX: this previously subscripted the meta.call *function*
            # (`meta.call['isAttached']`), raising TypeError on every batch
            # removal; the attachment flag lives on the fetched metadata.
            if not file_obj['isAttached']:
                __remove(fid)
    return True
def __remove(file_id):
    """Delete the GridFS blob with the given id from the "files" database."""
    import gridfs
    manager = Manager()
    db = manager.db("files")
    fs = gridfs.GridFS(db)
    fs.delete(ObjectId(file_id))
|
[
"mowens@redhat.com"
] |
mowens@redhat.com
|
2a0e99bf5aef26fa2fcfc7edcc980199c3190c6c
|
d10a8314da8ef71d2e63c0ecfbdc60a1cf2d78e2
|
/calculate_next/lib/template/parser/template/formats/world.py
|
d6a2422d3eba9d40fa266c3eeea24f61247cd7da
|
[] |
no_license
|
Yuego/calculate-experiments
|
d005376dc6fb0002ac0016beb878d7274707a39e
|
40cd601bfea604c887c213d70303938367b7b3b1
|
refs/heads/master
| 2021-01-22T23:58:04.817503
| 2013-11-22T10:19:48
| 2013-11-22T10:19:48
| 14,140,182
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#coding: utf-8
from __future__ import unicode_literals, absolute_import
from pyparsing import *
from calculate_next.lib.template.parser.rules import slotted_package_atom
from calculate_next.lib.template.parser.template.parser import FormatParser
class WorldFormatParser(FormatParser):
    """Parser/serializer for Portage 'world' files (one package atom per line,
    optionally prefixed with one of !^+- and interleaved with # comments)."""
    comment = '#'

    @classmethod
    def _value_atom(cls, s, l, t):
        # Each parsed atom becomes a {atom: None} mapping entry.
        return {t[0].strip(): None}

    def get_syntax(self):
        """Build the pyparsing grammar: zero or more comments or atoms."""
        _command = Word('!^+-', exact=1)
        comment = self.get_comment_rules()
        value = Combine(Optional(_command) + slotted_package_atom).setParseAction(self._value_atom)
        syntax = ZeroOrMore(comment | value)
        return syntax

    def collapse_tree(self, d, depth=0):
        """Serialize the parsed tree back to text.

        Re-interleaves comments at their original positions (d['__comments']
        maps line index -> comment text); any leftover comments are appended
        at the end.
        """
        comments = d.pop('__comments')
        result = []
        idx = 0
        for k, v in d.items():
            while idx in comments:
                result.extend([comments.pop(idx), '\n'])
                idx += 1
            idx += 1
            result.extend([k, '\n'])
        for comment in comments.values():
            result.extend([comment, '\n'])
        return ''.join(result)
|
[
"root@proscript.ru"
] |
root@proscript.ru
|
1a848ab9ed33cb6c5cfa7e042a832a8136ea3894
|
2d0e5f5c6dd2e44ecf4166c81caff17f39c0c638
|
/매일 프로그래밍/20201123/solution.py
|
8c911b44122c8a89e11407f4557f4b7a1e374f97
|
[] |
no_license
|
limkeunhyeok/daily-coding
|
17d120a9f499189be3250a501e73e312802508a9
|
960dad7758c99619da0a33c899d5d4d8d8ff524d
|
refs/heads/master
| 2022-04-30T22:32:54.173400
| 2022-04-16T13:15:49
| 2022-04-16T13:15:49
| 174,705,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# 단일 연결 리스트(singly linked list)가 주어지면 리스트의 중간 노드 값을 프린트 하시오. (제일 효율적인 방법으로)
class Node:
    """One element of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload carried by this node
        self.next = None   # following node; None marks the tail


class LinkedList:
    """Minimal singly linked list supporting only tail appends."""

    def __init__(self):
        self.head = None

    def append(self, data):
        """Attach a new node holding *data* at the end of the list."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
def findMiddle(List):
    """Print the value of the middle node of a singly linked list.

    Prints -1 for an empty list. For even-length lists the second of
    the two middle nodes is printed (node at index size // 2), matching
    the original behavior.

    Improvement: the original walked the list twice (once to count,
    once to reach the middle); the exercise asks for the most efficient
    method, so this uses the single-pass slow/fast pointer technique.
    """
    if List.head is None:
        print('-1')
        return
    slow = List.head
    fast = List.head
    # fast advances two nodes per step, slow one; when fast hits the
    # tail, slow sits at index size // 2.
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    print(slow.data)
# Demo: build the list 1 -> 2 -> 3 -> 4 -> 5 and print its middle value.
l = LinkedList()
l.append(1)
l.append(2)
l.append(3)
l.append(4)
l.append(5)
findMiddle(l) # 3
|
[
"gorloom6425@naver.com"
] |
gorloom6425@naver.com
|
65afa1f2ec2766360a434863c0492058e97d2aeb
|
2a4ad073755ff447926e44b7c2e0b56b5ded37d2
|
/algorithm/sort algorithm/merge_sort.py
|
92e56c286905eec9488110d807ad37f09c0b8983
|
[] |
no_license
|
wcb2213/Learning_notes
|
3a9b3fdb7df5c6844a9031db8dd7e9dd858e093c
|
d481e1754c15c91557027bee872f4d97da3c0fca
|
refs/heads/master
| 2021-07-06T15:54:56.199655
| 2020-09-04T14:05:50
| 2020-09-04T14:05:50
| 174,832,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
py
|
#!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/6/14
"""归并排序"""
# 时间复杂度(平均) 时间复杂度(最坏) 时间复杂度(最好) 空间复杂度 稳定性 复杂性
# O(nlog2n)O(nlog2n) O(nlog2n)O(nlog2n) O(nlog2n)O(nlog2n) O(n)O(n) 稳定 较复杂
def MergeSort(lists):
    """Return a sorted copy of *lists* using top-down merge sort.

    O(n log n) time, O(n) extra space, stable.
    """
    if len(lists) < 2:
        return lists
    mid = len(lists) // 2
    return Merge(MergeSort(lists[:mid]), MergeSort(lists[mid:]))


def Merge(left, right):
    """Merge two sorted lists into one sorted list.

    Stable: on ties the element from *left* is taken first.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if right[j] < left[i]:
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged += list(left[i:])
    merged += list(right[j:])
    return merged
if __name__ == '__main__':
lists = [2, 3, 5, 7, 1, 4, 6, 15, 5, 2, 7, 9, 10, 15, 9, 17, 12]
print(MergeSort(lists))
|
[
"wcb2213@163.com"
] |
wcb2213@163.com
|
551e00715914982da405d9a73c65f21cb2aa1ea4
|
2b8d4e22d10ca118fba0100cc87af04f3939448f
|
/odoo app/dymax/module/modules/warehouse_stock_restrictions/__manifest__.py
|
4e1798a9a3ecd835fdcc84b185c9bd028aadcdd0
|
[] |
no_license
|
ahmed-amine-ellouze/personal
|
f10c0a161da709f689a3254ec20486411102a92d
|
4fe19ca76523cf274a3a85c8bcad653100ff556f
|
refs/heads/master
| 2023-03-28T23:17:05.402578
| 2021-03-25T13:33:18
| 2021-03-25T13:33:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# -*- coding: utf-8 -*-
{
'name': "Warehouse Restrictions",
'summary': """
Warehouse and Stock Location Restriction on Users.""",
'description': """
This Module Restricts the User from Accessing Warehouse and Process Stock Moves other than allowed to Warehouses and Stock Locations.
""",
'author': "Techspawn Solutions",
'website': "http://www.techspawn.com",
'category': 'Warehouse',
'version': '14.0',
'images': ['static/description/WarehouseRestrictions.jpg'],
'depends': ['base', 'stock'],
'data': [
'security/ir.model.access.csv',
'security/security.xml',
'views/users_view.xml',
],
}
|
[
"hussnainsajid08@gmail.com"
] |
hussnainsajid08@gmail.com
|
41cbd95a1732dcc78afb0031cdcd749613a85d01
|
f7aa97fe19b431523f35dc5badc9e8ff919ffa00
|
/fss17/project/tools/Discretize.py
|
843b20424c0b5b1183c3b4e3a70057c7d79357e5
|
[
"Apache-2.0"
] |
permissive
|
rahlk/fss17
|
3b331427d450c5bb46b71b4aa5c77c59a8ec0a70
|
49e22c4ad01ff751f24c3e5702b7fa36a3a18e96
|
refs/heads/master
| 2021-01-19T18:03:13.364689
| 2017-12-12T12:51:28
| 2017-12-12T12:51:28
| 101,105,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,945
|
py
|
"""
An instance filter that discretizes a range of numeric attributes in the dataset into nominal attributes. Discretization is by Fayyad & Irani's MDL method (the default).
For more information, see:
Usama M. Fayyad, Keki B. Irani: Multi-interval discretization of continuous valued attributes for classification learning. In: Thirteenth International Joint Conference on Artificial Intelligence, 1022-1027, 1993.
Igor Kononenko: On Biases in Estimating Multi-Valued Attributes. In: 14th International Joint Conference on Articial Intelligence, 1034-1040, 1995.
Dougherty, James, Ron Kohavi, and Mehran Sahami. "Supervised and unsupervised discretization of continuous features." Machine learning: proceedings of the twelfth international conference. Vol. 12. 1995.
"""
from __future__ import division, print_function
from misc import *
import numpy as np
import pandas as pd
from pdb import set_trace
from collections import Counter
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier as CART
def fWeight(tbl):
    """
    Sort features based on entropy

    Fits a decision tree (entropy criterion) on all feature columns
    against a boolean-ified last column, then returns column names
    ordered from most to least important.
    """
    clf = CART(criterion='entropy')
    features = tbl.columns[:-1]
    klass = tbl[tbl.columns[-1]]
    try:
        clf.fit(tbl[features], [k == True for k in klass])
        lbs = clf.feature_importances_
    except ValueError:
        # NOTE(review): debugger drop-in left as the error handler; if
        # fit fails, `lbs` is unbound and the return below raises.
        set_trace()
    return [tbl.columns[i] for i in np.argsort(lbs)[::-1]]
def discretize(feature, klass, atleast=-1, discrete=False):
    """
    Recursive Minimal Entropy Discretization
    ````````````````````````````````````````
    Inputs:
        feature: A list or a numpy array of continuous attributes
        klass: A list, or a numpy array of discrete class labels.
        atleast: minimum number of recursion levels before the MDL
            stopping criterion is allowed to record a split.
        discrete: when True, impurity is measured with the standard
            deviation and split *indices* (not values) are recorded.
    Outputs:
        splits: A list containing suggested split locations
    """

    def measure(x):
        # Impurity of a group: Shannon entropy by default, standard
        # deviation in "discrete" mode.
        def ent(x):
            C = Counter(x)
            N = len(x)
            return sum([-C[n] / N * np.log(C[n] / N) for n in C.keys()])

        def stdev(x):
            if np.isnan(np.var(x) ** 0.5):
                return 0
            return np.var(x) ** 0.5

        if not discrete:
            return ent(x)
        else:
            return stdev(x)

    # Sort feature values, keeping the class labels aligned with them.
    feature, klass = sorted(feature), [k for (f, k) in
                                       sorted(zip(feature, klass))]
    splits = []

    def redo(feature, klass, lvl):
        if len(feature) > 0:
            E = measure(klass)
            N = len(klass)
            T = []  # weighted impurity of each candidate boundary
            # BUG FIX: `xrange` is Python 2 only (NameError on Python 3);
            # `range` behaves identically here.
            for k in range(len(feature)):
                west, east = feature[:k], feature[k:]
                k_w, k_e = klass[:k], klass[k:]
                N_w, N_e = len(west), len(east)
                T += [N_w / N * measure(k_w) + N_e / N * measure(k_e)]

            T_min = np.argmin(T)
            left, right = feature[:T_min], feature[T_min:]
            k_l, k_r = klass[:T_min], klass[T_min:]

            def stop(k, k_l, k_r):
                # Fayyad & Irani MDL criterion: stop when the gain does
                # not pay for the cost of encoding the split.
                gain = E - T[T_min]

                def count(lst): return len(Counter(lst).keys())

                delta = np.log2(float(3 ** count(k) - 2)) - (
                    count(k) * measure(k) - count(k_l) * measure(k_l) - count(
                        k_r) * measure(k_r))
                return gain < (np.log2(N - 1) + delta) / N or T_min == 0

            if stop(klass, k_l, k_r) and lvl >= atleast:
                if discrete:
                    splits.append(T_min)
                else:
                    splits.append(feature[T_min])
            else:
                _ = redo(feature=left, klass=k_l, lvl=lvl + 1)
                _ = redo(feature=right, klass=k_r, lvl=lvl + 1)

    # ------ main ------
    redo(feature, klass, lvl=0)
    return splits
def _test0():
"A Test Function"
test = np.random.normal(0, 10, 1000).tolist()
klass = [int(abs(i)) for i in np.random.normal(0, 1, 1000)]
splits = discretize(feature=test, klass=klass)
set_trace()
def _test1():
    # Manual check: discretize the "ant" dataset. explore/csv2DF come
    # from the `misc` star import; ends in the debugger for inspection.
    tbl_loc = explore(name='ant')[0]
    tbl = csv2DF(tbl_loc)
    new = discreteTbl(tbl)
    set_trace()
def discreteTbl(tbl, B=0.33, Prune=True):
    """
    Discretize a table
    ``````````````````
    Columns 1 to N-1 represent the independent attributes, column N the dependent.
    Parameters:
        tbl - A Pandas data.dat Frame
        B - Cutoff for Pruning Columns (float between 0,1)
        Prune - Prune (True/False)
    Returns:
        Pandas data.dat Frame: Discretized table
    """
    # CLEANUP: the original computed `fweight = fWeight(tbl)` up front
    # and a throwaway `cutoffs = sorted(...)` before the real one; both
    # were dead code and have been removed.
    dtable = []
    for name in tbl.columns[:-1]:
        new = []
        feature = tbl[name].values
        klass = tbl[tbl.columns[-1]].values
        splits = discretize(feature, klass)
        LO, HI = min(feature), max(feature)

        def pairs(lst):
            # Yield consecutive (lo, hi) pairs, consuming the list.
            while len(lst) > 1:
                yield (lst.pop(0), lst[0])

        # Candidate bins: consecutive pairs of the sorted unique cutoffs.
        cutoffs = [t for t in pairs(sorted(list(set(splits + [LO, HI]))))]
        # Replace each raw value by the (lo, hi) bin it falls into;
        # the maximum value belongs to the last (closed) bin.
        for f in feature:
            for n in cutoffs:
                if n[0] <= f < n[1]:
                    new.append(n)
                elif f == n[1] == HI:
                    new.append((n[0], HI))
        dtable.append(new)
    dtable.append(klass.tolist())
    dtable = pd.DataFrame(dtable).T
    dtable.columns = tbl.columns
    ranks = fWeight(tbl)
    if Prune:
        return dtable[ranks[:int(len(ranks) * B)] + [tbl.columns[-1]]]
    else:
        return dtable[ranks + [tbl.columns[-1]]]
if __name__ == '__main__':
_test0()
pass
|
[
"i.m.ralk@gmail.com"
] |
i.m.ralk@gmail.com
|
f752ed117122b654d3db7de4b0b29d175e3d6732
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sets_20200605201123.py
|
da4da35d79893db365b73571b8ec063d8489a308
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
def Strings(str):
    """Combine "letter:count" entries, summing counts per letter.

    Returns (and prints) the totals sorted alphabetically by letter,
    e.g. ["Z:1","B:3","C:3","Z:4","B:2"] -> "B:5,C:3,Z:5".

    BUG FIX: the original (a work-in-progress editor snapshot) ended in
    a bare `print` statement, which is a no-op in Python 3 — it never
    assembled or emitted the combined result.

    NOTE(review): the parameter shadows the builtin `str`; the name is
    kept to preserve the function's keyword interface.
    """
    values = {}
    for entry in str:
        key, count = entry.split(":")
        values[key] = values.get(key, 0) + int(count)
    answer = ",".join("%s:%d" % (k, values[k]) for k in sorted(values))
    print(answer)
    return answer
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
561c07f563101185de123baff76553af01f9f150
|
d1e2f5993573a16ed6cf359215e596814db33ad7
|
/flaskm/db_respository/versions/003_migration.py
|
c26e106263a75f6e4f7112810b5f90ddb811e57f
|
[] |
no_license
|
Andor-Z/My-Learning-Note
|
a6b62fd10119cede9ba4c6c79b2dcb5c346d11e0
|
202401f1be1f9f7c32049623315c0c54720498f7
|
refs/heads/master
| 2022-10-22T13:55:44.821097
| 2016-07-10T09:21:02
| 2016-07-10T09:21:02
| 42,592,078
| 1
| 1
| null | 2022-10-20T21:49:08
| 2015-09-16T14:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
from datetime import datetime

pre_meta = MetaData()
post_meta = MetaData()

# Table as it exists before this migration (dropped on upgrade).
alembic_version = Table('alembic_version', pre_meta,
    Column('version_num', VARCHAR(length=32), nullable=False),
)

# Target shape of the users table after this migration.
users = Table('users', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('name', String(length=64)),
    Column('location', String(length=64)),
    Column('about_me', Text),
    # BUG FIX: the migration generator leaked the repr of the original
    # lambda defaults ("<function ...>"), which is a syntax error.
    # Restored callable defaults; presumably the model used
    # datetime.utcnow for both timestamps — confirm against models.py.
    Column('member_since', DateTime, default=ColumnDefault(datetime.utcnow)),
    Column('last_seen', DateTime, default=ColumnDefault(datetime.utcnow)),
    Column('email', String(length=64)),
    Column('username', String(length=64)),
    Column('role_id', Integer),
    Column('password_hash', String(length=128)),
    Column('confirmed', Boolean, default=ColumnDefault(False)),
    Column('avatar_hash', String(length=32)),
)
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Drop the stray alembic bookkeeping table and add the new
    # users.avatar_hash column.
    pre_meta.tables['alembic_version'].drop()
    post_meta.tables['users'].columns['avatar_hash'].create()
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Recreate the alembic table and drop the avatar_hash column.
    pre_meta.tables['alembic_version'].create()
    post_meta.tables['users'].columns['avatar_hash'].drop()
|
[
"zyfsta@outlook.com"
] |
zyfsta@outlook.com
|
33312e48a6fec52577cc1a2ee8867f5750e74dfe
|
1f410c8010877a56f4457535197dce856676b20b
|
/src/apps/datasets/migrations/0003_dataset_uuid.py
|
a0dbd6564812f765b4f6083fab8af3ea40c986b9
|
[
"MIT"
] |
permissive
|
ckcollab/brains
|
1484222312b1695081bc77d9d5ca4ee6e8ce7ad8
|
1f85462d3e4f25170b8c487a0ff4efb598bf1f2e
|
refs/heads/master
| 2021-05-30T13:42:30.628334
| 2015-12-30T01:20:11
| 2015-12-30T01:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-18 20:04
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Add a ``uuid`` field (default: random uuid4) to the dataset model."""

    dependencies = [
        ('datasets', '0002_auto_20151217_1928'),
    ]

    operations = [
        migrations.AddField(
            model_name='dataset',
            name='uuid',
            field=models.UUIDField(default=uuid.uuid4),
        ),
    ]
|
[
"eric@ckcollab.com"
] |
eric@ckcollab.com
|
a083a7f709dddbd60e57d7f87fa6d2c921a93153
|
b0c391ecf351e2317ac61c257dd6bfa5b10d4015
|
/pymotifs/motifs/info.py
|
8e50f9402510548d536cf1cc88526c18a5f68479
|
[] |
no_license
|
BGSU-RNA/RNA-3D-Hub-core
|
57db94bfff9b338b3a751f545699f4117150b921
|
1982e10a56885e56d79aac69365b9ff78c0e3d92
|
refs/heads/master
| 2023-05-26T09:41:38.397152
| 2023-05-23T05:50:10
| 2023-05-23T05:50:10
| 6,049,336
| 3
| 1
| null | 2022-06-21T21:27:52
| 2012-10-02T18:26:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
"""Load the motif info data.
This will load the cached data to store all motifs into the DB.
"""
from pymotifs import core
from pymotifs import models as mod
from pymotifs.motifs.utils import BaseLoader
from pymotifs.motifs.release import Loader as ReleaseLoader
class Loader(BaseLoader):
    """Loads cached motif data into the ml_motifs_info table."""

    # Must run after the motif release loader, which produces the cache.
    dependencies = set([ReleaseLoader])

    @property
    def table(self):
        # ORM model this loader writes rows into.
        return mod.MlMotifsInfo

    def motifs(self, cached):
        """Build one MlMotifsInfo row per motif in the cached release data."""
        data = []
        for entry in cached['motifs']:
            data.append(self.table(
                motif_id=entry['motif_id'],
                ml_release_id=cached['release'],
                type=cached['loop_type'],
                handle=entry['name']['handle'],
                version=entry['name']['version'],
                comment=entry['comment'],
            ))
        return data

    def data(self, pair, **kwargs):
        """Entry point: turn a (loop_type, release) pair into table rows.

        Raises core.InvalidState when no cached data exists for the
        loop type. `release` is unused here; the release id comes from
        the cached payload itself.
        """
        loop_type, release = pair
        cached = self.cached(loop_type)
        if not cached:
            raise core.InvalidState("No cached data")
        return self.motifs(cached)
|
[
"blakes.85@gmail.com"
] |
blakes.85@gmail.com
|
07bafbf54361a1f49f8246f063abe7ea2f4ac270
|
386448448c23d0e4f6b72d37f7ca20caa1ecc207
|
/part 09 增加子弹/settings.py
|
93d23f33f61f96182c92c5aae5d24b66cb55ca40
|
[] |
no_license
|
typeme/pygame-demo
|
1299bd1b437f52234cf1c48a4ee3265811bbf4a5
|
875fabec70ae7aaa245f7fc1c35f2dee173df58e
|
refs/heads/master
| 2020-05-28T09:38:54.475818
| 2019-07-01T15:00:33
| 2019-07-01T15:00:33
| 188,958,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
import pygame as pg

vec = pg.math.Vector2

# Color definitions (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (106, 55, 5)

# Basic game settings
WIDTH = 1024  # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768  # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60  # frame rate
TITLE = "part 09 Demo"
BGCOLOR = BROWN  # background color

TILESIZE = 64  # size of one grid tile, in pixels
GRIDWIDTH = WIDTH / TILESIZE  # number of tiles per row
GRIDHEIGHT = HEIGHT / TILESIZE  # number of tiles per column

WALL_IMG = 'tileGreen_39.png'

# Player settings
PLAYER_SPEED = 300.0
PLAYER_ROT_SPEED = 250.0
PLAYER_IMG = 'manBlue_gun.png'
PLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)
BARREL_OFFSET = vec(30, 10)

# Gun settings
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 500
BULLET_LIFETIME = 1000  # presumably milliseconds -- confirm against sprite code
BULLET_RATE = 150
KICKBACK = 200
GUN_SPREAD = 5

# Mob settings
MOB_IMG = 'zombie1_hold.png'
MOB_SPEED = 150
MOB_HIT_RECT = pg.Rect(0, 0, 35, 35)
|
[
"typeme3@163.com"
] |
typeme3@163.com
|
75cc7c8d1fba46bcee40c74f4deab8796d53a56b
|
5b37c4bd44553a0ae29d14cde773a73fd6f091ef
|
/day16.py
|
0b8a71970dd0b9bdd6a4e7b5dd0869ff6515f8c7
|
[] |
no_license
|
Curiouspaul1/30daysofcode-Dsc
|
bf38cacc76d537a4722d7a87be2d6d8657c1ffd9
|
56735671732b22645d6e0dd87884a141c6ddb90b
|
refs/heads/master
| 2022-04-26T00:52:56.590578
| 2020-04-24T12:27:42
| 2020-04-24T12:27:42
| 250,358,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,047
|
py
|
from flask import Flask, request, jsonify,make_response
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
from flask_bcrypt import Bcrypt
from day10 import emailcheck
import os
# Database directory
basedir = os.getcwd()

app = Flask(__name__)

# app config
"""
This config clause specifies the database location. And disabes an option to
track changes in database to False (it's turned on by default). Sqlite comes
by default with flask so no need to worry
about installing any rdbms
"""
# NOTE(review): the left operand of `or` is always a non-empty string,
# so the os.getenv("DATABASE_URI") fallback can never be selected —
# presumably the env var was meant to take priority; confirm intent
# before reordering.
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(basedir, "app.sqlite") or os.getenv("DATABASE_URI")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

db = SQLAlchemy(app)
# BUG FIX: removed `ma = Marshmallow(app)` — Marshmallow is never
# imported, so that line raised NameError on import, and `ma` is not
# used anywhere in this module.
bcrypt = Bcrypt(app)
# Database Model
class User(db.Model):
    """
    The user class represents an sql table. It's schema is outlined
    below, as with the aid of an ORM (Sqlalchemy) or more precisely
    flask-sqlalchemy (a wrapper built around the more generic sqlalchemy).
    This allows me to write native python objects that translate to (more or less)
    SQL tables.
    """
    id = db.Column(db.Integer,primary_key=True,nullable=False)
    # Both username and email are unique, disallowing duplicates.
    username = db.Column(db.String(50),unique=True)
    email = db.Column(db.String(100),unique=True) ## The unique property on email, disallows duplicate emails
    # Stores the bcrypt hash, never the plaintext password.
    password = db.Column(db.String(100))
# Signup Handler
@app.route('/signup', methods=['POST'])
def signup():
    """Register a new user from a JSON body {username, email, password}.

    Responses: 200 on success; 406 for an invalid email or a duplicate
    email/username.
    """
    # fetch data
    user_data = request.get_json()
    # hash password
    password_hash = bcrypt.generate_password_hash(user_data["password"])
    # validate email using email checker from day10 (regex)
    if not emailcheck(user_data["email"]):
        return make_response("Invalid Email", 406)
    new_user = User(password=password_hash, email=user_data["email"])
    db.session.add(new_user)
    try:
        # BUG FIX: IntegrityError is only raised when the INSERT is
        # emitted, not at session.add(), so the original duplicate-email
        # branch could never trigger. Flushing forces the INSERT now.
        db.session.flush()
    except IntegrityError:
        db.session.rollback()
        return make_response("User with email already exists", 406)
    try:
        new_user.username = user_data["username"]
        db.session.commit()
    except IntegrityError:
        # Roll back so the session stays usable for the next request.
        db.session.rollback()
        return make_response("User with username already exists", 406)
    return make_response("registration successful", 200)
# Login/Auth Handler
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user by username or email plus password.

    Responses: 200 on success, 401 for a wrong password, 404 when no
    matching user exists.
    """
    login_data = request.get_json()
    # find user with username or email; .get avoids a KeyError when one
    # of the two fields is omitted from the request body.
    user = None
    if login_data.get("username"):
        user = User.query.filter_by(username=login_data["username"]).first()
    if user is None and login_data.get("email"):
        # BUG FIX: the original fallback query was missing .first(), so
        # a truthy Query object was treated as a found user and the
        # subsequent .password access failed.
        user = User.query.filter_by(email=login_data["email"]).first()
    if user:
        # fetch password hash from database then compare
        password_hash = user.password
        if bcrypt.check_password_hash(password_hash, login_data["password"]):
            return make_response("Signed in successfully", 200)
        else:
            return make_response("Wrong password", 401)
    else:
        return make_response("No such user found", 404)
|
[
"170408025@live.unilag.edu.ng"
] |
170408025@live.unilag.edu.ng
|
82136ba6add582586d0f7de5f1aebe36c5ef8f5c
|
2e2c9cf0bf1f6218f82e7ecddbec17da49756114
|
/day1python基础/__init__.py
|
b29e98abae8a3e380d7654fbeaf3546ede374470
|
[] |
no_license
|
guoyunfei0603/py31
|
c3cc946cd9efddb58dad0b51b72402a77e9d7592
|
734a049ecd84bfddc607ef852366eb5b7d16c6cb
|
refs/heads/master
| 2023-03-02T20:50:02.052878
| 2021-02-05T06:17:24
| 2021-02-05T06:17:24
| 279,454,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/24 10:18
# @Author : guoyunfei.0603
# @File : __init__.py.py

# s = "'abcd'"
# print(s[0:2]) #'a

# Task: reverse the word order of a string, e.g. "hello xiao mi"
# becomes "mi xiao hello" (hint: use string split, join and list
# reversal).
s = "hello xiao mi"
s1 = s.split(' ')
t = s1[::-1] # approach 1: reverse the word list via slicing
# print(t,type(t)) -- t is a list; it still has to be joined back into a string!
new_str = ' '.join(t)
print(new_str,type(new_str))

# s1.reverse() # approach 2: in-place reverse
# print(s1)
|
[
"1359239107@qq.com"
] |
1359239107@qq.com
|
4b7a7e5245954567017ea30f2e6e5b2a68d61c27
|
38c35956be6343855914b1c58b8fbd2e40c6e615
|
/AdHoc/1030.py
|
6cab25d7e55eca194d128a95ba59b1e53ae65c24
|
[] |
no_license
|
LucasBarbosaRocha/URI
|
b43e4f4a6b3beed935f24839001bea354411c4bd
|
2c9bcc13300a9f6243242e483c8f9ec3296a88ad
|
refs/heads/master
| 2020-06-25T05:06:51.297824
| 2019-08-22T04:50:11
| 2019-08-22T04:50:11
| 199,210,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# Josephus-style elimination. For each of nc test cases read n (number
# of people) and k (step size); repeatedly advance k live positions and
# eliminate the one landed on, then print the 1-based position of the
# survivor.
nc = int(input())
for i in range(nc):
    entrada = input().split(" ")
    n = int(entrada[0])
    k = int(entrada[1])
    lista = [1]*n  # 1 = alive, -1 = eliminated
    vivos = n
    pos = 0
    while vivos > 1:
        j = 0
        while j < k:
            # Skip positions that are already eliminated.
            while (lista[pos] == -1):
                pos = pos + 1
                if (pos == n):
                    pos = 0
            pos = pos + 1
            if (pos == n):
                pos = 0
            j = j + 1
        # Eliminate the position the k-step walk just passed.
        lista[pos - 1] = -1
        vivos = vivos - 1
        #print (lista)
    # The surviving index is the first (only) remaining 1.
    print ("Case %d: %d" %((i+1), lista.index(max(lista)) + 1))
|
[
"lucas.lb.rocha@gmail.com"
] |
lucas.lb.rocha@gmail.com
|
0c6cb54ad19b2cdaa6b81ab6851c9972fa85bc7a
|
aee4c0839933a11d8ce3c485d06595202dd3cabd
|
/keras/layers/reshaping/cropping1d.py
|
2eb632e38d0ae45a148bb71d27c864c72c325578
|
[
"Apache-2.0"
] |
permissive
|
xiaoheilong3112/keras
|
fc3025a2f14838bf8416b2faed766cb43da62f9b
|
8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec
|
refs/heads/master
| 2023-08-07T18:23:36.804563
| 2023-07-25T19:16:12
| 2023-07-25T19:16:48
| 137,238,629
| 1
| 0
|
Apache-2.0
| 2023-07-26T05:22:44
| 2018-06-13T15:59:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,256
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras cropping layer for 1D input."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Cropping1D")
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super().__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(
cropping, 2, "cropping", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tf.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if (
inputs.shape[1] is not None
and sum(self.cropping) >= inputs.shape[1]
):
raise ValueError(
"cropping parameter of Cropping layer must be "
"greater than the input shape. Received: inputs.shape="
f"{inputs.shape}, and cropping={self.cropping}"
)
if self.cropping[1] == 0:
return inputs[:, self.cropping[0] :, :]
else:
return inputs[:, self.cropping[0] : -self.cropping[1], :]
def get_config(self):
config = {"cropping": self.cropping}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
b010f851ace9d560f4744da9777c12ef58ecc805
|
96a34a048c783a75736bf0ec775df22142f9ee53
|
/packages/service-library/src/servicelib/docker_utils.py
|
0a1e3c094b6d77ab5579293a2b2d6b49970d63c3
|
[
"MIT"
] |
permissive
|
ITISFoundation/osparc-simcore
|
77e5b9f7eb549c907f6ba2abb14862154cc7bb66
|
f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63
|
refs/heads/master
| 2023-08-31T17:39:48.466163
| 2023-08-31T15:03:56
| 2023-08-31T15:03:56
| 118,596,920
| 39
| 29
|
MIT
| 2023-09-14T20:23:09
| 2018-01-23T10:48:05
|
Python
|
UTF-8
|
Python
| false
| false
| 532
|
py
|
from datetime import datetime
import arrow
def to_datetime(docker_timestamp: str) -> datetime:
    """Parse a docker timestamp string into a datetime.

    Docker follows the RFC3339Nano format, which is based on ISO 8601
    (https://medium.easyread.co/understanding-about-rfc-3339-for-datetime-formatting-in-software-engineering-940aa5d5f68a):
      2019-10-12T07:20:50.52Z  -- acceptable in ISO 8601 and RFC 3339 (with T)
      2019-10-12 07:20:50.52Z  -- only accepted in RFC 3339 (without T)
    """
    parsed: datetime = arrow.get(docker_timestamp).datetime
    return parsed
|
[
"noreply@github.com"
] |
ITISFoundation.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.