blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
732ccf2811be54afbd199a94e72658e129c6f81b
|
8e09c9562173cb40fe26912fcdb1d4c6c08897d7
|
/tfx/components/evaluator/component_test.py
|
52d28474308225045d848dd0c656642a98ec0934
|
[
"Apache-2.0"
] |
permissive
|
robertlugg/tfx
|
6a0050f6f1876ba5d53e45fd0d80acac2441187d
|
49778c502bb6668ed8230877407fe40ae3a99a06
|
refs/heads/master
| 2020-07-27T17:00:47.355938
| 2019-09-16T23:00:02
| 2019-09-16T23:00:32
| 209,164,014
| 0
| 0
|
Apache-2.0
| 2019-09-17T21:58:47
| 2019-09-17T21:58:46
| null |
UTF-8
|
Python
| false
| false
| 1,928
|
py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.evaluator.component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.components.evaluator import component
from tfx.proto import evaluator_pb2
from tfx.types import channel_utils
from tfx.types import standard_artifacts
class ComponentTest(tf.test.TestCase):
  """Unit tests for constructing the Evaluator component."""

  def testConstruct(self):
    """An Evaluator built from examples and model channels exposes a ModelEvalPath output."""
    example_channel = channel_utils.as_channel([standard_artifacts.Examples()])
    model_channel = channel_utils.as_channel([standard_artifacts.Model()])
    evaluator = component.Evaluator(
        examples=example_channel,
        model_exports=model_channel)
    self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)

  def testConstructWithSliceSpec(self):
    """A feature_slicing_spec proto is accepted without changing the output type."""
    example_channel = channel_utils.as_channel([standard_artifacts.Examples()])
    model_channel = channel_utils.as_channel([standard_artifacts.Model()])
    slicing_spec = evaluator_pb2.FeatureSlicingSpec(specs=[
        evaluator_pb2.SingleSlicingSpec(
            column_for_slicing=['trip_start_hour'])
    ])
    evaluator = component.Evaluator(
        examples=example_channel,
        model_exports=model_channel,
        feature_slicing_spec=slicing_spec)
    self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)


if __name__ == '__main__':
  tf.test.main()
|
[
"tensorflow-extended-team@google.com"
] |
tensorflow-extended-team@google.com
|
f55510e0cc367aad9ebfda9b2a6faa0435ae1473
|
2119953dd04916fa2adf3f42a487f3f9754d1f66
|
/modules/sandbox/docker/geo-web-viz/app.py
|
9034f8727870c8bdee5a64203363aecd3f7ec266
|
[
"MIT"
] |
permissive
|
sarahwertz/sepal
|
91d12e3317cd07ad4c99469d5b6211d74013b330
|
efbbc33ac99db332fc13f9dfd4c777a8d2c1b41e
|
refs/heads/master
| 2020-06-11T07:42:08.835556
| 2019-05-27T14:21:28
| 2019-05-27T14:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
import json
import logging
import traceback
import sys
from flask import Flask, Blueprint, request, Response
import config
import layers
import raster
import render
from config import to_file
app = Flask(__name__)
http = Blueprint(__name__, __name__)
# Per-process session state: layer objects by id, their z-order, and tile renderers.
# NOTE(review): module-level mutable state — assumes a single session per process.
session_state = {'layer_by_id': {}, 'index_by_id': {}, 'renderers': {}}
@http.errorhandler(Exception)
def handle_invalid_usage(error):
    """Catch-all handler: print the error and stack trace, return a generic 500."""
    print(error)
    print_stacktrace()
    return "Internal Error", 500
@http.route('/layers', methods=['GET'])
def list_layers():
    """Return all layers in the session as JSON."""
    return json_response(layers.list_layers(state()))
@http.route('/layers/order', methods=['POST'])
def order_layers():
    """Reorder session layers; expects form field 'order' holding a JSON list."""
    layers.reorder(json.loads(request.values['order']), state())
    return json_response({'status': 'OK'})
@http.route('/raster/info', methods=['GET'])
def raster_info():
    """Return band count and nodata value for the raster file named by 'path'."""
    raster_file = to_file(request.values['path'])
    return json_response(
        {
            'bandCount': raster.band_count(raster_file),
            'nodata': raster.read_nodata(raster_file)
        }
    )
@http.route('/raster/band/<band_index>', methods=['GET'])
def band_info(band_index):
    """Return metadata for one raster band, honouring an optional 'nodata' override."""
    raw_nodata = request.values.get('nodata', None)
    # A non-empty value is parsed as a float; empty/missing is passed through untouched.
    nodata = float(raw_nodata) if raw_nodata else raw_nodata
    info = raster.band_info(
        raster_file=to_file(request.values['path']),
        band_index=int(band_index),
        nodata=nodata)
    return json_response(info)
@http.route('/raster/save', methods=['POST'])
def save_raster():
    """Persist a raster layer posted as JSON in form field 'layer'; return its bounds."""
    layer = json.loads(request.values['layer'])
    bounds = layers.save_raster(layer, state())
    return json_response({'bounds': bounds})
@http.route('/shape/save', methods=['POST'])
def save_shape():
    """Persist a shape layer posted as JSON in form field 'layer'; return its bounds."""
    layer = json.loads(request.values['layer'])
    bounds = layers.save_shape(layer, state())
    return json_response({'bounds': bounds})
@http.route('/layers/<layer_id>', methods=['DELETE'])
def remove_raster(layer_id):
    """Remove the layer with the given id from the session."""
    layers.remove_layer(layer_id, state())
    return json_response({'status': 'OK'})
@http.route('/layers/features/<lat>/<lng>')
def attributes(lat, lng):
    """Return features of all layers at the given coordinate.

    NOTE(review): coordinate order assumed lat/lng — confirm against the client.
    """
    return json_response(layers.features(float(lat), float(lng), state()))
@http.route('/layer/<layer_id>/<z>/<x>/<y>.<fmt>')
def render_tile(layer_id, z, x, y, fmt):
    """Render one map tile (z/x/y) of a layer in the requested image format."""
    return Response(
        render.render_tile(layer_id, int(z), int(x), int(y), str(fmt), renderers()),
        mimetype=('image/%s' % fmt)
    )
def state():
    """Return the module-level session state dict."""
    return session_state
def renderers():
    """Return the renderer cache from the session state (empty dict if absent)."""
    return state().get('renderers', {})
def json_response(data):
    """Serialize data to JSON and wrap it in an application/json Response."""
    return Response(json.dumps(data), mimetype='application/json')
def print_stacktrace():
    """Print the traceback of the exception currently being handled to stderr.

    Outside an ``except`` block there is no current exception and this prints
    the placeholder ``NoneType: None``.
    """
    # Equivalent to the manual traceback.print_exception(*sys.exc_info()),
    # without unpacking the exc_info tuple by hand.
    traceback.print_exc()
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARNING)
    # Let route errors reach the Exception errorhandler instead of Flask's debug page.
    app.config['PROPAGATE_EXCEPTIONS'] = True
    app.register_blueprint(http)
    app.secret_key = config.session_key
    # NOTE(review): binds on all interfaces; debug comes from config — confirm it is
    # disabled in production deployments.
    app.run(
        host='0.0.0.0',
        port=config.server_port,
        threaded=True,
        debug=config.debug_mode
    )
|
[
"daniel.wiell@fao.org"
] |
daniel.wiell@fao.org
|
efc55a073b926991fd43116f9fdd132aabaee02c
|
55a4573cdeb116b20a625a398af04337f180d598
|
/instrument/ifmessage.py
|
a4c5e4261d01cb8b46c95bd26d5bfe772ae5403e
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/omega-trh-daq
|
f506e4c968b7942dccb6cf012c377c3719a04143
|
98a18c62130af36d43c2882659e65321c3a98529
|
refs/heads/master
| 2020-04-02T11:10:18.632072
| 2019-07-23T15:33:39
| 2019-07-23T15:33:39
| 154,374,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 9 09:33:42 2018
@author: derek
"""
class InterfaceMessage():
    """Message buffers exchanged with an instrument interface.

    Holds independent input and output lists plus ready/connection flags.
    Index selector constants choose which message(s) ``get_input`` /
    ``get_output`` return; state constants name the interface life cycle.
    """

    # index selectors for get_input / get_output
    imFirstIndex = 0
    imLastIndex = 1
    imAllIndex = 2
    # interface STATES
    imWaitingToConnect = 'WaitingToConnect'
    imConnected = 'Connected'
    imDisconnected = 'Disconnected'
    imStopped = 'Stopped'

    def __init__(self):
        self.input_ready = False
        self.input = []
        self.output_ready = False
        self.output = []
        self.connection_status = False
        self.state = self.imStopped

    def add_input(self, msg):
        """Append msg to the input buffer and flag input as ready."""
        self.input.append(msg)
        self.input_ready = True

    def has_input(self):
        """Return True when the input buffer is non-empty."""
        return len(self.input) > 0

    def get_input(self, index=None, clear_buffer=False):
        """Return buffered input message(s) selected by index.

        Default / imFirstIndex pops the oldest message; imLastIndex pops the
        newest; imAllIndex returns everything and empties the buffer.
        """
        if index is None or index == InterfaceMessage.imFirstIndex:
            msg = [self.input.pop(0)]
        elif index == InterfaceMessage.imLastIndex:
            msg = [self.input.pop()]
        elif index == InterfaceMessage.imAllIndex:
            clear_buffer = True
            msg = self.input
        else:
            # unknown selector: return nothing (original left this undecided)
            msg = []
        if clear_buffer:
            self.input = []
        return msg

    def add_output(self, msg):
        """Append msg to the output buffer and flag output as ready."""
        self.output.append(msg)
        self.output_ready = True

    def has_output(self):
        """Return True when the output buffer is non-empty."""
        return len(self.output) > 0

    def get_output(self, index=None, clear_buffer=True):
        """Return buffered output message(s); by default clears the buffer after."""
        if index is None or index == InterfaceMessage.imFirstIndex:
            msg = [self.output.pop(0)]
        elif index == InterfaceMessage.imLastIndex:
            msg = [self.output.pop()]
        elif index == InterfaceMessage.imAllIndex:
            clear_buffer = True
            msg = self.output
        else:
            # unknown selector: return nothing (original left this undecided)
            msg = []
        if clear_buffer:
            self.output = []
        return msg
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
14867c67fd1d822563fe8ecb1841dce728a316df
|
1c801375ead766790f5c097081a1bbbc6a593a9e
|
/baseSpider/算法/随机获取1000此列表元素并统计次数.py
|
f9d47c387d183b937269c6fbd47b14e83dfe9a35
|
[] |
no_license
|
llf-1996/python3Spider
|
5803d1f42b660c7c2643bbc31f17126ac06e7ceb
|
4621db8c7383940f8e60754d6640406101141095
|
refs/heads/master
| 2023-06-01T04:31:27.555140
| 2020-12-13T09:38:19
| 2020-12-13T09:38:19
| 156,145,515
| 2
| 3
| null | 2023-05-23T00:12:59
| 2018-11-05T01:48:46
|
Python
|
UTF-8
|
Python
| false
| false
| 527
|
py
|
'''
Randomly pick a string from a list 1000 times and count how many times
each string was chosen.
'''
__author__ = 'llf'

import random
from collections import Counter

c = Counter()
ll = ['a', 'b']
# Counter.update counts each drawn element in one pass.
c.update(random.choice(ll) for _ in range(1000))
print('结果:', type(c), dir(c), c)
'''
<class 'collections.Counter'>
[
    'clear', 'copy', 'elements', 'fromkeys', 'get', 'items',
    'keys', 'most_common', 'pop', 'popitem', 'setdefault',
    'subtract', 'update', 'values'
]
'''
|
[
"2367746876@qq.com"
] |
2367746876@qq.com
|
2351627cba429794c787f1b8b52c0bf5472cd577
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_9/mchjos007/question2.py
|
95a03b09fd80edb8ce36c1da69e53ec942c2d03e
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
# Re-wraps a text file to a maximum line width, preserving blank lines as
# paragraph breaks, and writes the result to an output file.
filein = open (input("Enter the input filename:\n"), "r")
lines = filein.readlines()
filein.close()
fileOut = open(input("Enter the output filename:\n"),"w")
# NOTE(review): eval on raw user input is unsafe; int(...) would suffice here.
width = eval(input("Enter the line width:\n"))
finalFormattedString=""
linecount= 0
currentlineinprogress = ""  # words accumulated for the line being built
for currentline in lines:
    wordcount=0
    linecount += 1
    currentlinearray = currentline.split(" ")
    if(currentline != "\n"):
        for word in currentlinearray:
            wordcount+=1
            # Very last word of the file: flush it together with the pending line.
            if linecount == len(lines) and wordcount == len(currentlinearray):
                if len(currentlineinprogress) + len(word) >= width:
                    finalFormattedString += currentlineinprogress +"\n" + word
                    currentlineinprogress = ""
                else:
                    finalFormattedString += currentlineinprogress +" " + word
            else:
                # Strip the trailing newline a line-final word carries.
                if word[-1] == "\n":
                    word = word[:-1]
                # Emit the pending line once adding this word would exceed width.
                if len(currentlineinprogress) + len(word) >= width:
                    finalFormattedString += currentlineinprogress +"\n"
                    currentlineinprogress = ""
                if currentlineinprogress != "":
                    currentlineinprogress+= " "
                currentlineinprogress += word
    else:
        # Blank input line: close the pending paragraph and keep the break.
        finalFormattedString += currentlineinprogress + "\n\n"
        currentlineinprogress = ""
print(finalFormattedString, file = fileOut)
fileOut.close()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
e25b350871e12d31f6b6bc62c04e5aba3c26130e
|
5db3009eb36afe7110ed5402be3a9e570c58c540
|
/my_plugins/YouCompleteMe/third_party/ycmd/third_party/jedi_deps/jedi/test/completion/docstring.py
|
2b9f3481cf5fd27532a2eb46fe7d83f487fbd3c2
|
[
"GPL-3.0-only",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
imfangli/vimrc
|
ced2c6caece1cf19421c6ea7deb017bec4ca3a27
|
d2d14e7d083d70cc8627ddccb5b99c53c3c38be3
|
refs/heads/master
| 2022-02-01T00:34:31.855421
| 2022-01-22T15:57:28
| 2022-01-22T15:57:28
| 211,766,038
| 2
| 0
|
MIT
| 2019-09-30T03:15:03
| 2019-09-30T03:15:02
| null |
UTF-8
|
Python
| false
| false
| 3,723
|
py
|
""" Test docstrings in functions and classes, which are used to infer types """
# -----------------
# sphinx style
# -----------------
def sphinxy(a, b, c, d, x):
""" asdfasdf
:param a: blablabla
:type a: str
:type b: (str, int)
:type c: random.Random
:type d: :class:`random.Random`
:param str x: blablabla
:rtype: dict
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? ['seed']
c.seed
#? ['seed']
d.seed
#? ['lower']
x.lower
#? dict()
sphinxy()
# wrong declarations
def sphinxy2(a, b, x, y, z):
"""
:param a: Forgot type declaration
:type a:
:param b: Just something
:type b: ``
:param x: Just something without type
:param y: A function
:type y: def l(): pass
:param z: A keyword
:type z: return
:rtype:
"""
#?
a
#?
b
#?
x
#?
y
#?
z
#?
sphinxy2()
def sphinxy_param_type_wrapped(a):
"""
:param str a:
Some description wrapped onto the next line with no space after the
colon.
"""
#? str()
a
# local classes -> github #370
class ProgramNode():
pass
def local_classes(node, node2):
"""
:type node: ProgramNode
... and the class definition after this func definition:
:type node2: ProgramNode2
"""
#? ProgramNode()
node
#? ProgramNode2()
node2
class ProgramNode2():
pass
def list_with_non_imports(lst):
"""
Should be able to work with tuples and lists and still import stuff.
:type lst: (random.Random, [collections.defaultdict, ...])
"""
#? ['seed']
lst[0].seed
import collections as col
# use some weird index
#? col.defaultdict()
lst[1][10]
def two_dots(a):
"""
:type a: json.decoder.JSONDecoder
"""
#? ['raw_decode']
a.raw_decode
# sphinx returns
def return_module_object():
"""
:rtype: :class:`random.Random`
"""
#? ['seed']
return_module_object().seed
# -----------------
# epydoc style
# -----------------
def epydoc(a, b):
""" asdfasdf
@type a: str
@param a: blablabla
@type b: (str, int)
@param b: blablah
@rtype: list
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? list()
epydoc()
# Returns with param type only
def rparam(a,b):
"""
@type a: str
"""
return a
#? str()
rparam()
# Composite types
def composite():
"""
@rtype: (str, int, dict)
"""
x, y, z = composite()
#? str()
x
#? int()
y
#? dict()
z
# Both docstring and calculated return type
def both():
"""
@rtype: str
"""
return 23
#? str() int()
both()
class Test(object):
def __init__(self):
self.teststr = ""
"""
# jedi issue #210
"""
def test(self):
#? ['teststr']
self.teststr
# -----------------
# statement docstrings
# -----------------
d = ''
""" bsdf """
#? str()
d.upper()
# -----------------
# class docstrings
# -----------------
class InInit():
def __init__(self, foo):
"""
:type foo: str
"""
#? str()
foo
class InClass():
"""
:type foo: str
"""
def __init__(self, foo):
#? str()
foo
class InBoth():
"""
:type foo: str
"""
def __init__(self, foo):
"""
:type foo: int
"""
#? str() int()
foo
def __init__(foo):
"""
:type foo: str
"""
#? str()
foo
# -----------------
# Renamed imports (#507)
# -----------------
import datetime
from datetime import datetime as datetime_imported
def import_issues(foo):
"""
@type foo: datetime_imported
"""
#? datetime.datetime()
foo
|
[
"fangli@zhiai2019.com"
] |
fangli@zhiai2019.com
|
b763c6f7ccf02fc091dbceba1f1aa1bff14ba011
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/8qD23E6XRMaWhyJ5z_9.py
|
4d2e12540b5fba5e913c81d0957e9d467412bb06
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
def happiness_number(s):
    """Return the happiness score of s: smiley count minus frowny count.

    Smileys are ":)" and "(:"; frownies are ":(" and "):".
    """
    smiles = s.count(":)") + s.count("(:")
    frowns = s.count(":(") + s.count("):")
    return smiles - frowns
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ad7b0b5fccd2951a4d8e3d28056322b5a64c1f14
|
f9646f1a269b0108b174b68172424f19ea563da5
|
/lande/utilities/shell.py
|
b81a52fb5394cf59fcfe24f8cce4cf478e85e955
|
[] |
no_license
|
zimmerst/PhD-python
|
07a4ef2dd66e2bc9ac08861a04acbf934cb0ae49
|
21d24c0ae70925201b05f73c8044cc39639f8859
|
refs/heads/master
| 2020-12-26T04:56:27.165230
| 2014-01-27T00:55:17
| 2014-01-27T00:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
def format_command(*args, **kwargs):
    r""" Create a string suitable for running a shell program command where
    *args are the positional arguments for the command and
    **kwargs are the keyword arguments for the script

    For example:

        >>> print(format_command('ls','-al', '--author'))
        ls \
            -al \
            --author

        >>> print(format_command('gtlike', evfile='ft1.fits'))
        gtlike \
            evfile=ft1.fits

    If you need parameters with dashes, you can pass in a dictionary:

        >>> print(format_command('du', '-h', {'--max-depth':3}))
        du \
            -h \
            --max-depth=3

    Raises Exception when no positional argument (the command name) is given.
    """
    line_break = ' \\'  # trailing backslash continues the shell line
    tab = '    '
    sep = '\n'.join([line_break, tab])
    # Partition positionals from dict arguments instead of popping during
    # enumeration (the original pop-while-enumerate skipped the element after
    # a dict); dicts are folded into kwargs.
    positional = []
    for arg in args:
        if isinstance(arg, dict):
            kwargs.update(arg)
        else:
            positional.append(arg)
    # The original `args < 1` guard never fired (and is a TypeError on Py3);
    # this is the intended check per the message.
    if len(positional) < 1:
        raise Exception("Command name must be passed into script")
    # list(map(...)) keeps this working on Python 3, where map returns an iterator.
    parts = list(map(str, positional)) + ['%s=%s' % (a, b) for a, b in kwargs.items()]
    return sep.join(parts)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
[
"lande@37a9682d-6443-41a2-8582-b44379b6e86f"
] |
lande@37a9682d-6443-41a2-8582-b44379b6e86f
|
579528bb6dac8b7a786b56c7fa8aebcbc771d0bc
|
dd15b5ed1050bdd6de3d9a0ee0c448d2ccba09e0
|
/assets/python/mm_surface.py
|
f39a152bc61b12bf8992ad5c81b8cbbfa09dac2c
|
[] |
no_license
|
rblack42/nffs-2021-symposium
|
7f5c581fb46c23dd6896a37e0ac429b22d9de823
|
496696a43958fdf6ad5870b730675ed0b097e8cc
|
refs/heads/master
| 2023-02-24T02:16:01.579345
| 2021-01-27T21:47:15
| 2021-01-27T21:47:15
| 329,161,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import math
class Surface(object):
    """Circular-arc flying-surface mesh generator.

    Models a wing/stab with a circular-arc camber line, a straight center
    section, and a rounded (circular) leading-edge tip.
    Units appear to be inches and percent camber — TODO confirm.
    """
    def __init__(self,
                 span,           # with dihedral
                 chord,          # root chord
                 camber,         # root camber
                 tip_radius,     # outer LE tip radius
                 center_span,    # center section span (<= span)
                 tip_elevation   # dihedral tip elevation
                 ):
        self.root_chord = chord
        # camber arrives as percent of chord; stored as absolute height
        self.root_camber = camber * chord / 100
        self.center_span = center_span
        self.tip_radius = tip_radius
        self.span = span
        self.tip_elevation = tip_elevation

    def radius(self, c, t):
        """Radius of the circular arc with chord c and arc height t."""
        return c**2/(8*t) + t/2

    def arc_height(self, x_percent, chord, camber):
        """Height of the camber arc at x_percent of the chord (debug prints kept)."""
        xr = x_percent * chord
        rad = self.radius(chord, camber)
        # circle center sits at mid-chord, (rad - camber) below the chord line
        cx = chord/2
        cy = -(rad-camber)
        fact = math.sqrt(rad**2 - (xr - cx)**2)
        xh = cy + fact
        print(xr,xh, rad, camber, cx,cy,rad,fact)
        return xh

    def get_chord(self, y):
        """Chord length at spanwise station y; constant until the rounded tip region."""
        r = self.tip_radius
        c = self.root_chord
        yt = y - (self.span/2 - r)  # distance into the tip-radius region
        print("->",y,r, yt)
        if yt < 0:
            return c
        f = r**2 - yt**2
        print("F:",f)
        return c - r + math.sqrt(f)

    def gen_mesh(self,nx, ny):
        """Print an (nx+1) x (ny+1) grid of planform points over the half span."""
        dx = 1.0/nx
        dy = 1.0/ny
        print(dx,dy)
        for y in range(ny+1):
            yr = y * dy * self.span/2 # 0-span
            ch = self.get_chord(yr)
            # shift x start so the trailing edge stays aligned at root_chord
            x0 = self.root_chord - ch;
            for x in range(nx+1):
                xr = x0 + x * dx * ch
                print("(%3.2f,%3.2f)" % (xr,yr), end="")
            print()

    def run(self):
        """Compute the dihedral angle, print it in degrees, then emit the mesh."""
        tip_span = (self.span - self.center_span)/2
        self.dihedral_angle = \
            math.atan2(self.tip_elevation, tip_span)
        print (self.dihedral_angle * 180/math.pi)
        self.gen_mesh(nx=5,ny=50)
if __name__ == "__main__":
s = Surface(18,5,6,2,10,1.75);
s.run()
|
[
"roie.black@gmail.com"
] |
roie.black@gmail.com
|
e54990b791469e8f9788843e62d9cbd5ba1586b7
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Py Box/Games/Connect4.py
|
f10c212bbe6d558099c3a54e5c383f2009f477de
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:1559de38727aac969fef6c397825e86b6e68b16dacaafbe4b2f54499954aaaa9
size 5271
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
fa751f128d9ce6cc8de27b5d0d8262f701ca0df7
|
1dc727f5b326dd984962efa4d982ed9fe036c8fc
|
/cmsplugin_hanz_card/cms_plugins.py
|
9342ebd45a4d621b861df6bbe5db794242c93700
|
[] |
no_license
|
hanztura/iamhanz
|
2a7380dfe5aa9f05d72fdc1d77d77c950692d30c
|
1aeee4c3404ed5048a48187e8b75f0e958c042ba
|
refs/heads/master
| 2021-08-30T22:51:53.916315
| 2017-12-19T18:13:44
| 2017-12-19T18:13:44
| 113,453,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_filer_image.cms_plugins import FilerImagePlugin
from .models import Card
from .forms import CardForm
@plugin_pool.register_plugin
class CardPlugin(FilerImagePlugin):
    """django CMS plugin rendering a Card model via plugins/card_plugin.html.

    Extends cmsplugin_filer_image's FilerImagePlugin, reusing its image
    handling while adding card-specific fields.
    """
    cache = False  # never cache; re-render the plugin on every request
    form = CardForm
    model = Card
    name = _("Card Plugin")
    render_template = "plugins/card_plugin.html"
    # Admin form layout: card content, image resizing, alignment, link options.
    fieldsets = (
        (_('Card'), {
            'fields': [
                'style',
                'card_title',
                'caption_text',
                'image',
                'image_url',
                'alt_text',
            ]
        }),
        (_('Image resizing options'), {
            'fields': (
                'use_original_image',
                ('width', 'height',),
                ('crop', 'upscale',),
                'thumbnail_option',
                'use_autoscale',
            )
        }),
        (None, {
            'fields': ('alignment',)
        }),
        (_('More'), {
            'classes': ('collapse',),
            'fields': (
                'free_link',
                'page_link',
                'file_link',
                ('original_link', 'target_blank',),
                'link_attributes',
                'description',
            ),
        }),
    )
|
[
"hctura.official@gmail.com"
] |
hctura.official@gmail.com
|
0af95378e0e392f99cf06587dc97eef7e8859d13
|
ef2ea1152afc07e1341abdc99b037f2c803a0a68
|
/test_cnn.py
|
6de00fafda4942ffd6bbc0f62aafb20aaa792164
|
[
"Apache-2.0"
] |
permissive
|
Diriba-Getch/CNN-Multi-Label-Text-Classificati2on
|
484a82ed66e7266fb565ebe834e2c7842d1d2f91
|
0792c0f244b8190e097da42e8719c8bb03573e14
|
refs/heads/master
| 2023-05-14T16:22:32.973452
| 2021-05-27T14:47:21
| 2021-05-27T14:47:21
| 362,522,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,167
|
py
|
# -*- coding:utf-8 -*-
import os
import time
import numpy as np
import tensorflow as tf
import data_helpers
# Parameters
# ==================================================
logger = data_helpers.logger_fn('tflog', 'test-{}.log'.format(time.asctime()))

# Interactively ask for the 10-digit checkpoint run id to evaluate.
MODEL = input("☛ Please input the model file you want to test, it should be like(1490175368): ")
while not (MODEL.isdigit() and len(MODEL) == 10):
    MODEL = input('✘ The format of your input is illegal, it should be like(1490175368), please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')

# Whether predictions should be restricted by class-bind information (Y/N).
CLASS_BIND = input("☛ Use Class Bind or Not?(Y/N) \n")
while not (CLASS_BIND.isalpha() and CLASS_BIND.upper() in ['Y', 'N']):
    CLASS_BIND = input('✘ The format of your input is illegal, please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
CLASS_BIND = CLASS_BIND.upper()

TRAININGSET_DIR = 'Train.json'
VALIDATIONSET_DIR = 'Validation.json'
TESTSET_DIR = 'Test.json'
MODEL_DIR = 'runs/' + MODEL + '/checkpoints/'
SAVE_FILE = 'predictions.txt'

# Data loading params
tf.flags.DEFINE_string("training_data_file", TRAININGSET_DIR, "Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file", VALIDATIONSET_DIR, "Data source for the validation data")
tf.flags.DEFINE_string("test_data_file", TESTSET_DIR, "Data source for the test data")
tf.flags.DEFINE_string("checkpoint_dir", MODEL_DIR, "Checkpoint directory from training run")
tf.flags.DEFINE_string("use_classbind_or_not", CLASS_BIND, "Use the class bind info or not.")

# Model Hyperparameters
tf.flags.DEFINE_integer("pad_seq_len", 150, "Recommand padding Sequence length of data (depends on the data)")
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("embedding_type", 1, "The embedding type (default: 1)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_classes", 367, "Number of labels (depends on the task)")
tf.flags.DEFINE_integer("top_num", 2, "Number of top K prediction classess (default: 3)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth", True, "Allow gpu options growth")

FLAGS = tf.flags.FLAGS
# NOTE(review): _parse_flags / __flags are TF1-era private APIs, removed in
# later TensorFlow releases.
FLAGS._parse_flags()
dilim = '-' * 100
logger.info('\n'.join([dilim, *['{:>50}|{:<50}'.format(attr.upper(), value)
                                for attr, value in sorted(FLAGS.__flags.items())], dilim]))
def test_cnn():
    """Test CNN model.

    Restores the latest checkpoint from FLAGS.checkpoint_dir, runs the test
    set through the restored graph, logs recall/accuracy averaged over
    batches, and writes predictions to SAVE_FILE.
    """
    # Load data
    logger.info("✔ Loading data...")
    logger.info('Recommand padding Sequence length is: {}'.format(FLAGS.pad_seq_len))
    logger.info('✔︎ Test data processing...')
    test_data = data_helpers.load_data_and_labels(FLAGS.test_data_file, FLAGS.num_classes, FLAGS.embedding_dim)
    logger.info('✔︎ Test data padding...')
    x_test, y_test = data_helpers.pad_data(test_data, FLAGS.pad_seq_len)
    y_test_bind = test_data.labels_bind

    # Build vocabulary
    VOCAB_SIZE = data_helpers.load_vocab_size(FLAGS.embedding_dim)
    pretrained_word2vec_matrix = data_helpers.load_word2vec_matrix(VOCAB_SIZE, FLAGS.embedding_dim)

    # Load cnn model
    logger.info("✔ Loading model...")
    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    logger.info(checkpoint_file)

    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)

            # Get the placeholders from the graph by name
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            # input_y = graph.get_operation_by_name("input_y").outputs[0]
            # dropout disabled at inference time via feed_dict below
            dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

            # pre-trained_word2vec
            pretrained_embedding = graph.get_operation_by_name("embedding/W").outputs[0]

            # Tensors we want to evaluate
            logits = graph.get_operation_by_name("output/logits").outputs[0]

            # Generate batches for one epoch
            batches = data_helpers.batch_iter(list(zip(x_test, y_test, y_test_bind)),
                                              FLAGS.batch_size, 1, shuffle=False)

            # Collect the predictions here
            all_predicitons = []
            eval_loss, eval_rec, eval_acc, eval_counter = 0.0, 0.0, 0.0, 0
            for batch_test in batches:
                x_batch_test, y_batch_test, y_batch_test_bind = zip(*batch_test)
                feed_dict = {
                    input_x: x_batch_test,
                    dropout_keep_prob: 1.0
                }
                batch_logits = sess.run(logits, feed_dict)
                # Choose label-extraction strategy per the interactive Y/N flag.
                if FLAGS.use_classbind_or_not == 'Y':
                    predicted_labels = data_helpers.get_label_using_logits_and_classbind(
                        batch_logits, y_batch_test_bind, top_number=FLAGS.top_num)
                if FLAGS.use_classbind_or_not == 'N':
                    predicted_labels = data_helpers.get_label_using_logits(batch_logits, top_number=FLAGS.top_num)
                all_predicitons = np.append(all_predicitons, predicted_labels)
                # Accumulate per-batch recall/accuracy, then average over batches.
                cur_rec, cur_acc = 0.0, 0.0
                for index, predicted_label in enumerate(predicted_labels):
                    rec_inc, acc_inc = data_helpers.cal_rec_and_acc(predicted_label, y_batch_test[index])
                    cur_rec, cur_acc = cur_rec + rec_inc, cur_acc + acc_inc
                cur_rec = cur_rec / len(y_batch_test)
                cur_acc = cur_acc / len(y_batch_test)
                eval_rec, eval_acc, eval_counter = eval_rec + cur_rec, eval_acc + cur_acc, eval_counter + 1
                logger.info("✔︎ validation batch {} finished.".format(eval_counter))
            eval_rec = float(eval_rec / eval_counter)
            eval_acc = float(eval_acc / eval_counter)
            logger.info("☛ Recall {:g}, Accuracy {:g}".format(eval_rec, eval_acc))
    np.savetxt(SAVE_FILE, list(zip(all_predicitons)), fmt='%s')

    logger.info("✔ Done.")

if __name__ == '__main__':
    test_cnn()
|
[
"chinawolfman@hotmail.com"
] |
chinawolfman@hotmail.com
|
20bb0ef25901482c47de8542f21e7e78fb02f09f
|
614cad3588af9c0e51e0bb98963075e3195e92f5
|
/models/vote_net/backbone_module.py
|
674167186b7eb5fdbacd2d4702c1c38abea4bcc9
|
[] |
no_license
|
dragonlong/haoi-pose
|
2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58
|
43388efd911feecde588b27a753de353b8e28265
|
refs/heads/master
| 2023-07-01T14:18:29.029484
| 2021-08-10T10:57:42
| 2021-08-10T10:57:42
| 294,602,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,862
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Make the repository root and the compiled pointnet2 ops importable
# regardless of the current working directory.
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, '../models/pointnet2'))
from pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
class Pointnet2Backbone(nn.Module):
    r"""
       Backbone network for point cloud feature learning.
       Based on Pointnet++ single-scale grouping network.

       Parameters
       ----------
       input_feature_dim: int
            Number of input channels in the feature descriptor for each point.
            e.g. 3 for RGB.
    """
    def __init__(self, input_feature_dim=0):
        super().__init__()

        # Four set-abstraction layers downsample 2048 -> 1024 -> 512 -> 256
        # points with growing radii (0.2 -> 1.2).
        self.sa1 = PointnetSAModuleVotes(
                npoint=2048,
                radius=0.2,
                nsample=64,
                mlp=[input_feature_dim, 64, 64, 128],
                use_xyz=True,
                normalize_xyz=True
            )

        self.sa2 = PointnetSAModuleVotes(
                npoint=1024,
                radius=0.4,
                nsample=32,
                mlp=[128, 128, 128, 256],
                use_xyz=True,
                normalize_xyz=True
            )

        self.sa3 = PointnetSAModuleVotes(
                npoint=512,
                radius=0.8,
                nsample=16,
                mlp=[256, 128, 128, 256],
                use_xyz=True,
                normalize_xyz=True
            )

        self.sa4 = PointnetSAModuleVotes(
                npoint=256,
                radius=1.2,
                nsample=16,
                mlp=[256, 128, 128, 256],
                use_xyz=True,
                normalize_xyz=True
            )

        # Two feature-propagation (upsampling) layers back to the sa2 resolution.
        self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
        self.fp2 = PointnetFPModule(mlp=[256+256,256,256])

    def _break_up_pc(self, pc):
        # Split a (B, N, 3+C) cloud into xyz coordinates and optional
        # per-point features (transposed to channel-first for the SA modules).
        xyz = pc[..., 0:3].contiguous()
        features = (
            pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None
        )
        return xyz, features

    def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
        r"""
            Forward pass of the network

            Parameters
            ----------
            pointcloud: Variable(torch.cuda.FloatTensor)
                (B, N, 3 + input_feature_dim) tensor
                Point cloud to run predicts on
                Each point in the point-cloud MUST
                be formated as (x, y, z, features...)

            Returns
            ----------
            end_points: {XXX_xyz, XXX_features, XXX_inds}
                XXX_xyz: float32 Tensor of shape (B,K,3)
                XXX_features: float32 Tensor of shape (B,K,D)
                XXX-inds: int64 Tensor of shape (B,K) values in [0,N-1]
        """
        if not end_points: end_points = {}
        batch_size = pointcloud.shape[0]

        xyz, features = self._break_up_pc(pointcloud)

        # --------- 4 SET ABSTRACTION LAYERS ---------
        xyz, features, fps_inds = self.sa1(xyz, features)
        end_points['sa1_inds'] = fps_inds
        end_points['sa1_xyz'] = xyz
        end_points['sa1_features'] = features

        xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
        end_points['sa2_inds'] = fps_inds
        end_points['sa2_xyz'] = xyz
        end_points['sa2_features'] = features

        xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
        end_points['sa3_xyz'] = xyz
        end_points['sa3_features'] = features

        xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
        end_points['sa4_xyz'] = xyz
        end_points['sa4_features'] = features

        # --------- 2 FEATURE UPSAMPLING LAYERS --------
        features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
        features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
        end_points['fp2_features'] = features
        end_points['fp2_xyz'] = end_points['sa2_xyz']
        num_seed = end_points['fp2_xyz'].shape[1]
        end_points['fp2_inds'] = end_points['sa1_inds'][:,0:num_seed] # indices among the entire input point clouds
        return end_points
if __name__=='__main__':
backbone_net = Pointnet2Backbone(input_feature_dim=0).cuda()
print(backbone_net)
backbone_net.eval()
out = backbone_net(torch.rand(16,20000,3).cuda())
for key in sorted(out.keys()):
print(key, '\t', out[key].shape)
|
[
"lxiaol9@vt.edu"
] |
lxiaol9@vt.edu
|
d0c04d8d6b0caebcc5131c6d7c9185c6da08fb8a
|
b7ebcfa8429948745dbd9fb11f6d13c6905e9aa1
|
/lib/panda/_obj.py
|
fd7b9191f276e967f2b4dc2a6fbb176e63be53ec
|
[] |
no_license
|
SiewYan/PandaTree
|
c00c83a92044b59d460dd2d9a4319eef9f777045
|
5d2da2dc5d419c498a3a14870197aad360d6b071
|
refs/heads/master
| 2020-12-30T12:35:36.718617
| 2018-02-01T16:25:54
| 2018-02-01T16:25:54
| 91,395,990
| 0
| 1
| null | 2017-05-16T00:16:27
| 2017-05-16T00:16:27
| null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
from base import Definition
from oneliner import Include
from constexpr import Constant, Enum
from refbranch import RefBranch
from refvbranch import RefVectorBranch
from generic import GenericBranch
from objbranch import ObjBranch
from branch import Branch
from reference import Reference
from function import Function
from obj import Object
def __init__(self, name, source):
"""
Constructor called either by PhysicsObject or Tree.
Parse the source text block and collect all information on this object.
"""
self.name = name
self.includes = []
self.constants = []
self.enums = []
self.objbranches = []
self.branches = []
self.references = []
self.functions = []
while True:
line = source.readline()
line = line.strip()
if line == '':
break
try:
self.includes.append(Include(line))
continue
except Definition.NoMatch:
pass
try:
self.enums.append(Enum(line, source))
continue
except Definition.NoMatch:
pass
try:
self.constants.append(Constant(line, source))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(RefBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(RefVectorBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.objbranches.append(ObjBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(Branch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(GenericBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.references.append(Reference(line))
continue
except Definition.NoMatch:
pass
try:
self.functions.append(Function(line, source))
continue
except Definition.NoMatch:
pass
break
Object.__init__ = __init__
|
[
"yiiyama@mit.edu"
] |
yiiyama@mit.edu
|
31d9a115cbd2a43f5ea11e98d4b3a4cde1224566
|
6bdb32ddbd72c4337dab12002ff05d6966538448
|
/gridpack_folder/mc_request/LHEProducer/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500_13TeV-madgraph_cff.py
|
55b8ce6b6aafef4854d46112ee4cdb202e8e7861
|
[] |
no_license
|
cyrilbecot/DibosonBSMSignal_13TeV
|
71db480de274c893ba41453025d01bfafa19e340
|
d8e685c40b16cde68d25fef9af257c90bee635ba
|
refs/heads/master
| 2021-01-11T10:17:05.447035
| 2016-08-17T13:32:12
| 2016-08-17T13:32:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/b9fddd83b7d8e490347744408902940547e8135f/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/narrow/v1/Radion_ZZ_ZlepZhad_narrow_M2500_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
|
[
"sudha.ahuja@cern.ch"
] |
sudha.ahuja@cern.ch
|
d3fc31344ad05d1cccd859ad51a3d6332059f748
|
8b7559f7b69173109d7b6e89ab912dbb8b675c3f
|
/main/tests/test_models.py
|
104c45dcd9bc75e5d3b2024147d13fa149a12099
|
[] |
no_license
|
GoodnessEzeokafor/django-bookstore
|
7859b74ad0bddd32415b6bd917d37c008ba38a73
|
dc47e7fe201cf2a62a93c30730fa1e72a6707f93
|
refs/heads/master
| 2023-02-14T14:13:58.941227
| 2021-01-08T10:14:29
| 2021-01-08T10:14:29
| 327,135,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from decimal import Decimal
from django.test import TestCase
from main import models
class TestModel(TestCase):
def test_active_manager_works(self):
models.Product.objects.create(
name="The cathedral and the bazaar",
price=Decimal("10.0")
)
models.Product.objects.create(
name="Pride and Prejudice",
price=Decimal("2.00")
)
models.Product.objects.create(
name="A Tale of Two Cities",
price = Decimal("2.00"),
active=False
)
self.assertEqual(len(models.Product.objects.active()), 2)
|
[
"gootech442@yahoo.com"
] |
gootech442@yahoo.com
|
65da8f31eec34e35df36db0edc77988d9760b5bb
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_plat_product_get_list_request.py
|
f8948ba21e7402d29e9aed6e09e6cc8e9cb8dcca
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 738
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.platproduct.model.plat_product_get_list_request import PlatProductGetListRequest
class TestPlatProductGetListRequest(unittest.TestCase):
"""PlatProductGetListRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPlatProductGetListRequest(self):
"""Test PlatProductGetListRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = PlatProductGetListRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"tokimekiyxp@foxmail.com"
] |
tokimekiyxp@foxmail.com
|
456a38ad9b87e1b826c521e146df928c90163e88
|
0fbd56d4a2ee512cb47f557bea310618249a3d2e
|
/official/vision/beta/modeling/layers/roi_sampler.py
|
46b4c349839f207291fc2ca42a601d9eaabce92c
|
[
"Apache-2.0"
] |
permissive
|
joppemassant/models
|
9968f74f5c48096f3b2a65e6864f84c0181465bb
|
b2a6712cbe6eb9a8639f01906e187fa265f3f48e
|
refs/heads/master
| 2022-12-10T01:29:31.653430
| 2020-09-11T11:26:59
| 2020-09-11T11:26:59
| 294,675,920
| 1
| 1
|
Apache-2.0
| 2020-09-11T11:21:51
| 2020-09-11T11:21:51
| null |
UTF-8
|
Python
| false
| false
| 5,978
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ROI sampler."""
# Import libraries
import tensorflow as tf
from official.vision.beta.modeling.layers import box_matcher
from official.vision.beta.modeling.layers import box_sampler
from official.vision.beta.ops import box_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class ROISampler(tf.keras.layers.Layer):
"""Sample ROIs and assign targets to the sampled ROIs."""
def __init__(self,
mix_gt_boxes=True,
num_sampled_rois=512,
foreground_fraction=0.25,
foreground_iou_threshold=0.5,
background_iou_high_threshold=0.5,
background_iou_low_threshold=0,
**kwargs):
"""Initializes a ROI sampler.
Args:
mix_gt_boxes: bool, whether to mix the groundtruth boxes with proposed
ROIs.
num_sampled_rois: int, the number of sampled ROIs per image.
foreground_fraction: float in [0, 1], what percentage of proposed ROIs
should be sampled from the foreground boxes.
foreground_iou_threshold: float, represent the IoU threshold for a box to
be considered as positive (if >= `foreground_iou_threshold`).
background_iou_high_threshold: float, represent the IoU threshold for a
box to be considered as negative (if overlap in
[`background_iou_low_threshold`, `background_iou_high_threshold`]).
background_iou_low_threshold: float, represent the IoU threshold for a box
to be considered as negative (if overlap in
[`background_iou_low_threshold`, `background_iou_high_threshold`])
**kwargs: other key word arguments passed to Layer.
"""
self._config_dict = {
'mix_gt_boxes': mix_gt_boxes,
'num_sampled_rois': num_sampled_rois,
'foreground_fraction': foreground_fraction,
'foreground_iou_threshold': foreground_iou_threshold,
'background_iou_high_threshold': background_iou_high_threshold,
'background_iou_low_threshold': background_iou_low_threshold,
}
self._matcher = box_matcher.BoxMatcher(
foreground_iou_threshold,
background_iou_high_threshold,
background_iou_low_threshold)
self._sampler = box_sampler.BoxSampler(
num_sampled_rois, foreground_fraction)
super(ROISampler, self).__init__(**kwargs)
def call(self, boxes, gt_boxes, gt_classes):
"""Assigns the proposals with groundtruth classes and performs subsmpling.
Given `proposed_boxes`, `gt_boxes`, and `gt_classes`, the function uses the
following algorithm to generate the final `num_samples_per_image` RoIs.
1. Calculates the IoU between each proposal box and each gt_boxes.
2. Assigns each proposed box with a groundtruth class and box by choosing
the largest IoU overlap.
3. Samples `num_samples_per_image` boxes from all proposed boxes, and
returns box_targets, class_targets, and RoIs.
Args:
boxes: a tensor of shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment. The last dimension is the
box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax]
format.
gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4].
The coordinates of gt_boxes are in the pixel coordinates of the scaled
image. This tensor might have padding of values -1 indicating the
invalid box coordinates.
gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with values of -1 indicating the invalid
classes.
Returns:
sampled_rois: a tensor of shape of [batch_size, K, 4], representing the
coordinates of the sampled RoIs, where K is the number of the sampled
RoIs, i.e. K = num_samples_per_image.
sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the
box coordinates of the matched groundtruth boxes of the samples RoIs.
sampled_gt_classes: a tensor of shape of [batch_size, K], storing the
classes of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_indices: a tensor of shape of [batch_size, K], storing the
indices of the sampled groudntruth boxes in the original `gt_boxes`
tensor, i.e.
gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
"""
if self._config_dict['mix_gt_boxes']:
gt_boxes = tf.cast(gt_boxes, dtype=boxes.dtype)
boxes = tf.concat([boxes, gt_boxes], axis=1)
(matched_gt_boxes, matched_gt_classes, matched_gt_indices,
positive_matches, negative_matches, ignored_matches) = (
self._matcher(boxes, gt_boxes, gt_classes))
sampled_indices = self._sampler(
positive_matches, negative_matches, ignored_matches)
sampled_rois, sampled_gt_boxes, sampled_gt_classes, sampled_gt_indices = (
box_ops.gather_instances(
sampled_indices,
boxes,
matched_gt_boxes,
matched_gt_classes,
matched_gt_indices))
return (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
sampled_gt_indices)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
1dba441eba9e895c8b00e03309a0bcd68e736e31
|
d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3
|
/chromium/mojo/public/tools/bindings/pylib/mojom/generate/test_support.py
|
eb394619d2bf4855522d7157dff0d13e87c59850
|
[
"BSD-3-Clause"
] |
permissive
|
Csineneo/Vivaldi
|
4eaad20fc0ff306ca60b400cd5fad930a9082087
|
d92465f71fb8e4345e27bd889532339204b26f1e
|
refs/heads/master
| 2022-11-23T17:11:50.714160
| 2019-05-25T11:45:11
| 2019-05-25T11:45:11
| 144,489,531
| 5
| 4
|
BSD-3-Clause
| 2022-11-04T05:55:33
| 2018-08-12T18:04:37
| null |
UTF-8
|
Python
| false
| false
| 6,092
|
py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import traceback
import module as mojom
# Support for writing mojom test cases.
# RunTest(fn) will execute fn, catching any exceptions. fn should return
# the number of errors that are encountered.
#
# EXPECT_EQ(a, b) and EXPECT_TRUE(b) will print error information if the
# expectations are not true and return a non zero value. This allows test cases
# to be written like this
#
# def Foo():
# errors = 0
# errors += EXPECT_EQ('test', test())
# ...
# return errors
#
# RunTest(foo)
def FieldsAreEqual(field1, field2):
if field1 == field2:
return True
return field1.name == field2.name and \
KindsAreEqual(field1.kind, field2.kind) and \
field1.ordinal == field2.ordinal and \
field1.default == field2.default
def KindsAreEqual(kind1, kind2):
if kind1 == kind2:
return True
if kind1.__class__ != kind2.__class__ or kind1.spec != kind2.spec:
return False
if kind1.__class__ == mojom.Kind:
return kind1.spec == kind2.spec
if kind1.__class__ == mojom.Struct:
if kind1.name != kind2.name or \
kind1.spec != kind2.spec or \
len(kind1.fields) != len(kind2.fields):
return False
for i in range(len(kind1.fields)):
if not FieldsAreEqual(kind1.fields[i], kind2.fields[i]):
return False
return True
if kind1.__class__ == mojom.Array:
return KindsAreEqual(kind1.kind, kind2.kind)
print 'Unknown Kind class: ', kind1.__class__.__name__
return False
def ParametersAreEqual(parameter1, parameter2):
if parameter1 == parameter2:
return True
return parameter1.name == parameter2.name and \
parameter1.ordinal == parameter2.ordinal and \
parameter1.default == parameter2.default and \
KindsAreEqual(parameter1.kind, parameter2.kind)
def MethodsAreEqual(method1, method2):
if method1 == method2:
return True
if method1.name != method2.name or \
method1.ordinal != method2.ordinal or \
len(method1.parameters) != len(method2.parameters):
return False
for i in range(len(method1.parameters)):
if not ParametersAreEqual(method1.parameters[i], method2.parameters[i]):
return False
return True
def InterfacesAreEqual(interface1, interface2):
if interface1 == interface2:
return True
if interface1.name != interface2.name or \
len(interface1.methods) != len(interface2.methods):
return False
for i in range(len(interface1.methods)):
if not MethodsAreEqual(interface1.methods[i], interface2.methods[i]):
return False
return True
def ModulesAreEqual(module1, module2):
if module1 == module2:
return True
if module1.name != module2.name or \
module1.namespace != module2.namespace or \
len(module1.structs) != len(module2.structs) or \
len(module1.interfaces) != len(module2.interfaces):
return False
for i in range(len(module1.structs)):
if not KindsAreEqual(module1.structs[i], module2.structs[i]):
return False
for i in range(len(module1.interfaces)):
if not InterfacesAreEqual(module1.interfaces[i], module2.interfaces[i]):
return False
return True
# Builds and returns a Module suitable for testing/
def BuildTestModule():
module = mojom.Module('test', 'testspace')
struct = module.AddStruct('teststruct')
struct.AddField('testfield1', mojom.INT32)
struct.AddField('testfield2', mojom.Array(mojom.INT32), 42)
interface = module.AddInterface('Server')
method = interface.AddMethod('Foo', 42)
method.AddParameter('foo', mojom.INT32)
method.AddParameter('bar', mojom.Array(struct))
return module
# Tests if |module| is as built by BuildTestModule(). Returns the number of
# errors
def TestTestModule(module):
errors = 0
errors += EXPECT_EQ('test', module.name)
errors += EXPECT_EQ('testspace', module.namespace)
errors += EXPECT_EQ(1, len(module.structs))
errors += EXPECT_EQ('teststruct', module.structs[0].name)
errors += EXPECT_EQ(2, len(module.structs[0].fields))
errors += EXPECT_EQ('testfield1', module.structs[0].fields[0].name)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[0].kind)
errors += EXPECT_EQ('testfield2', module.structs[0].fields[1].name)
errors += EXPECT_EQ(mojom.Array, module.structs[0].fields[1].kind.__class__)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[1].kind.kind)
errors += EXPECT_EQ(1, len(module.interfaces))
errors += EXPECT_EQ('Server', module.interfaces[0].name)
errors += EXPECT_EQ(1, len(module.interfaces[0].methods))
errors += EXPECT_EQ('Foo', module.interfaces[0].methods[0].name)
errors += EXPECT_EQ(2, len(module.interfaces[0].methods[0].parameters))
errors += EXPECT_EQ('foo', module.interfaces[0].methods[0].parameters[0].name)
errors += EXPECT_EQ(mojom.INT32,
module.interfaces[0].methods[0].parameters[0].kind)
errors += EXPECT_EQ('bar', module.interfaces[0].methods[0].parameters[1].name)
errors += EXPECT_EQ(
mojom.Array,
module.interfaces[0].methods[0].parameters[1].kind.__class__)
errors += EXPECT_EQ(
module.structs[0],
module.interfaces[0].methods[0].parameters[1].kind.kind)
return errors
def PrintFailure(string):
stack = traceback.extract_stack()
frame = stack[len(stack)-3]
sys.stderr.write("ERROR at %s:%d, %s\n" % (frame[0], frame[1], string))
print "Traceback:"
for line in traceback.format_list(stack[:len(stack)-2]):
sys.stderr.write(line)
def EXPECT_EQ(a, b):
if a != b:
PrintFailure("%s != %s" % (a, b))
return 1
return 0
def EXPECT_TRUE(a):
if not a:
PrintFailure('Expecting True')
return 1
return 0
def RunTest(fn):
sys.stdout.write('Running %s...' % fn.__name__)
try:
errors = fn()
except:
traceback.print_exc(sys.stderr)
errors = 1
if errors == 0:
sys.stdout.write('OK\n')
elif errors == 1:
sys.stdout.write('1 ERROR\n')
else:
sys.stdout.write('%d ERRORS\n' % errors)
return errors
|
[
"csineneo@gmail.com"
] |
csineneo@gmail.com
|
d8f8388ccf0bde786d3c4b612af5b9f908999b36
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/errors/types/keyword_plan_idea_error.py
|
7317f5950bfe3158aa862fb8e1d10e4d1711708a
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.errors',
marshal='google.ads.googleads.v5',
manifest={
'KeywordPlanIdeaErrorEnum',
},
)
class KeywordPlanIdeaErrorEnum(proto.Message):
r"""Container for enum describing possible errors from
KeywordPlanIdeaService.
"""
class KeywordPlanIdeaError(proto.Enum):
r"""Enum describing possible errors from KeywordPlanIdeaService."""
UNSPECIFIED = 0
UNKNOWN = 1
URL_CRAWL_ERROR = 2
INVALID_VALUE = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
3123b2e166e0c4d6732e9496e51de271ea7f14b1
|
512f48fdcfa78e322526cf47163110009b84bf73
|
/rapid7vmconsole/models/privileges.py
|
62da3458bc049d084294218fcc09b1e20475b6aa
|
[
"MIT"
] |
permissive
|
confluentinc/vm-console-client-python
|
9a0f540c0113acf68ee9dc914715bc255e4d99f4
|
ccbd944a0e0333c73e098b769fe4c82755d29874
|
refs/heads/master
| 2023-07-18T10:33:58.909287
| 2021-09-02T20:52:20
| 2021-09-02T20:52:20
| 402,559,283
| 0
| 0
|
MIT
| 2021-09-02T20:49:56
| 2021-09-02T20:49:56
| null |
UTF-8
|
Python
| false
| false
| 4,944
|
py
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Privileges(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[str]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""Privileges - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this Privileges. # noqa: E501
:return: The links of this Privileges. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Privileges.
:param links: The links of this Privileges. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this Privileges. # noqa: E501
:return: The resources of this Privileges. # noqa: E501
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this Privileges.
:param resources: The resources of this Privileges. # noqa: E501
:type: list[str]
"""
allowed_values = ["all-permissions", "create-reports", "configure-global-settings", "manage-sites", "manage-tags", "manage-static-asset-groups", "manage-dynamic-asset-groups", "manage-scan-templates", "manage-report-templates", "manage-scan-engines", "submit-vulnerability-exceptions", "approve-vulnerability-exceptions", "delete-vulnerability-exceptions", "manage-vuln-investigations", "view-vuln-investigations", "create-tickets", "close-tickets", "assign-ticket-assignee", "manage-site-access", "manage-asset-group-access", "manage-report-access", "use-restricted-report-sections", "manage-policies", "view-asset-group-asset-data", "manage-asset-group-assets", "view-site-asset-data", "specify-site-metadata", "purge-site-asset-data", "specify-scan-targets", "assign-scan-engine", "assign-scan-template", "manage-site-credentials", "manage-scan-alerts", "schedule-automatic-scans", "start-unscheduled-scans"] # noqa: E501
if not set(resources).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `resources` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(resources) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Privileges, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Privileges):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"zachary_youtz@rapid7.com"
] |
zachary_youtz@rapid7.com
|
8c462a9504616211d1a864ac6f1a00d0a2cba936
|
b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e
|
/pypos/pypos-000/pypos.py
|
1d36f8dacb776baa51da76f15440425f8f40a5f8
|
[] |
no_license
|
pglen/pgpygtk
|
4d1405478a714f003984cf3e3db04ff1f767470b
|
33f58010e304f1a312f2356de453ecedb7aa21ef
|
refs/heads/master
| 2021-01-22T01:18:52.238415
| 2019-01-01T01:37:24
| 2019-01-01T01:37:24
| 102,215,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,615
|
py
|
#!/usr/bin/env python
import os, sys, getopt, signal
import gobject, gtk, pango
# ------------------------------------------------------------------------
# This is open source sticker program. Written in python.
GAP = 4 # Gap in pixels
TABSTOP = 4
FGCOLOR = "#000000"
BGCOLOR = "#ffff88"
version = 1.0
verbose = False
# Where things are stored (backups, orgs, macros)
config_dir = os.path.expanduser("~/.pypos")
def OnExit(win):
gtk.main_quit()
def help():
print
print "Pypos version: ", version
print
print "Usage: " + os.path.basename(sys.argv[0]) + " [options] [[filename] ... [filenameN]]"
print
print "Options:"
print
print " -d level - Debug level 1-10. (Limited implementation)"
print " -v - Verbose (to stdout and log)"
print " -c - Dump Config"
print " -h - Help"
print
def area_motion(self, area, event):
print "window motion event", event.state, event.x, event.y
if event.state & gtk.gdk.BUTTON1_MASK:
print "drag"
# Start of program:
if __name__ == '__main__':
try:
if not os.path.isdir(config_dir):
os.mkdir(config_dir)
except: pass
# Let the user know it needs fixin'
if not os.path.isdir(config_dir):
print "Cannot access config dir:", config_dir
sys.exit(1)
opts = []; args = []
try:
opts, args = getopt.getopt(sys.argv[1:], "hv")
except getopt.GetoptError, err:
print "Invalid option(s) on command line:", err
sys.exit(1)
#print "opts", opts, "args", args
for aa in opts:
if aa[0] == "-d":
try:
pgdebug = int(aa[1])
except:
pgdebug = 0
if aa[0] == "-h": help(); exit(1)
if aa[0] == "-v": verbose = True
#if aa[0] == "-x": clear_config = True
#if aa[0] == "-c": show_config = True
#if aa[0] == "-t": show_timing = True
if verbose:
print "PyPos running on", "'" + os.name + "'", \
"GTK", gtk.gtk_version, "PyGtk", gtk.pygtk_version
www = gtk.gdk.screen_width(); hhh = gtk.gdk.screen_height();
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
#window.set_decorated(False)
window.set_position(gtk.WIN_POS_CENTER)
window.set_default_size(3*www/4, 3*hhh/4)
window.set_flags(gtk.CAN_FOCUS | gtk.SENSITIVE)
window.connect("destroy", OnExit)
window.show_all()
gtk.main()
|
[
"peterglen99@gmail.com"
] |
peterglen99@gmail.com
|
e9e3cda5266717a5660707d3e5cbb04a54cdf11c
|
34a7e30c3ceafb06c9a21c59c88c3ea5a6e91388
|
/python/datagen/addPriority.py
|
dbbff881d7bd7fd56ded7dd0a280ef0ad32f27fd
|
[] |
no_license
|
DinoBektesevic/DinoBektesevic.github.io
|
91643f54411d214e7552e9ef2e1e0fbece5fb841
|
be8cc8b3b2b58cbc1517593377228ff541fd515c
|
refs/heads/main
| 2023-05-29T22:39:23.801299
| 2021-06-10T02:55:12
| 2021-06-10T02:55:12
| 364,038,461
| 0
| 0
| null | 2021-05-10T20:30:01
| 2021-05-03T19:27:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
import glob
import pandas as pd
import numpy
import seaborn as sns
import matplotlib.pyplot as plt
# filename = "mjd-59662-sdss-simple-expanded.csv"
allfiles = glob.glob("testDir/mjd*-simple-expanded.csv")
for filename in allfiles:
df = pd.read_csv(filename, index_col=0)
newfilename = filename.strip(".csv") + "-priority.csv"
priority = numpy.array([-1]*len(df))
completion = numpy.array([-1]*len(df))
df["priority"] = priority
df["completion"] = completion
fields = list(set(df[df.objType=="sdss field"]["fieldID"]))
fieldPriority = numpy.random.choice([0,1,2,3,4,5], size=len(fields))
fieldCompletion = numpy.random.uniform(high=100, size=len(fields))
for field, priority, completion in zip(fields, fieldPriority, fieldCompletion):
# check if its scheduled
sched = list(df[df.fieldID==field]["scheduled"])
if True in sched:
# give all scheduled plates high priority
priority = 0
df["priority"].loc[df["fieldID"]==field] = priority
df["completion"].loc[df["fieldID"]==field] = completion
df.reset_index()
df.to_csv(newfilename)
# sns.scatterplot(x="fieldID", y="completion", data=df[df.objType=="sdss field"])
# plt.show()
# import pdb; pdb.set_trace()
# print(fields)
|
[
"csayres@uw.edu"
] |
csayres@uw.edu
|
b992279df4179e343cd86a13c730cb7d56b36b83
|
96909e3b2eb787afa739f3020a9292afae61b0b5
|
/web/__init__.py
|
f2ab81fbc93b8f2f236e99d276ed434f89b742c1
|
[] |
no_license
|
fengges/se
|
09bd6306f67d78fe0f51286ab41f629237fcf4d6
|
51e199a7fc5f7666063a556f41669a6a8b4fe37d
|
refs/heads/master
| 2020-03-27T04:29:32.207191
| 2018-08-24T05:47:40
| 2018-08-24T05:47:40
| 145,944,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,314
|
py
|
# author :feng
# time :2018/1/25
# function : 应用初始化
# 注册蓝图
import os
from main import Query
from flask import Flask,json,request
app = Flask(__name__)
subject = [{"code": '01', "k": 46}, {"code": '02', "k": 98}, {"code": '03', "k": 98},
{"code": '04', "k": 88}, {"code": '05', "k": 98}, {"code": '06', "k": 28},
{"code": '07', "k": 54}, {"code": '0701', "k": 64}, {"code": '0702', "k": 30},
{"code": '0703', "k": 52}, {"code": '0705', "k": 16}, {"code": '0706', "k": 12},
{"code": '0707', "k": 14}, {"code": '0709', "k": 98}, {"code": '0710', "k": 98},
{"code": '0712', "k": 10}, {"code": '08', "k": 50}, {"code": '0801', "k": 26},
{"code": '0802', "k": 98}, {"code": '0803', "k": 14}, {"code": '0804', "k": 12},
{"code": '0805', "k": 98}, {"code": '0806', "k": 12}, {"code": '0807', "k": 38},
{"code": '0808', "k": 98}, {"code": '0809', "k": 52}, {"code": '0810', "k": 98},
{"code": '0811', "k": 22}, {"code": '0812', "k": 72}, {"code": '0813', "k": 30},
{"code": '0814', "k": 68}, {"code": '0815', "k": 14}, {"code": '0816', "k": 14},
{"code": '0817', "k": 98}, {"code": '0818', "k": 14}, {"code": '0819', "k": 18},
{"code": '0820', "k": 18}, {"code": '0821', "k": 18}, {"code": '0823', "k": 24},
{"code": '0824', "k": 14}, {"code": '0825', "k": 26}, {"code": '0826', "k": 10},
{"code": '0827', "k": 12}, {"code": '0828', "k": 36}, {"code": '0829', "k": 14},
{"code": '0830', "k": 82}, {"code": '0831', "k": 16}, {"code": '0832', "k": 28},
{"code": '09', "k": 74}, {"code": '10', "k": 98}, {"code": '11', "k": 14},
{"code": '12', "k": 98}]
a = Query(subject)
@app.route('/search',methods=['GET','POST'])
def index6():
t = request.data
if len(t)==0:
t=request.values['data']
data = json.loads(t)
text=data['keyword']
if "filer" not in data:
filer={}
else:
filer = data['filer']
if "school" in filer and "all" in filer["school"]:
del filer["school"]
if "code" in filer and "all" in filer["code"]:
del filer["code"]
r=a.do_query(text,filer)
s=json.jsonify(r)
return s
|
[
"1059387928@qq.com"
] |
1059387928@qq.com
|
b055d8ae6cafcbe25b727929949414109497dfbf
|
fe91e0f7f74c3156a5c194713a69d9846b9e26a2
|
/flask_app/blueprints/api/blueprint.py
|
9493ed788ee23a4f569ba5bbd705e244e4682ac4
|
[
"BSD-3-Clause"
] |
permissive
|
getslash/backslash
|
cbf963006e3de565a1512f79c6c9ab84e705c67e
|
67554c039f8ac6a648deb191cc7fb69480f28253
|
refs/heads/develop
| 2023-01-10T22:26:11.666887
| 2022-06-17T05:06:00
| 2022-06-17T05:06:00
| 23,376,788
| 17
| 15
|
NOASSERTION
| 2022-12-27T16:17:59
| 2014-08-27T04:30:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
import functools
from flask import Blueprint
from flask_simple_api import SimpleAPI
from ... import activity
from ...utils.api_utils import (auto_render, requires_login,
requires_login_or_runtoken)
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = SimpleAPI(blueprint)
_api_info = {'endpoints': {}}
def API(func=None, require_real_login=False, generates_activity=True, require_login=True, version=1):
if func is None:
return functools.partial(API, require_real_login=require_real_login, generates_activity=generates_activity, require_login=require_login, version=version)
returned = auto_render(func)
endpoint_info = _api_info['endpoints'][func.__name__] = {}
endpoint_info['login_required'] = require_login
endpoint_info['version'] = version
if generates_activity:
returned = activity.updates_last_active(returned)
if require_login:
if require_real_login:
returned = requires_login(returned)
else:
returned = requires_login_or_runtoken(returned)
return api.include(returned)
@blueprint.route('/', methods=['OPTIONS'], strict_slashes=False)
def get_api_info():
from flask import jsonify
return jsonify(_api_info)
|
[
"vmalloc@gmail.com"
] |
vmalloc@gmail.com
|
17cbd59404e6774d0093023cd921bea9c0b812b8
|
3e5e8d6c1b39d459f4e489db083bd437f88bf213
|
/path/path_server.py
|
f998079d6e3812562f8b43147012f9300ab9e3bd
|
[] |
no_license
|
emonson/SamVis
|
37b4f92e482a5227520c4f6b95896ab35d0b71e5
|
98f1dc793bc6a0a38785cb279cd8d27a44807b8b
|
refs/heads/master
| 2020-06-04T03:04:53.257031
| 2014-10-30T17:34:39
| 2014-10-30T17:34:39
| 9,029,161
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,354
|
py
|
import cherrypy
import json
from path_obj import PathObj
import os
import glob
class ResourceIndex(object):
def __init__(self, server_url, data_names):
self.server_url = server_url
self.data_names = data_names
@cherrypy.expose
def index(self):
return self.to_html()
@cherrypy.expose
def datasets(self):
return json.dumps(self.data_names)
def to_html(self):
html_item = lambda (name): '<div><a href="' + self.server_url + '?data={name}">{name}</a></div>'.format(**vars())
items = map(html_item, self.data_names)
items = ''.join(items)
return '<html>{items}</html>'.format(**vars())
class PathServer:
# _cp_config = {'tools.gzip.on': True}
def __init__(self, path):
print 'STARTING UP', path
self.path = PathObj(path)
@cherrypy.expose
def index(self):
return self.path.path_data_dir
# ------------
# Paths
@cherrypy.expose
@cherrypy.tools.gzip()
def districtcoords(self, district_id = None, depth = 1, previous_id = None, rold = "1.0, 0.0, 0.0, 1.0"):
if district_id is not None:
dist_id = int(district_id)
d = int(depth)
if previous_id is not None:
prev_id = int(previous_id)
else:
prev_id = dist_id
R_old = self.parse_rold(rold)
return self.path.GetDistrictDeepPathLocalRotatedCoordInfo_JSON(dist_id, prev_id, d, R_old)
# ------------
# Ellipses
@cherrypy.expose
@cherrypy.tools.gzip()
def districtellipses(self, district_id = None, type = 'space', previous_id = None, rold = "1.0, 0.0, 0.0, 1.0"):
if district_id is not None:
dist_id = int(district_id)
if previous_id is not None:
prev_id = int(previous_id)
else:
prev_id = dist_id
R_old = self.parse_rold(rold)
if type == 'diffusion':
return self.path.GetDistrictDiffusionRotatedEllipses_JSON(dist_id, prev_id, R_old)
else:
return self.path.GetDistrictLocalRotatedEllipses_JSON(dist_id, prev_id, R_old)
# ------------
# Query
@cherrypy.expose
@cherrypy.tools.gzip()
def pathtimedistrict(self, time=None):
if time is not None:
t = int(time)
# Get district ID for path at a specified time
return self.path.GetDistrictFromPathTime_JSON(t)
@cherrypy.expose
@cherrypy.tools.gzip()
def netpoints(self):
# 2D coordinates of overview of district centers
return self.path.GetNetPoints_JSON()
@cherrypy.expose
@cherrypy.tools.gzip()
def datainfo(self):
# {datatype:('image', 'gene',...), shape:[n_rows, n_cols], alldata_bounds:[min, max]}}
return self.path.GetDataInfo_JSON()
@cherrypy.expose
@cherrypy.tools.gzip()
def transitiongraph(self):
# nodes (with ids and occupation times) and edges (with transition sums)
return self.path.GetTransitionGraph_JSON()
@cherrypy.expose
@cherrypy.tools.gzip()
def timesfromdistrict(self, district_id=None):
if district_id is not None:
dist_id = int(district_id)
# Average 1st passage times to other districts from this one
return self.path.GetTimesFromDistrict_JSON(dist_id)
@cherrypy.expose
@cherrypy.tools.gzip()
def districtcenterdata(self, district_id=None):
if district_id is not None:
dist_id = int(district_id)
# TODO: Make this more general. For now it's just an image for the district center
# TODO: Need to figure out a way to detect early on what type of data is associated
# with each district, and tailor the JS visualizations accordingly, and here
# just grab data without knowing what it is.
return self.path.GetDistrictCenterData_JSON(dist_id)
# ------------
# Utility
def parse_rold(self, rold):
# Parse comma-separated list of four floats encoded as a string
try:
a00, a01, a10, a11 = (float(r) for r in rold.split(','))
R_old = [[a00, a01], [a10, a11]]
except:
R_old = [[1.0, 0.0], [0.0, 1.0]]
return R_old
# ------------
class Root(object):
def __init__(self, names_list):
self.data_names = names_list
@cherrypy.expose
@cherrypy.tools.gzip()
def index(self):
return json.dumps(self.data_names)
# Storing server name and port in a json file for easy config
server_filename = 'server_conf.json'
server_opts = json.loads(open(server_filename).read())
# Go through data directory and add methods to root for each data set
data_dir = server_opts['path_data_dir']
vis_page = 'district_path.html'
data_paths = [xx for xx in glob.glob(os.path.join(data_dir,'*')) if os.path.isdir(xx)]
data_dirnames = [os.path.basename(xx) for xx in data_paths]
# Storing the dataset names in the root so they can easily be passed to the html pages
root = Root(data_dirnames)
# This adds the methods for each data directory
for ii,name in enumerate(data_dirnames):
print name, data_paths[ii]
setattr(root, name, PathServer(data_paths[ii]))
# add the resource index, which will list links to the data sets
base_url = 'http://' + server_opts['server_name'] + '/~' + server_opts['account'] + '/' + server_opts['path_web_path'] + '/' + vis_page
root.resource_index = ResourceIndex(server_url=base_url, data_names=data_dirnames)
# Start up server
cherrypy.config.update({
# 'tools.gzip.on' : True,
'server.socket_port': server_opts['path_port'],
# 'server.socket_host':'127.0.0.1'
'server.socket_host':server_opts['server_name']
})
cherrypy.quickstart(root)
|
[
"emonson@cs.duke.edu"
] |
emonson@cs.duke.edu
|
6751813df7cadcbc722015f087934164f1982cbe
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res_bw/scripts/common/lib/email/mime/nonmultipart.py
|
527fda5afdf5a3217564dd26d8a2f0384691bce1
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 730
|
py
|
# 2016.02.14 12:47:55 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/email/mime/nonmultipart.py
"""Base class for MIME type messages that are not multipart."""
__all__ = ['MIMENonMultipart']
from email import errors
from email.mime.base import MIMEBase
class MIMENonMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def attach(self, payload):
raise errors.MultipartConversionError('Cannot attach additional subparts to non-multipart/*')
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\email\mime\nonmultipart.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:47:55 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a91f1a29d2b88913cdb79f5181f207f5e3eadd65
|
05e634a232574f676434dfa8e4183f3d0a1a4bc9
|
/paddlecv/ppcv/ops/connector/base.py
|
9d315823ec24a76a0e34664c97122662ff637792
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/models
|
67ac00d93c5255ac64a9d80ae5be2e8927e47cee
|
8042c21b690ffc0162095e749a41b94dd38732da
|
refs/heads/release/2.4
| 2023-09-04T15:23:59.543625
| 2023-07-20T11:54:16
| 2023-07-20T11:54:16
| 88,868,842
| 7,633
| 3,597
|
Apache-2.0
| 2023-09-05T23:23:54
| 2017-04-20T13:30:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
from ppcv.ops.base import BaseOp
class ConnectorBaseOp(BaseOp):
def __init__(self, model_cfg, env_cfg=None):
super(ConnectorBaseOp, self).__init__(model_cfg, env_cfg)
self.name = model_cfg["name"]
keys = self.get_output_keys()
self.output_keys = [self.name + '.' + key for key in keys]
@classmethod
def type(self):
return 'CONNECTOR'
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
149cbde05cc6385c66a90062e7ac22763bf9aed1
|
a03a7935a191d63bee76fd3b85a61ee27f98904a
|
/test/tests/databases/pdbdatabase.py
|
819adafd698290f378a6eb7b80bb41c8c6c1bf27
|
[] |
no_license
|
cchriste/visit
|
57091c4a512ab87efd17c64c7494aa4cf01b7e53
|
c72c413f571e56b52fb7221955219f11f4ba19e3
|
refs/heads/master
| 2020-04-12T06:25:27.458132
| 2015-10-12T15:41:49
| 2015-10-12T15:41:49
| 10,111,791
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,311
|
py
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: pdbdatabase.py
#
# Tests: mesh - 2D,3D curvilinear, single domain
# plots - Pseudocolor, Subset, Vector
# operators - Clip
#
# Programmer: Brad Whitlock
# Date: Thu Sep 25 09:31:28 PDT 2003
#
# Modifications:
# Brad Whitlock, Wed Mar 31 09:11:08 PDT 2004
# I added code to clear the engine cache to reduce memory usage.
#
# Brad Whitlock, Fri Apr 9 16:54:15 PST 2004
# I added TestSection to divide up the tests a little.
#
# Brad Whitlock, Thu Sep 2 12:08:59 PDT 2004
# I replaced some deprecated calls with their new equivalents.
#
# Brad Whitlock, Tue Dec 7 17:52:33 PST 2004
# I added a test for mixvars in Flash files.
#
# Mark C. Miller, Sat Feb 3 00:42:05 PST 2007
# Added tests for array variables
# ----------------------------------------------------------------------------
##
## This creates a name for a test.
##
def CreateTestName(testName, testIndex):
name = "%s_%02d" % (testName, testIndex)
return name
def sv3():
v3 = View3DAttributes()
v3.viewNormal = (0.516282, 0.582114, 0.628169)
v3.focus = (0, 0, 0)
v3.viewUp = (-0.488576, 0.80261, -0.342213)
v3.viewAngle = 30
v3.parallelScale = 43.589
v3.nearPlane = -87.178
v3.farPlane = 87.178
v3.imagePan = (0, 0)
v3.imageZoom = 1.41577
v3.perspective = 1
SetView3D(v3)
##
## This function performs the test using the specified database.
##
def TestWithDatabase(db, testName):
TestSection("Testing with %s" % db)
# Open the test database
OpenDatabase(db)
##
## Do the 2D tests.
##
# Add the plots.
AddPlot("Subset", "material(mesh)")
DrawPlots()
# Do the first test in the series
Test(CreateTestName(testName, 0))
SetTimeSliderState(6)
Test(CreateTestName(testName, 1))
SetTimeSliderState(15)
Test(CreateTestName(testName, 2))
# Do a test on the last frame in the animation.
SetTimeSliderState(22)
Test(CreateTestName(testName, 3))
AddPlot("Mesh", "mesh")
DrawPlots()
v = View2DAttributes()
v.windowCoords = (-6.07862, -0.374491, 4.48986, 10.8545)
v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
SetView2D(v)
Test(CreateTestName(testName, 4))
# Try turning off material 2
SetActivePlots((0,1))
TurnMaterialsOff("2")
Test(CreateTestName(testName, 5))
TurnMaterialsOn()
ResetView()
DeleteAllPlots()
AddPlot("Pseudocolor", "mesh/a")
DrawPlots()
Test(CreateTestName(testName, 6))
# Define a expression. I'm testing this because of the strange
# <mesh/var> syntax that my plugin has.
DefineVectorExpression("testexp1", "3.0 * {<mesh/lt>, <mesh/a>/399.0}")
AddPlot("Vector", "testexp1")
DrawPlots();
vec = VectorAttributes()
vec.nVectors = 1200
vec.colorByMag = 0
SetPlotOptions(vec)
v.windowCoords = (-9.51217, -0.289482, 0.983025, 10.6717)
v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
SetView2D(v)
Test(CreateTestName(testName, 7))
# Set the time back to frame 0
SetTimeSliderState(0)
ResetView()
DeleteAllPlots()
##
## Do the 3D tests.
##
AddPlot("Subset", "material2(revolved_mesh)")
AddOperator("Clip")
c = ClipAttributes()
c.funcType = c.Plane
c.plane1Status = 0
c.plane2Status = 1
c.plane3Status = 1
SetOperatorOptions(c)
DrawPlots()
# Set the view
sv3()
Test(CreateTestName(testName, 8))
SetTimeSliderState(6)
sv3()
Test(CreateTestName(testName, 9))
SetTimeSliderState(15)
sv3()
Test(CreateTestName(testName, 10))
# Do a test on the last frame in the animation.
SetTimeSliderState(22)
sv3()
Test(CreateTestName(testName, 11))
# Turn off some materials
TurnMaterialsOff(("1", "3", "4"))
sv3()
Test(CreateTestName(testName, 12))
TurnMaterialsOn()
# Set the time back to frame 2
SetTimeSliderState(2)
ResetView()
DeleteAllPlots()
#
# Test array variables
#
AddPlot("Pseudocolor","logical_mesh/marray_comps/comp_002")
DrawPlots()
Test(CreateTestName(testName, 13))
DeleteAllPlots()
ResetView()
AddPlot("Pseudocolor","revolved_mesh/marray_comps/comp_002")
DrawPlots()
Test(CreateTestName(testName, 14))
DeleteAllPlots()
ResetView()
AddPlot("Label","logical_mesh/marray")
DrawPlots()
Test(CreateTestName(testName, 15))
# Set the time back to frame 0
SetTimeSliderState(0)
ResetView()
DeleteAllPlots()
CloseDatabase(db)
ClearCache("localhost")
#
# Test mixvars.
#
def TestMixvars(db):
TestSection("Testing mixvars in Flash files")
DeleteAllPlots()
OpenDatabase(db)
AddPlot("Pseudocolor", "mesh/mixvar")
DrawPlots()
ResetView()
v = View2DAttributes()
v.windowCoords = (-9.51866, 3.29394, 13.9258, 26.4126)
v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
v.fullFrameActivationMode = v.Off
SetView2D(v)
Test("pdb_nomix")
# Do the same plot but with forced MIR so the mixvar gets reconstructed.
ClearWindow()
m = GetMaterialAttributes()
m.forceMIR = 1
SetMaterialAttributes(m)
DrawPlots()
Test("pdb_mix")
DeleteAllPlots()
#
# Run the test a few times with different versions of the database. We do this
# because we have the same database split up three different ways and all the
# ways a database can be split up must work.
#
# multi{00,01,02}.pdb - Contains multiple time states in each file but
# we group them all into "multi*.pdb database".
#
# family??.pdb - Contains a single time state in each file but
# we group them all into "family*.pdb database".
#
# allinone00.pdb - Contains all of the time states in one file.
#
databases = (data_path("pdb_test_data/multi*.pdb database"),
data_path("pdb_test_data/family*.pdb database"),
data_path("pdb_test_data/allinone00.pdb"))
testNames = ("pdb_multi", "pdb_family", "pdb_allinone")
for i in range(len(databases)):
TestWithDatabase(databases[i], testNames[i])
# Do the mixvar test.
TestMixvars(databases[2])
Exit()
|
[
"bonnell@18c085ea-50e0-402c-830e-de6fd14e8384"
] |
bonnell@18c085ea-50e0-402c-830e-de6fd14e8384
|
2d1bf385aa0af57dac548b94154d0021b5bcbf2c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_069/ch31_2019_09_28_01_31_28_102445.py
|
f9f4884fb86277a703c6d470865f3c6e798b155a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
valor = float(input('Qual o valor da casa?' ))
salario = float(input('Qual o seu salário? '))
tempo = int(input('Em quantos anos deseja pagar? '))
prestacao = valor/tempo*12
if salario >= 0.3*prestacao:
print ('Empréstimo aprovado')
else:
print ('Empréstimo não aprovado')
|
[
"you@example.com"
] |
you@example.com
|
dde962a6155bab28965c2ed4dfa4a581508ce225
|
69d3680f881833a0a4906ad708eac11401bc03c6
|
/python3/2. 01背包问题.py
|
2741d3ecf6749f95de6819feb609bb510721b0ff
|
[] |
no_license
|
menghuu/YALeetcode
|
21df4b5ea6cb0a249263b0ce2df37e7580477ddd
|
1959a884bb1cc9f2f1acb1ba6f413498ea0d1aca
|
refs/heads/master
| 2023-08-18T03:55:41.470428
| 2021-09-11T12:39:02
| 2021-09-11T12:39:02
| 269,104,152
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 m <m@meng.hu>
#
# Distributed under terms of the MIT license.
"""
"""
import sys
N, V = map(int, sys.stdin.readline().strip().split())
# dps[j]
dps = [0 for _ in range(V + 1)]
# dps[i][j]
for _ in range(N):
v, w = map(int, sys.stdin.readline().strip().split())
for j in range(V, v - 1, -1):
dps[j] = max(dps[j], dps[j - v] + w)
print(dps[-1])
|
[
"m@meng.hu"
] |
m@meng.hu
|
c22c01818686115aaa4f416dc26874227498f59a
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/004621ef8e2f9da82e0ed2be016e874230d93a0d-<profiles>-fix.py
|
a3ec5d59222b96ebf703c7962993fe499e3d1581
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
@property
def profiles(self):
'Returns a list of profiles from the API\n\n The profiles are formatted so that they are usable in this module and\n are able to be compared by the Difference engine.\n\n Returns:\n list (:obj:`list` of :obj:`dict`): List of profiles.\n\n Each dictionary in the list contains the following three (3) keys.\n\n * name\n * context\n * fullPath\n\n Raises:\n F5ModuleError: If the specified context is a value other that\n ``all``, ``server-side``, or ``client-side``.\n '
if ('items' not in self._values['profiles']):
return None
result = []
for item in self._values['profiles']['items']:
context = item['context']
if (context == 'serverside'):
context = 'server-side'
elif (context == 'clientside'):
context = 'client-side'
name = item['name']
if (context in ['all', 'server-side', 'client-side']):
result.append(dict(name=name, context=context, full_path=item['fullPath']))
else:
raise F5ModuleError("Unknown profile context found: '{0}'".format(context))
return result
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
2a37b57347e8945b94ea8041f9511b3b88e12a17
|
72af8e47d5786571bce1789fc047965de4f9ac92
|
/api/__init__.py
|
ad16f4c6968b0f0d9cc72ec542f4a5cc4cc4663a
|
[] |
no_license
|
444thLiao/WES_pipelines
|
18d488e7c01ca618b8a6916979e2d8f64d1aa631
|
06365dc6d91b8c1861c053970e2823c322e5814d
|
refs/heads/master
| 2022-01-20T17:59:11.688758
| 2019-07-17T06:43:44
| 2019-07-17T06:43:44
| 93,579,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
import sys
from os.path import dirname
sys.path.insert(0,dirname(dirname(__file__)))
from luigi_pipelines.share_luigi_tasks import PrintReads, Annovar1, Annovar2
|
[
"l0404th@gmail.com"
] |
l0404th@gmail.com
|
bba750f4f5d2b831e16a33244d5dcbf9e58ec1ac
|
87dcb103e48da1fd17233232a7b4ad1d79ae50d5
|
/svtplay-dl
|
c90f98e505eedf0ecbe8124fd1bd4cc58c18b091
|
[
"MIT"
] |
permissive
|
gusseleet/svtplay-dl
|
9bd64ba5c83775a12496a3dcd42282e5171249ff
|
55d811286df237738802ac9754417a8fed21280f
|
refs/heads/master
| 2020-12-25T02:30:14.399785
| 2016-02-19T20:29:49
| 2016-02-19T20:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
#!/usr/bin/env python
print("This file is no longer updated.")
print("if you still want to use it. go to https://svtplay-dl.se/archive and download the latest one")
|
[
"j@i19.se"
] |
j@i19.se
|
|
5fef8ea7f91e094835ace56319fab0b154591baf
|
18ca2e0f98b98941ff9d9e098e0be89166c8b87c
|
/Abp/Cp17/c17_7_1_resizeAndAddLogo2.py
|
1e9e0f0d7ca76bb52331717c0a1cfcf67a729979
|
[] |
no_license
|
masa-k0101/Self-Study_python
|
f20526a9cd9914c9906059678554285bfda0c932
|
72b364ad4da8485a201ebdaaa430fd2e95681b0a
|
refs/heads/master
| 2023-03-07T07:38:27.559606
| 2021-02-22T16:24:47
| 2021-02-22T16:24:47
| 263,381,292
| 1
| 0
| null | 2020-06-09T17:32:06
| 2020-05-12T15:47:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
#! python3
# -*- coding: utf-8 -*-
# 演習プロジェクト 17.7.1用に改造
# resizeAndAddLogo2.py - カレントディレクトリのすべての画像を300x300に収まる
# ようにサイズ変更し、catlogo.pngを右下に追加する。
import os
from PIL import Image
SQUARE_FIT_SIZE = 300
LOGO_FILENAME = 'catlogo.png'
logo_im = Image.open(LOGO_FILENAME)
logo_width, logo_height = logo_im.size
os.makedirs('withLogo', exist_ok=True)
# カレントディレクトリの全画像をループする
for filename in os.listdir('.'):
# 拡張子の大文字と小文字を区別しない(小文字に変換してマッチする)
lfname = filename.lower()
# PNG, JPG, GIF, BMPファイル以外ならスキップする
if not (lfname.endswith('.png') or lfname.endswith('.jpg') \
or lfname.endswith('.gif') or lfname.endswith('.bmp')) \
or lfname == LOGO_FILENAME:
continue # 画像以外とロゴ画像はスキップする
im = Image.open(filename)
# 画像をサイズ変更する
im.thumbnail((SQUARE_FIT_SIZE, SQUARE_FIT_SIZE))
width, height = im.size
# ロゴの2倍サイズ未満なら、スキップする
if width < logo_width * 2 or height < logo_height * 2:
continue
# ロゴを追加する
print('ロゴを追加中 {}...'.format(filename))
im.paste(logo_im, (width-logo_width, height-logo_height), logo_im)
# 変更を保存する
im.save(os.path.join('withLogo', filename))
|
[
"noreply@github.com"
] |
masa-k0101.noreply@github.com
|
ef35d4d21c0c69a4d991e93868072dc6cf75a519
|
61d484ae68e40b89432f66f98164c811692ee612
|
/ThirdParty/protobuf-registry/python/protobufs/services/profile/actions/get_profile_stats_pb2.py
|
55e84806eed9e93479af3fa9b97e42596ef5d993
|
[
"MIT"
] |
permissive
|
getcircle/luno-ios
|
2a29192c130c48415e55b50850e77a1a37f22ad1
|
d18260abb537496d86cf607c170dd5e91c406f0f
|
refs/heads/master
| 2021-05-01T04:01:52.647661
| 2016-12-05T04:54:08
| 2016-12-05T04:54:08
| 27,101,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,563
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/profile/actions/get_profile_stats.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protobufs.services.profile import containers_pb2 as protobufs_dot_services_dot_profile_dot_containers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/profile/actions/get_profile_stats.proto',
package='services.profile.actions.get_profile_stats',
syntax='proto3',
serialized_pb=b'\n:protobufs/services/profile/actions/get_profile_stats.proto\x12*services.profile.actions.get_profile_stats\x1a+protobufs/services/profile/containers.proto\"H\n\tRequestV1\x12\x13\n\x0b\x61\x64\x64ress_ids\x18\x01 \x03(\t\x12\x14\n\x0clocation_ids\x18\x02 \x03(\t\x12\x10\n\x08team_ids\x18\x03 \x03(\t\"@\n\nResponseV1\x12\x32\n\x05stats\x18\x01 \x03(\x0b\x32#.services.profile.containers.StatV1b\x06proto3'
,
dependencies=[protobufs_dot_services_dot_profile_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.profile.actions.get_profile_stats.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.address_ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='location_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.location_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='team_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.team_ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=223,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.profile.actions.get_profile_stats.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stats', full_name='services.profile.actions.get_profile_stats.ResponseV1.stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=289,
)
_RESPONSEV1.fields_by_name['stats'].message_type = protobufs_dot_services_dot_profile_dot_containers__pb2._STATV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
# @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
# @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
# @@protoc_insertion_point(module_scope)
|
[
"mwhahn@gmail.com"
] |
mwhahn@gmail.com
|
13c9726cece639eb23085d411129eaa87a551621
|
87e60b0504be11c6997f1b20b72e9428cc128342
|
/ana/magic/histo.py
|
ea1f01da27e77f8533bcff0d15645274f3f75b83
|
[] |
no_license
|
brettviren/cowbells
|
70a85856fdfc54526c847f115d5dc01ec85ec215
|
1ceca86383f4f774d56c3f159658518242875bc6
|
refs/heads/master
| 2021-01-10T18:44:41.531525
| 2014-04-09T15:17:29
| 2014-04-09T15:17:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
#!/usr/bin/env python
'''
Histogram store
'''
from UserDict import DictMixin
import ROOT
class Histo(DictMixin):
'''
Provide a dictionary interface to a TDirectory (TFile) for
managing ROOT Histogram objects (any TNamed object, really).
The TDirectory must be associated with a TFile opened with the
"UPDATE" option if items are to be set on objects of this class.
Note, that this allows items to be set using a key name that may
differ from the histogram name. Getting an item by histogram name
will still work but will create a duplicate object in memory. If
you do not wish to save these do not do an explicit TFile::Write()
on the file holding the TDirectory given to Histo.
'''
def __init__(self, tdir = None):
'''
A dictionary-like collection of histograms (any TObjects,
really) tied to a file (TDirectory). <tdir> is some ROOT
TDirectory-like thing where the histograms are to be kept. It
needs to be writable in order to store histograms.
'''
self.tdir = tdir
self.bag = dict()
def __getitem__(self, name):
hist = self.bag.get(name)
if hist: return hist
if self.tdir:
hist = self.tdir.Get(name)
if not hist:
raise KeyError, 'No histogram "%s"' % name
self[name] = hist
return hist
def __setitem__(self, name, obj):
obj.SetDirectory(0)
if name != obj.GetName():
obj.SetName(name)
self.bag[name] = obj
return
def add(self, obj):
self[obj.GetName()] = obj
def keys(self):
kl = set()
if self.tdir:
kl = set([k.GetName() for k in self.tdir.GetListOfKeys()])
map(kl.add, self.bag.keys())
return list(kl)
def flush(self, tdir = None):
'''
Write all hists to directory
'''
tdir = tdir or self.tdir
if not tdir:
raise ValueError, 'No TDirectory to flush to'
for obj in self.bag.values():
tdir.WriteTObject(obj)
def test():
    # Smoke test (Python 2): round-trip a histogram through a file.
    fd = ROOT.TFile.Open('test_histo.root','recreate')
    h = Histo(fd)
    h['h1key'] = ROOT.TH1F('h1name','hist1',10,-1,1)
    assert h['h1key']
    h['h1key'].FillRandom('gaus')
    entries = h['h1key'].GetEntries()
    assert entries
    print 'Original entries:', entries
    h.flush()
    fd.Close()
    del(h)
    print 'Opening file read-only'
    fd2 = ROOT.TFile.Open('test_histo.root','readonly')
    h2 = Histo(fd2)
    print 'keys',h2.keys()
    assert 'h1key' in h2.keys()
    print 'h1key',h2.get('h1key')
    assert h2.get('h1key')
    # The object was stored under the key 'h1key', so its original
    # histogram name 'h1name' must NOT appear as a key.
    print 'h1name',h2.get('h1name')
    assert not h2.get('h1name')
    assert entries == h2['h1key'].GetEntries()
if __name__ == '__main__':
    test()
|
[
"bv@bnl.gov"
] |
bv@bnl.gov
|
b6293e11242c694c26602b35f2ac13d2b23179dc
|
86da8478bd5b28045581445263fded606f592158
|
/tests/network/nano_node/data/http/empty_watching.py
|
c91229055452ec790f8826079b0f4474b6efc22f
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Matoking/siliqua
|
c2053214187ed6a2a1d418daf7e43108770c731c
|
b943822631ab18dde85e95d1731ebd7ffd7ef14a
|
refs/heads/master
| 2020-08-28T02:59:53.841369
| 2019-11-18T17:00:26
| 2019-11-18T17:00:26
| 217,568,445
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
from tests.network.nano_node.conftest import HTTPReplay
# Single canned exchange: an 'account_history' query for an account the
# node has never opened, answered with the node's "Account not found"
# error. Used to replay HTTP traffic in tests without a live node.
DATA = [
    HTTPReplay(
        {
            "action": "account_history",
            "account": "xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout",
            "count": 500,
            "raw": True,
            "reverse": True
        },
        {
            "error": "Account not found"
        }
    )
]
|
[
"jannepulk@gmail.com"
] |
jannepulk@gmail.com
|
f4e7f0e88b95e72ed71b719cc5ec004ce4f3a78e
|
c84ba95b559d0d1fd142c88dffec3da45cb8e711
|
/backend/users/migrations/0003_auto_20210115_1652.py
|
04ad1b16a0cd2f76a84ca29e9d06e1ab48a24855
|
[] |
no_license
|
crowdbotics-apps/insta-23855
|
4460bc7f00d52a86f9c30f90249e451957d4b145
|
c3abded4dc1a1dcaf201da48fe12d348468c7a02
|
refs/heads/master
| 2023-02-11T13:48:17.207924
| 2021-01-15T16:54:09
| 2021-01-15T16:54:09
| 329,785,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
# Generated by Django 2.2.17 on 2021-01-15 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.17: adds audit timestamp fields to
    # the custom User model and relaxes email/name fields to optional.

    dependencies = [
        ('users', '0002_auto_20210115_0235'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='last_updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='user',
            name='timestamp_created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
4eb5f6a1973d51f56c5840b06100a56e3a8e22e8
|
957430fc737d07df115f80dae22ce5cd11096689
|
/restaurants/table/migrations/0001_initial.py
|
36d1e79ffd34d9c15c9e0a9377af92f001469bf6
|
[] |
no_license
|
Hamza-abughazaleh/Restaurant
|
c6ac28c029d1d2c8eadcf0a61575c54d39273623
|
ecffb9a7bf11b115aa0d33617f61e72697f327cc
|
refs/heads/main
| 2023-06-19T09:09:03.268647
| 2021-07-16T19:45:33
| 2021-07-16T19:45:33
| 386,622,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# Generated by Django 3.2.5 on 2021-07-14 19:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import table.validation
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.5: creates the initial Table model.
    # NOTE(review): both integer fields carry verbose_name='Employee
    # number', which looks copy-pasted -- confirm before relying on it.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_number', models.IntegerField(error_messages={'unique': 'A Table number already exists.'}, unique=True, verbose_name='Employee number')),
                ('seats_number', models.IntegerField(validators=[table.validation.validate_table_seats], verbose_name='Employee number')),
                ('userid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"hamzaabughazaleh23@gmail.com"
] |
hamzaabughazaleh23@gmail.com
|
34f85ad410331a5914a2517ee3343c14572b7b59
|
7a2bfe09f7526c36fce304999fa47466b89fdec2
|
/profiles/models.py
|
7cbf380d6bb77aeabe96546b9fe12b082a1ed6fc
|
[] |
no_license
|
Brachamul/fichier-jdem
|
179344ba64b830c3f6e352907e470a1db8d42a9b
|
f9b40657aea54db83b3abd3e7b38fec9260d34e9
|
refs/heads/master
| 2021-05-01T00:37:50.021517
| 2019-02-07T15:02:06
| 2019-02-07T15:02:06
| 58,691,054
| 0
| 0
| null | 2017-07-04T21:13:01
| 2016-05-13T02:01:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,247
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from django.dispatch import receiver
from fichiers_adherents.models import FichierAdherents, Adherent, Cnil, adherents_actuels
class Member(models.Model):
	# Stable member number; mirrors Adherent.num_adherent.
	id = models.IntegerField(primary_key=True)
	# Cached flag: True when the latest fichier entry has no phone number.
	phoneless = models.BooleanField(default=False)
	def historique_adherent(self):
		# All adherent-file rows ever recorded for this member number.
		return Adherent.objects.filter(num_adherent=self.id)
	def derniere_occurence_fichier(self):
		# Occurrence of this member in the most recent adherent file.
		adherents = Adherent.objects.filter(num_adherent=self.id)
		fichier = FichierAdherents.objects.filter(adherent__in=adherents)
		return Adherent.objects.get(num_adherent=self.id, fichier=fichier.latest())
	def notes(self):
		# Notes attached to this member.
		return Note.objects.filter(member=self)
	def __str__(self):
		return str(self.derniere_occurence_fichier())
	def initiate(fichier=False):
		''' Generate, for all fichiers or a single one, members for each adherent
		this is used when rebuilding the DB '''
		# NOTE(review): defined without ``self``; appears to be used as a
		# static-style helper -- confirm call sites before changing.
		if fichier :
			adherents = Adherent.objects.filter(fichier=fichier)
		else :
			adherents = Adherent.objects.all()
		for adherent in adherents :
			new_member, created = Member.objects.get_or_create(id=adherent.num_adherent)
	def check_if_phoneless(self):
		''' Returns 'True' if the adherent has no phone number '''
		self.phoneless = self.derniere_occurence_fichier().phoneless()
		self.save()
@receiver(post_save, sender=Adherent)
def initiate_member(sender, instance, created, **kwargs):
	# Keep Member rows in sync: ensure a Member exists for every saved
	# Adherent and refresh its cached phoneless flag.
	new_member, created = Member.objects.get_or_create(id=instance.num_adherent)
	new_member.check_if_phoneless()
class Note(models.Model):
	# Free-form note attached to a member; author survives user deletion
	# as NULL.
	member = models.ForeignKey(Member, on_delete=models.CASCADE)
	author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
	text = models.CharField(max_length=1024)
	date = models.DateTimeField(auto_now_add=True)
	def __str__(self): return self.text
# https://codepen.io/codyhouse/pen/FdkEf
class WrongNumber(models.Model):
	# Report that a member's recorded phone number is wrong; reporter
	# survives user deletion as NULL.
	member = models.ForeignKey(Member, on_delete=models.CASCADE)
	reported_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
	date = models.DateTimeField(auto_now_add=True)
	def __str__(self):
		# Fix: __str__ must return a str. Returning the Member instance
		# itself raised TypeError whenever the object was rendered
		# (admin lists, templates, logging).
		return str(self.member)
|
[
"barnaby.brachamul@gmail.com"
] |
barnaby.brachamul@gmail.com
|
da60dde1e796db0872b0c257e878c1ebb4826cda
|
ffff723a6c8527b45299a7e6aec3044c9b00e923
|
/PS/BOJ/1238/1238.py
|
599cad18df66ed2c7caf926d2eb19296b2ffb8d7
|
[] |
no_license
|
JSYoo5B/TIL
|
8e3395a106656e090eeb0260fa0b0dba985d3beb
|
3f9ce4c65451512cfa2279625e44a844d476b68f
|
refs/heads/master
| 2022-03-14T09:15:59.828223
| 2022-02-26T01:30:41
| 2022-02-26T01:30:41
| 231,383,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
#!/usr/bin/env python3
import heapq
INF = 10 ** 9
input = __import__('sys').stdin.readline
heappush = heapq.heappush
heappop = heapq.heappop


def get_dist_to_others(edges, src):
    """Dijkstra from ``src``: return a list of shortest distances.

    ``edges[u]`` is a list of ``[v, w]`` pairs (directed edge u->v of
    weight w). Unreachable nodes keep the sentinel distance INF.
    """
    best = [INF for _ in edges]
    pending = [[0, src]]
    while pending:
        cost, here = heappop(pending)
        # Lazy deletion: stale heap entries can only offer a cost no
        # better than what is already recorded.
        best[here] = min(best[here], cost)
        for nxt, weight in edges[here]:
            relaxed = cost + weight
            if relaxed < best[nxt]:
                best[nxt] = relaxed
                heappush(pending, [relaxed, nxt])
    return best
if __name__ == '__main__':
    # BOJ 1238: node count, edge count, target node id (1-based).
    nodes_cnt, edges_cnt, tgt_id = map(int, input().split())
    tgt_id -= 1 # convert into zero offset
    edges = [ [ ] for _ in range(nodes_cnt) ]
    for _ in range(edges_cnt):
        src, dst, dist = map(int, input().split())
        # Directed edge src -> dst with the given weight (zero offset).
        edges[src-1].append([dst-1, dist])
    # Single-source shortest paths from every node (directed graph, so
    # the return trip must be computed separately).
    single_dists = []
    for n in range(nodes_cnt):
        dist = get_dist_to_others(edges, n)
        single_dists.append(dist)
    # Round trip for node n: n -> target plus target -> n.
    return_dists = []
    for n in range(nodes_cnt):
        dist = single_dists[n][tgt_id] + single_dists[tgt_id][n]
        return_dists.append(dist)
    # Answer: the longest round trip over all nodes.
    answer = max(return_dists)
    print(answer)
|
[
"jsyoo5b@gmail.com"
] |
jsyoo5b@gmail.com
|
f6345fee883766347e8a49dfa0c93038f32995b2
|
48a7b266737b62da330170ca4fe4ac4bf1d8b663
|
/molsysmt/_private/digestion/argument/chi3.py
|
ef60e618382a4208ce40cd84eadebbd653dad6de
|
[
"MIT"
] |
permissive
|
uibcdf/MolSysMT
|
ddab5a89b8ec2377f383884c5169d147cab01322
|
c3d713ba63db24eb8a2426115cf8d9cb3665d225
|
refs/heads/main
| 2023-08-08T15:04:16.217967
| 2023-08-04T05:49:56
| 2023-08-04T05:49:56
| 137,937,243
| 15
| 3
|
MIT
| 2023-06-04T20:27:06
| 2018-06-19T19:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
from ...exceptions import ArgumentError
# Callers whose ``chi3`` argument must be a plain boolean flag.
methods_bool_input = ["molsysmt.topology.get_dihedral_quartets.get_dihedral_quartets",
                      "molsysmt.structure.get_dihedral_angles.get_dihedral_angles"]

def digest_chi3(chi3, caller=None):
    """Validate the ``chi3`` argument for the given caller.

    Returns the boolean unchanged when the caller accepts a boolean
    flag; otherwise raises ArgumentError.
    """
    if caller in methods_bool_input and isinstance(chi3, bool):
        return chi3
    raise ArgumentError('chi3', value=chi3, caller=caller, message=None)
|
[
"prada.gracia@gmail.com"
] |
prada.gracia@gmail.com
|
6764d6567a70fd6c2f2886bcd6dfc1234234f72f
|
edf31957838a65e989d5eb5e8118254ac2413fc8
|
/parakeet/analysis/collect_vars.py
|
66543535c72ccdc08e79053098b7cefbdccc4db0
|
[
"BSD-3-Clause"
] |
permissive
|
iskandr/parakeet
|
e35814f9030b9e8508a7049b62f94eee5b8c5296
|
d9089f999cc4a417d121970b2a447d5e524a3d3b
|
refs/heads/master
| 2021-07-18T19:03:05.666898
| 2019-03-13T17:20:20
| 2019-03-13T17:20:20
| 5,889,813
| 69
| 7
|
NOASSERTION
| 2021-07-17T21:43:03
| 2012-09-20T16:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
from .. syntax import Var, Tuple
from syntax_visitor import SyntaxVisitor
class SetCollector(SyntaxVisitor):
  """Visitor that accumulates each distinct variable name into a set."""
  def __init__(self):
    SyntaxVisitor.__init__(self)
    self.var_names = set([])
  def visit_Var(self, expr):
    self.var_names.add(expr.name)
def collect_var_names(expr):
  """Return the set of variable names occurring in ``expr``."""
  collector = SetCollector()
  collector.visit_expr(expr)
  return collector.var_names
def collect_var_names_from_exprs(exprs):
  """Return the set of variable names occurring in any of ``exprs``."""
  collector = SetCollector()
  collector.visit_expr_list(exprs)
  return collector.var_names
class ListCollector(SyntaxVisitor):
  """Visitor that records variable names in visit order (with repeats)."""
  def __init__(self):
    SyntaxVisitor.__init__(self)
    self.var_names = []
  def visit_Var(self, expr):
    self.var_names.append(expr.name)
def collect_var_names_list(expr):
  """Return variable names of ``expr`` as a list, preserving order and
  duplicates (unlike collect_var_names)."""
  collector = ListCollector()
  collector.visit_expr(expr)
  return collector.var_names
def collect_binding_names(lhs):
  """Return the variable names bound by assignment target ``lhs``.

  A Var binds its own name; a Tuple binds the names of all its
  elements, recursively. Any other target binds nothing.
  """
  kind = lhs.__class__
  if kind is Var:
    return [lhs.name]
  if kind is Tuple:
    names = []
    for part in lhs.elts:
      names.extend(collect_binding_names(part))
    return names
  return []
class CollectBindings(SyntaxVisitor):
  """Visitor that maps each bound variable name to its assigned RHS."""
  def __init__(self):
    SyntaxVisitor.__init__(self)
    self.bindings = {}
  def bind(self, lhs, rhs):
    # Tuple targets share the same RHS for every element.
    if lhs.__class__ is Var:
      self.bindings[lhs.name] = rhs
    elif lhs.__class__ is Tuple:
      for elt in lhs.elts:
        self.bind(elt, rhs)
  def visit_Assign(self, stmt):
    self.bind(stmt.lhs, stmt.rhs)
def collect_bindings(fn):
  """Return a dict mapping bound variable names to RHS values for ``fn``."""
  return CollectBindings().visit_fn(fn)
|
[
"alex.rubinsteyn@gmail.com"
] |
alex.rubinsteyn@gmail.com
|
546664dc944f734fde1b16887bc05cfe6763ff9b
|
65662b604fa40bdc6e8648e39ed201b0dd8ad6fd
|
/Python Specialization/Course 4/code/party4.py
|
257a2d0f8d47dc1b565fc7854b62718b830ad3d4
|
[
"MIT"
] |
permissive
|
rubysubash/Coursera-Specializations
|
973f9dbc01774dae84d90b6b97870a6dfde674bc
|
88acc792bbee20e8d9b8d34ff6f7c3072236d6f3
|
refs/heads/master
| 2020-08-10T02:43:08.277860
| 2020-06-02T09:48:25
| 2020-06-02T09:48:25
| 214,237,214
| 0
| 0
|
MIT
| 2019-10-10T16:52:27
| 2019-10-10T16:52:27
| null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
class PartyAnimal:
    # Python 2 example class. Class-level defaults below; __init__
    # rebinds name per instance, and party() shadows x with an
    # instance attribute on first increment.
    x = 0
    name = ""
    def __init__(self, nam):
        self.name = nam
        print self.name,"constructed"
    def party(self) :
        self.x = self.x + 1
        print self.name,"party count",self.x
# Two independent instances: each keeps its own party count.
s = PartyAnimal("Sally")
s.party()
j = PartyAnimal("Jim")
j.party()
s.party()
|
[
"amandalmia18@gmail.com"
] |
amandalmia18@gmail.com
|
f496808570d534acea82cfe877a130b206da08d4
|
a973f336765a31550cc9661be57e0384c317fc38
|
/ejemplo3/proyectoUno/administrativo/urls.py
|
8ef4aee1be71fedb011dd6c3682a4c4b57228cee
|
[] |
no_license
|
PlataformasWeb-P-AA2021/clase03-2bim-ricardoifc
|
0a40d61f351525ab87cb2ce1f0982804cb50df37
|
35c42f8e5c3420bfa66103dcb45a75c5b27d5a5a
|
refs/heads/main
| 2023-06-19T17:46:12.663825
| 2021-07-16T17:47:59
| 2021-07-16T17:47:59
| 377,869,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
"""
Manejo de urls para la aplicación
administrativo
"""
from django.urls import path
# se importa las vistas de la aplicación
from . import views
# Route table for the 'administrativo' app.
urlpatterns = [
    path('', views.index, name='index'),
    # Detail view: look up a single student by integer primary key.
    path('estudiante/<int:id>', views.obtener_estudiante,
        name='obtener_estudiante'),
]
|
[
"66690702+github-classroom[bot]@users.noreply.github.com"
] |
66690702+github-classroom[bot]@users.noreply.github.com
|
5dab7e3bfdea2a2c594b3dad9518850e875f603f
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/B/buttub/basic_twitter_scraper_179.py
|
43a79636f9de0b40da964a9dc909525b46726714
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,294
|
py
|
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:BarackObama'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 10

# Page through the (legacy v1) Twitter search API and persist tweets.
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            print data['from_user'], data['text']
            # 'id' is the unique key: re-running upserts, no duplicates.
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare except swallows every error (including typos)
    # and only reports the failing URL.
    except:
        print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:BarackObama'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 10

# Duplicate of the scraper loop above (the dataset stores the file
# content twice); same paging-and-persist logic.
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            print data['from_user'], data['text']
            # 'id' is the unique key: re-running upserts, no duplicates.
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare except swallows every error (including typos)
    # and only reports the failing URL.
    except:
        print 'Oh dear, failed to scrape %s' % base_url
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
11eaad49e2f332332ac43910e59112ef2d27a95d
|
c0340c511cff5b40b4681c4d3238d807624c0323
|
/models/revision/branching_entropy/branching_direction_entropy.py
|
88c5d3c8f2bdf70871641d209a2d1963a11af595
|
[] |
no_license
|
m-hahn/grammar-optim
|
5fa7ade47d2ad91f517c887ee2c65af24059069d
|
07a1a80692a504bcafc8120a21c4dc9066b495ee
|
refs/heads/master
| 2022-08-30T06:54:42.749264
| 2022-08-05T12:09:28
| 2022-08-05T12:09:28
| 156,456,167
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,065
|
py
|
#/u/nlp/bin/stake.py -g 11.5g -s run-stats-pretrain2.json "python readDataDistEnglishGPUFree.py"
import random
import sys
from math import log, exp
from random import random, shuffle
from corpusIterator_FuncHead import CorpusIteratorFuncHead
# Per language: entropy of dependency-direction choices (head-dependent
# vs dependent-head), weighted by relation frequency. Python 2 script.
languages = ["Hindi", "Swedish", "German", "Urdu", "English", "Spanish", "Chinese", "Slovenian", "Estonian", "Norwegian", "Serbian", "Croatian", "Finnish", "Portuguese", "Catalan", "Russian", "Arabic", "Czech", "Japanese", "French", "Latvian", "Basque", "Danish", "Dutch", "Ukrainian", "Gothic", "Hebrew", "Hungarian", "Latin", "Persian", "Bulgarian", "Romanian", "Indonesian", "Greek", "Turkish", "Slovak", "Belarusian", "Galician", "Italian", "Lithuanian", "Polish", "Vietnamese", "Korean", "Tamil", "Irish", "Marathi", "Afrikaans", "Telugu", "Coptic", "Ancient_Greek", "Old_Church_Slavonic"]

with open("branching_entropy.tsv", "w") as outFile:
    print >> outFile, "Language\tBranchingEntropy"
    for language in languages:
        posUni = set()
        posFine = set()
        deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
        header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]
        originalDistanceWeights = {}
        orderTable = {}
        keys = set()
        vocab = {}
        distanceSum = {}
        distanceCounts = {}
        depsVocab = set()
        totalCount = 0
        for partition in ["train", "dev"]:
            # Fix: the module imports CorpusIteratorFuncHead (see the
            # import at the top of the file); the previous call to the
            # undefined name CorpusIterator raised NameError.
            for sentence in CorpusIteratorFuncHead(language, partition, storeMorph=True).iterator():
                for line in sentence:
                    vocab[line["word"]] = vocab.get(line["word"], 0) + 1
                    depsVocab.add(line["dep"])
                    posFine.add(line["posFine"])
                    posUni.add(line["posUni"])
                    if line["dep"] == "root":
                        continue
                    posHere = line["posUni"]
                    posHead = sentence[line["head"]-1]["posUni"]
                    dep = line["dep"]
                    # HD = head precedes dependent; DH = the reverse.
                    direction = "HD" if line["head"] < line["index"] else "DH"
                    key = (posHead, dep, posHere)
                    keyWithDir = (posHead, dep, posHere, direction)
                    orderTable[keyWithDir] = orderTable.get(keyWithDir, 0) + 1
                    keys.add(key)
                    distanceCounts[key] = distanceCounts.get(key, 0.0) + 1.0
                    distanceSum[key] = distanceSum.get(key, 0.0) + abs(line["index"] - line["head"])
                    totalCount += 1
        entropyTotal = 0
        dhLogits = {}
        for key in keys:
            # Smoothed per-direction counts for this (head POS,
            # relation, dependent POS) triple.
            hd = orderTable.get((key[0], key[1], key[2], "HD"), 0) + 0.00000001
            dh = orderTable.get((key[0], key[1], key[2], "DH"), 0) + 0.00000001
            p_hd = hd/(hd+dh)
            # Binary entropy of the direction choice, frequency-weighted.
            entropyHere = p_hd * log(p_hd) + (1-p_hd) * log(1-p_hd)
            entropyTotal -= (hd+dh)/totalCount * entropyHere
        print >> outFile, ("\t".join(map(str, [language, entropyTotal])))
|
[
"mhahn29@gmail.com"
] |
mhahn29@gmail.com
|
a8c5f8fe733b1263b9e715e46f656c1827f702d7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2843/60723/275315.py
|
68bd3c530b53af09ba7366a8b979d4690d44f3fa
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# Read n integers and print, for each position i, a[i] + a[i+1]
# (the final position keeps its own value), space-separated.
count = int(input())
tokens = input().split()
for idx in range(count):
    tokens[idx] = int(tokens[idx])
summed = [str(tokens[idx] + tokens[idx + 1]) for idx in range(count - 1)]
summed.append(str(tokens[count - 1]))
print(' '.join(summed))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5cff6d1e75311f6a39ff6edc9ee7a41307b16b8f
|
cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69
|
/dashboard/dashboard/api/sheriffs.py
|
6a66dcc1d8196a45035d18f819709bca37d2f30c
|
[
"BSD-3-Clause"
] |
permissive
|
CTJyeh/catapult
|
bd710fb413b9058a7eae6073fe97a502546bbefe
|
c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb
|
refs/heads/master
| 2020-08-19T21:57:40.981513
| 2019-10-17T09:51:09
| 2019-10-17T18:30:16
| 215,957,813
| 1
| 0
|
BSD-3-Clause
| 2019-10-18T06:41:19
| 2019-10-18T06:41:17
| null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.api import api_request_handler
from dashboard.models import sheriff
class SheriffsHandler(api_request_handler.ApiRequestHandler):
  """API endpoint returning the string IDs of all Sheriff entities."""

  def _CheckUser(self):
    # No access restriction: the endpoint is readable by any caller.
    pass

  def Post(self):
    # Keys-only query avoids fetching full entities; only IDs are used.
    sheriff_keys = sheriff.Sheriff.query().fetch(keys_only=True)
    return [key.string_id() for key in sheriff_keys]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
12c1e9f39cad94697ac642a2b342136937d4f0fe
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py
|
9751c236c69e44dbb07d504e8b417ae5707659af
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,737
|
py
|
# 2017.08.29 21:45:24 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py
from gui.prb_control.entities.base.actions_validator import BaseActionsValidator, ActionsValidatorComposite, CurrentVehicleActionsValidator
from gui.prb_control.items import ValidationResult
class InQueueValidator(BaseActionsValidator):
    """
    Is player in queue validator: blocks further actions while the
    entity is already enqueued.
    """

    def _validate(self):
        # Already queued -> actions are invalid; otherwise defer to base.
        if self._entity.isInQueue():
            return ValidationResult(False)
        return super(InQueueValidator, self)._validate()
class PreQueueActionsValidator(ActionsValidatorComposite):
    """
    Pre queue actions validator base class. It has several parts:
    - state validation
    - vehicle validation
    """

    def __init__(self, entity):
        # Build both parts via the template methods so subclasses can
        # substitute their own validators.
        self._stateValidator = self._createStateValidator(entity)
        self._vehiclesValidator = self._createVehiclesValidator(entity)
        validators = [self._stateValidator, self._vehiclesValidator]
        super(PreQueueActionsValidator, self).__init__(entity, validators)

    def _createStateValidator(self, entity):
        """
        Part of template method to build state validation part
        """
        return InQueueValidator(entity)

    def _createVehiclesValidator(self, entity):
        """
        Part of template method to build vehicles validation part
        """
        return CurrentVehicleActionsValidator(entity)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\base\pre_queue\actions_validator.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:45:25 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
d5d199e83ae7039dce538234c4fd52c1271f01f4
|
4364fb1fec2ebda2cd240ddc19ef89243812c122
|
/tensorflow_datasets/image/diabetic_retinopathy_detection_test.py
|
c6729f05bccf57a228449fa8db506e268ffc95fc
|
[
"Apache-2.0"
] |
permissive
|
undeadinu/datasets
|
67ebbe6c20462ed6f58713ccd8dc1d67db89f4d9
|
a6f1bce86404d534b7343fb90f0ebfd6d098c346
|
refs/heads/master
| 2020-04-16T03:31:37.564934
| 2019-01-11T10:12:42
| 2019-01-11T10:13:12
| 165,234,637
| 0
| 0
|
Apache-2.0
| 2019-01-11T11:44:44
| 2019-01-11T11:41:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for diabetic_retinopathy_detection dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets.image import diabetic_retinopathy_detection
from tensorflow_datasets.testing import dataset_builder_testing
class DiabeticRetinopathyDetectionTest(dataset_builder_testing.TestCase):
  # Builder under test; the harness generates and checks its splits.
  DATASET_CLASS = diabetic_retinopathy_detection.DiabeticRetinopathyDetection
  SPLITS = {  # Expected number of examples on each split.
      "sample": 4,
      "train": 12,
      "test": 12,
  }
  OVERLAPPING_SPLITS = ["sample"]  # contains examples from other examples

if __name__ == "__main__":
  dataset_builder_testing.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
cde3346e90bf0b24b91ea9df9de7d3821dc8a338
|
d850f5f7cc09a8379c04d38f5c26c2e6b73f3484
|
/kimai_python/models/project_rate.py
|
76b7342f2bc64b749686f63d73e8f0362a61bf71
|
[
"MIT"
] |
permissive
|
MPW1412/kimai-python
|
8d78e3df3036ab11573e800dce96011552aa6946
|
7c89b0866b85fbc4b1092b30eca21f1be48db533
|
refs/heads/master
| 2022-10-12T17:24:50.522103
| 2020-04-24T06:21:57
| 2020-04-24T06:21:57
| 264,545,139
| 0
| 0
|
MIT
| 2020-05-16T23:14:13
| 2020-05-16T23:14:12
| null |
UTF-8
|
Python
| false
| false
| 5,828
|
py
|
# coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProjectRate(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: the string above is a bare expression (not a docstring);
    # kept exactly as emitted by swagger-codegen.
    swagger_types = {
        'id': 'int',
        'rate': 'float',
        'internal_rate': 'float',
        'is_fixed': 'bool',
        'user': 'User'
    }

    attribute_map = {
        'id': 'id',
        'rate': 'rate',
        'internal_rate': 'internalRate',
        'is_fixed': 'isFixed',
        'user': 'user'
    }

    def __init__(self, id=None, rate=None, internal_rate=None, is_fixed=None, user=None):  # noqa: E501
        """ProjectRate - a model defined in Swagger"""  # noqa: E501

        self._id = None
        self._rate = None
        self._internal_rate = None
        self._is_fixed = None
        self._user = None
        self.discriminator = None

        if id is not None:
            self.id = id
        if rate is not None:
            self.rate = rate
        if internal_rate is not None:
            self.internal_rate = internal_rate
        # is_fixed is required: assigned unconditionally so the setter
        # can reject None.
        self.is_fixed = is_fixed
        if user is not None:
            self.user = user

    @property
    def id(self):
        """Gets the id of this ProjectRate.  # noqa: E501


        :return: The id of this ProjectRate.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ProjectRate.


        :param id: The id of this ProjectRate.  # noqa: E501
        :type: int
        """

        self._id = id

    @property
    def rate(self):
        """Gets the rate of this ProjectRate.  # noqa: E501


        :return: The rate of this ProjectRate.  # noqa: E501
        :rtype: float
        """
        return self._rate

    @rate.setter
    def rate(self, rate):
        """Sets the rate of this ProjectRate.


        :param rate: The rate of this ProjectRate.  # noqa: E501
        :type: float
        """

        self._rate = rate

    @property
    def internal_rate(self):
        """Gets the internal_rate of this ProjectRate.  # noqa: E501


        :return: The internal_rate of this ProjectRate.  # noqa: E501
        :rtype: float
        """
        return self._internal_rate

    @internal_rate.setter
    def internal_rate(self, internal_rate):
        """Sets the internal_rate of this ProjectRate.


        :param internal_rate: The internal_rate of this ProjectRate.  # noqa: E501
        :type: float
        """

        self._internal_rate = internal_rate

    @property
    def is_fixed(self):
        """Gets the is_fixed of this ProjectRate.  # noqa: E501


        :return: The is_fixed of this ProjectRate.  # noqa: E501
        :rtype: bool
        """
        return self._is_fixed

    @is_fixed.setter
    def is_fixed(self, is_fixed):
        """Sets the is_fixed of this ProjectRate.


        :param is_fixed: The is_fixed of this ProjectRate.  # noqa: E501
        :type: bool
        """
        # Required field: None is rejected rather than stored.
        if is_fixed is None:
            raise ValueError("Invalid value for `is_fixed`, must not be `None`")  # noqa: E501

        self._is_fixed = is_fixed

    @property
    def user(self):
        """Gets the user of this ProjectRate.  # noqa: E501


        :return: The user of this ProjectRate.  # noqa: E501
        :rtype: User
        """
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user of this ProjectRate.


        :param user: The user of this ProjectRate.  # noqa: E501
        :type: User
        """

        self._user = user

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ProjectRate, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProjectRate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"kajetan.bancerz@gmail.com"
] |
kajetan.bancerz@gmail.com
|
c618026c9962936fdc9c07d9881c1e5b4d611e77
|
99351753f51b2a585f3a0bb1dc11b8c6eebd76df
|
/setup.py
|
f547ea175656df3ebba7efc860cec92119a0174e
|
[] |
no_license
|
FND/tiddlywebplugins.imaker
|
6ef680e76145f9f954a66ba2d1cabd15cc0b4637
|
bcaeca5a4f2b44d9e48414f48cfa5cae468f6c4c
|
refs/heads/master
| 2021-01-15T18:30:52.466042
| 2013-07-13T10:51:54
| 2013-07-13T10:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# Packaging metadata for the tiddlywebplugins.imaker plugin.
AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = 'cdent@peermore.com'
NAME = 'tiddlywebplugins.imaker'
DESCRIPTION = 'Make TiddlyWeb instances'
VERSION = '0.1.3'

import os
from setuptools import setup, find_packages

# You should carefully review the below (install_requires especially).
setup(
    namespace_packages = ['tiddlywebplugins'],
    name = NAME,
    version = VERSION,
    description = DESCRIPTION,
    # README at the repository root doubles as the long description.
    long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
    author = AUTHOR,
    author_email = AUTHOR_EMAIL,
    url = 'http://pypi.python.org/pypi/%s' % NAME,
    platforms = 'Posix; MacOS X; Windows',
    packages = find_packages(exclude=['test', 'testpackage']),
    install_requires = ['tiddlyweb',
        'tiddlywebplugins.utils',
        'tiddlywebplugins.pkgstore',
        ],
    zip_safe = False
    )
|
[
"chris.dent@gmail.com"
] |
chris.dent@gmail.com
|
9d5aafb14738f910a84f9fa615fc45a6cd8f3cc2
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/es6qJTs5zYf8nEBkG_10.py
|
6084216bb61e092527df14f88ae710259e20fc5d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
import re
def is_rectangle(lst):
    """Return True when the four coordinate strings contain exactly four
    distinct integers, False otherwise (including when len(lst) != 4)."""
    if len(lst) != 4:
        return False
    numbers = []
    for entry in lst:
        numbers.extend(int(token) for token in re.findall(r'-?\d+', entry))
    # Exactly four distinct values across all parsed integers.
    return len(set(numbers)) == 4
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
8745eab3a8a025abd42708022865113cd6d9859f
|
fd326562890d4f1987c384fc7c60374938231222
|
/OOP/DefinningClasses/Spoopify/project/album.py
|
55660f2a89fde3334a12f48e6c5ecfbc8cdc378d
|
[] |
no_license
|
miro-lp/SoftUni
|
cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4
|
283d9328537919de49f7f6a301e58593bae9ca2a
|
refs/heads/main
| 2023-08-23T21:22:07.856226
| 2021-08-25T15:10:18
| 2021-08-25T15:10:18
| 318,134,101
| 2
| 1
| null | 2021-08-10T12:51:54
| 2020-12-03T09:03:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
from .song import Song
class Album:
    """A collection of Song objects; contents can only change before publishing."""

    def __init__(self, name, *songs):
        self.name = name
        self.songs = list(songs)
        self.published = False

    def add_song(self, song: Song):
        """Add a non-single song to an unpublished album; return a status message."""
        if self.published:
            return "Cannot add songs. Album is published."
        if song.single:
            return f"Cannot add {song.name}. It's a single"
        if any(existing.name == song.name for existing in self.songs):
            return "Song is already in the album."
        self.songs.append(song)
        return f"Song {song.name} has been added to the album {self.name}."

    def remove_song(self, song: str):
        """Remove the song with the given name; return a status message."""
        if self.published:
            return "Cannot remove songs. Album is published."
        for existing in self.songs:
            if existing.name == song:
                self.songs.remove(existing)
                return f"Removed song {song} from album {self.name}."
        return "Song is not in the album."

    def publish(self):
        """Lock the album against further changes; return a status message."""
        if self.published:
            return f"Album {self.name} is already published."
        self.published = True
        return f"Album {self.name} has been published."

    def details(self):
        """Return the album header plus one '== <info>' line per song."""
        header = f"Album {self.name}"
        if not self.songs:
            return header + "\n"
        body = "\n".join(f"== {s.get_info()}" for s in self.songs)
        return header + "\n" + body + "\n"
|
[
"miro_lp@abv.bg"
] |
miro_lp@abv.bg
|
930c2c52d19f93eb89a1d6d1cd65fddba65c9851
|
df126574e5fae32aa6ba8ae927942208107897b5
|
/pyconll/load.py
|
2cfdd19dd6edcbb04a5310f3f37bfb3799be6585
|
[
"MIT"
] |
permissive
|
ZmeiGorynych/pyconll
|
865781a9ac2b5c0b9fe2a26d7d14fce60d4454a7
|
6784295db5fde769754e2b1ac46d6100484e45cc
|
refs/heads/master
| 2020-04-14T11:38:14.167823
| 2018-12-28T22:12:38
| 2018-12-28T22:12:38
| 163,819,354
| 0
| 0
| null | 2019-01-02T09:15:40
| 2019-01-02T09:15:40
| null |
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
"""
A wrapper around the Conll class that allow for easy loading of treebanks from
multiple formats. This module also contains logic for iterating over treebank
data without storing Conll objects in memory.
"""
import requests
from pyconll._parser import iter_sentences
from pyconll.unit import Conll
def load_from_string(source):
    """
    Load CoNLL-U source in a string into a Conll object.

    Args:
        source: The CoNLL-U formatted string.

    Returns:
        A Conll object equivalent to the provided source.

    Raises:
        ParseError: If there is an error parsing the input into a Conll object.
    """
    return Conll(source.splitlines())
def load_from_file(filename):
    """
    Load a CoNLL-U file given the filename where it resides.

    Args:
        filename: The location of the file.

    Returns:
        A Conll object equivalent to the provided file.

    Raises:
        IOError: If there is an error opening the given filename.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    with open(filename) as conll_file:
        return Conll(conll_file)
def load_from_url(url):
    """
    Load a CoNLL-U file that is pointed to by a given URL.

    Args:
        url: The URL that points to the CoNLL-U file.

    Returns:
        A Conll object equivalent to the provided file.

    Raises:
        requests.exceptions.RequestException: If the url was unable to be properly
            retrieved and status was 4xx or 5xx.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    response = requests.get(url)
    response.raise_for_status()

    # CoNLL-U is UTF-8 by definition; do not let requests guess the encoding.
    response.encoding = 'utf-8'
    return Conll(response.text.splitlines())
def iter_from_string(source):
    """
    Iterate over a CoNLL-U string's sentences.

    Use this method if you only need to iterate over the CoNLL-U file once and
    do not need to create or store the Conll object.

    Args:
        source: The CoNLL-U string.

    Yields:
        The sentences that make up the CoNLL-U file.

    Raises:
        ParseError: If there is an error parsing the input into a Conll object.
    """
    yield from iter_sentences(source.splitlines())
def iter_from_file(filename):
    """
    Iterate over a CoNLL-U file's sentences.

    Args:
        filename: The name of the file whose sentences should be iterated over.

    Yields:
        The sentences that make up the CoNLL-U file.

    Raises:
        IOError: If there is an error opening the file.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    with open(filename) as conll_file:
        yield from iter_sentences(conll_file)
def iter_from_url(url):
    """
    Iterate over a CoNLL-U file that is pointed to by a given URL.

    Args:
        url: The URL that points to the CoNLL-U file.

    Yields:
        The sentences that make up the CoNLL-U file.

    Raises:
        requests.exceptions.RequestException: If the url was unable to be properly
            retrieved.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    resp = requests.get(url)
    resp.raise_for_status()
    # Force UTF-8 (the CoNLL-U standard encoding) instead of letting requests
    # guess from HTTP headers (often ISO-8859-1 for text/plain), which would
    # corrupt non-ASCII tokens. Keeps behavior consistent with load_from_url.
    resp.encoding = 'utf-8'
    lines = resp.text.splitlines()
    for sentence in iter_sentences(lines):
        yield sentence
|
[
"matgrioni@gmail.com"
] |
matgrioni@gmail.com
|
42e066146f1fa97f71238d54a52fa96707339fed
|
0274f2c465f110598456624581f569331221068b
|
/impl/set_mode.py
|
4e67d0d1e69ae5d21f0e2a6144f1fe0e173dbafa
|
[] |
no_license
|
bluecube/thesis
|
63e745076c86a3122e9c3d7ff42ff22e32921860
|
588db206e64de9b681372fea9a70d3fa2aa598df
|
refs/heads/master
| 2016-09-06T00:01:03.840006
| 2013-05-27T09:36:51
| 2013-05-27T09:36:51
| 1,376,241
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
#!/usr/bin/python
"""
set_mode.py
Set the GPS to SiRF or NMEA mode.
"""
from __future__ import division, print_function, unicode_literals
import gps
import logging
import sys
import argparse
from gps.sirf_messages import *
def setup_logging():
    """Configure root logging with a timestamped record format at INFO level."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
setup_logging()

logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)

# CLI: one positional serial port plus an optional target protocol.
arg_parser = argparse.ArgumentParser(description="Set the GPS to SiRF or NMEA mode.")
arg_parser.add_argument('gps',
    help="Port with a GPS receiver.")
arg_parser.add_argument('--protocol',
    help="To which mode to switch the receiver, protocol is either 'NMEA' or 'SIRF'",
    default="SIRF")
arguments = arg_parser.parse_args()

# Open the receiver on the given port and switch its protocol.
x = gps.gps.Gps(arguments.gps)
x.set_protocol(arguments.protocol)
|
[
"blue.cube@seznam.cz"
] |
blue.cube@seznam.cz
|
230b8d139a3fe1b4a2b0befd673aebccdac45332
|
c5959b7e4fc5b752b54a6352449c1bb0d28d9115
|
/bab/bab-12/mysql_fetchmany.py
|
1efde507ffa28d2168fa13356bdb0bff188622af
|
[] |
no_license
|
romanbatavi/kickstarter-python
|
f5592a371740b28c045ef99dd510d1c6a92ff8d1
|
ed3eb692e09a3f44fd3e0b16ab7b042ee2658db6
|
refs/heads/master
| 2023-03-29T11:34:23.774873
| 2021-04-04T09:11:28
| 2021-04-04T09:11:28
| 354,500,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
######################################################
# Nama file: mysql_fetchmany.py
######################################################
import mysql.connector
import sys
def main():
    """Connect to the PythonDB database and print two rows from BUKU.

    Uses cursor.fetchmany(2) to grab at most two (KODE, JUDUL, PENULIS) rows.
    Exits with status 1 when the query/fetch fails.
    """
    try:
        conn = mysql.connector.connect(
            user="raharjo",
            password="123456789",
            host="127.0.0.1",
            database="PythonDB"
        )
        sql = """
            SELECT KODE, JUDUL, PENULIS
            FROM BUKU
        """
        try:
            cur = conn.cursor(buffered=True)
            cur.execute(sql)
            # Fetch at most two rows from the cursor.
            rows = cur.fetchmany(2)
            print("Menggunakan fetchmany(2):")
            for (kode, judul, penulis) in rows:
                print(kode, '\t',
                      judul, '\t',
                      penulis)
        # Was a bare `except:` — that silently swallowed every error
        # (including KeyboardInterrupt and programming bugs). Only catch
        # database errors here.
        except mysql.connector.Error:
            print("Pengambilan data gagal")
            sys.exit(1)
        else:
            cur.close()
    except mysql.connector.Error as e:
        print("ERROR ", e)
    else:
        conn.close()
if __name__ == "__main__":
main()
|
[
"romanbatavi98@gmail.com"
] |
romanbatavi98@gmail.com
|
941eed0f81560dccbcd378a4fa258db160bfd547
|
07bab8cd09c27e93c6eb0e0c47b6f472b4a89d45
|
/web/home/urls.py
|
f6c1eb52d83ff48da122cf768e4c2cc1af36d91e
|
[] |
no_license
|
arunchaganty/webbed-feet
|
0b0de344a64fe6a1d5619982d603a785d4ee02cb
|
d7b3d96900935d43bea97d175cb5552a1aba02d5
|
refs/heads/master
| 2021-01-01T06:44:59.555676
| 2011-11-28T17:17:52
| 2011-11-28T17:17:52
| 887,725
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
from django.conf.urls.defaults import *
# URL routes for the home app; specific paths first, catch-all home view last.
urlpatterns = patterns('web.home.views',
    (r'^logout/$', 'logout'),
    (r'^login/$', 'login'),
    (r'^ping/$', 'ping'),
    (r'^help/$', 'help'),
    (r'', 'home'),  # catch-all: everything else renders the home view
)
|
[
"arunchaganty@gmail.com"
] |
arunchaganty@gmail.com
|
573f4c351e671916ffa5970d3e4f0805bfefe12d
|
8c4a366c5dc9762e3c922b991e64c691a154ea88
|
/36.py
|
141e9ea9b51adc24496e1eb27e73e0aabb26772a
|
[] |
no_license
|
VINITHAKO/pro
|
bd7fec9b46d7975c46ba0cb42d353bc10965dbdb
|
c1c3cd943606324f5252f46dd33edf7e180bbb48
|
refs/heads/master
| 2020-06-24T13:36:15.327211
| 2019-08-13T06:10:52
| 2019-08-13T06:10:52
| 198,976,260
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
def _count_decreasing_triplets(values):
    """Count index triples i < j < k with values[i] > values[j] > values[k].

    O(n^2): for each middle index j, multiply the number of strictly larger
    values before j by the number of strictly smaller values after j. This
    replaces the original accidental O(n^3) triple loop with identical output.
    """
    total = 0
    for j in range(len(values)):
        larger_before = sum(1 for i in range(j) if values[i] > values[j])
        smaller_after = sum(1 for k in range(j + 1, len(values)) if values[j] > values[k])
        total += larger_before * smaller_after
    return total


# First input line is read (to keep the stdin protocol) but unused — the
# original overwrote it with len(n) as well.
v = int(input())
n = [int(x) for x in input().split()]
print(_count_decreasing_triplets(n))
|
[
"noreply@github.com"
] |
VINITHAKO.noreply@github.com
|
a90cf650bf1fb64fc1e55e83bc390af95dac2afa
|
f14bfd79d8bdcd012f21895084598d4bfe9fb9f2
|
/0x03-python-data_structures/7-add_tuple.py
|
4112e9aab615eb65a9654a62ed3705ceb696cbbd
|
[] |
no_license
|
ch-canaza/holbertonschool-higher_level_programming
|
1c62ae2e7798d79e619d8a133c3929720f317196
|
1d7402c90de37b920e163a04f196491a99d516c0
|
refs/heads/master
| 2023-01-18T23:06:57.738005
| 2020-11-12T21:57:53
| 2020-11-12T21:57:53
| 259,409,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
#!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
    """Component-wise sum of the first two slots of two tuples.

    Missing slots are treated as 0; extra elements beyond index 1 are ignored.
    Returns a 2-tuple (sum of index 0, sum of index 1).
    """
    padded_a = (tuple_a + (0, 0))[:2]
    padded_b = (tuple_b + (0, 0))[:2]
    return (padded_a[0] + padded_b[0], padded_a[1] + padded_b[1])
|
[
"ch-canaza@hotmail.com"
] |
ch-canaza@hotmail.com
|
3f6185d27af2a0cec9d7825cae72b4a476206fe0
|
466607c14d76c8d798e08f05dde2d79a07f6e069
|
/tests/databases/constructed_molecule/mongo_db/utilities.py
|
48ae44f71584e4bcfa63e090dc020faf4557262b
|
[
"MIT"
] |
permissive
|
andrewtarzia/stk
|
7c77006bacd4d3d45838ffb3b3b4c590f1bce336
|
9242c29dd4b9eb6927c202611d1326c19d73caea
|
refs/heads/main
| 2023-08-03T12:29:21.096641
| 2023-07-27T09:45:25
| 2023-07-27T09:45:25
| 191,198,174
| 0
| 1
|
MIT
| 2023-09-04T16:53:05
| 2019-06-10T15:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 850
|
py
|
from collections import Counter
from ...utilities import DatabaseState, get_entry
def get_database_state(database):
    """
    Get the state of a :class:`.ValueMongoDb`.

    Parameters
    ----------
    database : :class:`.ValueMongoDb`
        The database whose state is wanted.

    Returns
    -------
    :class:`.DatabaseState`
        The current state of `database`.
    """
    entries = Counter()
    # Fold every entry of each backing collection into one counter,
    # in the same order the original accumulated them.
    collections = (
        database._molecules,
        database._position_matrices,
        database._constructed_molecules,
        database._building_block_position_matrices,
    )
    for collection in collections:
        entries.update(map(get_entry, collection.find({})))
    return DatabaseState(entries)
|
[
"noreply@github.com"
] |
andrewtarzia.noreply@github.com
|
872a9c9f67e0dda9dbb5ded25dcc5a53ba331d4f
|
c0f7cc71eb5732d3b90da4f1e40c3f806f63bb29
|
/python/fibermeas/exceptions.py
|
e4375607ef2fc9760f3570e749c674ad21f10151
|
[
"BSD-3-Clause"
] |
permissive
|
sdss/fibermeas
|
6bb696ca2e46ded83baf3bc09a7b0e2024884789
|
4d29ff58a14b025cf6320ab1caef5f4bcbba394b
|
refs/heads/master
| 2023-06-24T12:09:38.449736
| 2021-05-07T08:14:52
| 2021-05-07T08:14:52
| 329,701,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-12-05 12:01:21
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-12-05 12:19:32
from __future__ import print_function, division, absolute_import
class FibermeasError(Exception):
    """A custom core Fibermeas exception"""

    def __init__(self, message=None):
        # Fall back to a generic message when none (or an empty one) is given.
        if not message:
            message = 'There has been an error'
        super(FibermeasError, self).__init__(message)
class FibermeasNotImplemented(FibermeasError):
    """A custom exception for not yet implemented features."""

    def __init__(self, message=None):
        if not message:
            message = 'This feature is not implemented yet.'
        super(FibermeasNotImplemented, self).__init__(message)
class FibermeasAPIError(FibermeasError):
    """A custom exception for API errors"""

    def __init__(self, message=None):
        # Wrap any caller-supplied detail in a standard API-error prefix.
        if message:
            message = 'Http response error from Fibermeas API. {0}'.format(message)
        else:
            message = 'Error with Http Response from Fibermeas API'
        super(FibermeasAPIError, self).__init__(message)
# Raised on API authentication failures; inherits the API-error message format.
class FibermeasApiAuthError(FibermeasAPIError):
    """A custom exception for API authentication errors"""
    pass
# Raised when an optional runtime dependency is absent.
class FibermeasMissingDependency(FibermeasError):
    """A custom exception for missing dependencies."""
    pass
# Root of the package's warning hierarchy (mirrors the exception hierarchy).
class FibermeasWarning(Warning):
    """Base warning for Fibermeas."""
# Dual-inherits UserWarning so standard warning filters on UserWarning apply.
class FibermeasUserWarning(UserWarning, FibermeasWarning):
    """The primary warning class."""
    pass
class FibermeasSkippedTestWarning(FibermeasUserWarning):
    """A warning for when a test is skipped."""
    pass
# Subclasses the package's UserWarning (not DeprecationWarning) so it is
# visible by default rather than silenced by the standard filters.
class FibermeasDeprecationWarning(FibermeasUserWarning):
    """A warning for deprecated features."""
    pass
|
[
"csayres@uw.edu"
] |
csayres@uw.edu
|
e1fb0821c00054365b7452d5b9ee05d208da67f2
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractUnknowntranslationsCom.py
|
896536aca1cc20dfe50f401116dd029e1050a31a
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
def extractUnknowntranslationsCom(item):
    '''
    Parser for 'unknowntranslations.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip items without volume/chapter information and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (tag in feed, release series name, translation type)
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for feed_tag, series_name, tl_type in known_tags:
        if feed_tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
2f78bd1f6bc462d026579f398379118dc3bdc186
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/importer/tflite2/tflite_schema_head/WhileOptions.py
|
c1ecce2b777e295124727c75a226511d952f2ee9
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_schema_head
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class WhileOptions(object):
    """Generated flatbuffers accessor for the tflite WhileOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root table offset from the buffer, then position an accessor there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = WhileOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsWhileOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # "TFL3" is the tflite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # WhileOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # WhileOptions
    def CondSubgraphIndex(self):
        # Field at vtable slot 4; default 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # WhileOptions
    def BodySubgraphIndex(self):
        # Field at vtable slot 6; default 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
# Generated builder helpers: the short aliases delegate to the prefixed names.
def WhileOptionsStart(builder): builder.StartObject(2)  # table with 2 fields
def Start(builder):
    return WhileOptionsStart(builder)
def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): builder.PrependInt32Slot(0, condSubgraphIndex, 0)
def AddCondSubgraphIndex(builder, condSubgraphIndex):
    return WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex)
def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
def AddBodySubgraphIndex(builder, bodySubgraphIndex):
    return WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex)
def WhileOptionsEnd(builder): return builder.EndObject()
def End(builder):
    return WhileOptionsEnd(builder)
|
[
"yao.zhang@greenwaves-technologies.com"
] |
yao.zhang@greenwaves-technologies.com
|
faf1f3684f84eac1eeec3b33cdfae00169f4277a
|
c38b292e7bfaa95ac9a5fbf56d247403941139f4
|
/ticket_details/views.py
|
b9fc42bf585a470a67fc1c6f572b4ab7f393d0dc
|
[] |
no_license
|
poojapauskar/vzcards-api
|
f6a79938ee032c1da1d04b86a266d939d73b8797
|
ed0185f36e274d46f978a8f670a4189571280e8b
|
refs/heads/master
| 2020-12-29T02:38:14.288750
| 2017-08-08T12:30:51
| 2017-08-08T12:30:51
| 44,667,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
from ticket_create.models import Ticket_create
from ticket_details.serializers import Ticket_detailsSerializer
from rest_framework import generics
# from ticket.permissions import IsOwnerOrReadOnly
# from rest_framework import permissions
from django.shortcuts import get_object_or_404
class Ticket_detailsDetail(generics.ListAPIView):
    """List every Ticket_create row matching the ticket_id URL kwarg."""

    serializer_class = Ticket_detailsSerializer

    def get_queryset(self):
        # Filter by the ticket_id captured from the URL pattern.
        return Ticket_create.objects.filter(ticket_id=self.kwargs['ticket_id'])
from django.contrib.auth.models import User
from ticket_details.serializers import UserSerializer
from rest_framework import permissions
# Read-only list endpoint exposing every User.
class UserList(generics.ListAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer
# Read-only detail endpoint for a single User.
class UserDetail(generics.RetrieveAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def perform_create(self, serializer):
        # NOTE(review): RetrieveAPIView never calls perform_create — this looks
        # like dead code copied from a create view; confirm and remove.
        serializer.save(owner=self.request.user)
from django.shortcuts import render
# Create your views here.
|
[
"git.poojapauskar@gmail.com"
] |
git.poojapauskar@gmail.com
|
d1353c31503e6d8d403d93c3e6896c4c42bb991d
|
16b81ffcb40b429bde1e9bc10e5eeddd9d2ec51f
|
/leetcode/largest-number.py
|
0fe00afd4ba572d7756d887bcf9932bddb740397
|
[] |
no_license
|
suminb/coding-exercise
|
564424b7b98ea768c57a5b7f106fd7844e8e2843
|
b8b9377e7a76b498a9e6fb325743b16cbc943932
|
refs/heads/master
| 2023-06-16T03:01:59.009635
| 2020-05-09T11:09:25
| 2020-05-09T11:09:25
| 171,614,062
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
# 179. Largest Number
# difficulty: medium
# https://leetcode.com/problems/largest-number/
from typing import List
import pytest
class CustomString(str):
    """A str ordered by concatenation: a > b exactly when a + b > b + a.

    Sorting these in reverse yields the digit-string order that maximizes
    the concatenated number.
    """

    def __gt__(self, other):
        return other + self < self + other

    def __lt__(self, other):
        return self + other < other + self
class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        """Arrange nums so their concatenation forms the largest number string."""
        joined = ''.join(sorted((CustomString(v) for v in nums), reverse=True))
        # Collapse a leading run of zeros (e.g. [0, 0] -> '0') while keeping
        # '' for an empty input list.
        while len(joined) > 1 and joined[0] == '0':
            joined = joined[1:]
        return joined
def test_compare():
    # CustomString orders by concatenation: a > b iff a + b > b + a.
    assert CustomString('12') > CustomString('121')
    assert CustomString('12') > CustomString('120')
    assert CustomString('122') > CustomString('121')
    assert CustomString('121') > CustomString('120')
@pytest.mark.parametrize('nums, expected', [
    ([], ''),
    ([5], '5'),
    ([0, 0], '0'),
    ([0, 0, 0], '0'),
    ([10, 2], '210'),
    ([10, 0, 0], '1000'),
    ([121, 12], '12121'),
    ([[3, 30, 34, 5, 9], '9534330']),  # list-style params; unpacks the same as a tuple
    ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], '9876543210'),
    ([93, 5, 3, 1, 3, 412, 45, 6151823579123, 3752], '9361518235791235454123752331'),
    ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6], '9876655443322110'),
])
def test(nums, expected):
    # End-to-end check of Solution.largestNumber against known answers.
    s = Solution()
    assert expected == s.largestNumber(nums)
if __name__ == '__main__':
pytest.main(['-v', __file__])
|
[
"suminb@gmail.com"
] |
suminb@gmail.com
|
f743a0d111f8b3360e91aaf494b4103c4ca98714
|
bef35774ebe121d9657a3d513a21073b8086b331
|
/advanced-algorithms/greedy-algorithm/min_operations.py
|
542e345b8a1ad9a2798e2bfebe31f3c693f476b0
|
[] |
no_license
|
greatertomi/Python-Algorithms-DataStructure
|
dab0097ec8a450dfddc47e3153359e6fcd873225
|
a0ad6aa78086ba39e6f100ac551ddadb9433f724
|
refs/heads/master
| 2022-04-14T10:52:32.986333
| 2020-04-17T16:04:22
| 2020-04-17T16:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
def min_operations(target):
    """Count steps needed to reduce a non-negative target to 0, where each
    step either halves an even value or subtracts 1 from an odd one."""
    steps = 0
    while target != 0:
        # Halve while even (parity test instead of the float-division trick).
        while target % 2 == 0:
            target //= 2
            steps += 1
        # Now odd: subtract one.
        target -= 1
        steps += 1
    return steps

print(min_operations(18))
|
[
"oshalusijohn@gmail.com"
] |
oshalusijohn@gmail.com
|
8e8be09d2a84c32daac3f5af9233b2981859a259
|
3b9bf497cd29cea9c24462e0411fa8adbfa6ba60
|
/placement-test/pt-test-2/Kangaroo.py
|
9915efe82926333e63f3e269d4b28590f33cbaa9
|
[] |
no_license
|
niteesh2268/coding-prepation
|
918823cb7f4965bec096ec476c639a06a9dd9692
|
19be0766f6b9c298fb32754f66416f79567843c1
|
refs/heads/master
| 2023-01-02T05:30:59.662890
| 2020-10-17T13:12:34
| 2020-10-17T13:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
    """Return 'YES' when the trailing kangaroo (x1, v1) can ever land on the
    leading one (x2, v2) in the same jump, else 'NO'.

    It can only catch up when it is strictly faster AND the head start is an
    exact multiple of the per-jump gain.
    """
    if v1 <= v2:
        return 'NO'
    return 'YES' if (x2 - x1) % (v1 - v2) == 0 else 'NO'
if __name__ == '__main__':
    # HackerRank harness: read "x1 v1 x2 v2" from stdin, write the answer
    # to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    x1V1X2V2 = input().split()

    x1 = int(x1V1X2V2[0])
    v1 = int(x1V1X2V2[1])
    x2 = int(x1V1X2V2[2])
    v2 = int(x1V1X2V2[3])

    result = kangaroo(x1, v1, x2, v2)

    fptr.write(result + '\n')
    fptr.close()
|
[
"akualajayaprakash@gmailcom"
] |
akualajayaprakash@gmailcom
|
6fe75c8f5b323d1035afb99283468de94ec63bc8
|
4a81f78adf1a1adfc7bb1d19e42171cc856b2ccc
|
/bin/sqlformat
|
aebd25c96ed652302ce3c726c885c87b9c66a703
|
[] |
no_license
|
Prones94/Music_Site
|
af910c18c442b802971bc0a624891860266d8f85
|
924bffbc2a935217e84292e682c06fe1e83bb018
|
refs/heads/master
| 2022-04-21T02:33:41.006384
| 2020-04-21T22:38:15
| 2020-04-21T22:38:15
| 256,361,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/admin/Desktop/MAKE/BEW/BEW-1.2/Music/music_site/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools: delegates to sqlparse's CLI.
import re
import sys

from sqlparse.__main__ import main

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix so the program name is clean.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ronesp.85@gmail.com"
] |
ronesp.85@gmail.com
|
|
abf65c1f1b69d082fe55861689026c4a2695eae2
|
5a514c289084b358c658590f2e2599aea4e4e0a8
|
/lib/engine/engine.py
|
ae0a01f5e1e06f6ca173a7444aeed05615dae022
|
[] |
no_license
|
gladiopeace/Tentacle
|
602d51015d25f71e331a27ccf07270567d4b4a0e
|
7b32937496415b77aec35a10551a4ff33863a829
|
refs/heads/master
| 2023-08-22T13:45:34.700076
| 2021-10-21T08:39:24
| 2021-10-21T08:39:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,293
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author = 'orleven'
import os
import time
import asyncio
import traceback
import async_timeout
from typing import AsyncIterable
from typing import Iterable
from typing import Union
from lib.core.data import logger
from lib.core.data import paths
from lib.core.async_pool import PoolCollector
from lib.core.pocmanage import POCManager
from lib.core.database import TaskDataDB
from script import Script
class Engine(object):
    """Base class for scan engines.

    Tracks task counters and timing, logs periodic progress, and owns the
    per-task result database. Subclasses implement submission and scanning.
    """

    def __init__(self, name: str, targets: AsyncIterable, pm: POCManager, engine_name='Engine'):
        # Identity and collaborators.
        self.spend_time = 0
        self.name = name
        self.engine_name = engine_name
        self.pm = pm
        self.targets = targets

        # Progress counters.
        self._total_task_count = 0
        self._error_task_count = 0
        self._find_task_count = 0

        # Timing / control.
        self.interval_time = 60  # seconds between progress log lines
        self.start_time = time.time()
        self.is_continue = True

        # Per-task result store; connect before init, as the DB API requires.
        self.hashdb = TaskDataDB(os.path.join(paths.DATA_PATH, name))
        self.hashdb.connect()
        self.hashdb.init()

    def print_progress(self, manager: PoolCollector):
        """Log a one-line summary of the pool's current progress."""
        self.spend_time = time.time() - self.start_time
        remaining = manager.remain_task_count
        scanned = self._total_task_count - remaining
        msg = '[%s] %s found | %s error | %s remaining | %s scanning | %s scanned in %.2f seconds.(total %s)' % (
            self.name, self._find_task_count, self._error_task_count, remaining,
            manager.scanning_task_count, scanned, self.spend_time,
            self._total_task_count)
        logger.sysinfo(msg)

    async def _progress_daemon(self, manager: PoolCollector):
        """Background task: emit a progress line every interval_time seconds."""
        while True:
            await asyncio.sleep(self.interval_time)
            self.print_progress(manager)

    async def submit_task(self, manager: PoolCollector):
        """subclass should override this function for _submit_task"""

    async def do_scan(self, module: Script, target: Union[dict]) -> Iterable[dict]:
        """subclass should override this function for do_scan"""

    async def enum(self):
        """subclass should override this function for enum"""
|
[
"546577246@qq.com"
] |
546577246@qq.com
|
d4209a045d8c2874fb783624e9d57859a3f90d01
|
febb7a4b889c2f40637e2b688eb770cf0809226f
|
/fython/test/instruction/iruc_test.py
|
4bd165906959e2adc6c9080adb1c9ba8161931ea
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nicolasessisbreton/fython
|
68253552c626640b5efc2a7cea9384c8e0425c08
|
988f5a94cee8b16b0000501a22239195c73424a1
|
refs/heads/master
| 2021-01-10T07:10:06.793158
| 2017-08-25T17:27:05
| 2017-08-25T17:27:05
| 50,076,320
| 48
| 3
| null | 2016-08-21T17:16:12
| 2016-01-21T02:30:31
|
Python
|
UTF-8
|
Python
| false
| false
| 225
|
py
|
s = r"""
.a.fy
int dimension(10) target x
int dimension(:) pointer y
y => x[3:5]
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0)
# print(open(w.module.url.fortran_path, 'r').read())
|
[
"contact@nicolasessisbreton.com"
] |
contact@nicolasessisbreton.com
|
f957ccaad9c4850b8a815994ef49492ba8f5b299
|
54f826d6103263e3983d1861ff902c62deb59916
|
/cart/admin.py
|
17023013cabe0fe5be3f2b25a5ad0d8d4e551005
|
[] |
no_license
|
arumjin/gt-qlik
|
e37582ae7d3f0ee1d4da0319459f92b588b1beb4
|
9cf859a669b2302f5430972528275fecdce70926
|
refs/heads/master
| 2022-11-08T18:57:35.386732
| 2020-06-30T00:55:15
| 2020-06-30T00:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django.contrib import admin
from .models import CartItem,Cart
# Register your models here.
# Admin changelist for CartItem rows.
class CartItemAdmin(admin.ModelAdmin):
    # Columns shown on the CartItem changelist.
    list_display = (
        'chart',
        'cart'
    )

admin.site.register(CartItem,CartItemAdmin)
# Admin changelist for Cart rows.
class CartAdmin(admin.ModelAdmin):
    list_display = (
        'user',
    )

admin.site.register(Cart,CartAdmin)
|
[
"tlfgjawlq@naver.com"
] |
tlfgjawlq@naver.com
|
97a7a1b661d847598f5ef105388d61266babb110
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/441/usersdata/311/109834/submittedfiles/lista1.py
|
9c83a6d93621cba2175ab70f2088ceea4fd25712
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
# -*- coding: utf-8 -*-
# Read a count, then that many integers (one per prompt) into list a.
n = int(input('Digite a quantidade de numeros: '))
a = [int(input('Digite o numero%d:' % (i + 1))) for i in range(n)]
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
362467cd5e32cd4dcb90e29eaca44d0b17706341
|
3b030444b2d1d9d57197ccba41387b447114b210
|
/config.py
|
fb716512cf2763712c551c3b4015a4743de47d8e
|
[] |
no_license
|
popfido/PairCNN-Ranking
|
ec85e45ef54f05a6b1778297cd316b2fa8a23a90
|
b29bbe774888e154a8bad5dafa67ec24aba33256
|
refs/heads/master
| 2020-03-09T10:21:58.509310
| 2018-04-09T09:27:02
| 2018-04-09T09:27:02
| 128,735,443
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
# coding:utf8
import warnings
class DefaultConfig(object):
    """Central hyper-parameter / runtime configuration for PairCNN training."""

    env = 'default'  # visdom environment name
    model = 'PairCNN'  # model to use; must match a name in models/__init__.py

    train_dir = './'
    train_data_root = './data/train/'  # training-set directory
    validate_data_root = './data/validate'  # validation-set directory
    test_data_root = './data/test/'  # test-set directory
    load_model_path = None  # path of a pretrained model; None means do not load

    dev_ratio = 0.1  # Ratio of dev/validation data picked from training set
    batch_size = 128  # batch size
    use_gpu = False  # user GPU or not
    num_workers = 4  # how many workers for loading data
    print_freq = 20  # print info every N batch
    eval_freq = 100  # Evaluate model on dev set after this many steps (default: 100)
    checkpoint_freq = 100  # Save model after this many steps (default: 100)

    debug_file = '/tmp/debug'  # if os.path.exists(debug_file): enter ipdb
    result_file = 'result.csv'
    seed = 233  # Random seed (default: 233)

    max_epoch = 20
    lr = 0.1  # initial learning rate
    lr_decay = 0.95  # when val_loss increase, lr = lr*lr_decay
    embedding_dim = 64  # Dimensionality of character embedding (default: 64)
    filter_sizes = "2,3"  # Comma-separated filter sizes (default: '2,3')
    num_filters = 64  # Number of filters per filter size (default: 64)
    num_hidden = 100  # Number of hidden layer units (default: 100)
    dropout_keep_prob = 0.5  # Dropout keep probability (default: 0.5)
    max_len_left = 10  # max document length of left input
    max_len_right = 10  # max document length of right input
    weight_decay = 1e-4  # l2_regularization
    vocab_size = 300000  # Most number of words in vocab (default: 300000)
def parse(self, kwargs):
"""
根据字典kwargs 更新 config参数
"""
for k, v in kwargs.items():
if not hasattr(self, k):
warnings.warn("Warning: opt has not attribut %s" % k)
setattr(self, k, v)
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('__'):
print(k, getattr(self, k))
DefaultConfig.parse = parse
opt = DefaultConfig()
# opt.parse = parse
|
[
"wanghailin317@gmail.com"
] |
wanghailin317@gmail.com
|
f4c3288cf1c1417cd9ed9515fb2741abe00f3bb9
|
07b4dd9a88f3404c4851ea7cbb57c67035bc9a54
|
/eric.py
|
b4e8a8f25539f10b194515115cc8fd428448ebe5
|
[] |
no_license
|
surajgholap/python-Misc
|
9c9d02c42bb37b7378d7336343f8bef7cd802edf
|
4a8ce4bfa5a959692d98663b7b5c0b67a165835f
|
refs/heads/master
| 2021-06-17T19:19:25.021038
| 2021-01-27T20:54:03
| 2021-01-27T20:54:03
| 142,781,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from collections import Counter
import requests
def top_five(corpus):
count_map = {}
for i in corpus:
try:
count_map[i] += 1
except:
count_map[i] = 1
# words = corpus.split()
# counter = Counter(words)
# most_fav = counter.most_common(5)
# for i in most_fav:
# print(i)
def clean_func(corpus, stop):
new = []
for i in corpus.split(" "):
i = i.lower()
if i.isalpha() and i not in stop:
new.append(i)
top_five(" ".join(new))
response = requests.get(
"https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt")
stop_words = requests.get(
"https://gist.githubusercontent.com/sebleier/554280/raw/7e0e4a1ce04c2bb7bd41089c9821dbcf6d0c786c/NLTK's%2520list%2520of%2520english%2520stopwords")
stop_list = stop_words.text.splitlines()
# print(stop_list)
content = response.text.splitlines()
content = " ".join(content[245:])
# print(content)
clean_func(content, stop_list)
|
[
"surajgholap27@gmail.com"
] |
surajgholap27@gmail.com
|
f56acf97b9abbc3b137bf4f924ed3ee07b7c5424
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/vitchyr/disentanglement/n_object_pnp/exp_5_on_random_object_init.py
|
c4a5e28964dd857af355d261e18481bdc3671f3c
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,317
|
py
|
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.disentanglement.contextual_encoder_distance_launcher import (
encoder_goal_conditioned_sac_experiment
)
from rlkit.launchers.launcher_util import run_experiment
if __name__ == "__main__":
variant = dict(
env_id='OneObjectPickAndPlace2DEnv-v0',
qf_state_encoder_is_goal_encoder=True,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_using_encoder_settings=dict(
encode_state=False,
encode_goal=False,
detach_encoder_via_goal=False,
detach_encoder_via_state=False,
),
sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
single_loss_weight=0.5,
use_automatic_entropy_tuning=True,
),
num_presampled_goals=5000,
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
# num_epochs=500,
# num_eval_steps_per_epoch=400,
# num_expl_steps_per_train_loop=1000,
# num_trains_per_train_loop=1000,
# min_num_steps_before_training=1000,
num_epochs=3,
num_eval_steps_per_epoch=100,
num_expl_steps_per_train_loop=100,
num_trains_per_train_loop=100,
min_num_steps_before_training=100,
),
replay_buffer_kwargs=dict(
fraction_future_context=0.5,
fraction_distribution_context=0.5,
max_size=int(1e6),
),
save_debug_video=False,
visualize_representation=False,
debug_visualization_kwargs=dict(
save_period=20,
initial_save_period=2,
),
save_video=True,
save_video_kwargs=dict(
save_video_period=20,
rows=3,
columns=3,
subpad_length=1,
subpad_color=127,
pad_length=1,
pad_color=0,
num_columns_per_rollout=5,
),
evaluation_goal_sampling_mode='random',
exploration_goal_sampling_mode='random',
exploration_policy_kwargs=dict(
exploration_version='occasionally_repeat',
repeat_prob=0.5,
),
encoder_cnn_kwargs=dict(
kernel_sizes=[3, 3, 3],
n_channels=[8, 16, 32],
strides=[1, 1, 1],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
),
use_image_observations=True,
env_renderer_kwargs=dict(
width=12,
height=12,
output_image_format='CHW',
),
video_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
debug_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
use_separate_encoder_for_policy=True,
encoder_kwargs=dict(
hidden_sizes=[],
),
distance_scatterplot_save_period=50,
distance_scatterplot_initial_save_period=10,
)
search_space = {
'reward_type': [
'state_distance',
# 'encoder_distance',
],
'use_image_observations': [
True,
# False,
],
'latent_dim': [
8,
# 16,
],
'max_path_length': [
40,
],
'encoder_kwargs.hidden_sizes': [
[],
],
'env_id': [
'TwoObject-PickAndPlace-OnRandomObjectInit-2D-v1',
],
'replay_buffer_kwargs.fraction_future_context': [
0.5,
],
'disentangled_qf_kwargs.architecture': [
# 'single_head_match_many_heads',
'many_heads',
],
'sac_trainer_kwargs.single_loss_weight': [
# 1.0,
# 0.9,
# 0.5,
# 0.1,
0.0,
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_name = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
# n_seeds = 2
# mode = 'sss'
# exp_name = 'n-object-pnp--exp-5--on-random-object-init'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for seed in range(n_seeds):
variant['exp_id'] = exp_id
# variant['seed'] = seed
run_experiment(
encoder_goal_conditioned_sac_experiment,
exp_name=exp_name,
mode=mode,
variant=variant,
use_gpu=False,
num_exps_per_instance=2,
# slurm_config_name='cpu_co',
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
b44857c46d895f4857faeac4e3deb4dff8e60872
|
c7a1406b2230acaf412542124ef744c83171fa9a
|
/perdiem/campaign/apps.py
|
d14c5e8fe339d634cbe6cded1f0c73ad9cefa0c4
|
[] |
no_license
|
GilbertRoy/perdiem-django
|
de2f1351088597fb2b5e739388f28ff346e5e824
|
3d1f00b21a28f71cb89e49986d07b893e5abe1d9
|
refs/heads/master
| 2020-03-14T03:30:21.445845
| 2018-04-21T21:44:20
| 2018-04-21T21:44:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.apps import AppConfig
class CampaignConfig(AppConfig):
name = 'campaign'
def ready(self):
import campaign.signals
|
[
"lucas.revolutiontech@gmail.com"
] |
lucas.revolutiontech@gmail.com
|
e4e396bdb9c9ff1453ea79cb8ca39725235f75db
|
8e1f493ce9fc34b42637bc7d69560aab20c384a3
|
/simple_filter/scripts/simple_kalman.py
|
7f10fadc9c8eb8b01238b66e7502fb534f4f3abd
|
[] |
no_license
|
AmyPhung/comprobo20
|
6f980a82174b3938527fb5939cdd539420aaff42
|
2eff4918275542d2d28828df97c8100d2391cfb0
|
refs/heads/master
| 2023-04-28T21:49:52.085491
| 2021-05-17T22:27:14
| 2021-05-17T22:27:14
| 290,074,022
| 0
| 0
| null | 2020-08-25T00:47:21
| 2020-08-25T00:47:21
| null |
UTF-8
|
Python
| false
| false
| 4,471
|
py
|
#!/usr/bin/env python3
"""
This script implements a Kalman filter for the system:
x_0 ~ N(0, sigma_sq)
x_t = x_{t-1} + w_t, w_t ~ N(0, sigma_m_sq)
z_t = x_t + v_t, v_t ~ N(0, sigma_z_sq)
"""
import matplotlib.pyplot as plt
import rospy
from numpy import arange
from numpy.random import randn
from math import e, sqrt, pi
from dynamic_reconfigure.server import Server
from simple_filter.cfg import SimpleKalmanConfig
class SimpleWorld(object):
""" A simple system with dynamics:
x_0 ~ N(0, sigma_sq)
x_t = x_{t-1} + w_t, w_t ~ N(0, sigma_m_sq)
z_t = x_t + v_t, v_t ~ N(0, sigma_z_sq)
"""
def __init__(self, mu_0, sigma_0, sigma_m_sq, sigma_z_sq):
""" the initial state is sampled from N(mu_0, sigma_0).
the movement noise is sigma_m_sq and the measurement noise is sigma_z_sq
"""
self.x_true = mu_0 + sqrt(sigma_0)*randn()
self.sigma_m_sq = sigma_m_sq
self.sigma_z_sq = sigma_z_sq
def get_z_t(self):
""" Sample an observation centered at x_true plus Gaussian noise
with variance sigma_sq_z and mean 0 """
return self.x_true + sqrt(self.sigma_z_sq)*randn()
def get_x_t(self):
""" Sample next system state as the current system state plus Gaussian
noise with variance sigma_sq_m and mean 0 """
self.x_true = self.x_true + sqrt(self.sigma_m_sq)*randn()
return self.x_true
class SimpleKalmanFilter(object):
""" A Kalman filter node that estimates a single state x_t using noisy position measurements """
def __init__(self):
""" Sets up the world model and loads initial parameters """
rospy.init_node('simple_kalman')
plt.ion()
# initial beliefs
self.mu = 0
self.sigma_sq = 1
# motor noise
sigma_m_sq = rospy.get_param('~sigma_m_sq', 0.01)
# observation noise
sigma_z_sq = rospy.get_param('~sigma_z_sq', .1)
# time to pause between plots
self.pause_time = rospy.get_param('~pause_time', 0.5)
self.graphs = None
self.world = SimpleWorld(self.mu, self.sigma_sq, sigma_m_sq, sigma_z_sq)
srv = Server(SimpleKalmanConfig, self.config_callback)
def config_callback(self, config, level):
""" Get the pause_time, movement noise, and measurement noise """
self.pause_time = config['pause_time']
self.world.sigma_m_sq = config['sigma_m_sq']
self.world.sigma_z_sq = config['sigma_z_sq']
return config
def run(self):
while not rospy.is_shutdown():
# Graph new observation from the system
z_t = self.world.get_z_t()
self.graphs = self.plot_pdf(z_t)
# Do Kalman updates
K_t = (self.sigma_sq + self.world.sigma_m_sq)/(self.sigma_sq + self.world.sigma_m_sq + self.world.sigma_z_sq)
self.mu = self.mu + K_t*(z_t - self.mu)
self.sigma_sq = (1-K_t)*(self.sigma_sq+self.world.sigma_m_sq)
plt.pause(self.pause_time)
self.graphs = self.plot_pdf(z_t)
# sample next state
self.world.get_x_t()
plt.pause(self.pause_time)
def plot_pdf(self, z):
""" Plot the Gaussian PDF with the specified mean (mu) and variance (sigma_sq)
x_true is the true system state which will be plotted in blue
z is the current observation which will be plotted in red """
xs = arange(min(-5,z-2,self.world.x_true-2), max(5,z+2,self.world.x_true+2), .005)
p_of_x = [1./sqrt(2*pi*self.sigma_sq)*e**(-(x - self.mu)**2/(2*self.sigma_sq)) for x in xs]
plt.xlim([min(xs), max(xs)])
if self.graphs:
self.graphs[0].set_xdata(xs)
self.graphs[0].set_ydata(p_of_x)
self.graphs[1].set_xdata(self.world.x_true)
self.graphs[2].set_xdata(z)
else:
self.graphs = []
self.graphs.append(plt.plot(xs, p_of_x)[0])
self.graphs.append(plt.plot(self.world.x_true, 0,'b.')[0])
self.graphs.append(plt.plot(z, 0,'r.')[0])
self.graphs[1].set_markersize(20)
self.graphs[2].set_markersize(20)
plt.ylim([0, 5])
plt.legend(('probability density','true position','measured position'))
plt.show(block=False)
return self.graphs
if __name__ == '__main__':
node = SimpleKalmanFilter()
node.run()
|
[
"paullundyruvolo@gmail.com"
] |
paullundyruvolo@gmail.com
|
77d284e2b345dc9df82af95355126cbf386ca2fd
|
a74a0317d8b8e1cf5135cbd0821617f70c8879ca
|
/old/python_resume/file.py
|
a0b117dbafe07691e133591f387574ae6e1beeb9
|
[] |
no_license
|
chuck1/python-resume
|
cbd3c0eb2fe3d0894b3809a2ac1526d171d6afc2
|
5b83fa831525faba17f72173cfff9c2155bd21fc
|
refs/heads/master
| 2021-01-10T19:14:08.036676
| 2017-01-04T01:23:03
| 2017-01-04T01:23:03
| 42,127,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
import os
import json
class Manager(object):
def __init__(self, root):
self.root = root
def get_path(self, filename):
return os.path.join(self.root, filename)
def read_text(self, filename):
path = self.get_path(filename)
with open(path, 'r') as f:
text = f.read()
return text
def write_text(self, filename, text):
path = self.get_path(filename)
#try:
#os.makedirs(os.path.dirname(path))
#except:
# pass
#fd = os.open(path, os.O_WRONLY, 0666)
#os.fchmod(fd,0666)
#os.close(fd)
with open(path, 'w') as f:
f.write(text)
def read_json(self, filename):
try:
text = self.read_text(filename)
except:
text = "{}"
j = json.loads(text)
return j
def write_json(self, filename, j):
text = json.dumps(j)
self.write_text(filename, text)
|
[
"charlesrymal@gmail.com"
] |
charlesrymal@gmail.com
|
ce72b7bb239177efb435d6cc7d06c93e1377518a
|
3fa8eead6e001c4d5a6dc5b1fd4c7b01d7693292
|
/ros _navigation_in_5_days/src/initialize_particles/scripts/init_particles_caller.py
|
e82c4f46632c1080b34ac406afdbf5a7b7ed4ca5
|
[] |
no_license
|
MarzanShuvo/Ros_from_the_construct
|
09261902841cdd832672658947790ec5fbba4cd3
|
4798234284d9d0bab3751e9d8ac2df95ae34a5bf
|
refs/heads/master
| 2023-08-24T17:28:09.182113
| 2021-10-23T07:57:02
| 2021-10-23T07:57:02
| 339,105,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
#! /usr/bin/env python
import rospy
from std_srvs.srv import Empty, EmptyRequest
import sys
rospy.init_node('service_client')
rospy.wait_for_service('/global_localization')
disperse_particles_service = rospy.ServiceProxy('/global_localization', Empty)
msg = EmptyRequest()
result = disperse_particles_service(msg)
print(result)
|
[
"marzanalam3@gmail.com"
] |
marzanalam3@gmail.com
|
a50e4c86ed9764db44777c7fcb47ec51f6780d04
|
6b2dcf691bc7f019d86270ec0588f5232fc3e2b0
|
/inflearn_practice/section7/최대점수 구하기.py
|
e8be936e34aa64f6cf45a818cae04129b1c64022
|
[] |
no_license
|
limgeonho/Algorithm
|
02c55fbf5b09b718dbc2aee83a887143d121ddaf
|
3d4d1ccd6ee3c52dc36ac3cf5f681690fcfdb6ab
|
refs/heads/master
| 2023-06-01T21:05:00.100998
| 2021-06-21T15:04:26
| 2021-06-21T15:04:26
| 371,552,176
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
#최대점수 구하기
#다음문제를 푸는 지 풀지 않는지에 대한 선택(O, X)문제
def DFS(L, time, sum):
global res
if time > m:
return
if L == n:
if sum > res:
res = sum
else:
DFS(L+1, time + t[L], sum + v[L])
DFS(L+1, time, sum)
n, m = map(int, input().split())
v= list()
t = list()
for _ in range(n):
a, b = map(int, input().split())
v.append(a)
t.append(b)
res = -2147000000
DFS(0, 0, 0)
print(res)
|
[
"ghlim909@gmail.com"
] |
ghlim909@gmail.com
|
a2ba4afc7c10c24b24bd646ab7250dcd81777313
|
0d9dd4ac458ac954e453e6f7810ca5e1c759f82d
|
/list
|
fb6c972de61fce49941283ab222a8e272a50cc63
|
[
"MIT"
] |
permissive
|
ovkulkarni/create-repo
|
9335307481686c8109baae7d88cd819dd7ca0cb6
|
0073cd761106e0c5453429204e8da56ba249eb1d
|
refs/heads/master
| 2021-01-10T05:20:54.898788
| 2016-03-30T14:14:03
| 2016-03-30T14:15:06
| 53,800,960
| 0
| 1
| null | 2016-03-14T02:44:39
| 2016-03-13T18:35:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,590
|
#!/usr/bin/env python3
######################################################################################
# #
#The MIT License (MIT) #
# #
#Copyright (c) 2016 Omkar Kulkarni #
# #
#Permission is hereby granted, free of charge, to any person obtaining a copy #
#of this software and associated documentation files (the "Software"), to deal #
#in the Software without restriction, including without limitation the rights #
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
#copies of the Software, and to permit persons to whom the Software is #
#furnished to do so, subject to the following conditions: #
# #
#The above copyright notice and this permission notice shall be included in all #
#copies or substantial portions of the Software. #
# #
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
#SOFTWARE. #
# #
######################################################################################
import requests
import yaml
import sys
import json
import getpass
import os
from termcolor import colored
try:
current_dir = os.path.dirname(os.path.realpath(__file__))
with open(current_dir + '/config.yml', 'r') as f:
config = yaml.load(f.read())
password = getpass.getpass("Enter your Github Password: ")
session = requests.Session()
session.auth = (config["username"], password)
url = 'https://api.github.com/user/repos'
print('[=] Sending request to Github...')
r = session.get(url)
if r.status_code == 200:
returned = json.loads(r.text)
for item in returned:
if item["private"]:
print(colored("[PRIVATE] {} - {}".format(item["full_name"], item["html_url"]), "red"))
else:
print("{} - {}".format(item["full_name"], item["html_url"]))
else:
print("[-] Unable to access repositories. Github returned an error of {}".format(r.status_code))
print("[-] Here is the full content Github returned: {}".format(json.loads(r.text)["message"]))
except KeyboardInterrupt as e:
print("\nExiting...")
sys.exit()
except requests.ConnectionError as e:
print("\n[-] Not Connected To Internet!")
print("Exiting...")
sys.exit()
except BaseException as e:
print("\nReceived an error of {}".format(str(e)))
print("Exiting...")
sys.exit()
|
[
"2019okulkarn@tjhsst.edu"
] |
2019okulkarn@tjhsst.edu
|
|
2e3a3c24699f253c7671d55206bcd6aa7151e478
|
5522054c40e9a35b68351bfa546c2e9fffd01340
|
/mobileoperators/settings.py
|
9fc9e98b471b6627856ba177fb3dccefadbf3c3f
|
[] |
no_license
|
thepylot/Mobile-Networks-App
|
6ee36243c4861063da8b1c086fc0db882a27cb09
|
4893b810b697e399564e1fb1bb6f738b61950b76
|
refs/heads/master
| 2021-11-27T14:22:13.843167
| 2019-02-08T10:37:25
| 2019-02-08T10:37:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
"""
Django settings for mobileoperators project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qhe1goa#897=s5hq^ci--vyby&2ty8wp_2t4dq!85u1iq%3kgb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'208.68.36.230',
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mobile',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobileoperators.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobileoperators.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"charliescene512@gmail.com"
] |
charliescene512@gmail.com
|
690bf90029924555962d2aa02c4d1d296434d857
|
4bf53a42b336e67ce75e220dc87f75af9911f036
|
/tapiriik/urls.py
|
4ced6e6a50e2b58565261db7601a7cafecb0b985
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
patricksan/tapiriik
|
5cee925d256f5e2b23397487ef807b5766b710ba
|
1628f8759c9e2d0562b92dd25561a347389f6cf3
|
refs/heads/master
| 2020-12-11T08:07:10.991800
| 2018-02-27T13:14:59
| 2018-02-27T13:14:59
| 38,956,416
| 0
| 0
|
Apache-2.0
| 2018-02-27T13:15:00
| 2015-07-12T09:22:01
|
Python
|
UTF-8
|
Python
| false
| false
| 7,197
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'tapiriik.web.views.dashboard', name='dashboard'),
url(r'^auth/redirect/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
url(r'^auth/redirect/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
url(r'^auth/return/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ),
url(r'^auth/return/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ), # django's URL magic couldn't handle the equivalent regex
url(r'^auth/login/(?P<service>.+)$', 'tapiriik.web.views.auth_login', {}, name='auth_simple', ),
url(r'^auth/login-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_login_ajax', {}, name='auth_simple_ajax', ),
url(r'^auth/persist-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_persist_extended_auth_ajax', {}, name='auth_persist_extended_auth_ajax', ),
url(r'^auth/disconnect/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect', {}, name='auth_disconnect', ),
url(r'^auth/disconnect-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect_ajax', {}, name='auth_disconnect_ajax', ),
url(r'^auth/logout$', 'tapiriik.web.views.auth_logout', {}, name='auth_logout', ),
url(r'^account/setemail$', 'tapiriik.web.views.account_setemail', {}, name='account_set_email', ),
url(r'^account/settz$', 'tapiriik.web.views.account_settimezone', {}, name='account_set_timezone', ),
url(r'^account/configure$', 'tapiriik.web.views.account_setconfig', {}, name='account_set_config', ),
url(r'^account/rollback/?$', 'tapiriik.web.views.account_rollback_initiate', {}, name='account_rollback_initiate', ),
url(r'^account/rollback/(?P<task_id>.+)$', 'tapiriik.web.views.account_rollback_status', {}, name='account_rollback_status', ),
url(r'^rollback$', 'tapiriik.web.views.rollback_dashboard', {}, name='rollback_dashboard', ),
url(r'^configure/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_save', {}, name='config_save', ),
url(r'^configure/dropbox$', 'tapiriik.web.views.config.dropbox', {}, name='dropbox_config', ),
url(r'^configure/flow/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_flow_save', {}, name='config_flow_save', ),
url(r'^settings/?$', 'tapiriik.web.views.settings.settings', {}, name='settings_panel', ),
url(r'^dropbox/browse-ajax/?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),
url(r'^dropbox/browse-ajax/(?P<path>.+)?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),
url(r'^sync/status$', 'tapiriik.web.views.sync_status', {}, name='sync_status'),
url(r'^sync/activity$', 'tapiriik.web.views.sync_recent_activity', {}, name='sync_recent_activity'),
url(r'^sync/schedule/now$', 'tapiriik.web.views.sync_schedule_immediate', {}, name='sync_schedule_immediate'),
url(r'^sync/errors/(?P<service>[^/]+)/clear/(?P<group>.+)$', 'tapiriik.web.views.sync_clear_errorgroup', {}, name='sync_clear_errorgroup'),
url(r'^activities$', 'tapiriik.web.views.activities_dashboard', {}, name='activities_dashboard'),
url(r'^activities/fetch$', 'tapiriik.web.views.activities_fetch_json', {}, name='activities_fetch_json'),
url(r'^sync/remote_callback/trigger_partial_sync/(?P<service>.+)$', 'tapiriik.web.views.sync_trigger_partial_sync_callback', {}, name='sync_trigger_partial_sync_callback'),
url(r'^diagnostics/$', 'tapiriik.web.views.diag_dashboard', {}, name='diagnostics_dashboard'),
url(r'^diagnostics/queue$', 'tapiriik.web.views.diag_queue_dashboard', {}, name='diagnostics_queue_dashboard'),
url(r'^diagnostics/errors$', 'tapiriik.web.views.diag_errors', {}, name='diagnostics_errors'),
url(r'^diagnostics/error/(?P<error>.+)$', 'tapiriik.web.views.diag_error', {}, name='diagnostics_error'),
url(r'^diagnostics/graphs$', 'tapiriik.web.views.diag_graphs', {}, name='diagnostics_graphs'),
url(r'^diagnostics/user/unsu$', 'tapiriik.web.views.diag_unsu', {}, name='diagnostics_unsu'),
url(r'^diagnostics/user/(?P<user>.+)$', 'tapiriik.web.views.diag_user', {}, name='diagnostics_user'),
url(r'^diagnostics/payments/$', 'tapiriik.web.views.diag_payments', {}, name='diagnostics_payments'),
url(r'^diagnostics/ip$', 'tapiriik.web.views.diag_ip', {}, name='diagnostics_ip'),
url(r'^diagnostics/login$', 'tapiriik.web.views.diag_login', {}, name='diagnostics_login'),
url(r'^supported-activities$', 'tapiriik.web.views.supported_activities', {}, name='supported_activities'),
# url(r'^supported-services-poll$', 'tapiriik.web.views.supported_services_poll', {}, name='supported_services_poll'),
url(r'^payments/claim$', 'tapiriik.web.views.payments_claim', {}, name='payments_claim'),
url(r'^payments/claim-ajax$', 'tapiriik.web.views.payments_claim_ajax', {}, name='payments_claim_ajax'),
url(r'^payments/promo-claim-ajax$', 'tapiriik.web.views.payments_promo_claim_ajax', {}, name='payments_promo_claim_ajax'),
url(r'^payments/claim-wait-ajax$', 'tapiriik.web.views.payments_claim_wait_ajax', {}, name='payments_claim_wait_ajax'),
url(r'^payments/claim/(?P<code>[a-f0-9]+)$', 'tapiriik.web.views.payments_claim_return', {}, name='payments_claim_return'),
url(r'^payments/return$', 'tapiriik.web.views.payments_return', {}, name='payments_return'),
url(r'^payments/confirmed$', 'tapiriik.web.views.payments_confirmed', {}, name='payments_confirmed'),
url(r'^payments/ipn$', 'tapiriik.web.views.payments_ipn', {}, name='payments_ipn'),
url(r'^payments/external/(?P<provider>[^/]+)/refresh$', 'tapiriik.web.views.payments_external_refresh', {}, name='payments_external_refresh'),
url(r'^ab/begin/(?P<key>[^/]+)$', 'tapiriik.web.views.ab_web_experiment_begin', {}, name='ab_web_experiment_begin'),
url(r'^privacy$', 'tapiriik.web.views.privacy.privacy', name='privacy'),
url(r'^garmin_connect_users$', TemplateView.as_view(template_name='static/garmin_connect_users.html'), name='garmin_connect_users'),
url(r'^faq$', TemplateView.as_view(template_name='static/faq.html'), name='faq'),
url(r'^credits$', TemplateView.as_view(template_name='static/credits.html'), name='credits'),
url(r'^contact$', TemplateView.as_view(template_name='static/contact.html'), name='contact'),
# Examples:
# url(r'^$', 'tapiriik.views.home', name='home'),
# url(r'^tapiriik/', include('tapiriik.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
|
[
"cpf@cpfx.ca"
] |
cpf@cpfx.ca
|
09da270e1e2c06a0e560ef30f7fadd91ddaec7e6
|
5330918e825f8d373d3907962ba28215182389c3
|
/CMGTools/ZJetsTutorial/python/samples/run2012/diboson.py
|
b5453eedeeb69a296e71f73508c838dda097261d
|
[] |
no_license
|
perrozzi/cmg-cmssw
|
31103a7179222c7aa94f65e83d090a5cf2748e27
|
1f4cfd936da3a6ca78f25959a41620925c4907ca
|
refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22
| 2021-01-16T23:15:58.556441
| 2017-05-11T22:43:15
| 2017-05-11T22:43:15
| 13,272,641
| 1
| 0
| null | 2017-05-11T22:43:16
| 2013-10-02T14:05:21
|
C++
|
UTF-8
|
Python
| false
| false
| 2,913
|
py
|
import CMGTools.RootTools.fwlite.Config as cfg
# exclusive madgraph samples
# -- -- -- -- -- -- -- --
WWJetsTo2L2Nu = cfg.MCComponent(
name = 'WWJetsTo2L2Nu',
files = [],
xSection = 5.824, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo2L2Q = cfg.MCComponent(
name = 'WZJetsTo2L2Q',
files = [],
xSection = 2.207, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo3LNu = cfg.MCComponent(
name = 'WZJetsTo3LNu',
files = [],
xSection = 1.058, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Nu = cfg.MCComponent(
name = 'ZZJetsTo2L2Nu',
files = [],
xSection = 0.716, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Q = cfg.MCComponent(
name = 'ZZJetsTo2L2Q',
files = [],
xSection = 2.502, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo4L = cfg.MCComponent(
name = 'ZZJetsTo4L',
files = [],
xSection = 0.181, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
mc_diboson_xcl = [
WWJetsTo2L2Nu,
WZJetsTo2L2Q,
WZJetsTo3LNu,
ZZJetsTo2L2Nu,
ZZJetsTo2L2Q,
ZZJetsTo4L
]
# inclusive pythia samples
# -- -- -- -- -- -- -- --
WW = cfg.MCComponent(
name = 'WW',
files = [],
# xSection = 57.1097, # correction factor from Valentina
xSection = 54.838, #PG numbers from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZ = cfg.MCComponent(
name = 'WZ',
files = [],
# xSection = 32.3161,
# xSection = 32.3161 * 0.97, #PG scale factor wrt exclusive samples XS
xSection = 33.21, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZ = cfg.MCComponent(
name = 'ZZ',
files = [],
# xSection = 8.25561, # correction factor from Valentina
# xSection = 8.3 * 2.13, #PG scale factor wrt exclusive samples XS
xSection = 17.654, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
# inclusive pythia samples
mc_diboson_inc = [
WW,
WZ,
ZZ
]
# exclusive madgraph samples
mc_diboson = mc_diboson_xcl
|
[
"colin.bernet@cern.ch"
] |
colin.bernet@cern.ch
|
341f3d6642671fb82aeba75ca4bc26459d43bd1f
|
f1593773b199c435114b316348b81126aa212cd6
|
/web_flask/6-number_odd_or_even.py
|
55e21f462a589f3a87e7aec051ee81b1abdeeef8
|
[] |
no_license
|
ledbagholberton/AirBnB_clone_v2
|
0f0f0889ed7fac9767e45b7fc17eafc388469738
|
8fefc58e76184fcfe86ec16dde1791fd8ff4777f
|
refs/heads/master
| 2020-07-07T02:20:17.093914
| 2019-09-10T06:13:44
| 2019-09-10T06:13:44
| 203,214,786
| 0
| 0
| null | 2019-08-19T17:01:24
| 2019-08-19T17:01:24
| null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
#!/usr/bin/python3
""" start a Flask Web application
"""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_hbnb():
""" Print Hello HBNB """
return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def only_hbnb():
""" Print HBNB """
return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def cissome(text):
""" Print C + <name> without underscore """
return("C {}".format(text.replace("_", " ")))
@app.route('/python/', strict_slashes=False)
@app.route('/python', strict_slashes=False)
def pythonalone():
""" Print Python is cool ...by default """
return("Python is cool")
@app.route('/python/<text>', strict_slashes=False)
def pythonissome(text):
""" Print Python + <name> without underscore """
return("Python {}".format(text.replace("_", " ")))
@app.route('/number/<nummer>', strict_slashes=False)
def numberisint(nummer):
""" Print number if it s a number """
if nummer.isdigit():
return("{} is a number".format(nummer))
@app.route('/number_template/<nummer>', strict_slashes=False)
def number_template(nummer):
""" Print a template with a variable """
if nummer.isdigit():
return render_template('5-number.html', name=nummer)
else:
return render_template('no_found.html'), 404
@app.route('/number_odd_or_even/<nummer>', strict_slashes=False)
def number_even(nummer):
""" Print a template witheven or odd """
if nummer.isdigit():
if (int(nummer) % 2) == 0:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="even")
else:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="odd")
else:
return render_template('no_found.html'), 404
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
|
[
"789@holbertonschool.com"
] |
789@holbertonschool.com
|
bffd4513031c134591a90b558e1174567f6690bc
|
3155c38585c5d1cf27c4d8065cb5821f5b980983
|
/package/awesome_panel/database/settings.py
|
0c78182ac946b4d7c64acbe8293eb5400c0aa261
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
jlstevens/awesome-panel
|
460c86ac886a86fa1a3f6ec79b6186f292ca10bc
|
c67b0f4529a3ce6a8517648f49fef8358e2e2c8b
|
refs/heads/master
| 2020-11-25T03:11:10.018557
| 2019-12-16T20:57:07
| 2019-12-16T20:57:07
| 228,474,317
| 0
| 0
|
Apache-2.0
| 2019-12-16T20:55:56
| 2019-12-16T20:55:55
| null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
"""In this module we provide a list of settings"""
GITHUB_URL = "https://github.com/MarcSkovMadsen/awesome-panel/"
GITHUB_BLOB_MASTER_URL = "https://github.com/MarcSkovMadsen/awesome-panel/blob/master/"
GITHUB_RAW_URL = "https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/master/"
GITHUB_THUMBNAILS_URL = (
"https://github.com/MarcSkovMadsen/awesome-panel/blob/master/assets/images/thumbnails/"
)
THUMBNAILS_ROOT = "assets/images/thumbnails/"
|
[
"MASMA@orsted.dk"
] |
MASMA@orsted.dk
|
2441cabae99ae34d4d6dd1b980b760e07462a3ee
|
bee9a140f51f85c612f4e869747aae3d155188c5
|
/src/main/python/systemds/operator/algorithm/builtin/l2svm.py
|
cd7db9e4dc52e3b06e2141612f7bb57105b73816
|
[
"Apache-2.0"
] |
permissive
|
clarapueyoballarin/systemds
|
cb64a494afd14da142269c788c76edb236d8b755
|
a68a71ddb089ebdd52e8f316a03bda281f4532ba
|
refs/heads/master
| 2023-05-22T16:39:04.409220
| 2021-06-17T16:14:15
| 2021-06-17T16:14:15
| 341,305,828
| 0
| 1
|
Apache-2.0
| 2021-02-24T22:51:31
| 2021-02-22T18:59:49
|
Java
|
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/l2svm.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def l2svm(X: OperationNode, Y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""
:param X: matrix X of feature vectors
:param Y: matrix Y of class labels have to be a single column
:param intercept: No Intercept ( If set to TRUE then a constant bias column is added to X)
:param epsilon: Procedure terminates early if the reduction in objective function value is less than epsilon (tolerance) times the initial objective function value.
:param lambda: Regularization parameter (lambda) for L2 regularization
:param maxIterations: Maximum number of conjugate gradient iterations
:param maxii: -
:param verbose: Set to true if one wants print statements updating on loss.
:param columnId: The column Id used if one wants to add a ID to the print statement, Specificly usefull when L2SVM is used in MSVM.
:return: 'OperationNode' containing model matrix
"""
X._check_matrix_op()
Y._check_matrix_op()
params_dict = {'X':X, 'Y':Y}
params_dict.update(kwargs)
return Matrix(X.sds_context, 'l2svm', named_input_nodes=params_dict)
|
[
"baunsgaard@tugraz.at"
] |
baunsgaard@tugraz.at
|
499ca439f8deb4c3c382d1c47c6df47873853d24
|
e4de060c295fba0d0386d0a7678e744ced18b920
|
/build/car_szenario/cmake/car_szenario-genmsg-context.py
|
309534aaf9d5f6c3f170188f065ab4b9cae655ff
|
[] |
no_license
|
jbenzhhn/carla_hhn
|
af9497d01ce1f34ee0016ca660a0cc5af5f71be8
|
abd803bcdd506641c8152ec994468518ea809f1b
|
refs/heads/master
| 2023-04-05T10:50:28.934452
| 2021-04-07T14:31:41
| 2021-04-07T14:31:41
| 355,151,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/automotive/catkin_ws/src/car_szenario/msg/RoadInfo.msg"
services_str = ""
pkg_name = "car_szenario"
dependencies_str = "std_msgs;geometry_msgs;nav_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "car_szenario;/home/automotive/catkin_ws/src/car_szenario/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;nav_msgs;/opt/ros/melodic/share/nav_msgs/cmake/../msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"johannes.benz@hs-heilbronn.de"
] |
johannes.benz@hs-heilbronn.de
|
f78c251f5afd3689e2c6083e1fc40349ec45fb72
|
3fa4aedf320396c3d780ba3cd3c4760ac007ee30
|
/nba_api/stats/endpoints/boxscoremiscv2.py
|
9a949ea6e5741e796fda0f6a041829728481a016
|
[
"MIT"
] |
permissive
|
Fragadule/nba_api
|
fb4adfe14d355223838df80aa52ab68d2be3c492
|
9df8ba11ade56a1f6b4ff0791adc276052a286c6
|
refs/heads/master
| 2020-04-13T19:51:43.835155
| 2018-12-28T13:41:47
| 2018-12-28T13:41:47
| 163,414,364
| 0
| 0
|
MIT
| 2018-12-28T13:37:28
| 2018-12-28T13:37:28
| null |
UTF-8
|
Python
| false
| false
| 1,841
|
py
|
from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
from nba_api.stats.library.parameters import EndPeriod, EndRange, RangeType, StartPeriod, StartRange
class BoxScoreMiscV2(Endpoint):
endpoint = 'boxscoremiscv2'
expected_data = {'sqlPlayersMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'PLAYER_ID', 'PLAYER_NAME', 'START_POSITION', 'COMMENT', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD'], 'sqlTeamsMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_NAME', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD']}
def __init__(self,
game_id,
end_period=EndPeriod.default,
end_range=EndRange.default,
range_type=RangeType.default,
start_period=StartPeriod.default,
start_range=StartRange.default):
self.nba_response = NBAStatsHTTP().send_api_request(
endpoint=self.endpoint,
parameters={
'GameID': game_id,
'EndPeriod': end_period,
'EndRange': end_range,
'RangeType': range_type,
'StartPeriod': start_period,
'StartRange': start_range
},
)
data_sets = self.nba_response.get_data_sets()
self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
self.sql_players_misc = Endpoint.DataSet(data=data_sets['sqlPlayersMisc'])
self.sql_teams_misc = Endpoint.DataSet(data=data_sets['sqlTeamsMisc'])
|
[
"swarchon@gmail.com"
] |
swarchon@gmail.com
|
9e576c530de7906567dbe5b9d96b25f93accd231
|
cf8be80fe9d7acfae03d86430d1c8ff8d22a8655
|
/ribosome/components/internal/mapping.py
|
3f86df4834cd10bddda137ef16920a9828206e20
|
[
"MIT"
] |
permissive
|
tek/ribosome-py
|
4da2faf3f7c2d646c5a90bf73e81ec12bd360d38
|
8bd22e549ddff1ee893d6e3a0bfba123a09e96c6
|
refs/heads/master
| 2022-12-21T22:46:49.075358
| 2020-08-31T16:22:51
| 2020-08-31T16:22:51
| 66,086,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
from typing import Callable
from amino import do, curried, Do, __, _, Either
from amino.lenses.lens import lens
from amino.logging import module_log
from ribosome.nvim.io.state import NS
from ribosome.data.plugin_state import PluginState
from ribosome.nvim.io.compute import NvimIO
from ribosome.compute.program import Program
from ribosome.config.component import Components
from ribosome.nvim.api.command import nvim_command
from ribosome.data.mapping import Mapping, MapMode
log = module_log()
def mapping_handler(mapping: Mapping) -> Callable[[Components], Either[str, Program]]:
def mapping_handler(components: Components) -> Either[str, Program]:
return components.all.find_map(__.mappings.lift(mapping)).to_either(f'no handler for {mapping}')
return mapping_handler
def mapping_cmd(plugin: str, mapping: Mapping, mode: MapMode) -> NvimIO[None]:
buf = '<buffer>' if mapping.buffer else ''
keys = mapping.keys.replace('<', '<lt>')
rhs = f''':call {plugin}Map('{mapping.ident}', '{keys}')<cr>'''
return nvim_command(
f'{mode.mnemonic}map',
buf,
'<silent>',
mapping.keys,
rhs,
)
@do(NS[PluginState, None])
def activate_mapping(mapping: Mapping) -> Do:
handler = yield NS.inspect_either(mapping_handler(mapping)).zoom(lens.components)
yield NS.modify(__.append.active_mappings((mapping.ident, handler)))
plugin = yield NS.inspect(_.camelcase_name)
yield NS.lift(mapping.modes.traverse(curried(mapping_cmd)(plugin, mapping), NvimIO))
__all__ = ('activate_mapping',)
|
[
"torstenschmits@gmail.com"
] |
torstenschmits@gmail.com
|
73faa7aa222e5a2139a1e51d55fc948bf578dafc
|
059a61afa19361fe2dd3509cda7924a3eb74b8e0
|
/bookmanager/book/models.py
|
4dab062e1015fd45c479fdd1df213fcc75dbe06f
|
[
"MIT"
] |
permissive
|
songaiwen/Django2
|
bf3628b7dcd1c28b65644ecfb4442091fdf54991
|
685e41a7f90e4d245f361f8fb78992aebd422978
|
refs/heads/master
| 2020-03-19T17:54:17.669938
| 2018-06-19T01:38:43
| 2018-06-19T01:38:43
| 136,783,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
from django.db import models
"""
create your models here
定义模型类
模型迁移
操作数据库
"""
#1.定义模型需要集成model.Model
#准备书籍列表信息的模型类
class BookInfo(models.Model):
#创建字段,字段类型,自动创建主键并自动增长
name = models.CharField(max_length=20)
def __str__(self):
#将模型类以字符串的方式输出
return self.name
#准备人物列表信息的模型类
class PeopleInfo(models.Model):
name = models.CharField(max_length=20)
gender = models.BooleanField()
#外键约束,人物属于哪本书
book = models.ForeignKey(BookInfo)
def __str__(self):
return self.name
|
[
"576883213@qq.com"
] |
576883213@qq.com
|
1a6f968edf5fdb4c61c2389a23c364c8b3fffc69
|
c11c337d4f2a609326fe8545c70dafb918ad8110
|
/maintenance/mtrack/scripts.py
|
757f45ca27e71c689cf94262e0725a2cd3a0d47e
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
summertriangle-dev/arposandra
|
7b7f62b63cebe07c6b3b24321a0d01623dfed2b3
|
d4fcbec32e86a96c7d810d3d146695eb0b384889
|
refs/heads/master
| 2023-07-25T02:55:37.534890
| 2023-07-07T01:05:12
| 2023-07-07T01:18:02
| 213,795,406
| 19
| 4
|
NOASSERTION
| 2023-03-04T05:48:36
| 2019-10-09T01:48:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,905
|
py
|
# TODO: I haven't looked very carefully at optimizing these queries.
# May want to come back after a couple years and see how they're doing.
# We sort based on latest release.
def update_set_sort_table():
return f"""
INSERT INTO card_p_set_index_v2__sort_dates
(SELECT representative, server_id, MAX(date) FROM card_index_v1__release_dates
INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
GROUP BY (representative, server_id))
ON CONFLICT (representative, server_id) DO UPDATE SET
date = excluded.date;
WITH rd AS (
SELECT representative, (CASE WHEN MIN(date) < '2020-08-05 08:00:00'::timestamp THEN 0 ELSE 1 END) AS have_shio
FROM card_index_v1__release_dates
INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
WHERE server_id = 'jp'
GROUP BY (representative)
)
UPDATE card_p_set_index_v2 SET nijigasaki_member_state =
(SELECT have_shio FROM rd WHERE rd.representative = card_p_set_index_v2.representative)
WHERE nijigasaki_member_state IS NULL;
-- Do it twice for sets without a release date.
UPDATE card_p_set_index_v2 SET nijigasaki_member_state = 0
WHERE nijigasaki_member_state IS NULL
"""
# Tries to set the release date based on feature list from newly added history
# records. If a card was released without a feature and featured later, the
# date will be set wrong. This won't happen though. In theory...
def update_card_release_dates(prefix):
return f"""
WITH rdates AS (
SELECT DISTINCT ON (card_id, {prefix}history_v5__dates.serverid)
card_id, {prefix}history_v5__dates.serverid, {prefix}history_v5__dates.date
FROM {prefix}history_v5__card_ids
INNER JOIN {prefix}history_v5__dates ON (
{prefix}history_v5__dates.id = {prefix}history_v5__card_ids.id
AND {prefix}history_v5__card_ids.serverid = {prefix}history_v5__dates.serverid
AND type = (CASE
WHEN what = 2 THEN 1
WHEN what = 3 THEN 2
WHEN what = 4 THEN 2
ELSE 2
END)
)
ORDER BY card_id, {prefix}history_v5__dates.serverid, date
)
INSERT INTO card_index_v1__release_dates (
(SELECT card_id, serverid, date FROM rdates)
) ON CONFLICT DO NOTHING;
-- First try the entire history table, because we want the oldest source, but restrict to cards that appeared in the partial update.
UPDATE card_index_v1 SET
source = (SELECT history_v5__card_ids.what FROM history_v5__card_ids
INNER JOIN history_v5 USING (id, serverid) WHERE card_id = card_index_v1.id
ORDER BY sort_date LIMIT 1)
WHERE (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1) IS NOT NULL;
-- If still null it wasn't featured before, so go ahead and use the new hist list
UPDATE card_index_v1 SET
source = (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1)
WHERE source IS NULL
"""
def update_hist_event_link():
return """
WITH event_match AS (
SELECT event_v2.serverid AS sid, event_id, history_v5__dates.id AS hid FROM history_v5__dates
INNER JOIN event_v2 ON (history_v5__dates.serverid=event_v2.serverid
AND EXTRACT(epoch FROM history_v5__dates.date - event_v2.start_t) = 0)
WHERE type = 1
)
INSERT INTO history_v5__dates (
(SELECT hid, sid, 7, NULL, event_id FROM event_match)
) ON CONFLICT DO NOTHING;
"""
|
[
"summertriangle.dev@gmail.com"
] |
summertriangle.dev@gmail.com
|
8e547b29dfdd757e9da010a8fcb2e0a74ff18ac0
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-web/azure/mgmt/web/models/hosting_environment_profile.py
|
de68d8bc558ed823419c5846beb0d775e9a116d2
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HostingEnvironmentProfile(Model):
"""Specification for an App Service Environment to use for this resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID of the App Service Environment.
:type id: str
:ivar name: Name of the App Service Environment.
:vartype name: str
:ivar type: Resource type of the App Service Environment.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HostingEnvironmentProfile, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
e8d197368d8a83bbf36c5e39a424a7e7a44b5b7c
|
632dcb4e37cadd87cb7ff8715b0048df5cd0d11b
|
/CompuCell3D/core/Demos/SBMLSolverExamples/SBMLSolverAntimony/SBMLSolverAntimony2/Simulation/SBMLSolverAntimony2Steppables.py
|
007262772d5e9b8242557c617ed99a115ce20b47
|
[
"MIT"
] |
permissive
|
CompuCell3D/CompuCell3D
|
df638e3bdc96f84b273978fb479842d071de4a83
|
65a65eaa693a6d2b3aab303f9b41e71819f4eed4
|
refs/heads/master
| 2023-08-26T05:22:52.183485
| 2023-08-19T17:13:19
| 2023-08-19T17:13:19
| 12,253,945
| 51
| 41
| null | 2023-08-27T16:36:14
| 2013-08-20T20:53:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,920
|
py
|
from cc3d.core.PySteppables import *
class SBMLSolverSteppable(SteppableBasePy):
def __init__(self, frequency=1):
SteppableBasePy.__init__(self, frequency)
def start(self):
# Antimony model string: cell type 1
model_string_type1 = """model type1()
# Model
S1 => S2; k1*S1
# Initial conditions
S1 = 0
S2 = 1
k1 = 1
end"""
# Antimony model string: cell type 2
model_string_type2 = """model type2()
# Model
S2 => S1; k2*S2
# Initial conditions
S1 = 0
S2 = 0
k2 = 1
end"""
# adding options that setup SBML solver integrator
# these are optional but useful when encountering integration instabilities
options = {'relative': 1e-10, 'absolute': 1e-12}
self.set_sbml_global_options(options)
step_size = 0.001
# Apply model strings to cell types
self.add_antimony_to_cell_types(model_string=model_string_type1, model_name='dpType1', cell_types=[self.TYPE1],
step_size=step_size)
self.add_antimony_to_cell_types(model_string=model_string_type2, model_name='dpType2', cell_types=[self.TYPE2],
step_size=step_size)
def step(self, mcs):
self.timestep_sbml()
def finish(self):
# this function may be called at the end of simulation - used very infrequently though
return
class SecretionSteppable(SecretionBasePy):
def __init(self, frequency=1):
SecretionBasePy.__init__(self, frequency)
def step(self, mcs):
consume_s1 = 1
consume_s2 = 1
secrete_s1 = 1
secrete_s2 = 1
field1 = self.field.Field1
field2 = self.field.Field2
for cell in self.cell_list_by_type(self.TYPE1):
this_cell_s1 = cell.sbml.dpType1['S1']
this_cell_s2 = cell.sbml.dpType1['S2']
cell_volume = cell.volume
if this_cell_s2 > 0.75:
this_secrete_s2 = secrete_s2
else:
this_secrete_s2 = 0
pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
sbml_values = cell.sbml.dpType1.values()
s1_consumed = 0
for pixel_data in pixel_list:
pt = pixel_data.pixel
field_value = field1.get(pt)
s1_consumed += field_value * consume_s1
s2_secreted = this_cell_s2 * this_secrete_s2
cell.sbml.dpType1['S1'] = this_cell_s1 + s1_consumed
cell.sbml.dpType1['S2'] = this_cell_s2 - s2_secreted
for pixel_data in pixel_list:
pt = pixel_data.pixel
field1_val = field1.get(pt) - s1_consumed / cell_volume
field2_val = field2.get(pt) + s2_secreted / cell_volume
field1.set(pt, field1_val)
field2.set(pt, field2_val)
for cell in self.cell_list_by_type(self.TYPE2):
this_cell_s1 = cell.sbml.dpType2['S1']
this_cell_s2 = cell.sbml.dpType2['S2']
cell_volume = cell.volume
if this_cell_s1 > 0.75:
this_secrete_s1 = secrete_s1
else:
this_secrete_s1 = 0
pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
s2_consumed = 0
for pixel_data in pixel_list:
pt = pixel_data.pixel
field_value = field2.get(pt)
s2_consumed += field_value * consume_s2
S1_secreted = this_cell_s1 * this_secrete_s1
cell.sbml.dpType2['S1'] = this_cell_s1 - S1_secreted
cell.sbml.dpType2['S2'] = this_cell_s2 + s2_consumed
for pixel_data in pixel_list:
pt = pixel_data.pixel
field1_val = field1.get(pt) + S1_secreted / cell_volume
field2_val = field2.get(pt) - s2_consumed / cell_volume
field1.set(pt, field1_val)
field2.set(pt, field2_val)
# Demo: accessing SBML values for further manipulation/coupling with other components
class IdFieldVisualizationSteppable(SteppableBasePy):
def __init__(self, frequency=1):
SteppableBasePy.__init__(self, frequency)
self.create_scalar_field_cell_level_py("IdFieldS1")
self.create_scalar_field_cell_level_py("IdFieldS2")
def step(self, mcs):
id_field_s1 = self.field.IdFieldS1
id_field_s2 = self.field.IdFieldS2
for cell in self.cell_list_by_type(self.TYPE1):
id_field_s1[cell] = cell.sbml.dpType1['S1']
id_field_s2[cell] = cell.sbml.dpType1['S2']
for cell in self.cell_list_by_type(self.TYPE2):
id_field_s1[cell] = cell.sbml.dpType2['S1']
id_field_s2[cell] = cell.sbml.dpType2['S2']
|
[
"maciekswat@gmail.com"
] |
maciekswat@gmail.com
|
95d5c5cadc9fb00f3c1f71d28ec0233c15f404b7
|
5a1e1756025bacae88b619d388ebf61b330001ab
|
/1.Class/Language_Python-master/Language_Python-master/LC4_HW3.py
|
7acbc1e8e531e1ec64cb6af03598dee0507db0cb
|
[] |
no_license
|
reshmaladi/Python
|
d1953497703aa15e163cd8ac27be23e3e5c3e947
|
8e9092af63476fef35d221e20acf418983957e53
|
refs/heads/master
| 2021-10-15T00:55:08.136039
| 2021-10-01T14:32:16
| 2021-10-01T14:32:16
| 165,836,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
x = input("Enter a 1st string \t")
y = input("Enter a 2nd string \t")
print("Swap : \n" + y[0:2] +x[2:] + "\n" + x[0:2] + y[2:])
|
[
"reshma.ladi@gmail.com"
] |
reshma.ladi@gmail.com
|
765503c6c7b8f463814da29afd3332f8f03d5e40
|
a053d60e2c84750cf1c51142bfdf6dec5048bf25
|
/demo.py
|
4cfc291b204313593868375ac5df2099451fc16d
|
[] |
no_license
|
Sharpiless/paddlex-driver-state-recognition
|
ed57e58bebcdccc19302dcb49e950dd66be9ed45
|
81f81f72e9b893c8adca8f9aaba3615dc7aff7c7
|
refs/heads/master
| 2023-03-18T23:02:15.255664
| 2020-06-02T15:42:38
| 2020-06-02T15:42:38
| 268,839,488
| 2
| 3
| null | 2021-03-07T13:43:21
| 2020-06-02T15:32:52
|
Java
|
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
import matplotlib
import paddlex as pdx
import paddle.fluid as fluid
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from facedet import FaceDet
fontC = ImageFont.truetype('./platech.ttf', 20, 0)
def drawText(img, addText, x1, y1):
color = (20, 255, 20)
# img = Image.fromarray(image)
draw = ImageDraw.Draw(img)
draw.text((x1, y1),
addText.encode("utf-8").decode("utf-8"),
color, font=fontC)
imagex = np.array(img)
return imagex
save_dir = './best_model'
model = pdx.load_model(save_dir)
classes = {'c0': 'normal driving',
'c1': 'texting-right',
'c2': 'talking on the phone-right',
'c3': 'texting-left',
'c4': 'talking on the phone-left',
'c5': 'operating the radio',
'c6': 'drinking',
'c7': 'reaching behind',
'c8': 'hair and makeup',
'c9': 'talking to passenger'}
base = './test_images'
det = FaceDet(thread=0.1)
for im in os.listdir(base):
pt = os.path.join(base, im)
result = model.predict(pt)
print(result)
lbl = classes[result[0]['category']]+' '+str(result[0]['score'])
image = cv2.imread(pt)
image = det.detect(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = drawText(image, lbl, 0, 10)
plt.imshow(image)
plt.show()
|
[
"1691608003@qq.com"
] |
1691608003@qq.com
|
7730f23c1fe157a139eaf71edadb4982a38877c1
|
0d39e91482abe7f40523e9e225ede5464295888f
|
/mitogen/unix.py
|
1af1c0ec6b66522ccdaa603778a48f45502f81cc
|
[
"BSD-3-Clause"
] |
permissive
|
eamanu/python-mitogen
|
bdccdd7ceca4f1b114bf3e28556eb0d959b008e8
|
e93c7aae83b130abe1ef2dcf829d32e40f9fe8b1
|
refs/heads/master
| 2022-04-29T17:01:32.451975
| 2019-10-24T00:30:20
| 2019-10-24T00:45:18
| 217,181,829
| 1
| 0
|
BSD-3-Clause
| 2022-03-29T21:58:20
| 2019-10-24T01:02:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,133
|
py
|
# Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# !mitogen: minify_safe
"""
Permit connection of additional contexts that may act with the authority of
this context. For now, the UNIX socket is always mode 0600, i.e. can only be
accessed by root or the same UID. Therefore we can always trust connections to
have the same privilege (auth_id) as the current process.
"""
import errno
import logging
import os
import socket
import struct
import sys
import tempfile
import mitogen.core
import mitogen.master
LOG = logging.getLogger(__name__)
class Error(mitogen.core.Error):
"""
Base for errors raised by :mod:`mitogen.unix`.
"""
pass
class ConnectError(Error):
"""
Raised when :func:`mitogen.unix.connect` fails to connect to the listening
socket.
"""
#: UNIX error number reported by underlying exception.
errno = None
def is_path_dead(path):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
try:
s.connect(path)
except socket.error:
e = sys.exc_info()[1]
return e.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
finally:
s.close()
return False
def make_socket_path():
return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
class ListenerStream(mitogen.core.Stream):
def on_receive(self, broker):
sock, _ = self.receive_side.fp.accept()
try:
self.protocol.on_accept_client(sock)
except:
sock.close()
raise
class Listener(mitogen.core.Protocol):
stream_class = ListenerStream
keep_alive = True
@classmethod
def build_stream(cls, router, path=None, backlog=100):
if not path:
path = make_socket_path()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(path) and is_path_dead(path):
LOG.debug('%r: deleting stale %r', cls.__name__, path)
os.unlink(path)
sock.bind(path)
os.chmod(path, int('0600', 8))
sock.listen(backlog)
stream = super(Listener, cls).build_stream(router, path)
stream.accept(sock, sock)
router.broker.start_receive(stream)
return stream
def __repr__(self):
return '%s.%s(%r)' % (
__name__,
self.__class__.__name__,
self.path,
)
def __init__(self, router, path):
self._router = router
self.path = path
def _unlink_socket(self):
try:
os.unlink(self.path)
except OSError:
e = sys.exc_info()[1]
# Prevent a shutdown race with the parent process.
if e.args[0] != errno.ENOENT:
raise
def on_shutdown(self, broker):
broker.stop_receive(self.stream)
self._unlink_socket()
self.stream.receive_side.close()
def on_accept_client(self, sock):
sock.setblocking(True)
try:
pid, = struct.unpack('>L', sock.recv(4))
except (struct.error, socket.error):
LOG.error('listener: failed to read remote identity: %s',
sys.exc_info()[1])
return
context_id = self._router.id_allocator.allocate()
try:
sock.send(struct.pack('>LLL', context_id, mitogen.context_id,
os.getpid()))
except socket.error:
LOG.error('listener: failed to assign identity to PID %d: %s',
pid, sys.exc_info()[1])
return
context = mitogen.parent.Context(self._router, context_id)
stream = mitogen.core.MitogenProtocol.build_stream(
router=self._router,
remote_id=context_id,
auth_id=mitogen.context_id,
)
stream.name = u'unix_client.%d' % (pid,)
stream.accept(sock, sock)
LOG.debug('listener: accepted connection from PID %d: %s',
pid, stream.name)
self._router.register(context, stream)
def _connect(path, broker, sock):
try:
# ENOENT, ECONNREFUSED
sock.connect(path)
# ECONNRESET
sock.send(struct.pack('>L', os.getpid()))
mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12))
except socket.error:
e = sys.exc_info()[1]
ce = ConnectError('could not connect to %s: %s', path, e.args[1])
ce.errno = e.args[0]
raise ce
mitogen.parent_id = remote_id
mitogen.parent_ids = [remote_id]
LOG.debug('client: local ID is %r, remote is %r',
mitogen.context_id, remote_id)
router = mitogen.master.Router(broker=broker)
stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id)
stream.accept(sock, sock)
stream.name = u'unix_listener.%d' % (pid,)
mitogen.core.listen(stream, 'disconnect', _cleanup)
mitogen.core.listen(router.broker, 'shutdown',
lambda: router.disconnect_stream(stream))
context = mitogen.parent.Context(router, remote_id)
router.register(context, stream)
return router, context
def connect(path, broker=None):
LOG.debug('client: connecting to %s', path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
return _connect(path, broker, sock)
except:
sock.close()
raise
def _cleanup():
"""
Reset mitogen.context_id and friends when our connection to the parent is
lost. Per comments on #91, these globals need to move to the Router so
fix-ups like this become unnecessary.
"""
mitogen.context_id = 0
mitogen.parent_id = None
mitogen.parent_ids = []
|
[
"eamanu@eamanu.com"
] |
eamanu@eamanu.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.