blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
abf4e6394243a39be16d129035ad1e8ef4a0593f
|
65f863c73f6a3cd1cfdc70c94ab972a6ddac863f
|
/tests/benchmarks.py
|
c9f2c93bb951251d7afeb8fe8c816562d2b3d156
|
[
"MIT"
] |
permissive
|
marksagal/enaml-native
|
a266a631fd78ba9654412ae811bdf758566a4375
|
1606f895b0718e223cb4296fe3ead8df8018cad9
|
refs/heads/master
| 2021-09-01T04:04:59.020078
| 2017-12-12T20:43:13
| 2017-12-12T20:43:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,329
|
py
|
'''
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Oct 4, 2017
@author: jrm
'''
import re
import os
import sh
import time
import pytest
import json
import requests
from os.path import exists, join
from utils import cd, source_activated
#: Stats
#: Shared mutable session state for the benchmark run: whether the test app
#: has been built yet, and the per-example stats collected by the tests.
config = {
    'app_built':False,
    'stats': {},
}
def prepare_new_app(config):
    """ Init a new app, build it, and launch it on a connected device.

    Creates a test app under ``tmp/test_benchmarks/`` (only once per
    session), builds its python and android parts with the ``enaml-native``
    CLI, then installs and runs it via ``adb``.  If the directory already
    exists the build is skipped and only emulator port forwarding is set up.

    :param config: shared session state dict; ``app_dir`` and ``app_built``
        are updated in place.
    :return: None
    """
    app_dir = 'tmp/test_benchmarks/'
    config['app_dir'] = app_dir

    #: Create an app to test
    if exists(app_dir):
        #: If using an emulator enable forwarding so the app's reload
        #: server on port 8888 is reachable from the host
        if "emulator-" in sh.adb('devices'):
            sh.adb("forward", "tcp:8888", "tcp:8888")
        return  # App already made
    #if config['app_built']:
    #    return # App already made
    #else:
    #    #: Cleanup the old app
    #    cleanup_app(config)
    enamlnative = sh.Command('./enaml-native')
    print(enamlnative('init', 'Benchmarks', 'com.codelv.enamlnative.benchmarks',
                      'tmp/test_benchmarks/'))
    config['app_built'] = True
    with cd(join(app_dir, 'Benchmarks')):
        with source_activated('venv', 'enaml-native') as enamlnative:
            #: Now build python
            print(enamlnative('build-python'))
            #: Build and do a gradle sync, this will NOT include jni and native libs!
            print(enamlnative('build-android'))
            #: Now build python (again) to put them in the correct spot
            print(enamlnative('build-python'))

            #: Now try to run it and see if it crashes
            #: Requires emulator or device
            assert len(sh.adb('devices').strip().split("\n")) > 0, "No device is connected, " \
                                                                   "can't test the build!"
            #: Flush logcat
            sh.adb('logcat', '--clear')

            #: Do a build and run
            print(enamlnative('run-android'))

            #: Wait a few seconds
            #: If using an emulator enable forwarding
            if "emulator-" in sh.adb('devices'):
                sh.adb("forward", "tcp:8888", "tcp:8888")
def cleanup_app(config):
    """ Remove the generated benchmark app directory, if it exists. """
    app_dir = config['app_dir']
    if exists(app_dir):
        sh.rm('-R', app_dir)
@pytest.mark.parametrize("platforms, path", [
    (["android"], 'activity_indicator.enaml'),
    (["android"], 'auto_complete_text_view.enaml'),
    (["android"], 'block.enaml'),
    (["android"], 'button.enaml'),
    (["android"], 'calendar_view.enaml'),
    (["android"], 'card_view.enaml'),
    (["android"], 'clocks.enaml'),
    (["android"], 'checkbox.enaml'),
    (["android"], 'chronometer.enaml'),
    (["android"], 'date_picker.enaml'),
    (["android"], 'dialog.enaml'),
    (["android"], 'drawer_layout.enaml'),
    (["android"], 'edit_text.enaml'),
    (["android"], 'flexbox.enaml'),
    (["android"], 'icon.enaml'),
    (["android"], 'mapview.enaml'),
    (["android"], 'pager_tab_strip.enaml'),
    (["android"], 'picker.enaml'),
    (["android"], 'progress_bar.enaml'),
    (["android"], 'radio_buttons.enaml'),
    (["android"], 'rating_bar.enaml'),
    (["android"], 'seekbar.enaml'),
    (["android"], 'snackbar.enaml'),
    (["android"], 'spacer.enaml'),
    (["android"], 'spinner.enaml'),
    (["android"], 'switch.enaml'),
    (["android"], 'swipe_refresh.enaml'),
    (["android"], 'tabs.enaml'),
    (["android"], 'toast.enaml'),
    (["android"], 'view_pager.enaml'),
    (["android"], 'webview.enaml'),
])
def test_examples_for_real(platforms, path):
    """ Build an actual app and run full-system benchmarks by hot-reloading
    each example into it.

    Pushes the example source to the running app's reload server on
    localhost:8888, waits for it to render, then scrapes timing stats from
    logcat and appends them to ``tmp/stats.json``.

    :param platforms: target platforms for this example (currently android only)
    :param path: example file name relative to the ``examples`` directory
    """
    if 'TRAVIS' in os.environ:
        return  #: Doesn't work on travis
    #: Pretty hackish but whatever — module-level config carries state
    #: between parametrized runs so the app is only built once
    prepare_new_app(config)

    #: Load the example source code
    dir_path = os.path.abspath(os.path.split(os.path.dirname(__file__))[0])
    enaml_file = os.path.join(dir_path, 'examples', os.path.normpath(path))
    with open(enaml_file, 'rb') as f:
        source = f.read()

    #: Trigger a reload of the example in the running app
    r = requests.post("http://localhost:8888/", json={
        "type": "reload",
        "files": {'view.enaml': source},
    }).json()
    assert r['ok'], "Failed to reload {}!".format(enaml_file)

    #: TODO need a way to know when everything is done...
    #: should read the log until it stops
    time.sleep(5)

    #: Scrape timing stats from logcat and record them for this example
    stats = parse_stats(sh.adb('logcat', '-d'))
    config['stats'][enaml_file] = stats

    #: Persist the accumulated stats after every example
    data = json.dumps(config,indent=2)
    with open('tmp/stats.json', 'w') as f:
        f.write(data)
    #: TODO: Now compare it to the baseline
def parse_stats(output):
    """ Parse adb logcat output and return the timing stats.

    Looks for lines tagged ``[Stats]`` of the form
    ``... (<ms> ms) ... (<tasks>) ... (<avg> us ...)`` and extracts the
    totals entry.

    :param output: logcat output; either an ``sh`` command result or a
        plain string (anything ``str()``-convertible).
    :return: dict with a ``'totals'`` entry of time (ms), task count, and
        average task time (us); zeros if no stats line was found.
    """
    # BUG FIX: iterating a plain string yields characters, not lines, so
    # the "[Stats]" filter would never match.  Normalize to text and split
    # into lines explicitly; this also still works for sh's output object.
    lines = [line for line in str(output).splitlines() if "[Stats]" in line]
    stats = {
        'totals': {'time': 0, 'tasks': 0, 'avg': 0}
    }
    for line in lines:
        m = re.search(r'\((\d+) ms\).+\((\d+)\).+\((\d+) us.+\)', line)
        if not m:
            continue
        dt, tasks, avg = map(int, m.groups())
        if 'totals' in line:
            stats['totals'] = {'time': dt, 'tasks': tasks, 'avg': avg}
    return stats
|
[
"frmdstryr@gmail.com"
] |
frmdstryr@gmail.com
|
0c17e9bf063a7efc0d9893b30f9499564e3969f5
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/marathon/__init__.py
|
71f0b031ebeac46c2d890ceb67264bafff348eac
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/marathon/__init__.py
pass
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
ceb44fc8f169f591ebbdf0b4d2e0a1e4b500d9fc
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/chtscan/migrations/0006_auto_20160411_0800.py
|
f59e435e4885549c1b59a6379cce6ccd44b7e5cb
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-11 08:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: moves ``requestid`` from the Vulnerability
    model to Assessment, and adds a ``taskid`` field to Assessment."""

    dependencies = [
        ('chtscan', '0005_auto_20160408_0723'),
    ]

    operations = [
        # Drop the old field from Vulnerability...
        migrations.RemoveField(
            model_name='vulnerability',
            name='requestid',
        ),
        # ...and recreate it on Assessment (nullable, so existing rows pass).
        migrations.AddField(
            model_name='assessment',
            name='requestid',
            field=models.IntegerField(null=True, verbose_name='REQUESTID'),
        ),
        # New task identifier; empty string default for existing rows.
        migrations.AddField(
            model_name='assessment',
            name='taskid',
            field=models.CharField(default='', max_length=50, verbose_name='TASKID'),
        ),
    ]
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
1745f883f6d00b6642ba7d9496082c2fc1e47628
|
a54c6117cf2bb8b33f7a1e1ce92dffa1ffa4fe94
|
/demos/mismip/plot-result.py
|
36106cbdafa2092b5925b31aca55d5b70fbaa846
|
[] |
no_license
|
barionleg/icepack-paper
|
f74d5ea9722f2bc1d51ddbb959297353bf32ecb9
|
3554c618468320c06c25bca46fd2f97c5d1e860c
|
refs/heads/master
| 2023-06-08T01:05:13.074850
| 2021-01-12T00:04:18
| 2021-01-12T00:04:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import os
import argparse
import firedrake
import icepack.plot
# CLI: checkpoint file to read, mesh refinement level, and output image path.
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--level', type=int)
parser.add_argument('--output')
args = parser.parse_args()

# Rectangular domain of 640 km x 80 km (presumably the MISMIP+ geometry,
# per the file path — confirm against the experiment setup).  The coarse
# mesh keeps square-ish cells (nx scales with the aspect ratio), and the
# hierarchy refines it `level` times.
Lx, Ly = 640e3, 80e3
ny = 20
nx = int(Lx/Ly) * ny
coarse_mesh = firedrake.RectangleMesh(nx, ny, Lx, Ly)
mesh_hierarchy = firedrake.MeshHierarchy(coarse_mesh, args.level)
mesh = mesh_hierarchy[args.level]

# Piecewise-linear function spaces: scalar for thickness, vector for velocity.
Q = firedrake.FunctionSpace(mesh, family='CG', degree=1)
V = firedrake.VectorFunctionSpace(mesh, family='CG', degree=1)
h = firedrake.Function(Q)
u = firedrake.Function(V)

# Load the *last* stored timestep of h and u from the checkpoint
# (extension stripped; DumbCheckpoint appends its own).
input_name = os.path.splitext(args.input)[0]
with firedrake.DumbCheckpoint(input_name, mode=firedrake.FILE_READ) as chk:
    timesteps, indices = chk.get_timesteps()
    chk.set_timestep(timesteps[-1], idx=indices[-1])
    chk.load(h, name='h')
    chk.load(u, name='u')

# Two stacked panels sharing axes; tick labels are hidden since the
# domain extent is fixed and the colorbars carry the information.
fig, axes = icepack.plot.subplots(
    nrows=2, sharex=True, sharey=True, figsize=(6.4, 2.8)
)
axes[0].get_xaxis().set_visible(False)
for ax in axes:
    ax.set_xlim(0, 640e3)
    ax.set_ylim(0, 80e3)
    ax.get_yaxis().set_visible(False)

# Top panel: thickness (m); bottom panel: velocity magnitude (m/year).
colors_h = icepack.plot.tripcolor(h, axes=axes[0])
fig.colorbar(colors_h, ax=axes[0], fraction=0.0075, pad=0.04, label='m')
axes[0].set_title('Thickness')
colors_u = icepack.plot.tripcolor(u, axes=axes[1])
fig.colorbar(colors_u, ax=axes[1], fraction=0.0075, pad=0.04, label='m/year')
axes[1].set_title('Velocity')
fig.savefig(args.output, dpi=300, bbox_inches='tight')
|
[
"shapero.daniel@gmail.com"
] |
shapero.daniel@gmail.com
|
9c315f3e10b630f08fda544fd7dabd316ebaed05
|
1f98ccf9ef52d3adab704676480c85fe22c9542d
|
/simpledb/test/TestBlk.py
|
0ebcd6e00f84ea455edce878943380b79c66d26e
|
[] |
no_license
|
61515/simpleDB_Python
|
234c671cbbf57f3e8fc5489ec4c292365085b7a8
|
b6846da4a78369838f5b3c7a704de704e18f7be7
|
refs/heads/master
| 2023-02-22T14:07:52.660633
| 2021-01-24T02:25:40
| 2021-01-24T02:25:40
| 332,343,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from simpledb.file.BlockId import BlockId
if __name__ == '__main__':
    # Smoke-test BlockId's equality/hashing behavior: use a block as a
    # dict key and a list element, then exercise membership and count
    # (both of which rely on __eq__ / __hash__).
    blk = BlockId("file", 1)
    buffers = {blk: 1}
    pins = [blk]
    # pins.remove(blk)
    if blk in pins:
        print(1)
    print(pins.count(blk))
    # print(pins)
    # print(buffers.get(blk))
|
[
"1632039752@qq.com"
] |
1632039752@qq.com
|
9b43fa6a64fe1365c949bf00ef1b7be04e6c5852
|
1cfcfa686489885843b9a142c8ba980ebd5d5ffd
|
/tests/optim/test_weight_average.py
|
a421cf0328fcef62c31051e3eef5aa29b3acfded
|
[
"MIT"
] |
permissive
|
qyz-thu/gnn_vae
|
9d2d8e984a96d0f22f74362889fdd1c0613df46d
|
278aeb7038216812a94c7f7acd2ca425696f986b
|
refs/heads/master
| 2023-02-05T20:07:24.097968
| 2020-12-18T06:34:20
| 2020-12-18T06:34:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,265
|
py
|
import copy
import numpy as np
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tests.helper import *
def stepwise_average_check(ctx, factory, update_fn, get_fn):
    """Drive a weight averager step by step and verify it against a
    reference implementation.

    :param ctx: the calling test case (kept for context; not used directly)
    :param factory: callable building the averager from a list of weights,
        accepting an ``enabled`` keyword
    :param update_fn: reference update rule ``(old, new, num_updates) -> new_avg``
    :param get_fn: reference readout ``(avg, num_updates) -> value``
        (e.g. applies zero-debias correction)
    """
    def clone_state(val):
        # Recursively deep-copy tensors / arrays / containers so that
        # later in-place updates cannot alias the saved snapshots.
        if isinstance(val, dict):
            return {k: clone_state(v) for k, v in val.items()}
        elif isinstance(val, list):
            return [clone_state(v) for v in val]
        elif isinstance(val, (T.Tensor, T.Variable)):
            return T.copy(val)
        elif isinstance(val, np.ndarray):
            return np.copy(val)
        else:
            return copy.copy(val)

    # Fixed seed so the scan is reproducible across the three passes below.
    T.random.seed(1234)
    weights = [
        T.variable(shape=[4], initializer=tk.init.zeros, requires_grad=False),
        T.variable(shape=[3], initializer=tk.init.zeros, requires_grad=False),
    ]
    answers = [clone_state(w) for w in weights]
    inputs_1 = T.random.randn([7, 4])
    inputs_2 = T.random.randn([7, 3])

    # do a scan, snapshotting the averager state before every update
    avg = factory(weights)
    the_states = []
    the_outputs = []
    num_updates = 0
    for batch_vals in zip(inputs_1, inputs_2):
        for weight, val in zip(weights, batch_vals):
            T.assign(weight, val)
        the_states.append(clone_state(avg.get_state_dict()))
        avg.update()
        with avg.temporarily_commit():
            # Inside the context the weights hold the averaged values;
            # compare them to the reference computed with update_fn/get_fn.
            the_outputs.extend(clone_state(w) for w in weights)
            for i, val in enumerate(batch_vals):
                answers[i] = update_fn(answers[i], val, num_updates)
            num_updates += 1
            for weight, ans in zip(weights, answers):
                assert_allclose(weight, get_fn(ans, num_updates), rtol=1e-4, atol=1e-6)
        # After the context exits the original weight values must be restored.
        for weight, val in zip(weights, batch_vals):
            assert_allclose(weight, val, rtol=1e-4, atol=1e-6)

    # test enabled = False: updates are no-ops, so the averages stay zero,
    # but commit() must still write them into the weights
    avg = factory(weights, enabled=False)
    for x1, x2, state, output in zip(inputs_1, inputs_2, the_states, the_outputs):
        batch_vals = [x1, x2]
        for weight, val in zip(weights, batch_vals):
            T.assign(weight, val)
        avg.update()
        avg.commit()  # should still affect weights even if enabled is False
    for avg_val in avg.get_state_dict()['averages']:
        assert_allclose(avg_val, T.zeros_like(avg_val), rtol=1e-4, atol=1e-6)
    for weight in weights:
        assert_allclose(weight, T.zeros_like(weight), rtol=1e-4, atol=1e-6)

    # do another scan, restoring the backup states saved in the first pass
    avg = factory(weights, enabled=False)
    avg.set_enabled(True)
    for x1, x2, state, output in zip(inputs_1, inputs_2, the_states, the_outputs):
        batch_vals = [x1, x2]
        for weight, val in zip(weights, batch_vals):
            T.assign(weight, val)
        avg.set_state_dict(state)
        avg.update()
        with avg.temporarily_commit():
            the_outputs.extend(clone_state(w) for w in weights)
        for weight, val in zip(weights, batch_vals):
            assert_allclose(weight, val, rtol=1e-4, atol=1e-6)

    # try set bad state: a truncated 'averages' entry must be rejected
    avg = factory(weights)
    state = dict(avg.get_state_dict())
    state['averages'] = []
    with pytest.raises(ValueError, match='Bad state'):
        avg.set_state_dict(state)
def full_scan_average_check(ctx, factory, input_x, expected):
    """Scan every row of ``input_x`` through a fresh averager and check
    that after ``commit()`` the weight equals ``expected``.

    :param ctx: the calling test case (not used by this helper)
    :param factory: callable building the averager from a list of weights
    :param input_x: 2-d tensor; each row is assigned to the weight in turn
    :param expected: expected final averaged value of the weight
    """
    # The weight has the shape of a single row of input_x.
    weight = T.variable(T.shape(input_x)[1:], initializer=tk.init.zeros,
                        requires_grad=False)
    avg = factory([weight])
    for x in input_x:
        T.assign(weight, x)
        avg.update()
    avg.commit()
    assert_allclose(weight, expected, atol=1e-4, rtol=1e-6)
class WeightAveragingTestCase(TestCase):
    """Tests for the weight-averaging optimizers in ``tk.optim``."""

    def test_MeanAveraging(self):
        # step-wise check against the running-mean reference rule
        factory = tk.optim.WeightMeanAveraging

        def update_fn(old_val, new_val, num_updates):
            # Incremental arithmetic mean over num_updates+1 samples.
            return (old_val * num_updates + new_val) / (num_updates + 1.)

        def get_fn(val, num_updates):
            # Plain mean needs no debias correction.
            return val

        stepwise_average_check(self, factory, update_fn, get_fn)

        # overall check: the committed weight is the column mean of the scan
        input_x = T.random.randn([7, 4])
        full_scan_average_check(
            self, factory, input_x, T.reduce_mean(input_x, axis=[0]))

    def test_MovingAveraging(self):
        # step-wise check over decay / zero_debias combinations
        for decay in (0.9, 0.99):
            for zero_debias in (True, False):
                # NOTE: the lambda and nested defs close over the loop
                # variables late, but each is consumed immediately by the
                # stepwise_average_check call below, so this is safe here.
                factory = lambda weights, **kwargs: tk.optim.WeightMovingAveraging(
                    weights, decay=decay, zero_debias=zero_debias, **kwargs)

                def update_fn(old_val, new_val, num_updates):
                    # Exponential moving average reference rule.
                    return decay * old_val + (1. - decay) * new_val

                if zero_debias:
                    def get_fn(val, num_updates):
                        # Bias correction: divide by (1 - decay^t), as in Adam.
                        if num_updates > 0:
                            return val / (1. - decay ** num_updates)
                        else:
                            return val
                else:
                    def get_fn(val, num_updates):
                        return val

                stepwise_average_check(self, factory, update_fn, get_fn)

        # overall check: with constant rows, the debiased EMA equals the row
        input_x = T.expand(T.random.randn([4]), [7, 4])
        factory = lambda weights, **kwargs: tk.optim.WeightMovingAveraging(
            weights, decay=0.9, zero_debias=True, **kwargs)
        full_scan_average_check(self, factory, input_x, input_x[0])
|
[
"haowen.xu@outlook.com"
] |
haowen.xu@outlook.com
|
908ab077984dc561d27e833fbb86e6d280225929
|
c34308d9e283d3689baeade246b69dad13eea0c1
|
/homework/week5/study201736.py
|
015a5575dfb0de380ed5acf9ed7422c0ab64d7a2
|
[] |
no_license
|
michaelChen07/studyPython
|
d19fe5762cfbccdff17248d7d5574939296d3954
|
11a2d9dd0b730cad464393deaf733b4a0903401f
|
refs/heads/master
| 2021-01-19T00:20:27.347088
| 2017-05-13T08:43:44
| 2017-05-13T08:43:44
| 73,004,133
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
#coding:utf-8
def findMaxNum(x, y, z):
    """Return the largest of the three values x, y and z."""
    # The hand-rolled comparison chain is replaced by the builtin max(),
    # which handles every ordering (including ties) identically.
    return max(x, y, z)
if __name__=="__main__":
    # NOTE(review): Python 2 only — uses raw_input and print statements.
    # Prompt (Chinese): "Enter 3 numbers, separated by commas:".
    numList = raw_input(u"请输入3个数字,以逗号分隔:").split(",")
    print numList
    # Convert the first three comma-separated fields to ints; raises
    # ValueError / IndexError on malformed input (no validation here).
    maxNum = findMaxNum(int(numList[0]),int(numList[1]),int(numList[2]))
    print maxNum
|
[
"286522215@qq.com"
] |
286522215@qq.com
|
0b66b2d868d6ec5a557304ec44f1c3585252aa9c
|
9ea18a9a52f6fe9077a6073dac72d19e21b2b5d6
|
/setup.py
|
d2deac46ae69778452128ea1d6305c8bf86fe655
|
[] |
no_license
|
barneygale/twisted-enttec
|
2e826e09159b82d3bc1fc281f8973b43341746cb
|
dec065475ab08ad4cc7a1fef301c904f2c033953
|
refs/heads/master
| 2020-04-04T14:44:32.010652
| 2018-11-03T18:24:59
| 2018-11-03T18:24:59
| 156,010,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from setuptools import setup
# Packaging metadata for the single-module twisted-enttec distribution.
setup(
    name='twisted-enttec',
    version='0.1',
    author='Barney Gale',
    author_email='barney.gale@gmail.com',
    url='https://github.com/barneygale/twisted-enttec',
    license='MIT',
    description='Python/Twisted support for the Enttec DMX USB Pro',
    # Long description is read from the README at build time.
    long_description=open('README.rst').read(),
    py_modules=['twisted_enttec'],
    install_requires=[
        'twisted',
        'pyserial'
    ],
)
|
[
"barney.gale@gmail.com"
] |
barney.gale@gmail.com
|
5fb9500044bc1c606e8a9670801610d19b190611
|
c56807b801c887b8707611100efa3a0d7befea50
|
/DeepHumanPrediction/Code/DeepHumanPrediction/Motion_Prediction_encoding_decoding/bvh_reader.py
|
03027b871993c575cf4406b54021653e8f9eaf5e
|
[] |
no_license
|
wy-luke/DeepHumanPrediction
|
9e3777f6d66d4ab34bf16a935ded19422385f724
|
2e131b4b365e6f565c7ed8075a61f2b3c7b2d53a
|
refs/heads/master
| 2021-08-30T19:26:57.537964
| 2017-12-19T05:47:36
| 2017-12-19T05:47:36
| 266,292,568
| 1
| 0
| null | 2020-05-23T08:13:36
| 2020-05-23T08:13:36
| null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
# -*-coding: utf-8-*-
import numpy as np
import glob
from tqdm import *
import os
import time
def Motion_Data_Preprocessing(time_step = 100 , seed_timestep=20 , batch_Frame=5):
    """Load BVH motion-capture files and shape them into training tensors.

    Reads every ``.bvh`` file in the hard-coded data directory, extracts the
    MOTION frame rows (dropping the 3 root-position columns), pads or trims
    each clip to ``time_step`` frames, and reshapes the result into
    (seed, prediction) training arrays grouped by ``batch_Frame`` frames.

    :param time_step: total number of frames each clip is padded/trimmed to
    :param seed_timestep: leading frames used as the seed; the remainder is
        the prediction target
    :param batch_Frame: number of consecutive frames merged per step
    :return: tuple of (Normalization_factor, train_motion,
        train_label_motion, seed steps, prediction steps, feature count,
        list of source file names)
    """
    np.set_printoptions(threshold=1000000)
    files = glob.glob("Data/ACCAD/Transform_Male1_bvh/Short_data/*.bvh")
    time_step = time_step
    seed_timestep = seed_timestep
    batch_Frame = batch_Frame
    # First 3 channels per frame are root X/Y/Z position; they are skipped.
    xyz_position=3
    # complexity=False floors the rotation values to integers.
    complexity = False
    Data = []
    train_label_motion=[]
    file_directory=[]
    '''data normalization'''
    Normalization_factor=1
    dtype="int"
    #Extract only file names, not path names.
    for i in range(len(files)):
        file_directory.append(os.path.basename(files[i]))
    for file_name, i in tqdm(zip(files, range(len(files)))):
        # time.sleep(0.01)
        Raw = []
        Mdata = []
        MOTION = False
        '''1.basic - Motion data preprocessing'''
        print('Processed Data : {}'.format(i + 1))
        try:
            with open(file_name, 'r') as f:
                # Collect every line from the 'MOTION' marker onward;
                # the flag latches once the marker line is seen.
                while True:
                    line = f.readline()
                    if line == 'MOTION' + "\n" or MOTION:
                        MOTION = True
                        Raw.append(line)
                    if not line:
                        break
            # Raw[0:3] are the MOTION header lines (frame count / frame
            # time); actual frame data starts at Raw[3].
            for raw in Raw[3:]:
                # Exclude the Xposition/Yposition/Zposition columns.
                if dtype=="int":
                    temp=raw.split()[xyz_position:]
                    if complexity :
                        temp = [np.float32(i) * Normalization_factor for i in temp]
                    else : # complexity = False
                        temp=[np.floor(np.float32(i))*Normalization_factor for i in temp]
                else:# dtype="str"
                    temp=raw.split()[xyz_position:]
                Mdata.append(temp)
            #Remove the blank line..
            Mdata.pop()
            '''2. Motion data preprocessing - easy for deeplearning'''
            #data padding: short clips repeat their last frame up to
            #time_step; long clips are truncated.
            if len(Mdata) < time_step:
                frame = np.zeros(shape=(time_step - len(Mdata), len(Mdata[0])))
                for i in range(time_step - len(Mdata)):
                    frame[i] = Mdata[-1]
                Mdata = np.concatenate((Mdata, frame), axis=0)
            else:
                Mdata = Mdata[:time_step]
            Data.append(Mdata)
        except Exception as e:
            raise e
    '''3.final - Motion data preprocessing'''
    # Labels are everything after the seed frames of each clip.
    for i in range(len(files)):
        train_label_motion.append(Data[i][seed_timestep:])
    print("train_motion shape = {}".format(np.shape(Data)))
    print("train_label_motion shape = {}".format(np.shape(train_label_motion)))
    # Group batch_Frame consecutive frames per step for the inputs, and
    # flatten the prediction target per file.
    train_motion = np.reshape(Data,(len(files),int(time_step/batch_Frame),len(Data[0][0])*batch_Frame))
    train_label_motion = np.reshape(train_label_motion,(len(files),int(time_step-seed_timestep)*len(Data[0][0])))
    print("-------------------Transform data shape--------------------")
    print("transform_motion shape = {}".format(np.shape(train_motion)))
    print("transform_label_motion shape = {}".format(np.shape(train_label_motion)))
    return Normalization_factor , train_motion , train_label_motion , int(seed_timestep/batch_Frame) , int((time_step-seed_timestep)/batch_Frame) , len(train_motion[0][0]) , file_directory
if __name__ == "__main__":
    # Manual smoke test: run the preprocessing pipeline and report the
    # derived seed/prediction step counts and feature width.
    print('Motion_Data_Preprocessing_Starting In Main')
    Normalization_factor, train_motion, train_label_motion ,seed_timestep , pre_timestep , column , file_directory = Motion_Data_Preprocessing(time_step = 140, seed_timestep=10 , batch_Frame=5)
    print("new seed_timestep : {}".format(seed_timestep))
    print("new prediction_timestep : {}".format(pre_timestep))
    print("new Motion_rotation_data : {}".format(column))
else:
    print("Motion_Data_Preprocessing_Imported")
|
[
"medical18@naver.com"
] |
medical18@naver.com
|
6b55090216aeb866ba417c914c071c4a2a7a7054
|
257bd63361aa846ffdacdc15edaecf84c6364e78
|
/psou2/pyanal2_Tensorflow/pack1/tensor13_linear3.py
|
9774a149e61814165285e2e138c00a80426e6f28
|
[] |
no_license
|
gom4851/hcjeon
|
86dcfd05ce47a13d066f13fe187d6a63142fb9fe
|
59a00ca9499f30e50127bb16eb510553e88ace43
|
refs/heads/master
| 2020-06-04T23:16:08.632278
| 2019-01-15T09:54:08
| 2019-01-15T09:54:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
'''
Created on 2018. 12. 13.
시험성적 총점 예측. 독립변수 복수
'''
import tensorflow as tf
#: Training data: three feature columns (exam scores) and the target totals.
x1_data = [73., 93., 89., 96., 73.]
x2_data = [80., 88., 91., 98., 66.]
x3_data = [75., 93., 90., 100., 70.]
y_data = [152., 185., 180., 196., 142.]

#: Placeholders for the three features and the label.
x1 = tf.placeholder(tf.float32)
x2 = tf.placeholder(tf.float32)
x3 = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)

#: One weight per feature plus a bias, randomly initialized.
w1 = tf.Variable(tf.random_normal([1]))
w2 = tf.Variable(tf.random_normal([1]))
w3 = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))

#: Linear hypothesis.
h = x1 * w1 + x2 * w2 + x3 * w3 + b

# BUG FIX: the loss must compare against the `y` placeholder (supplied via
# feed_dict below) rather than the Python list `y_data`; previously the
# fed value of `y` was silently ignored.
cost = tf.reduce_mean(tf.square(h - y))
optimizer = tf.train.GradientDescentOptimizer(0.00001)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for s in range(5000):
    cost_v, h_v, _ = sess.run([cost, h, train], feed_dict={x1: x1_data, x2: x2_data, x3: x3_data, y: y_data})
    if s % 500 == 0:
        print(s, 'cost : ', cost_v, 'pred : ', h_v)
|
[
"wer104@naver.com"
] |
wer104@naver.com
|
efbec588a9aeeb513cd86ea9ad278914f7c0bd82
|
542f898adea1b36d627d4bf437731022f242d2dd
|
/detectron2/evaluation/fast_eval_api.py
|
2eb202bd5efa3ec3d366027b1debffc269ae8b17
|
[
"Apache-2.0"
] |
permissive
|
facebookresearch/detectron2
|
24bf508e374a98a5e5d1bd4cc96556d5914215f4
|
80307d2d5e06f06a8a677cc2653f23a4c56402ac
|
refs/heads/main
| 2023-08-30T17:00:01.293772
| 2023-08-25T22:10:24
| 2023-08-25T22:10:24
| 206,660,580
| 27,469
| 8,047
|
Apache-2.0
| 2023-09-13T09:25:57
| 2019-09-05T21:30:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,078
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import time
from pycocotools.cocoeval import COCOeval
from detectron2 import _C
logger = logging.getLogger(__name__)
class COCOeval_opt(COCOeval):
    """
    This is a slightly modified version of the original COCO API, where the functions evaluateImg()
    and accumulate() are implemented in C++ to speedup evaluation
    """

    def evaluate(self):
        """
        Run per image evaluation on given images and store results in self.evalImgs_cpp, a
        datastructure that isn't readable from Python but is used by a c++ implementation of
        accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
        self.evalImgs because this datastructure is a computational bottleneck.
        :return: None
        """
        tic = time.time()

        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
        logger.info("Evaluate annotation type *{}*".format(p.iouType))
        # Deduplicate ids so each (image, category) pair is evaluated once.
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p

        self._prepare()  # bottleneck

        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]

        if p.iouType == "segm" or p.iouType == "bbox":
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
        }  # bottleneck

        maxDet = p.maxDets[-1]

        # <<<< Beginning of code differences with original COCO API
        def convert_instances_to_cpp(instances, is_det=False):
            # Convert annotations for a list of instances in an image to a format that's fast
            # to access in C++
            instances_cpp = []
            for instance in instances:
                instance_cpp = _C.InstanceAnnotation(
                    int(instance["id"]),
                    instance["score"] if is_det else instance.get("score", 0.0),
                    instance["area"],
                    bool(instance.get("iscrowd", 0)),
                    bool(instance.get("ignore", 0)),
                )
                instances_cpp.append(instance_cpp)
            return instances_cpp

        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
        # Layout: outer list per image, inner list per category.
        ground_truth_instances = [
            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        detected_instances = [
            [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]

        if not p.useCats:
            # For each image, flatten per-category lists into a single list
            ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
            detected_instances = [[[o for c in i for o in c]] for i in detected_instances]

        # Call C++ implementation of self.evaluateImgs()
        self._evalImgs_cpp = _C.COCOevalEvaluateImages(
            p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
        )
        self._evalImgs = None
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
        # >>>> End of code differences with original COCO API

    def accumulate(self):
        """
        Accumulate per image evaluation results and store the result in self.eval. Does not
        support changing parameter settings from those used by self.evaluate()
        """
        logger.info("Accumulating evaluation results...")
        tic = time.time()
        assert hasattr(
            self, "_evalImgs_cpp"
        ), "evaluate() must be called before accmulate() is called."

        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)

        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
        # (the C++ side returns flat arrays; reshape using the stored counts,
        # dropping the recall-threshold axis for the recall tensor)
        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
            self.eval["counts"][:1] + self.eval["counts"][2:]
        )

        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
        # num_area_ranges X num_max_detections
        self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
        toc = time.time()
        logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
958c86a3c3755e7c41208db86b0cfacf371bd9fc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_028/ch40_2020_05_04_14_38_41_592304.py
|
bb131b1a8c53321e179a9a8c32bbde7f8aaea50d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def soma_valores(lista):
    """Return the sum of all values in *lista* (0 for an empty list)."""
    # The manual index/while accumulation is replaced by the builtin sum(),
    # which handles the empty list and single-element cases identically.
    return sum(lista)
print(soma_valores(lista))
|
[
"you@example.com"
] |
you@example.com
|
9cef99f3e88b40bf782aea1a88cb3f12a81a1997
|
3e713a67f370d1cc1ba0882159a03b673bd22f9a
|
/PYTHON/[hackerrank]- Power - Mod Power.py
|
536c0081d1c4363908466e17fb5e5605f36cc315
|
[] |
no_license
|
s-abhishek2399/competitive-progamming--PYTHON
|
739797ffea0b92cc2781559e7d4eed1d274678a6
|
29f9e63cfc05c01fa605c14fb8a3a55920296d43
|
refs/heads/master
| 2023-03-08T02:40:00.962109
| 2021-02-16T15:07:52
| 2021-02-16T15:07:52
| 328,732,345
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Reads base, exponent and modulus — one integer per line.
n1=int(input())
n2=int(input())
n3=int(input())
# pow(n1, n2) is n1**n2; the 3-argument form computes (n1**n2) % n3
# efficiently via modular exponentiation.
print(pow(n1,n2))
print(pow(n1,n2,n3))
|
[
"s.abhishek2399@gmail.com"
] |
s.abhishek2399@gmail.com
|
a9247a4ab26d1108aacf1f62fa5a92520e837613
|
eb937d6c7e10b451390007868df8de4912b1d098
|
/AutoInerface_project/Day11_Pytest/plugins/test_plugin_06.py
|
5dcecf8909521ba54473980deb165062b3000fdf
|
[] |
no_license
|
chenbaoshun/AutomationTesting
|
01bbc3dc84c5ce26a75909a60bb304f7a06253b5
|
98882c3599d0eb9ac84e74193c584ba7b78ecfab
|
refs/heads/master
| 2023-03-14T01:44:07.163998
| 2021-02-24T15:35:01
| 2021-02-24T15:35:01
| 290,236,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @File : test_plugin_06.py
# @Author : Baoshun.Chin
# @Time : 2021-01-07 22:04
# @Site :
# @version : V1.0
import pytest
@pytest.mark.dependency()
def test_a():
    # Intentionally failing: anything depending on test_a should be skipped.
    assert False
@pytest.mark.dependency()
def test_b():
    # Intentionally passing: dependents of test_b should run normally.
    assert True
@pytest.mark.dependency(depends=['test_a'])
def test_c():
    # Depends on the failing test_a, so pytest-dependency skips this test.
    pass
@pytest.mark.dependency(depends=['test_b'])
def test_d():
    # Depends on the passing test_b, so this test runs (and trivially passes).
    pass
|
[
"baoshunchin@aliyun.com"
] |
baoshunchin@aliyun.com
|
7b181876bac13acb503e5f2cfa17bd79be20980c
|
36978086cf5f34e16ceac7c2649b49ccb4c5ac90
|
/config/munin/mongo_indexsize
|
15898bb14f1297f7e488deac7f1647d64ca39769
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
aragilar/NewsBlur
|
04e754093cd52bc2d9957ea767747d6d604dfbba
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
refs/heads/master
| 2021-08-28T17:39:50.734396
| 2013-06-06T01:52:20
| 2013-06-06T01:52:37
| 10,520,281
| 0
| 0
|
MIT
| 2021-08-13T05:35:33
| 2013-06-06T06:26:24
|
Objective-C
|
UTF-8
|
Python
| false
| false
| 3,371
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set sts=4 sw=4 encoding=utf-8
# Copyright (c) 2010, Rene Jochum
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Rene Jochum nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pymongo import Connection
import os
settings_host = os.environ.get("host", "127.0.0.1")
settings_port = 27017
settings_db = 'newsblur'
def getCollstats():
global settings_host, settings_port, settings_db, settings_user, settings_password
print locals(), settings_host, settings_port
if settings_user and settings_password:
settings_host = "%s:%s@%s" % (settings_user, settings_password, settings_host)
con = Connection(settings_host, int(settings_port), slave_okay=True)
db = con[settings_db]
for coll in db.collection_names():
if coll.startswith('system.'):
continue
stats = db.command("collstats", coll)
yield ("%s_size" % coll.replace('.', '_'), long(stats['totalIndexSize']),)
con.disconnect()
def doData():
for coll, stats in getCollstats():
print "%s.value %s" % (coll, stats)
def doConfig():
print "graph_title MongoDB collection index sizes"
print "graph_args --base 1024 -l 0"
print "graph_vlabel Kb"
print "graph_category MongoDB"
print "graph_total total"
for k,v in getCollstats():
print "%s.label %s" % (k, k)
print "%s.min 0" % k
print "%s.draw LINE1" % k
if __name__ == "__main__":
from sys import argv
from os import environ
# Could be done by a for loop
# but i think if's are faster
if 'HOST' in environ:
settings_host = environ['HOST']
if 'PORT' in environ:
settings_port = environ['PORT']
if 'DB' in environ:
settings_db = environ['DB']
if 'user' in environ:
settings_user = environ['user']
if 'password' in environ:
settings_password = environ['password']
print locals()
if len(argv) > 1 and argv[1] == "config":
doConfig()
else:
doData()
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
|
35e0c88ae6ffeb513481031097e2e553bf772f79
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/application_gateway_probe_py3.py
|
60baab8984853cbcc41461da947ca52283319939
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,676
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: The protocol used for the probe. Possible values are
'Http' and 'Https'. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
:param timeout: the probe timeout in seconds. Probe marked as failed if
valid response is not received with this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
UnhealthyThreshold. Acceptable values are from 1 second to 20.
:type unhealthy_threshold: int
:param pick_host_name_from_backend_http_settings: Whether the host header
should be picked from the backend http settings. Default value is false.
:type pick_host_name_from_backend_http_settings: bool
:param min_servers: Minimum number of servers that are always marked
healthy. Default value is 0.
:type min_servers: int
:param match: Criterion for classifying a healthy probe response.
:type match:
~azure.mgmt.network.v2018_11_01.models.ApplicationGatewayProbeHealthResponseMatch
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the probe that is unique within an Application
Gateway.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
'min_servers': {'key': 'properties.minServers', 'type': 'int'},
'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, protocol=None, host: str=None, path: str=None, interval: int=None, timeout: int=None, unhealthy_threshold: int=None, pick_host_name_from_backend_http_settings: bool=None, min_servers: int=None, match=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayProbe, self).__init__(id=id, **kwargs)
self.protocol = protocol
self.host = host
self.path = path
self.interval = interval
self.timeout = timeout
self.unhealthy_threshold = unhealthy_threshold
self.pick_host_name_from_backend_http_settings = pick_host_name_from_backend_http_settings
self.min_servers = min_servers
self.match = match
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
e6eba346ebf55540f84c30d63ab8d3f50f3fdb65
|
96e0dd08563b1f579992c14207d103ee80222b1b
|
/Algorithm-master/find_max_crossing_subarray_brute_force.py
|
f9d6443d36f700b3418debb05985ef52be6aee92
|
[] |
no_license
|
tonygodspeed/pytest
|
4030e21f3206e3c5cb58aac870e3a1a57cd6943d
|
2e87b91c148ff6966096bb8b197c0a84f5a1e7e2
|
refs/heads/master
| 2020-04-02T13:14:20.811887
| 2018-10-24T09:00:57
| 2018-10-24T09:00:57
| 154,472,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
def find_max_subarray(A, low, high):
left = 0
right = 0
sum = float("-inf")
for i in xrange(low, high):
current_sum = 0
for j in xrange(i, high):
current_sum += A[j]
if sum < current_sum:
sum = current_sum
left = i
right = j
return (left, right, sum)
A = [0, -1, 3, -4, 6, -1, 4]
print find_max_subarray(A, 0, 7)
|
[
"412291198@qq.com"
] |
412291198@qq.com
|
b011d6a3cdd94bed52a26b5f0ccaf26cf8e75541
|
9e271a3bc1bf388d82bc5a01d275d910c00f315c
|
/event/templatetags/event_tags.py
|
78506192f1ce6a67fb4e2104e5a7dc4ca144e7cb
|
[
"MIT"
] |
permissive
|
kthaisociety/website
|
36f11b704f9c38414e0999b55db4513444b53f9e
|
4c4efb8a93218ae128d203b15c4340f90fe9f6a6
|
refs/heads/master
| 2023-08-09T19:44:16.968356
| 2023-05-20T20:33:05
| 2023-05-20T20:33:05
| 218,593,606
| 2
| 3
|
MIT
| 2023-05-20T20:33:06
| 2019-10-30T18:17:10
|
Python
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django import template
from django.utils import timezone
from event.consts import SCHEDULE_EMOJIS
from event.enums import ScheduleType
register = template.Library()
ctz = timezone.get_current_timezone()
@register.filter
def display_clock(time: timezone.datetime):
time = time.astimezone(ctz)
base = int("1F54F", 16)
hour = time.hour % 12
if hour == 0:
hour = 12
return chr(base + hour)
@register.filter
def one_year(time: timezone.datetime):
return time.replace(year=time.year - 1)
@register.filter
def schedule_emoji(type: ScheduleType):
return SCHEDULE_EMOJIS.get(type, "")
|
[
"oriol.closa@est.fib.upc.edu"
] |
oriol.closa@est.fib.upc.edu
|
32447f30f4cc21c660188aee5e87920ec6663c17
|
59a688e68421794af64bfe69a74f64b2c80cd79d
|
/utils_all.py
|
932ceb655f16948c0aec5628b7ec1358379f78e7
|
[] |
no_license
|
hearues-zueke-github/python_programs
|
f23469b306e057512aadecad0ca0a02705667a15
|
d24f04ca143aa93f172210a4b9dfdd9bf1b79a15
|
refs/heads/master
| 2023-07-26T00:36:56.512635
| 2023-07-17T12:35:16
| 2023-07-17T12:35:16
| 117,093,746
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
import datetime
import string
from time import time
import numpy as np
from PIL import Image, ImageTk
from tkinter import Tk, Label, BOTH
from tkinter.ttk import Frame, Style
all_symbols_16 = np.array(list("0123456789ABCDEF"))
def get_random_str_base_16(n):
l = np.random.randint(0, 16, (n, ))
return "".join(all_symbols_16[l])
all_symbols_64 = np.array(list(string.ascii_lowercase+string.ascii_uppercase+string.digits+"-_"))
def get_random_str_base_64(n):
l = np.random.randint(0, 64, (n, ))
return "".join(all_symbols_64[l])
def get_date_time_str_full():
dt = datetime.datetime.now()
dt_params = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
return "Y{:04}_m{:02}_d{:02}_H{:02}_M{:02}_S{:02}_f{:06}".format(*dt_params)
def get_date_time_str_full_short():
dt = datetime.datetime.now()
dt_params = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
return "{:04}_{:02}_{:02}_{:02}_{:02}_{:02}_{:06}".format(*dt_params)
def time_measure(f, args):
start_time = time()
ret = f(*args)
end_time = time()
diff_time = end_time-start_time
return ret, diff_time
class ShowImg(Frame, object):
def __init__(self, img):
parent = Tk()
Frame.__init__(self, parent)
self.pack(fill=BOTH, expand=1)
label1 = Label(self)
label1.photo= ImageTk.PhotoImage(img)
label1.config(image=label1.photo)
label1.pack(fill=BOTH, expand=1)
parent.mainloop()
def int_sqrt(n):
x_prev = n
x_now = (n//1+1)//2
while x_now<x_prev:
t = (n//x_now+x_now)//2
x_prev = x_now
x_now = t
return x_now
def get_current_datetime_str():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
|
[
"hziko314@gmail.com"
] |
hziko314@gmail.com
|
355f9aba0beb6cce657ff0a0adfe78d8f056baeb
|
07622a0fb38e843ab0eef4f69bb8fb25d107c06d
|
/pretrained_mol_sim/Theano-master/theano/tensor/tests/test_type_other.py
|
436dcdb243a457abb91f7885ffe9150030f48544
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
andreeadeac22/graph_coattention
|
fa59d77252625e4bee1cb9670e4a0fd0fec98135
|
23781fedaa942ca5614054f965cb7b6543e533fa
|
refs/heads/master
| 2023-08-08T01:51:51.368457
| 2020-02-19T04:56:59
| 2020-02-19T04:56:59
| 207,414,336
| 15
| 4
|
MIT
| 2023-07-22T15:47:39
| 2019-09-09T22:13:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
""" This file don't test everything. It only test one past crash error."""
from __future__ import absolute_import, print_function, division
import theano
from theano.gof import Constant
from theano.tensor.type_other import MakeSlice, make_slice, NoneTypeT, NoneConst
def test_make_slice_merge():
# In the past, this was crahsing during compilation.
i = theano.tensor.iscalar()
s1 = make_slice(0, i)
s2 = make_slice(0, i)
f = theano.function([i], [s1, s2])
nodes = f.maker.fgraph.apply_nodes
assert len([n for n in nodes if isinstance(n.op, MakeSlice)]) == 1
theano.printing.debugprint(f)
def test_none_Constant():
""" Tests equals
We had an error in the past with unpickling
"""
o1 = Constant(NoneTypeT(), None, name='NoneConst')
o2 = Constant(NoneTypeT(), None, name='NoneConst')
assert o1.equals(o2)
assert NoneConst.equals(o1)
assert o1.equals(NoneConst)
assert NoneConst.equals(o2)
assert o2.equals(NoneConst)
# This trigger equals that returned the wrong answer in the past.
import six.moves.cPickle as pickle
import theano
from theano import tensor
x = tensor.vector('x')
y = tensor.argmax(x)
kwargs = {}
# We can't pickle DebugMode
if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
kwargs = {'mode': 'FAST_RUN'}
f = theano.function([x], [y], **kwargs)
pickle.loads(pickle.dumps(f))
|
[
"andreeadeac22@gmail.com"
] |
andreeadeac22@gmail.com
|
d6172bb361027fd1a364a8217aeb5faa9291df2f
|
2584c50ff47765db9df565a2254f762a15821fe0
|
/relationshipmvt/app/forms.py
|
77fae4641a90bdd6dc4ef45ae365ba6399268ecc
|
[] |
no_license
|
vipuldhandre/Django
|
f75cb135761e54eadec57c59c052f676eae1469e
|
51db8a59e068deb855e39c8bcc79e819b135f7d8
|
refs/heads/master
| 2020-10-01T19:50:11.317032
| 2020-01-13T17:00:19
| 2020-01-13T17:00:19
| 227,611,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.forms import ModelForm
from app.models import (Company,Languages,Programmer)
class ProgrammerForm(ModelForm):
class Meta:
model = Programmer
fields = '__all__' # or ['pname','company','languages']
|
[
"dhandrevips@gmail.com"
] |
dhandrevips@gmail.com
|
0e493ff5100b18ede19dccc8f1f910fcdfea6413
|
75b289e20c24c07b64a89935f3f671d19b15d387
|
/0x02-python-import_modules/100-my_calculator.py
|
cb22700f9ceae5482fd430d778134408e6c6c0e4
|
[] |
no_license
|
luroto/holbertonschool-higher_level_programming
|
33c5a2c3b323e89391b9e110da846876085f3b8e
|
63efcc1f91207dee9fc095884551333b91674587
|
refs/heads/master
| 2021-07-08T12:17:25.378993
| 2020-09-02T05:20:41
| 2020-09-02T05:20:41
| 184,124,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
#!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
from calculator_1 import add, sub, mul, div
total = len(argv)
if total != 4:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
exit(1)
opera = argv[2]
if opera != "+" and opera != "-" and opera != "*" and opera != "/":
print("Unknown operator. Available operators: +, -, * and /")
exit(1)
a = int(argv[1])
b = int(argv[3])
if opera == "+":
print("{:d} + {:d} = {:d}".format(a, b, add(a, b)))
if opera == "-":
print("{:d} - {:d} = {:d}".format(a, b, sub(a, b)))
if opera == "*":
print("{:d} * {:d} = {:d}".format(a, b, mul(a, b)))
if opera == "/":
print("{:d} / {:d} = {:d}".format(a, b, div(a, b)))
|
[
"774@holbertonschool.com"
] |
774@holbertonschool.com
|
bf4c983ab86fc148223b793eef9b2d6247bb60e6
|
f7a718425de1447836b547f831a120937f1fcf40
|
/plumbum/core.py
|
94ae4b1f55d439ace21402d8d93e42a6468797b5
|
[
"BSD-3-Clause"
] |
permissive
|
coyotevz/plumbum-old-1
|
ad8ce697ffb4cbd0a6f238f66a1c546800e47024
|
c0f769ca525298ab190592d0997575d917a4bed4
|
refs/heads/master
| 2021-01-20T10:50:32.516766
| 2016-11-18T04:20:32
| 2016-11-18T04:20:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,879
|
py
|
# -*- coding: utf-8 -*-
__all__ = ['Component', 'ExtensionPoint', 'implements', 'Interface',
'PlumbumBaseError', 'PlumbumError']
class PlumbumBaseError(Exception):
"""Base class for all exceptions defined in Plumbum."""
title = "Plumbum Error"
class PlumbumError(PlumbumBaseError):
"""Standard exception for errors in Plumbum."""
def __init__(self, message, title=None, show_traceback=False):
super(PlumbumError, self).__init__(message)
self._message = message
self.title = title
self.show_traceback = show_traceback
message = property(lambda x: x._message,
lambda x, v: setattr(x, '_message', v))
def __str__(self):
return self.message
class Interface(object):
"""Marker base class for extension point interfaces."""
class ExtensionPoint(property):
"""Marker class for extension points in components."""
def __init__(self, interface):
"""Create the extension point.
@param interface: the `Interface` subclass that defined the protocol
for the extension point.
"""
property.__init__(self, self.extensions)
self.interface = interface
self.__doc__ = ("List of components that implement: `%s.%s`" %
(self.interface.__module__, self.interface.__name__))
def extensions(self, component):
"""Return a list of components that declare to implement the extension
point interface.
"""
classes = ComponentMeta._registry.get(self.interface, ())
components = [component.compmgr[cls] for cls in classes]
return [c for c in components if c]
def __repr__(self):
"""Return a textual representation of the extension point."""
return "<ExtensionPoint %s>" % self.interface.__name__
class ComponentMeta(type):
"""Meta class for components.
Takes care of component and extension point registration.
"""
_components = []
_registry = {}
def __new__(mcs, name, bases, d):
"""Create the component class."""
new_class = type.__new__(mcs, name, bases, d)
if name == 'Component':
# Don't put the Component base class in the registry
return new_class
if d.get('abstract'):
# Don't put abstract component classes in the registry
return new_class
ComponentMeta._components.append(new_class)
registry = ComponentMeta._registry
for cls in new_class.__mro__:
for interface in cls.__dict__.get('_implements', ()):
classes = registry.setdefault(interface, [])
if new_class not in classes:
classes.append(new_class)
return new_class
def __call__(cls, *args, **kwargs):
"""Return an existing instance of the component if it has already been
activated, otherwise create a new instance.
"""
# If this component is also the component manager, just invoke that
if issubclass(cls, ComponentManager):
self = cls.__new__(cls)
self.compmgr = self
self.__init__(*args, **kwargs)
return self
# The normal case where the component is not also the component manager
assert len(args) >= 1 and isinstance(args[0], ComponentManager), \
"First argument must be a ComponentManager instance"
compmgr = args[0]
self = compmgr.components.get(cls)
# Note that this check is racy, we intentionally don't use a lock in
# order to keep things simple and avoid the risk of deadlocks, as the
# impact of having temporarily two (or more) instance for a given `cls`
# is negligible.
if self is None:
self = cls.__new__(cls)
self.compmgr = compmgr
compmgr.component_activated(self)
self.__init__()
# Only register the instance once it is fully initialized (#9418)
compmgr.components[cls] = self
return self
class Component(object, metaclass=ComponentMeta):
"""Base class for components.
Every component can declare what extension points it provides, as well as
what extension point of other components extends.
"""
@staticmethod
def implements(*interfaces):
"""Can be used in the class definition of `Component` subclasses to
declare the extension points that are extended.
"""
import sys
frame = sys._getframe(1)
locals_ = frame.f_locals
# Some sanity checks
assert locals_ is not frame.f_globals and '__module__' in locals_, \
'implements() can only be used in a class definition'
locals_.setdefault('_implements', []).extend(interfaces)
def __repr__(self):
"""Return a textus representantion of the component."""
return '<Component {}.{}>'.format(self.__class__.__module__,
self.__class__.__name__)
implements = Component.implements
class ComponentManager(object):
"""The component manager keeps a pool of active components."""
def __init__(self):
"""Initialize the component manager."""
self.components = {}
self.enabled = {}
if isinstance(self, Component):
self.components[self.__class__] = self
def __contains__(self, cls):
"""Return wether the given class is in the list of active components"""
return cls in self.components
def __getitem__(self, cls):
"""Activate the component instance for the given class, or return the
existing instance if the component has already been activated.
Note that `ComponentManager` components can't be activated that way.
"""
if not self.is_enabled(cls):
return None
component = self.components.get(cls)
if not component and not issubclass(cls, ComponentManager):
if cls not in ComponentMeta._components:
raise PlumbumError('Component "{}" not registered'
.format(cls.__name__))
try:
component = cls(self)
except TypeError as e:
raise PlumbumError('Unable to instantiate component {!r} ({})'
.format(cls, e))
return component
def is_enabled(self, cls):
"""Return whether the given component class is enabled."""
if cls not in self.enabled:
self.enabled[cls] = self.is_component_enabled(cls)
return self.enabled[cls]
def disable_component(self, component):
"""Force a component to be disabled.
@param component: can be a class or an instance.
"""
if not isinstance(component, type):
component = component.__class__
self.enabled[component] = False
self.components[component] = None
def enable_component(self, component):
"""Force a component to be enabled.
@param component: can be a class or an instance.
"""
if not isinstance(component, type):
component = component.__class__
self.enabled[component] = True
def component_activated(self, component):
"""Can be overridden by sub-classes so that special initialization for
components ca be provided.
"""
def is_component_enabled(self, cls):
"""Can be overridden by sub-classes to veto the activation of a
component.
If this method returns `False`, the component was disabled explicitly.
If it returns `None`, the component was neither enabled nor disabled
explicitly. In both cases, the component with the given class will not
be available.
"""
return True
|
[
"augusto@rioplomo.com.ar"
] |
augusto@rioplomo.com.ar
|
e61ea84cae50e126560594d6977dfe14b17266b9
|
1e528494a929deada984822438b3ab569762e6c6
|
/rx/testing/recorded.py
|
d8ab083f222f404edc7a4af700ecdfcb0f8686bf
|
[
"MIT"
] |
permissive
|
Sprytile/Sprytile
|
a0233a00a243f263691921d7e1f6af05c5eb5442
|
6b68d0069aef5bfed6ab40d1d5a94a3382b41619
|
refs/heads/master
| 2022-07-10T06:54:01.003723
| 2020-09-26T07:25:35
| 2020-09-26T07:25:35
| 72,276,917
| 860
| 91
|
MIT
| 2022-07-07T23:37:19
| 2016-10-29T09:47:09
|
Python
|
UTF-8
|
Python
| false
| false
| 597
|
py
|
from rx.internal.basic import default_comparer
class Recorded(object):
def __init__(self, time, value, comparer=None):
self.time = time
self.value = value
self.comparer = comparer or default_comparer
def __eq__(self, other):
"""Returns true if a recorded value matches another recorded value"""
time_match = self.time == other.time
return time_match and self.comparer(self.value, other.value)
equals = __eq__
def __repr__(self):
return str(self)
def __str__(self):
return "%s@%s" % (self.value, self.time)
|
[
"jeiel.aranal@gmail.com"
] |
jeiel.aranal@gmail.com
|
f99c83d2712349c0946f6e3b580ce1a637ca20d9
|
f444eede3cd341afc969756b00a34816f949238a
|
/encode.py
|
02cc03704c672bb641575cb3f6609f413ad7a7eb
|
[
"MIT"
] |
permissive
|
dcbriccetti/StegaPy
|
28ce6007c0c8a2dbb38de76e52344ec621c4a8ac
|
a20bb263737ae445e65a602c728acc4e3602baed
|
refs/heads/master
| 2021-07-10T03:28:28.214664
| 2020-10-03T21:11:52
| 2020-10-03T21:11:52
| 204,844,779
| 7
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
from stegapy import create_image
message = '''Steganography is the practice of concealing a file,
message, image, or video within another file, message, image,
or video. The word steganography combines the Greek words
steganos, meaning "covered or concealed", and graphe meaning
"writing".'''
create_image(message, 'original-image.png', 'secret-image.png')
|
[
"daveb@davebsoft.com"
] |
daveb@davebsoft.com
|
1fa31a59bd30ae57f8c0e2ffec65c5ef13811b6d
|
528dd70727c0da10483323ae0ef2db6d01124e2d
|
/scrape all quotes from website/scrape all quotes from website with bs4.py
|
37f07196dd6e399837b02c997a7e89f5c2b5f56c
|
[] |
no_license
|
aadarshraj4321/Simple-Scraping-With-BeautifulSoup-
|
051b764cb256a9b44ef73cbc1aacdb20ba764add
|
79ef2d748af2df28f8cb6a1d02ed0ec76605cb14
|
refs/heads/master
| 2022-12-12T09:14:34.080825
| 2020-09-10T13:54:04
| 2020-09-10T13:54:04
| 293,579,101
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import requests
from bs4 import BeautifulSoup
## url of site which we scrape
url = "https://parade.com/937586/parade/life-quotes/"
## request the url to get data from that website and check status code
page = requests.get(url)
#print(page.status_code)
## call BeautifulSoup and store in soup variable
soup = BeautifulSoup(page.text,"html.parser")
#print(soup)
quote = soup.find(class_= "page_content")
#print(quote)
p_class = soup.find_all("p")
#print(p_class)
#print(len(p_class))
#print(p_class[8])
## slice from 8:161
main_p = p_class[8:161]
#print(main_p)
for i in main_p:
print(i.text,end="\n\n")
#### Done ####
|
[
"noreply@github.com"
] |
aadarshraj4321.noreply@github.com
|
3e7c3413828b2ba8defb1d25594e2f29762dd7ea
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2333/60716/257539.py
|
d12c33899ad0ff7c29ed096997fe168b11f5d990
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import math
x = int(input())
y = int(input())
bound = int(input())
lists = list()
for i in range(int(math.log(bound,x))+1):
for j in range(int(math.log(bound,y))+1):
temp = x**i+y**j
if temp<=bound:
lists.append(temp)
alist = list(set(lists))
alist.sort()
print(alist)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
adec619e3b44525d2a8552143b3945a1d9f4e157
|
2424063d657d643c1f8ccc6cca343271d6d0f708
|
/Project24/app24/models.py
|
5a8ad3f6ed2fce774fd34f11bc0610adb453e749
|
[] |
no_license
|
pythonwithnaveen/DjangoExamples
|
a0a07cbc53564522cf39649c235716ef5c3a4ba0
|
57c7a6302ada4079bd3625481e660587bf8015c6
|
refs/heads/main
| 2023-07-16T02:36:01.283938
| 2021-08-12T07:26:22
| 2021-08-12T07:26:22
| 371,881,524
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
from django.db import models
class EmployeeModel(models.Model):
desig = (('Manager','Manager'),
('Developer','Developer'),
('Tester','Tester'))
idno = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
salary = models.FloatField()
designation = models.CharField(max_length=100,choices=desig,default='Developer')
def __str__(self):
return self.name
|
[
"="
] |
=
|
57d922bb376a8688b87bd15551f00ed4bc091aa1
|
78c3082e9082b5b50435805723ae00a58ca88e30
|
/03.AI알고리즘 소스코드/venv/Lib/site-packages/caffe2/experiments/python/device_reduce_sum_bench.py
|
04628c97394f2cab1ab8c5c006c88f2316bc60eb
|
[] |
no_license
|
jinStar-kimmy/algorithm
|
26c1bc456d5319578110f3d56f8bd19122356603
|
59ae8afd8d133f59a6b8d8cee76790fd9dfe1ff7
|
refs/heads/master
| 2023-08-28T13:16:45.690232
| 2021-10-20T08:23:46
| 2021-10-20T08:23:46
| 419,217,105
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,076
|
py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package device_reduce_sum_bench
# Module caffe2.experiments.python.device_reduce_sum_bench
import argparse
import itertools
import logging
import os
from six import add_metaclass
import numpy as np
from caffe2.python import workspace, core
from caffe2.python.hypothesis_test_util import runOpBenchmark, gpu_do
logging.basicConfig()
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)
ALL_BENCHMARKS = {}
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
@add_metaclass(BenchmarkMeta)
class Benchmark(object):
def __init__(self):
self.results = []
def display(self):
print('Results ({}):'.format(type(self).__name__))
print('input size ms/iter')
print('------------------------------ -----------')
for size, ms in self.results:
print('{!s:<30} {:.4f}'.format(size, ms))
class SumElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SumSqrElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SoftMaxWithLoss(Benchmark):
def run(self):
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
)
for n in itertools.imap(pow, itertools.cycle([10]), range(8)):
for D in itertools.imap(pow, itertools.cycle([10]), range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(__file__))
parser.add_argument('-b', '--benchmarks', nargs='+',
default=ALL_BENCHMARKS.keys(),
help='benchmarks to run (default: %(default)s))')
return parser.parse_args()
def main():
args = parse_args()
benchmarks = [ALL_BENCHMARKS[name]() for name in args.benchmarks]
for bench in benchmarks:
bench.run()
for bench in benchmarks:
bench.display()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
|
[
"gudwls3126@gmail.com"
] |
gudwls3126@gmail.com
|
b3852a41403ec640fd038714c357b524040ec896
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/word-count/14cde529d953488da375451dc827b0ba.py
|
51da302e01332cc4536cbc94b62458702df054c3
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
def word_count(s):
"""
Return a dict with 'word': number of occurrences for each word in `s`.
"""
counts = {}
for word in s.split():
counts[word] = counts.get(word, 0) + 1
return counts
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
133c0a93bb9ae4ac98104c70fb11d0e6f8f560f2
|
5e2a66e0416dcaf4674bd58f7dd7bc905800aa92
|
/chapter1_intro_to_wx/hello_with_classes.py
|
65730999e668f7a09c7a3dd0cb78c78784a4fe2c
|
[] |
no_license
|
gridl/applications_with_wxpython
|
c96ed05b49e3494323e612afb1baccc8ea1e6f93
|
1fc63c384b7856402b99a97bf91fe0966a5ec413
|
refs/heads/master
| 2020-05-17T08:37:01.672727
| 2019-04-25T19:24:14
| 2019-04-25T19:24:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# hello_with_classes.py
import wx
class MyFrame(wx.Frame):
    """Minimal top-level frame that shows itself on creation."""
    def __init__(self):
        wx.Frame.__init__(self, None, title='Hello World')
        self.Show()  # make the frame visible immediately
if __name__ == '__main__':
    app = wx.App(redirect=False)  # redirect=False: keep stdout/stderr in the console
    frame = MyFrame()
    app.MainLoop()  # blocks until the last top-level window closes
|
[
"mike@pythonlibrary.org"
] |
mike@pythonlibrary.org
|
019b8a110b6d7f632b7b2b3980b4615fba637ae6
|
2d2c10ffa7aa5ee35393371e7f8c13b4fab94446
|
/projects/ai/sentiment/prepare.test/to-chars.py
|
c4b01deea6d39c8ddb5798ccb4ffe0b1d88f440d
|
[] |
no_license
|
faker2081/pikachu2
|
bec83750a5ff3c7b5a26662000517df0f608c1c1
|
4f06d47c7bf79eb4e5a22648e088b3296dad3b2d
|
refs/heads/main
| 2023-09-02T00:28:41.723277
| 2021-11-17T11:15:44
| 2021-11-17T11:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file to-chars.py
# \author chenghuige
# \date 2018-10-28 08:37:28.846557
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import six
assert six.PY3
import pandas as pd
from projects.ai2018.sentiment.prepare import filter
from tqdm import tqdm
import traceback
ifile = sys.argv[1]
ofile = sys.argv[2]
ids_set = set()
fm = 'w'
if os.path.exists(ofile):
fm = 'a'
for line in open(ofile):
ids_set.add(line.split('\t')[0])
print('%s already done %d' % (ofile, len(ids_set)))
num_errs = 0
with open(ofile, fm) as out:
df = pd.read_csv(ifile, lineterminator='\n')
contents = df['content'].values
ids = df['id'].values
for i in tqdm(range(len(df)), ascii=True):
if str(ids[i]) in ids_set:
continue
#if i != 2333:
# continue
#print(gezi.cut(filter.filter(contents[i]), type_))
try:
l = []
for ch in filter.filter(contents[i]):
l.append(ch)
print(' '.join(l), file=out)
except Exception:
if num_errs == 0:
print(traceback.format_exc())
num_errs += 1
continue
#exit(0)
print('num_errs:', num_errs, 'ratio:', num_errs / len(df))
|
[
"chenghuige@gmail.com"
] |
chenghuige@gmail.com
|
5c23552b144d0f866937c9b01f222bb74d2a6c65
|
c415caab95b63c8b3dd217cd5cf2845362e5df77
|
/concept/auto_generated/afrl/cmasi/WavelengthBand.py
|
4ff57fc6791f5a2d3d3092ee43b40dcb4fdd41b1
|
[] |
no_license
|
GaloisInc/amase-code-generator
|
10fe109061c78a7a41ebee6a805476eb0fde9b73
|
2d18cc8e25b86dd22e1b3d2862178e2f598b18ab
|
refs/heads/master
| 2020-09-27T10:26:35.411118
| 2017-01-06T23:52:38
| 2017-01-06T23:52:38
| 66,033,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
#! /usr/bin/python
class WavelengthBand:
    """Enumeration-style constants for sensor wavelength bands (generated code:
    plain ints rather than enum.Enum, so values stay wire-compatible)."""
    AllAny = 0
    EO = 1
    LWIR = 2
    SWIR = 3
    MWIR = 4
    Other = 5
def get_WavelengthBand_str(str):
    """
    Returns a numerical value from a string
    """
    # NOTE: the parameter shadows the builtin `str`; name kept for API
    # compatibility with generated callers.
    mapping = {
        "AllAny": WavelengthBand.AllAny,
        "EO": WavelengthBand.EO,
        "LWIR": WavelengthBand.LWIR,
        "SWIR": WavelengthBand.SWIR,
        "MWIR": WavelengthBand.MWIR,
        "Other": WavelengthBand.Other,
    }
    # Unknown labels yield None, matching the original's implicit fall-through.
    return mapping.get(str)
def get_WavelengthBand_int(val):
    """
    Returns a string representation from an int
    """
    mapping = {
        WavelengthBand.AllAny: "AllAny",
        WavelengthBand.EO: "EO",
        WavelengthBand.LWIR: "LWIR",
        WavelengthBand.SWIR: "SWIR",
        WavelengthBand.MWIR: "MWIR",
        WavelengthBand.Other: "Other",
    }
    # NOTE(review): unknown values fall back to WavelengthBand.AllAny (the
    # int 0, not the string "AllAny"), exactly as in the original — confirm
    # whether the string was intended.
    return mapping.get(val, WavelengthBand.AllAny)
|
[
"sahabi@gmail.com"
] |
sahabi@gmail.com
|
3c854ed5a7b301776fed37dd14bf8b1ac5d6ea1b
|
d539072e556343c748619883a525a88318cf6003
|
/db/models.py
|
d48d8080051b2dc5314381da4892bbaecb802c2e
|
[] |
no_license
|
W1ntersnow/fastapi_simple_sku
|
87dcf861ba02e0314e9f2c48a1ac82ef0e0d8969
|
f62abc33810e17bac7a7227f76a6fcfafa0f5979
|
refs/heads/master
| 2022-07-31T06:14:41.011961
| 2020-05-19T08:14:59
| 2020-05-19T08:14:59
| 265,180,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from . import Base
class ItemType(Base):
__tablename__ = "item_types"
id = Column(Integer, primary_key=True, index=True)
title = Column(String, unique=True)
description = Column(String)
items = relationship("Item", back_populates="type")
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
sku = Column(String)
balance = Column(Integer, default=0)
description = Column(String)
type_id = Column(Integer, ForeignKey("item_types.id"))
type = relationship("ItemType", back_populates="items")
|
[
"example@example.com"
] |
example@example.com
|
06e6786e163a00c5c9e3fc1ccb82b1b65a94661e
|
2576f319799e2a6ed6f4b7025ab87bf7262a3a9f
|
/eng_text_norm/cmudict.py
|
6caf56c745ddc90b522cee6bded625d78724f2aa
|
[] |
no_license
|
soon14/TTS_text_norm
|
f262a6ca04dfcfb6137662ceee9fe59617266dab
|
d094160a958b9117f15131742b4751acf45d1249
|
refs/heads/master
| 2022-06-19T10:37:47.872213
| 2020-05-12T07:06:54
| 2020-05-12T07:06:54
| 264,113,334
| 2
| 1
| null | 2020-05-15T06:23:01
| 2020-05-15T06:23:01
| null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
  '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
  def __init__(self, file_or_path, keep_ambiguous=True):
    # Accept either a filesystem path (opened latin-1, the cmudict encoding)
    # or an already-open file-like object.
    if isinstance(file_or_path, str):
      with open(file_or_path, encoding='latin-1') as f:
        entries = _parse_cmudict(f)
    else:
      entries = _parse_cmudict(file_or_path)
    if not keep_ambiguous:
      # Keep only words with exactly one pronunciation.
      entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
    self._entries = entries
  def __len__(self):
    # Number of distinct words in the dictionary.
    return len(self._entries)
  def lookup(self, word):
    '''Returns list of ARPAbet pronunciations of the given word (None if absent).'''
    return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
  """Parse an open cmudict file into {WORD: [pronunciation, ...]}.

  Lines not starting with A-Z or an apostrophe (comments, headers) are
  skipped, as are entries whose pronunciation contains invalid symbols.
  """
  entries = {}
  for raw in file:
    if not len(raw):
      continue
    first = raw[0]
    if not ('A' <= first <= 'Z' or first == "'"):
      continue
    fields = raw.split(' ')
    # Strip the "(2)"-style alternate-pronunciation marker from the word.
    word = _alt_re.sub('', fields[0])
    pron = _get_pronunciation(fields[1])
    if pron:
      entries.setdefault(word, []).append(pron)
  return entries
def _get_pronunciation(s):
  """Normalize a pronunciation string; return None if any symbol is invalid."""
  symbols = s.strip().split(' ')
  if any(sym not in _valid_symbol_set for sym in symbols):
    return None
  return ' '.join(symbols)
|
[
"CCS695146667@163.com"
] |
CCS695146667@163.com
|
1ea61fd5bebffa3f350071c167bcb521e65cee9d
|
5fda498ef0bfc06962ad9b864d229193c45ccb4a
|
/Project2_Data_Wrangle_OpenStreetMaps_Data_R1/problem_sets1-6/Lesson_3_Problem_Set/01-Auditing_Data_Quality/audit.py
|
0f95f771d5fb8eea4a50b47eeedc344417c91214
|
[] |
no_license
|
prabhurgit/Data_Aanlyst_Nanodegree_projects
|
7934869b63cae57cb2851e22a5023c6cbe3d18ba
|
a7a13d93c632cd1840ba3a00fff80a60a131b7f3
|
refs/heads/master
| 2021-05-31T18:47:48.669414
| 2016-03-30T04:08:39
| 2016-03-30T04:08:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then
clean it up. In the first exercise we want you to audit the datatypes that can be found in some
particular fields in the dataset.
The possible types of values can be:
- 'NoneType' if the value is a string "NULL" or an empty string ""
- 'list', if the value starts with "{"
- 'int', if the value can be cast to int
- 'float', if the value can be cast to float, but is not an int
- 'str', for all other values
The audit_file function should return a dictionary containing fieldnames and a set of the datatypes
that can be found in the field.
All the data initially is a string, so you have to do some checks on the values first.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
FIELDS = ["name", "timeZone_label", "utcOffset", "homepage", "governmentType_label", "isPartOf_label", "areaCode", "populationTotal",
"elevation", "maximumElevation", "minimumElevation", "populationDensity", "wgs84_pos#lat", "wgs84_pos#long",
"areaLand", "areaMetro", "areaUrban"]
def audit_file(filename, fields):
    """Return {fieldname: set of Python types seen in that CSV column}.

    Only rows whose URI points at dbpedia.org are audited (others are
    header/metadata rows in this dataset).
    """
    fieldtypes = {}
    for key in fields:
        fieldtypes[key] = set([])
    # NOTE(review): "rb" with csv.DictReader is Python-2 style; Python 3's
    # csv module requires text mode — confirm which interpreter this targets.
    with open(filename, "rb") as file_data:
        reader = csv.DictReader(file_data)
        for line in reader:
            if line["URI"][:18] == "http://dbpedia.org":
                for key in FIELDS:
                    fieldtypes[key].add(check_type(line, key))
    # YOUR CODE HERE
    return fieldtypes
def check_type(row_dict, key):
    """Classify the string value at ``row_dict[key]``.

    Returns NoneType for "NULL"/"" (missing data), list for "{...}" values,
    int for integer strings (including negatives), float for other numeric
    strings, and str for everything else.
    """
    value = row_dict[key]
    # Fix: the original `value == "NULL" or ""` only tested for "NULL"
    # (the `or ""` operand is always falsy), so an empty string fell
    # through and crashed on value[0] below.
    if value == "NULL" or value == "":
        return type(None)
    if value[0] == "{":
        return type([])
    # isdigit() alone misses negative integers such as "-7".
    if value.isdigit() or (value.startswith("-") and value[1:].isdigit()):
        return type(1)
    try:
        float(value)
        return type(1.0)
    except ValueError:
        pass
    return type("a")
def test():
    """Smoke test: audit cities.csv and spot-check two fields' type sets."""
    fieldtypes = audit_file(CITIES, FIELDS)
    pprint.pprint(fieldtypes)
    assert fieldtypes["areaLand"] == set([type(1.1), type([]), type(None)])
    assert fieldtypes['areaMetro'] == set([type(1.1), type(None)])
if __name__ == "__main__":
    test()
|
[
"xiewisdom@gmail.com"
] |
xiewisdom@gmail.com
|
7e8beeca2a34635f4596d185c06264add736b251
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/python/Q_17_VendingMachine.py
|
be91ce07b43dce97430308e53ad4bab5ac50180e
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089
| 2023-02-07T04:11:09
| 2023-02-07T04:11:09
| 170,044,224
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,591
|
py
|
from abc import abstractmethod, ABC
from enum import Enum
from collections import Counter
class ProductType(Enum):
    # Closed set of sellable product kinds; only COKE exists so far.
    COKE = 1
class State(ABC):
    """Abstract base for the vending machine's State-pattern states.

    Concrete states implement the four user actions; the machine delegates
    to whichever state is current.
    """

    def __init__(self, vendingMachine):
        # Single-underscore on purpose: the original `self.__vendingMachine`
        # was name-mangled to _State__vendingMachine, while subclass methods
        # mangled their accesses to their *own* class name, so the attribute
        # was unreachable and every delegation raised AttributeError.
        self._vendingMachine = vendingMachine

    @abstractmethod
    def selectItem(self, item: 'Product'):
        # 'Product' is quoted because the class is defined later in the
        # module; a bare annotation raised NameError at class-creation time.
        pass

    @abstractmethod
    def insertPayment(self, value: int):
        pass

    @abstractmethod
    def executeTransaction(self):
        pass

    @abstractmethod
    def cancelTransaction(self):
        pass

    @abstractmethod
    def name(self):
        pass


class NoSelectionState(State):
    """Idle state: no item chosen yet."""

    def selectItem(self, item: 'Product'):
        self._vendingMachine.addSelectedItem(item)
        self._vendingMachine.changeToHasSelectionState()

    def insertPayment(self, value: int):
        raise Exception('No selected item.')

    def executeTransaction(self):
        raise Exception('No selected item.')

    def cancelTransaction(self):
        # Nothing to refund.
        return 0

    def name(self):
        return 'No selection'


class HasSelectionState(State):
    """An item is chosen but no money has been inserted yet."""

    def selectItem(self, item: 'Product'):
        raise Exception('Has selected item.')

    def insertPayment(self, value: int):
        self._vendingMachine.addMoney(value)
        self._vendingMachine.changeToInsertMoneyState()

    def executeTransaction(self):
        raise Exception('No payment made')

    def cancelTransaction(self):
        self._vendingMachine.changeToNoSelectionState()
        self._vendingMachine.cancelSelectedItem()
        return 0

    def name(self):
        return 'Has selection'


class InsertMoneyState(State):
    """Money is being inserted for the selected item."""

    def selectItem(self, item: 'Product'):
        raise Exception('Has selected item.')

    def insertPayment(self, value: int):
        self._vendingMachine.addMoney(value)

    def executeTransaction(self):
        diff = self._vendingMachine.getInsertedMoney() - self._vendingMachine.getPrice()
        if diff >= 0:
            self._vendingMachine.setSelectedItem(None)
            self._vendingMachine.changeToNoSelectionState()
        else:
            raise Exception('Not enough')

    def cancelTransaction(self):
        # Refund whatever has been inserted so far.
        money = self._vendingMachine.getInsertedMoney()
        self._vendingMachine.changeToNoSelectionState()
        self._vendingMachine.cancelSelectedItem()
        return money

    def name(self):
        # NOTE(review): same label as HasSelectionState in the original —
        # possibly meant to be 'Insert money'; kept as-is to preserve output.
        return 'Has selection'
class Product(ABC):
    """Base class for sellable products: an immutable-name, mutable-price pair."""
    def __init__(self, name, price):
        self.__name = name
        self.__price = price
    def getName(self):
        # Identifier of the product (a ProductType in practice).
        return self.__name
    def getPrice(self):
        return self.__price
    def updatePrice(self, price):
        # Prices can change after construction (promotions, restocks).
        self.__price = price
class Coke(Product):
    """Concrete product: a coke at the given price."""
    def __init__(self, price):
        super(Coke, self).__init__(ProductType.COKE, price)
class VendingMachine:
    """Context object of the State pattern: owns inventory, money and state."""

    def __init__(self):
        self.__inventory = Counter()          # product -> units in stock
        self.__selectedItem = None
        self.__noSelectionState = NoSelectionState(self)
        self.__hasSelectionState = HasSelectionState(self)
        # Fix: the original instantiated `InsertPaymentState`, a class that
        # does not exist anywhere; the state class is InsertMoneyState.
        self.__insertPaymentState = InsertMoneyState(self)
        self.__state = self.__noSelectionState
        self.__currentMoney = 0

    def getMoney(self):
        return self.__currentMoney

    def getInsertedMoney(self):
        # Alias used by the state classes; the original only defined
        # getMoney, so those calls raised AttributeError.
        return self.__currentMoney

    def addMoney(self, value):
        self.__currentMoney += value

    def clearMoney(self):
        # Fix: reset to 0, not None — addMoney() would otherwise raise a
        # TypeError on the next coin.
        self.__currentMoney = 0

    def addInventory(self, product, quantity):
        self.__inventory[product] += quantity

    def getPrice(self):
        # Price of the current selection; 0 when nothing is selected.
        if self.__selectedItem is None:
            return 0
        return self.__selectedItem.getPrice()

    def setSelectedItem(self, item):
        self.__selectedItem = item

    def addSelectedItem(self, item):
        if self.__inventory[item] == 0:
            raise Exception('')
        self.__inventory[item] -= 1
        self.__selectedItem = item

    def cancelSelectedItem(self):
        # Return the reserved unit to stock and wipe the transaction.
        item = self.__selectedItem
        self.__inventory[item] += 1
        self.__selectedItem = None
        self.__currentMoney = 0

    def changeToNoSelectionState(self):
        self.__state = self.__noSelectionState

    def changeToHasSelectionState(self):
        self.__state = self.__hasSelectionState

    def changeToInsertPaymentState(self):
        self.__state = self.__insertPaymentState

    def changeToInsertMoneyState(self):
        # The state classes call this name; the original only defined
        # changeToInsertPaymentState, so the call raised AttributeError.
        self.__state = self.__insertPaymentState

    def selectItem(self, item: Product):
        self.__state.selectItem(item)

    def insertPayment(self, value: int):
        self.__state.insertPayment(value)

    def executeTransaction(self):
        self.__state.executeTransaction()

    def cancelTransaction(self):
        self.__state.cancelTransaction()
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
1ac4ce437d608a5a1b7a11050359959e8ed317f5
|
4881b8c2c038d449485598c5761d4a3ca098792c
|
/LeetcodeNew/python/LC_793.py
|
ae0d5246551452e2c7ebeb32dad1b1c5ba7528e0
|
[] |
no_license
|
derrickweiruluo/OptimizedLeetcode-1
|
2ae3b177e7bd39ceb45993f4de9cc10e40fadc5b
|
a4d8b54d3004866fd304e732707eef4401dfdb0a
|
refs/heads/master
| 2023-08-29T03:46:44.177836
| 2021-10-10T19:22:50
| 2021-10-10T19:22:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
class Solution:
    """LeetCode 793: how many n have exactly K trailing zeros in n!.

    The answer is always 5 (five consecutive n share a factorial zero
    count) or 0 (that count is skipped entirely).
    """

    def preimageSizeFZF(self, K: int) -> int:
        """Binary-search for any n whose factorial has K trailing zeros."""
        lo, hi = 0, 10 ** 5 * (K + 1)
        while lo < hi:
            mid = (lo + hi) // 2
            zeros = self.cal2(mid)
            if zeros == K:
                return 5
            if zeros < K:
                lo = mid + 1
            else:
                hi = mid
        return 0

    def cal(self, num):
        """Iterative count of trailing zeros in num! (factors of 5)."""
        total = 0
        while num > 0:
            num //= 5
            total += num
        return total

    def cal2(self, num):
        """Recursive equivalent of cal()."""
        return 0 if num == 0 else num // 5 + self.cal2(num // 5)
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
7b95fcb647de0f245df18616325ca3e65b5ff326
|
ac608801a67543c800d3534a961359592019f1ed
|
/My stuff/Listss/Main.py
|
b48f4977635a5e0a60f821bd7762a0e277723384
|
[] |
no_license
|
Codingmace/PiHole-2020
|
4834dbf605aa3469141ca0e4895b89c39a61bf43
|
42cab0cc3ba1803a18fec6ad6e0b37d0de51d696
|
refs/heads/main
| 2023-03-20T22:28:52.114619
| 2021-03-18T16:37:18
| 2021-03-18T16:37:18
| 329,333,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,694
|
py
|
import dns.resolver
import os.path
debug = True
def get_records(domain):
    """Return the first DNS record type that resolves for *domain*, else "NA".

    Tries each record type in order and returns on the first answer.
    NOTE(review): dns.resolver.query is deprecated in newer dnspython in
    favour of resolve() — confirm the installed version.
    """
    ids = ['A', 'AAAA', 'SOA', 'CNAME', 'MX', 'NS', 'PTR', 'CERT', 'SRV', 'TXT']
    for a in ids:
        try:
            answers = dns.resolver.query(domain, a)
            for rdata in answers:
                return a
        except Exception as e:
            print(e) # or pass
    return "NA"
def validation(filename):
    """Split the lines of *filename* into DNS-resolvable and unresolvable sets.

    Resolvable lines are written to valid.txt; with the module-level `debug`
    flag set, the rest are logged to invalid.txt.
    """
    a = open(filename, "r") # The current file
    b = open("valid.txt", "w") # For the shorter valid types
    if (debug):
        c = open("invalid.txt", "w") # For the shorter invalid types
    lines = a.readlines()
    lines.sort()
    a.close()
    for line in lines:
        # NOTE(review): `line` still carries its trailing newline — confirm
        # get_records() tolerates that.
        ans = get_records(line)
        if ans != "NA":  # fix: the original `!(ans == "NA")` is not Python (SyntaxError)
            b.write(line)
        elif (debug):
            c.write(line)
    b.close()
    if (debug):
        c.close()
def removeDuplicates(filename):
    """Collapse consecutive duplicate lines of a (pre-sorted) file.

    Unique lines go to unique.txt; with the module-level `debug` flag set,
    detected duplicates are logged to dipli.txt.  Assumes *filename* is
    sorted so duplicates are adjacent.
    """
    a = open(filename, "r")
    b = open("unique.txt", "w") # The new shorter list
    if(debug):
        c = open("dipli.txt", "w") # Duplicate ones that are detected
    lines = a.readlines()
    a.close()
    if lines:  # fix: an empty input file crashed on lines[-1] below
        fir = 0 # First Value
        sec = 1 # Comparing value
        lim = len(lines) # Limit AKA number of lines
        while(sec < lim):
            if(lines[fir] == lines[sec]): # Same keep going
                if(debug):
                    c.write(lines[sec])
                sec = sec + 1
            else: # Not the same
                b.write(lines[fir])
                fir = sec
                sec = fir + 1
        b.write(lines[len(lines)-1]) # for the last element
    b.close()
    if(debug):
        c.close()
""" CAN'T GET THIS FUCKING THING WORKING """
def subFiles(foldPath):
    """List the plain files directly inside the working folder.

    Returns the list of file names (the original built the list but never
    returned it, so callers always received None).
    """
    fileList = []
    # NOTE(review): the original hard-codes the folder and ignores the
    # argument ("GOING TO THE DEFAULT OF FOLDER SEPERATING"); kept to
    # preserve behaviour, but this looks like a leftover workaround.
    foldPath = "Seperating\\"
    for entry in os.listdir(foldPath):
        # Fix: join with the folder — bare os.path.isfile(entry) tested the
        # name relative to the CWD and always missed.
        if (os.path.isfile(os.path.join(foldPath, entry))):
            fileList.append(entry)
    print(fileList)
    return fileList
def merger(files):
    """Merge *files* into a single output file.

    TODO: unimplemented — the original source had a bare ``def`` with no
    body here (a SyntaxError); this stub makes the module importable.
    """
    pass
def mergeFiles(foldername):
    """Collect the files under *foldername* and merge them into one file."""
    # Walk the path of the files
    # NOTE(review): subFiles as originally written returns None, so
    # merger() receives None here — confirm subFiles returns its list.
    fileList = subFiles(foldername)
    # Merge the files together
    merger(fileList)
filename = "valid.txt" # The merged file name
def main():
    """Interactive entry point: menu-driven validate / merge / split / crawl."""
    print("Let us start this out with a few questions")
    print("What do we want to do. Keep in mind 3 can also do steps 1 and 2")
    print("1. Validate List\n2. Merge List, Sort, and remove duplicates")
    print("3. Split up the list\n4. Crawl for new list")
    # input() returns a string in Python 3, so compare against string
    # literals; the original compared against ints and no branch ever ran.
    selection = input()
    if(selection == "1"):
        print("Awesome you are going easy on me. All I need you to do is enter the path of the file and we will be on our way")
        filepath = input()
        validation(filepath)
        print("Ok that is it. I am done")
    if(selection == "2"):  # fix: the original misspelled this as `selction`
        print("Ok, a little bit of work but still easy.")
        print("I need you to now input the folder path")
        foldPath = input()
        newFilename = "mergedList.txt"
        mergeFiles(foldPath)
        doValid = input("Just making sure, do you want to validate (Yes/No): ")
        if (doValid == "Yes"):
            print("Ok validating")
            validation("valid.txt")
        else:
            print("Awesome, no validation")
        print("Removing Duplicates")
        removeDuplicates("valid.txt")
        print("Ok we are all done. The requested file is named unique.txt")
    if (selection == "3"):
        print("Picky one are we. I want to make sure that we are not going to waste time")
        firstStep = input("Do you want to merge any files (Yes/No): ")
        currentFile = "" # Name of the file reading from
        if (firstStep == "Yes"):
            foldPath =input("Enter the folder path: ")
            mergeFiles(foldPath)
            currentFile = "valid.txt"
        else:
            filepath =input("Enter the file path: ")
            currentFile = filepath
        removeDuplicates(currentFile)
        currentFile = "unique.txt"
        secondStep = input("Do you want to validate the entries (Yes/No): ")
        if (secondStep == "Yes"):
            print("Dang it you are making me do so much work")
            validation(currentFile)
        else:
            print("Ok. That will make things go quicker")
        print("Now for the seperation. I bet you don't know what you want to seperate by.")
        print("I will make it easy and give you some options")
        foldPath = input("Enter in the folder with the files")
    if (selection == "4"):
        print("Oh my, you want the hardest thing. I haven't programmed that far so I will let you answer some more questions.")
        firstStep = input("Do you want to merge any files (Yes/No): ")
        currentFile = "" # Name of the file reading from
        if (firstStep == "Yes"):
            foldPath =input("Enter the folder path: ")
            mergeFiles(foldPath)
            currentFile = "valid.txt"
        else:
            filepath =input("Enter the file path: ")
            currentFile = filepath
        removeDuplicates(currentFile)
        currentFile = "unique.txt"
        secondStep = input("Do you want to validate the entries (Yes/No): ")
        if (secondStep == "Yes"):
            print("Dang it you are making me do so much work")
            validation(currentFile)
        else:
            print("Ok. That will make things go quicker")
        print("Sucks if you want to seperate the files. That is your punishment for choosing an advanced thing.")
        print("Rerun the program if you want to do Selection 3")
    print("Goodbye")
main()
|
[
"codingmace@gmail.com"
] |
codingmace@gmail.com
|
2688aa4039d5144652063095a7e8cdde6888dcbe
|
4e229e075a3f5e71a33525981fa51fd7878c9715
|
/sacrerouge/metrics/sumqe.py
|
0286a60a36075834184076678d59a089418e5a63
|
[] |
no_license
|
CogComp/content-analysis-experiments
|
57d68441272c39b687656976d20eddd817c28250
|
f6abd72029b6853627ddd191979f105a9385eed7
|
refs/heads/master
| 2023-06-27T00:29:34.115264
| 2021-08-04T14:36:17
| 2021-08-04T14:36:17
| 305,768,372
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,996
|
py
|
import argparse
import json
import os
from overrides import overrides
from subprocess import Popen, PIPE
from typing import List
from sacrerouge.commands import Subcommand
from sacrerouge.common import DATA_ROOT, TemporaryDirectory
from sacrerouge.io import JsonlWriter
from sacrerouge.data import MetricsDict
from sacrerouge.data.fields import SummaryField
from sacrerouge.data.types import SummaryType
from sacrerouge.metrics import Metric
@Metric.register('sum-qe')
class SumQE(Metric):
    """Wrapper around the external SumQE quality-estimation model.

    Runs the SumQE predictor in a subprocess (optionally inside a conda
    environment) and parses its JSON output into MetricsDicts carrying the
    five linguistic-quality scores Q1-Q5.
    """
    def __init__(self,
                 model_file: str = f'{DATA_ROOT}/metrics/SumQE/models/multitask_5-duc2006_duc2007.npy',
                 sum_qe_root: str = f'{DATA_ROOT}/metrics/SumQE',
                 environment_name: str = None,
                 verbose: bool = False):
        super().__init__([])
        self.model_file = os.path.abspath(model_file)
        self.sum_qe_root = sum_qe_root
        self.environment_name = environment_name
        self.verbose = verbose
    def _flatten_summary(self, summary: SummaryType) -> str:
        # Summaries may arrive as a list of sentences; join into one string.
        if isinstance(summary, list):
            return ' '.join(summary)
        return summary
    def _run(self, summaries_list: List[List[SummaryType]]) -> List[List[MetricsDict]]:
        with TemporaryDirectory() as temp_dir:
            summaries_file = f'{temp_dir}/summaries.jsonl'
            predictions_file = f'{temp_dir}/predictions.json'
            # Save all of the summaries to a file
            with JsonlWriter(summaries_file) as out:
                for summaries in summaries_list:
                    for summary in summaries:
                        out.write({'summary': self._flatten_summary(summary)})
            # Build the shell pipeline that runs the SumQE predictor.
            commands = [f'cd {self.sum_qe_root}']
            if self.environment_name:
                commands += [f'source activate {self.environment_name}']
            commands += [
                ' '.join([
                    'python', '-m', 'src.BERT_experiments.predict',
                    summaries_file,
                    self.model_file,
                    predictions_file
                ])
            ]
            redirect = None if self.verbose else PIPE
            process = Popen(' && '.join(commands), stdout=redirect, stderr=redirect, shell=True)
            stdout, stderr = process.communicate()
            predictions = json.loads(open(predictions_file, 'r').read())
            # Predictions come back flattened in write order; walk the nested
            # input structure with a running index to line them back up.
            index = 0
            metrics_lists = []
            for summaries in summaries_list:
                metrics_lists.append([])
                for summary in summaries:
                    preds = predictions[index]
                    metrics_lists[-1].append(MetricsDict({
                        'SumQE': {
                            'Q1': preds[0],
                            'Q2': preds[1],
                            'Q3': preds[2],
                            'Q4': preds[3],
                            'Q5': preds[4]
                        }
                    }))
                    index += 1
            return metrics_lists
    def score_multi_all(self, summaries_list: List[List[SummaryField]]) -> List[List[MetricsDict]]:
        # Just take the summaries themselves, not the fields
        summaries_list = [[field.summary for field in fields] for fields in summaries_list]
        return self._run(summaries_list)
class SumQESetupSubcommand(Subcommand):
    """`sum-qe` setup subcommand: clones the SumQE repo and downloads models."""
    @overrides
    def add_subparser(self, parser: argparse._SubParsersAction):
        self.parser = parser.add_parser('sum-qe')
        # argparse maps the dashes to underscores (args.download_2005_2006_model).
        self.parser.add_argument('--download-2005-2006-model', action='store_true')
        self.parser.add_argument('--download-2005-2007-model', action='store_true')
        self.parser.add_argument('--download-2006-2007-model', action='store_true')
        self.parser.set_defaults(subfunc=self.run)
    @overrides
    def run(self, args):
        # Clone the SumQE repo under DATA_ROOT, then fetch whichever
        # pretrained model files were requested on the command line.
        commands = [
            f'mkdir -p {DATA_ROOT}/metrics',
            f'cd {DATA_ROOT}/metrics',
            f'git clone https://github.com/danieldeutsch/SumQE',
            f'mkdir -p SumQE/models'
        ]
        if args.download_2005_2006_model:
            commands.append('wget https://danieldeutsch.s3.amazonaws.com/sacrerouge/metrics/SumQE/models/multitask_5-duc2005_duc2006.npy -O SumQE/models/multitask_5-duc2005_duc2006.npy')
        if args.download_2005_2007_model:
            commands.append('wget https://danieldeutsch.s3.amazonaws.com/sacrerouge/metrics/SumQE/models/multitask_5-duc2005_duc2007.npy -O SumQE/models/multitask_5-duc2005_duc2007.npy')
        if args.download_2006_2007_model:
            commands.append('wget https://danieldeutsch.s3.amazonaws.com/sacrerouge/metrics/SumQE/models/multitask_5-duc2006_duc2007.npy -O SumQE/models/multitask_5-duc2006_duc2007.npy')
        command = ' && '.join(commands)
        process = Popen(command, shell=True)
        process.communicate()
        # Popen with shell=True: check the shell's exit status for success.
        if process.returncode == 0:
            print('SumQE setup success')
        else:
            print('SumQE setup failure')
|
[
"danfdeutsch@gmail.com"
] |
danfdeutsch@gmail.com
|
436bd3b89520e5808d16be5ff1543ca6a878491e
|
79bf797423e4c591e33b199ae578fff328c811cd
|
/practico_02/ejercicio_04.py
|
9503962a31047e707994cd7f7088dd3dae758180
|
[] |
no_license
|
DanielDruetta/frro-soporte-2019-25
|
6512c7c7ebaca8429883a09dbaac5c8b4e49bf0c
|
5244116177a67023694cfd6966ff35d22d31c284
|
refs/heads/master
| 2020-04-29T05:15:10.050444
| 2019-11-01T18:46:54
| 2019-11-01T18:46:54
| 175,875,936
| 0
| 0
| null | 2019-08-16T18:44:23
| 2019-03-15T18:58:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
# Escribir una clase Estudiante, que herede de Persona, y que agregue las siguientes condiciones:
# Atributos:
# - nombre de la carrera.
# - año de ingreso a la misma.
# - cantidad de materias de la carrera.
# - cantidad de materias aprobadas.
# Métodos:
# - avance(): indica que porcentaje de la carrera tiene aprobada.
# - edad_ingreso(): indica que edad tenia al ingresar a la carrera (basándose en el año actual).
import time
from practico_02.ejercicio_03 import Persona
class Estudiante(Persona):
    """A Persona enrolled in a degree programme, with progress tracking."""

    def __init__(self, nombre, edad, sexo, peso, altura, carrera,
                 anioIngreso, cantidadMaterias, cantidadMateriasAprobadas):
        super().__init__(nombre, edad, sexo, peso, altura)
        self.carrera = carrera
        self.anioIngreso = anioIngreso
        self.cantidadMaterias = cantidadMaterias
        self.cantidadMateriasAprobadas = cantidadMateriasAprobadas

    def avance(self):
        """Percentage of the degree passed, as a string with two decimals."""
        fraccion = self.cantidadMateriasAprobadas / self.cantidadMaterias
        return f"{fraccion * 100:.2f}"

    def edad_ingreso(self):
        """Age on entering the degree, based on the current calendar year."""
        anio_actual = int(time.strftime('%Y'))
        return self.edad - (anio_actual - self.anioIngreso)
# Import-time smoke test.
# NOTE(review): edad_ingreso() uses the *current* year, so the expected value
# 18 only holds for the year the exercise assumed (edad 22, ingreso 2015
# implies it was written to run in 2019) — this assert will fail later.
estudiante=Estudiante('Agustin Yurescia',22,'H',69.60,1.75,'ISI',2015,41,27)
assert estudiante.edad_ingreso() == 18
assert estudiante.avance() == '65.85'
|
[
"franmrivera@gmail.com"
] |
franmrivera@gmail.com
|
65ce00e4875680bca80aae59bd314f775465f09e
|
5430cd3b483c858567f1687b0bed43eccd4d0fe0
|
/gpregression_gpy/main.py
|
4981085b19c277a8043ea4726eb72657fbb8eb5f
|
[] |
no_license
|
roholazandie/gaussian_process_tutorial
|
a1b46c712d31545ee428db04a511150933902de8
|
f7efbaa534f6834447cb77a97e5162efd92f9a1a
|
refs/heads/master
| 2021-02-14T15:40:12.649801
| 2020-05-21T16:17:37
| 2020-05-21T16:17:37
| 244,816,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
import numpy as np
from GPy.kern import Matern32, GridRBF, RBF
from GPy.models import GPRegression
import pods
import matplotlib.pyplot as plt
# read data
from visualization import plot_gp
# Olympic marathon winning times: X = year, Y = pace.
data = pods.datasets.olympic_marathon_men()
x_train = data["X"]
y_train = data["Y"]
# choose a kernel (GPRegression defaults to RBF when none is passed)
#kernel = Matern32(input_dim=1, variance=2.0)
#kernel = GridRBF(input_dim=1)
#kernel = RBF(input_dim=1, variance=2.0)
# GP regression; optimize the hyperparameters by maximizing the log likelihood
gp_regression = GPRegression(x_train, y_train)
#gp_regression.kern.lengthscale = 500
#gp_regression.likelihood.variance = 0.001
print("loglikelihood: ", gp_regression.log_likelihood())
gp_regression.optimize()
print("loglikelihood: ", gp_regression.log_likelihood())
# predict new unseen samples over 1870-2030 (column vector for GPy)
x_test = np.linspace(1870, 2030, 200)[:, np.newaxis]
yt_mean, yt_var = gp_regression.predict(x_test)
yt_sd = np.sqrt(yt_var)
# draw some samples from the posterior
samples = gp_regression.posterior_samples(x_test, size=1).squeeze(1)
# plot mean, variance band, training data and posterior samples
plot_gp(yt_mean, yt_var, x_test, X_train=x_train, Y_train=y_train, samples=samples)
|
[
"hilbert.cantor@gmail.com"
] |
hilbert.cantor@gmail.com
|
c02214574892daaeca723ea71aff7f4af91b48a2
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/d67af04c45b8cfc4e7e19cf2d2af2f980db88e7d-<_get_body>-fix.py
|
636c93a950b87104066d3e66dc628ca3297088b7
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
def _get_body(self, commands, output, reqid=None):
    """Build an eAPI JSON-RPC 2.0 request message for the given commands."""
    if output not in EAPI_FORMATS:
        self._error(msg='invalid format, received %s, expected one of %s'
                        % (output, ', '.join(EAPI_FORMATS)))
    return {
        'jsonrpc': '2.0',
        'id': reqid,
        'method': 'runCmds',
        'params': {'version': 1, 'cmds': commands, 'format': output},
    }
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c79b99caca7a967e2ae90f0c8801dfaa8b685e62
|
257cd39cce602506f6c892584f79180f96ce8729
|
/artevenue/migrations/0059_remove_amazon_data_parent_key.py
|
24664ad5899666727752f7395adff6e692cd06a5
|
[] |
no_license
|
santhoshanandhan/artevenue
|
7cbfac2e4ef8f03f44c085a8ce3823504a8ecc7e
|
0ce86149a0b706cb2ffa672b7b066e3bfeeef74c
|
refs/heads/master
| 2022-12-26T12:34:55.685330
| 2020-10-13T07:04:28
| 2020-10-13T07:04:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# Generated by Django 2.2.4 on 2019-10-21 05:42
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the `parent_key` field from the amazon_data model."""
    dependencies = [
        ('artevenue', '0058_auto_20191021_1101'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='amazon_data',
            name='parent_key',
        ),
    ]
|
[
"shekhart@hotmail.com"
] |
shekhart@hotmail.com
|
9164964ff22bbcd7e054324032425a0ebdfb679c
|
aaaf3b641bbc03180646c427bbfc52510357e55e
|
/models_inheritance/wsgi.py
|
dc5064ebe45456dddd93e47d2a99208ebd5499a4
|
[] |
no_license
|
deepanshu-jain1999/inheritance_in_model
|
5b47677575b1c8c9d6168b1ba1979a231ed78ba2
|
2d3080a5affa561aacc02081252d6e8ebb90d7dc
|
refs/heads/master
| 2021-04-12T09:52:47.816940
| 2018-03-23T20:02:57
| 2018-03-23T20:02:57
| 126,522,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for models_inheritance project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the settings module before the application object is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "models_inheritance.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
|
[
"deepanshuj1999@gmail.com"
] |
deepanshuj1999@gmail.com
|
926f48063b1bb48ae00d07dc888717f3e602f13d
|
3451a6d056098c83ff517960d1ecef51b35d266e
|
/blog_02/app_usuarios/views.py
|
08f0a3f2b0f0672632b0e4a96daf608083748fed
|
[] |
no_license
|
luisdebia123/blog_2
|
aa9e3f03ebafdbad8741d30205dd1de5c48b83d0
|
dc945114b7558e03e72dc084b48692a16fd8cee2
|
refs/heads/master
| 2023-05-28T11:54:03.444971
| 2021-06-17T04:23:14
| 2021-06-17T04:23:14
| 377,480,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
# app_detalle
import json
from django.shortcuts import render, redirect, get_object_or_404 #
from django.conf import settings
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login
import random
import csv
from django.contrib import messages #
from django.contrib.messages.views import SuccessMessageMixin #
from functools import wraps
from urllib.parse import urlparse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
from django.shortcuts import reverse, redirect #
from django.utils.http import urlencode
from django import forms #
#---------------------------------------------#
#from .forms import CustomUserCreationForm (sólo si esta creado en el forms.py)
from .models import Usuarios #
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView #
from django.views.generic import TemplateView
#from django.views.generic.edit import CreateView, UpdateView, DeleteView #
# Create your views here.
# app_usuarios #
#def index(request):
# return render(request,'app_categorias/index.html')
class UsuariosListView(ListView) :
model = Usuarios
template_name = 'app_usuarios/index.html'
class Create_Usuarios(TemplateView) :
template_name = 'app_usuarios/crear.html'
|
[
"luis.debia@gmail.com"
] |
luis.debia@gmail.com
|
08116a00157fc76d178c93e63aba8866d03e1c4e
|
b9963ffb80aad7e057bc375edb85ac7ed5a837d0
|
/adventofcode2019/02.py
|
1f51e1764d11d946029f5ae774e31ffbf6f4705c
|
[
"MIT"
] |
permissive
|
matslindh/codingchallenges
|
a2db9f4579e9f35189f5cdf74590863cf84bdf95
|
a846e522f7a31e988c470cda87955ee3ef20a274
|
refs/heads/main
| 2022-12-23T15:56:19.776354
| 2022-12-15T21:03:37
| 2022-12-15T21:03:37
| 76,491,177
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
def run(memory):
pc = 0
while memory[pc] != 99:
if memory[pc] == 1:
memory[memory[pc+3]] = memory[memory[pc+1]] + memory[memory[pc+2]]
elif memory[pc] == 2:
memory[memory[pc+3]] = memory[memory[pc+1]] * memory[memory[pc+2]]
pc += 4
return memory
def evaluate(s):
memory = [int(r.strip()) for r in s.split(',')]
return run(memory)
def find_parameters(s, goal):
memory = [int(r.strip()) for r in s.split(',')]
for x in range(0, 99):
for y in range(0, 99):
mem = list(memory)
mem[1] = x
mem[2] = y
if run(mem)[0] == goal:
return x,y
def test_evaluate():
assert evaluate('1,0,0,0,99') == [2,0,0,0,99]
assert evaluate('2,3,0,3,99') == [2,3,0,6,99]
assert evaluate('2,4,4,5,99,0') == [2,4,4,5,99,9801]
assert evaluate('1,1,1,4,99,5,6,0,99') == [30,1,1,4,2,5,6,0,99]
if __name__ == '__main__':
print(evaluate('1,12,2,3,1,1,2,3,1,3,4,3,1,5,0,3,2,13,1,19,1,19,10,23,2,10,23,27,1,27,6,31,1,13,31,35,1,13,35,39,1,39,10,43,2,43,13,47,1,47,9,51,2,51,13,55,1,5,55,59,2,59,9,63,1,13,63,67,2,13,67,71,1,71,5,75,2,75,13,79,1,79,6,83,1,83,5,87,2,87,6,91,1,5,91,95,1,95,13,99,2,99,6,103,1,5,103,107,1,107,9,111,2,6,111,115,1,5,115,119,1,119,2,123,1,6,123,0,99,2,14,0,0'))
print(find_parameters('1,12,2,3,1,1,2,3,1,3,4,3,1,5,0,3,2,13,1,19,1,19,10,23,2,10,23,27,1,27,6,31,1,13,31,35,1,13,35,39,1,39,10,43,2,43,13,47,1,47,9,51,2,51,13,55,1,5,55,59,2,59,9,63,1,13,63,67,2,13,67,71,1,71,5,75,2,75,13,79,1,79,6,83,1,83,5,87,2,87,6,91,1,5,91,95,1,95,13,99,2,99,6,103,1,5,103,107,1,107,9,111,2,6,111,115,1,5,115,119,1,119,2,123,1,6,123,0,99,2,14,0,0', 19690720))
|
[
"mats@lindh.no"
] |
mats@lindh.no
|
84c5d9573226e76a809023134456aa0ebbf95103
|
639359b9cfc88e02968923c9dfc57d626cdaec9b
|
/boardapp/board/migrations/0004_board.py
|
d7726e44bbd0dfeab8ee0697fc670fc156bdeaec
|
[] |
no_license
|
ElvinKim/django-angular-board-project
|
ee220585a1f64804dff718066ca2d00f749e8c6c
|
de06a560c16a4f1db66afb15e54471ad0e9d104b
|
refs/heads/master
| 2021-01-20T21:11:51.009496
| 2019-02-09T07:04:46
| 2019-02-09T07:04:46
| 62,854,114
| 3
| 1
| null | 2018-11-07T09:46:29
| 2016-07-08T02:55:53
|
Python
|
UTF-8
|
Python
| false
| false
| 939
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-11 05:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('board', '0003_delete_board'),
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('user', models.IntegerField(default=0)),
('view_cnt', models.IntegerField(default=0)),
('moddt', models.DateTimeField()),
('regdt', models.DateTimeField()),
],
options={
'db_table': 'tbl_board',
},
),
]
|
[
"bonwho09@gmail.com"
] |
bonwho09@gmail.com
|
e81b788920912c0d9f56722dd3d855601c8582d6
|
40b42ccf2b6959d6fce74509201781be96f04475
|
/mmocr/datasets/base_dataset.py
|
5a39bf46548e9d20996abe52a3d2ffdda518eaeb
|
[
"Apache-2.0"
] |
permissive
|
xdxie/WordArt
|
2f1414d8e4edaa89333353d0b28e5096e1f87263
|
89bf8a218881b250d0ead7a0287526c69586c92a
|
refs/heads/main
| 2023-05-23T02:04:22.185386
| 2023-03-06T11:51:43
| 2023-03-06T11:51:43
| 515,485,694
| 106
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,469
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.utils import print_log
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.pipelines import Compose
from torch.utils.data import Dataset
from mmocr.datasets.builder import build_loader
@DATASETS.register_module()
class BaseDataset(Dataset):
"""Custom dataset for text detection, text recognition, and their
downstream tasks.
1. The text detection annotation format is as follows:
The `annotations` field is optional for testing
(this is one line of anno_file, with line-json-str
converted to dict for visualizing only).
.. code-block:: json
{
"file_name": "sample.jpg",
"height": 1080,
"width": 960,
"annotations":
[
{
"iscrowd": 0,
"category_id": 1,
"bbox": [357.0, 667.0, 804.0, 100.0],
"segmentation": [[361, 667, 710, 670,
72, 767, 357, 763]]
}
]
}
2. The two text recognition annotation formats are as follows:
The `x1,y1,x2,y2,x3,y3,x4,y4` field is used for online crop
augmentation during training.
format1: sample.jpg hello
format2: sample.jpg 20 20 100 20 100 40 20 40 hello
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
loader (dict): Dictionary to construct loader
to load annotation infos.
img_prefix (str, optional): Image prefix to generate full
image path.
test_mode (bool, optional): If set True, try...except will
be turned off in __getitem__.
"""
def __init__(self,
ann_file,
loader,
pipeline,
img_prefix='',
test_mode=False):
super().__init__()
self.test_mode = test_mode
self.img_prefix = img_prefix
self.ann_file = ann_file
# load annotations
loader.update(ann_file=ann_file)
self.data_infos = build_loader(loader)
# processing pipeline
self.pipeline = Compose(pipeline)
# set group flag and class, no meaning
# for text detect and recognize
self._set_group_flag()
self.CLASSES = 0
def __len__(self):
return len(self.data_infos)
def _set_group_flag(self):
"""Set flag."""
self.flag = np.zeros(len(self), dtype=np.uint8)
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
def prepare_train_img(self, index):
"""Get training data and annotations from pipeline.
Args:
index (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.data_infos[index]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, img_info):
"""Get testing data from pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by
pipeline.
"""
return self.prepare_train_img(img_info)
def _log_error_index(self, index):
"""Logging data info of bad index."""
try:
data_info = self.data_infos[index]
img_prefix = self.img_prefix
print_log(f'Warning: skip broken file {data_info} '
f'with img_prefix {img_prefix}')
except Exception as e:
print_log(f'load index {index} with error {e}')
def _get_next_index(self, index):
"""Get next index from dataset."""
self._log_error_index(index)
index = (index + 1) % len(self)
return index
def __getitem__(self, index):
"""Get training/test data from pipeline.
Args:
index (int): Index of data.
Returns:
dict: Training/test data.
"""
if self.test_mode:
return self.prepare_test_img(index)
while True:
try:
data = self.prepare_train_img(index)
if data is None:
raise Exception('prepared train data empty')
break
except Exception as e:
print_log(f'prepare index {index} with error {e}')
index = self._get_next_index(index)
return data
def format_results(self, results, **kwargs):
"""Placeholder to format result to dataset-specific output."""
pass
def evaluate(self, results, metric=None, logger=None, **kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str: float]
"""
raise NotImplementedError
|
[
"xudongxie77@gmail.com"
] |
xudongxie77@gmail.com
|
754c57955dc4711c3f1b16359dad22056c067dde
|
d97b9dc98c65ed3114a6449e0dab9c6c9bd1c01a
|
/tests/parser_tests.py
|
7a9a7b1dc3b44895dab24b0cf46a9bec9402325c
|
[
"MIT"
] |
permissive
|
blitzmann/evepaste
|
19f7edbb0d15e44e898f575952d6322fe60a18ce
|
78b2ca1553b0773e68978bce6858d37f445a927e
|
refs/heads/master
| 2021-01-18T09:28:00.967532
| 2014-07-23T05:43:28
| 2014-07-23T05:43:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
from evepaste import parse
from tests import parsers, TableChecker
ALL_TABLES = [parsers.ASSET_TABLE,
parsers.BOM_TABLE,
parsers.CARGO_SCAN_TABLE,
parsers.CHAT_TABLE,
parsers.CONTRACT_TABLE,
parsers.DSCAN_TABLE,
parsers.EFT_TABLE,
parsers.FITTING_TABLE,
parsers.KILLMAIL_TABLE,
parsers.LOOT_HISTORY_TABLE,
parsers.PI_TABLE,
parsers.SURVEY_SCANNER_TABLE,
parsers.VIEW_CONTENTS_TABLE,
parsers.WALLET_TABLE]
def test_generator():
# Perform each table test with their associated callable
for table in ALL_TABLES + [parsers.PARSE_TABLE, parsers.LISTING_TABLE]:
for i, (input_str, expected) in enumerate(table.tests):
name = ('test_%s[%s]' % (str(table.funct.__name__), i))
checker = TableChecker(table.funct, name)
yield checker, input_str, expected
# Perform each table test with parse() instead of the associated callable
for table in ALL_TABLES:
for i, (input_str, expected) in enumerate(table.tests):
if isinstance(expected, tuple) and not expected[1]:
name = 'test_parse(%s)[%s]' % (str(table.funct.__name__), i)
checker = TableChecker(parse, name)
result, bad_lines = expected
_type = table.funct.__name__.split('_', 1)[1]
yield checker, input_str, (_type, result, bad_lines)
|
[
"k3vinmcdonald@gmail.com"
] |
k3vinmcdonald@gmail.com
|
812face81685d31ba6466f309191646f1a14e8ad
|
cf4958e16c65c4445d0c3e749bb407a452a1d722
|
/tests/identify/test_identify.py
|
2a4a77aacfaad76c0adf1e9794bbf74a80fecb94
|
[
"Apache-2.0"
] |
permissive
|
greschd/NodeFinder
|
a4775566f22c85f880bd34ea4bcbc24e4e006000
|
0204789afb13fcd1ffb86fd3c013e7debafb2590
|
refs/heads/dev
| 2021-10-08T04:14:51.391769
| 2020-03-13T20:37:59
| 2020-03-13T20:37:59
| 96,325,459
| 3
| 2
|
Apache-2.0
| 2020-03-13T20:38:01
| 2017-07-05T14:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
# -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the identify step.
"""
import os
import pytest
import numpy as np
import nodefinder as nf
@pytest.fixture
def run_identify(sample):
"""
Fixture to run the identification step for a given sample file.
"""
def inner(sample_name):
search_res = nf.io.load(sample(os.path.join('search', sample_name)))
return nf.identify.run(search_res)
return inner
@pytest.fixture
def run_single_identify(run_identify): # pylint: disable=redefined-outer-name
"""
Fixture to run the identification step for a given sample file which should
contain only one cluster, and return the result for that cluster.
"""
def inner(sample_name):
res = run_identify(sample_name)
assert len(res) == 1
return res[0]
return inner
def test_point(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that a point is correctly identified.
"""
res = run_single_identify('point.hdf5')
assert res.dimension == 0
assert np.allclose(res.shape.position, [0.2, 0.4, 0.8])
def test_line(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that a line is correctly identified.
"""
res = run_single_identify('line.hdf5')
assert res.dimension == 1
assert len(res.shape.graph.nodes) > 10
assert res.shape.degree_count == dict()
def test_surface(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that a surface is correctly identified.
"""
res = run_single_identify('surface.hdf5')
assert res.dimension == 2
def test_two_lines(run_identify): # pylint: disable=redefined-outer-name
"""
Test that two 2D lines are correctly identified.
"""
res = run_identify('two_lines.hdf5')
assert len(res) == 2
for identified_object in res:
assert identified_object.shape.degree_count == dict()
assert identified_object.dimension == 1
assert len(identified_object.shape.graph.nodes) > 10
def test_cross(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that the cross is identified without holes.
"""
res = run_single_identify('cross.hdf5')
assert res.dimension == 1
assert res.shape.degree_count == {4: 1}
def test_open_line(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that an open line is identified correctly.
"""
res = run_single_identify('line_open.hdf5')
assert res.dimension == 1
assert res.shape.degree_count == {1: 2}
positions = np.array(list(res.shape.graph.nodes))
assert np.min(positions) < -0.49
assert np.max(positions) > 0.49
def test_line_short_paths(run_single_identify): # pylint: disable=redefined-outer-name
"""
Test that a short line is identified correctly, without holes.
"""
res = run_single_identify('line_1d.hdf5')
assert res.dimension == 1
assert res.shape.degree_count == dict()
|
[
"greschd@gmx.ch"
] |
greschd@gmx.ch
|
c5c9b79a713e8c961f1d210aadef8e76768b6c54
|
adfb55c4fe2027986cabb49bf58081241bbba5cc
|
/SingletonPyton/SingletonPY.py
|
d5768bd1084ab78954806511859b90bec435b92c
|
[] |
no_license
|
juanmontes1224/SingletonJavayPython
|
04003061fcf0a990455689c90fa227e8e921541b
|
68cd5f09709d503b8ab755676cc357bda7442077
|
refs/heads/master
| 2020-07-25T00:18:23.250152
| 2019-09-12T16:34:11
| 2019-09-12T16:34:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
import singleton
class PatronSingleton:
def __new__(obj):
try:
obj.instance
except AttributeError:
obj.instance = super(PatronSingleton, obj).__new__(obj)
return obj.instance
x = singleton.PatronSingleton()
y = singleton.PatronSingleton()
print("LA INSTANCIA "+str(x)+ " ")
print("ES IGUAL A LA INSTANCIA "+str(y)+" ")
|
[
"you@example.com"
] |
you@example.com
|
6a80192155d9e0511c8cc5db8443d8d3333429d0
|
f70da0d011ad2d96ffd6a693e6cd36f1e1df56cb
|
/Proyecto2/Optimization/Instructions/Assignment.py
|
fd3c7e81489ded2d4756af82502cbb7a0c91433f
|
[] |
no_license
|
diemorales96/OLC_Proyecto2_201503958
|
aeaa2ba013f9ed643b324537cc6493710d407226
|
7779c619e635b7dc7cc2e47130a1c654ac84889a
|
refs/heads/main
| 2023-09-02T05:11:57.732595
| 2021-11-13T04:54:04
| 2021-11-13T04:54:04
| 427,418,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
from Optimization.C3DInstruction import *
from Optimization.Expressions import *
class Assignment(C3DInstruction):
def __init__(self, place, exp, line, column):
C3DInstruction.__init__(self, line, column)
self.place = place
self.exp = exp
def selfAssignment(self):
if type(self.exp) is Literal:
aux = self.place.getCode() == self.exp.getCode()
else:
aux = self.place.getCode() == self.exp.right.getCode() or self.place.getCode() == self.exp.left.getCode()
return aux
def getCode(self):
if self.deleted:
return ''
return f'{self.place.getCode()} = {self.exp.getCode()};'
|
[
"diemorab@gmail.com"
] |
diemorab@gmail.com
|
39887b28df78f262c8bbab74d1632da71f0305cb
|
13130259156f6f9d16670cea88aa2608dd477d16
|
/goeievraag/api.py
|
da5550ee4d634d841e456869f5e506e812c80368
|
[] |
no_license
|
fkunneman/DiscoSumo
|
d459251d543be5f4df38292a96f52baf4b520a0b
|
ed8f214834cf0c2e04a3bc429253502f7e79fbf8
|
refs/heads/master
| 2022-12-14T13:34:41.496963
| 2019-07-31T15:57:02
| 2019-07-31T15:57:02
| 140,422,779
| 2
| 1
| null | 2022-12-08T04:57:55
| 2018-07-10T11:36:00
|
Python
|
UTF-8
|
Python
| false
| false
| 689
|
py
|
import json
from flask import Flask, request
from main import GoeieVraag
app = Flask(__name__)
model = GoeieVraag()
@app.route("/rank", methods=['GET'])
def search():
'''
:return: return the 10 most semantic-similar questions to the query based on our official sysmte
'''
questions = {'code': 400}
query, method = '', 'ensemble'
if 'q' in request.args:
query = request.args['q'].strip()
if 'method' in request.args:
method = request.args['method'].strip()
if request.method == 'GET':
questions = model(query=query.strip(), method=method)
questions = { 'code':200, 'result': questions }
return json.dumps(questions)
|
[
"thiago.castro.ferreira@gmail.com"
] |
thiago.castro.ferreira@gmail.com
|
88aef09482138586ab261da2c44dcb42137269ec
|
e8912ed90e97730b465b1e65084c1dbcc741a73e
|
/기본/Cage D4/practice.py
|
61641fe72db9d47335a7aec6862d65842133b0aa
|
[] |
no_license
|
yhnb3/Algorithm_lecture
|
a0dcefc27ed17bec3cadae56d69e3cc64239cbfb
|
461367e907e2b8a6a0cdc629e6a9029d9b03fba1
|
refs/heads/master
| 2020-12-22T05:39:05.412680
| 2020-04-10T09:16:19
| 2020-04-10T09:16:19
| 236,685,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
import sys
sys.stdin = open('samsung_bus.txt')
T = int(input())
for tc in range(1, T + 1):
N = int(input())
A = [list(map(int, input().split())) for _ in range(N)]
P = int(input())
C = [list(map(int, input())) for _ in range(P)]
cnt = [0] * 5002
print(C)
for i in range(N):
for j in range(A[i][0], A[i][1] + 1):
cnt[j] += 1
print('#{}'.format(tc), end='')
for a in range(P):
print(' {}'.format(cnt[C[a][0]]), end='')
print()
|
[
"yhnb33@gmail.com"
] |
yhnb33@gmail.com
|
8207e03ca2def6f397ee315a6a5d7197f231c0b7
|
e51c1abe850bacebaed50db66cf05f613db8c2f6
|
/djangoevents/djangoevents/settings.py
|
b13380b36ea4611488a6ef4c35face2ce45651f9
|
[] |
no_license
|
zurcx/bkp_provi_djangoevents
|
26a5932c170b52b2dd0e983265778d35bb9103ad
|
b7b4e5ec6ca3730c0c52b7e10341b10157104737
|
refs/heads/master
| 2021-01-19T13:33:25.967838
| 2013-06-10T12:43:08
| 2013-06-10T12:43:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,302
|
py
|
# Django settings for djangoevents project.
import os
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'djangoevents.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static_files')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'yg2qqmd0xlo)1ybqq)cyxz-*7w*n^3ds_oo7j3^&q=#*a3b6qa'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djangoevents.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'djangoevents.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'core',
'events',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
[
"luizfabiodacruz@gmail.com"
] |
luizfabiodacruz@gmail.com
|
2b40d9cd03bf736e19b0d5cf91b979b04c27b6f2
|
4f3e9294fff97ec4d14bfa43e45501a5ba53de00
|
/ver0028/py_subplot2.py
|
c752457f7da5ccf4fa5e05eaba84f239d2189e43
|
[] |
no_license
|
corrod/mittet_main
|
6e7264df1d93d8f00437d4ab153d9c906057ce13
|
4dc60eee8ca1daf9ef70fd10ed300b6fa6a1e67d
|
refs/heads/master
| 2021-01-06T20:38:30.150942
| 2015-02-26T05:17:44
| 2015-02-26T05:17:44
| 20,375,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
# coding:utf-8
from pylab import *
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
f = figure()
subplots_adjust(hspace=0.001)
ax1 = subplot(411)
data1 = np.loadtxt('./ex1010.d')
x1 = data1[:,0]
y1 = data1[:,1]
plt.plot(x1,y1)
ax2 = subplot(412)
data2 = np.loadtxt('./ex1020.d')
x2 = data2[:,0]
y2 = data2[:,1]
plt.plot(x2,y2)
ax3 = subplot(413)
data3 = np.loadtxt('./ex1030.d')
x3 = data3[:,0]
y3 = data3[:,1]
plt.plot(x3,y3)
# ax4 = subplot(414)
# data4 = np.loadtxt('./ex1040.d')
# x4 = data4[:,0]
# y4 = data4[:,1]
# plt.plot(x4,y4)
# ax5 = subplot(615)
# data5 = np.loadtxt('./hz1040.d')
# x5 = data5[:,0]
# y5 = data5[:,1]
# plt.plot(x5,y5)
# ax6 = subplot(616)
# data6 = np.loadtxt('./hz1050.d')
# x6 = data6[:,0]
# y6 = data6[:,1]
# plt.plot(x6,y6)
show()
# from pylab import *
# t = arange(0.0, 2.0, 0.01)
# s1 = sin(2*pi*t)
# s2 = exp(-t)
# s3 = s1*s2
# # axes rect in relative 0,1 coords left, bottom, width, height. Turn
# # off xtick labels on all but the lower plot
# f = figure()
# subplots_adjust(hspace=0.001)
# ax1 = subplot(311)
# ax1.plot(t,s1)
# yticks(arange(-0.9, 1.0, 0.4))
# ylim(-1,1)
# ax2 = subplot(312, sharex=ax1)
# ax2.plot(t,s2)
# yticks(arange(0.1, 1.0, 0.2))
# ylim(0,1)
# ax3 = subplot(313, sharex=ax1)
# ax3.plot(t,s3)
# yticks(arange(-0.9, 1.0, 0.4))
# ylim(-1,1)
# xticklabels = ax1.get_xticklabels()+ax2.get_xticklabels()
# setp(xticklabels, visible=False)
# show()
|
[
"takuya025@gmail.com"
] |
takuya025@gmail.com
|
a1638f815e06cb7c87660b8bb36c0ca73466c002
|
aa9f21f74196960ab57ffa0ff664b2a19de17e0b
|
/surrogate/files/moeaJSON.py
|
18cd24118fa15aa75333f0d8743d49d67f29db5d
|
[
"MIT"
] |
permissive
|
onlymezhong/Surrogate-Model
|
36a73a40f38bc9e4d86123a2a12f11a369195024
|
1e47b5eaff74ee3491ef436dd604fe2bc3af8f57
|
refs/heads/master
| 2021-04-28T22:54:48.947536
| 2016-12-22T15:38:09
| 2016-12-22T15:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,651
|
py
|
import json
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import cm
class JSON(object):
def __init__(self, fileName, numVar, numPop, numCon, numObj, numGen):
self.fileName = fileName
self.numPop = numPop
self.numVar = numVar
self.numCon = numCon
self.numObj = numObj
self.numGen = numGen
def writeHeader(self):
outFile = open(self.fileName, "wt")
outFile.write("{\n")
outFile.write("\"generation\": [\n")
outFile.close()
def writeEnd(self):
outFile = open(self.fileName, "a")
outFile.write("]\n}\n")
outFile.close()
def writePareto(self, individuals, igen):
outFile = open(self.fileName, "a")
outFile.write(" {\n")
outFile.write(" \"variable\" : [")
outFile.write("[%f" % (individuals[0].variable[0]))
for j in range(1, self.numVar):
outFile.write(",%f" % (individuals[0].variable[j]))
outFile.write("]")
for i in range(1, self.numPop):
outFile.write(",[%f" % (individuals[i].variable[0]))
for j in range(1, self.numVar):
outFile.write(",%f" % (individuals[i].variable[j]))
outFile.write("]")
outFile.write("],\n")
outFile.write(" \"objective\" : [[")
outFile.write("[%f" % (individuals[0].fitness.values[0]))
for j in range(1, self.numObj):
outFile.write(",%f" % (individuals[0].fitness.values[j]))
outFile.write("]")
for i in range(1, self.numPop):
outFile.write(",[%f" % (individuals[i].fitness.values[0]))
for j in range(1, self.numObj):
outFile.write(",%f" % (individuals[i].fitness.values[j]))
outFile.write("]")
outFile.write("]]")
if self.numCon > 0:
outFile.write(",")
outFile.write("\n")
if self.numCon > 0:
outFile.write(" \"constraint\" : [")
outFile.write("[%f" % (individuals[0].constraint[0]))
for j in range(1, self.numCon):
outFile.write(",%f" % (individuals[0].constraint[j]))
outFile.write("]")
for i in range(1, self.numPop):
outFile.write(",[%f" % (individuals[i].constraint[0]))
for j in range(1, self.numCon):
outFile.write(",%f" % (individuals[i].constraint[j]))
outFile.write("]")
outFile.write("]")
outFile.write("\n")
outFile.write(" }")
if igen < self.numGen - 1:
outFile.write(",")
outFile.write("\n")
outFile.close()
def plot_json(self):
with open(self.fileName) as data_file:
data = json.load(data_file)
gen = data["generation"]
gen_tot = len(gen)
color = iter(cm.gray(np.linspace(1, 0.1, gen_tot)))
# color = iter(cm.rainbow(np.linspace(0,1,gen_tot)))
for index, item in enumerate(gen):
obj = item["objective"][0]
obj_tot = len(obj)
x = []
y = []
r = index / gen_tot
g = index / gen_tot
b = index / gen_tot
for iobj in obj:
x.append(iobj[0])
y.append(iobj[1])
plt.plot(x, y, '.', color=next(color), label=str(index))
plt.title('moea.json')
plt.xlabel('obj1')
# plt.xlim([0.7,1.1])
plt.ylabel('obj2')
# plt.ylim([6,9])
plt.grid(True)
# plt.legend(loc='best')
plt.show()
|
[
"quanpan302@hotmail.com"
] |
quanpan302@hotmail.com
|
d67369c8830125c0b3fbd5e61988f907aa13951b
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/solem/examples/ch1_morphology.py
|
e0fe53f9ce60b8c8db29fecba431b2392b479042
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676
| 2022-11-26T20:35:21
| 2022-11-26T20:35:21
| 60,254,898
| 0
| 2
| null | 2023-03-24T22:58:39
| 2016-06-02T10:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
from __future__ import print_function
from PIL import Image
from numpy import *
from scipy.ndimage import measurements, morphology
"""
This is the morphology counting objects example in Section 1.4.
"""
# load image and threshold to make sure it is binary
im = array(Image.open('./data/houses.png').convert('L'))
im = (im < 128)
labels, nbr_objects = measurements.label(im)
print(("Number of objects:", nbr_objects))
# morphology - opening to separate objects better
im_open = morphology.binary_opening(im, ones((9, 5)), iterations=2)
labels_open, nbr_objects_open = measurements.label(im_open)
print(("Number of objects:", nbr_objects_open))
|
[
"gmonkman@mistymountains.biz"
] |
gmonkman@mistymountains.biz
|
1ff723030c57bb8f1374e9cf2945a6d7a1a2132d
|
fd8f429f146ae2e4455b5f9a19dfdfec9dcaaabf
|
/utils/fairseq_mod/fairseq_mod/modules/multi_head_attention_temp.py
|
206260b43ac447601c45434adb4acedd15a7021b
|
[
"MIT"
] |
permissive
|
mayank-k-jha/Knowledge-Distillation-Toolkit
|
274448a10b7f9108bf886120481137e77eb6c348
|
2d4a2b87c757462a980f0ed0c2a7a9ccea0be683
|
refs/heads/main
| 2023-04-02T21:50:51.083071
| 2021-03-19T21:22:12
| 2021-03-19T21:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,605
|
py
|
"""
We simplified the logics in torch.nn.functional.multi_head_attention_forward, and only kept operations that are relevant to wav2vec 2.0.
"""
from __future__ import division
import warnings
import math
import torch
from torch._C import _infer_size, _add_docstr
from torch.nn import _reduction as _Reduction
from torch.nn.modules import utils
from torch.nn.modules.utils import _single, _pair, _triple, _list_with_default
from torch.nn import grad # noqa: F401
from torch import _VF
from torch._jit_internal import boolean_dispatch, List, Optional, _overload
from torch._overrides import has_torch_function, handle_torch_function
from torch.nn.functional import linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attention_forward_temp(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
q_proj_bias = None,
k_proj_bias = None,
v_proj_bias = None
):
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
# !!! Assume use_separate_proj_weight = True
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
# !!! Did not indexing in_proj_bias because biases are supplied separately
q = linear(query, q_proj_weight_non_opt, q_proj_bias)
k = linear(key, k_proj_weight_non_opt, k_proj_bias)
v = linear(value, v_proj_weight_non_opt, v_proj_bias)
q = q * scaling
# !!! Assumed that attn_mask is None
# !!! Assumed that key_padding_mask.dtype = torch.Bool
# !!! Assumed that bias_k and bias_v are None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
# !!! Assumed add_zero_attn is False
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
|
[
"zilunpeng@gmail.com"
] |
zilunpeng@gmail.com
|
76f9d4a607fd502c71c1b37b1e0d92d48b6e5780
|
6c14069181f313e84eeb524dd495e3882156ef50
|
/samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-ip-domain-cfg/nc-read-xr-ip-domain-cfg-10-ydk.py
|
f4b17cee4430379ce46492911c842f1526de80e5
|
[
"Apache-2.0"
] |
permissive
|
decolnz/ydk-py-samples
|
dde0fd64fd4df12a215588766a0f1fb8baf07fcd
|
7fa3f53c4d458c3332d372fb2fe3c46c5e036f07
|
refs/heads/master
| 2021-01-19T03:24:19.877929
| 2017-04-04T17:16:46
| 2017-04-04T17:16:46
| 87,310,389
| 1
| 0
| null | 2017-04-05T13:06:57
| 2017-04-05T13:06:57
| null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read all data for model Cisco-IOS-XR-ip-domain-cfg.
usage: nc-read-xr-ip-domain-cfg-10-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import logging
def process_ip_domain(ip_domain):
"""Process data in ip_domain object."""
pass
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
ip_domain = xr_ip_domain_cfg.IpDomain() # create object
# read data from NETCONF device
# ip_domain = crud.read(provider, ip_domain)
process_ip_domain(ip_domain) # process object data
provider.close()
exit()
# End of script
|
[
"saalvare@cisco.com"
] |
saalvare@cisco.com
|
f312c517f23c163fd877ebd3e6cd379560543899
|
4df3712caff818c0554e7fbe4b97dee5fcfd8675
|
/testCase/is/test_registerUsingPOST.py
|
b24a2919f9393b559958aa146eef8ac54bfcca4d
|
[] |
no_license
|
Qingyaya/interface
|
456057a740bd77ba6c38eda27dd1aef658e0add9
|
3ae37816f52ad8c45e192596a854848d8e546b14
|
refs/heads/master
| 2020-03-22T07:16:04.171904
| 2018-12-05T05:20:25
| 2018-12-05T05:20:25
| 139,690,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
#-*-coding:utf-8-*-
import unittest
import paramunittest
from common import myHttp
from common.get_csv import *
from common.Log import Log
from common.checkResult import checkResult
import os
apifile,datapath,casename=get_dataname(os.path.abspath(__file__))
load_csv=get_testdata(datapath)
package=get_package(os.path.abspath(__file__))
@paramunittest.parametrized(*load_csv)
class test_registerUsingPOST(unittest.TestCase):
def setParameters(self,caseId,caseName,assertKey,assertValue,params):
self.caseId=caseId
self.caseName=caseName
self.assertKey=assertKey
self.assertValue=assertValue
self.params=eval(params)
def setUp(self):
self.url=get_url(apifile,casename)
self.log=Log()
self.log.build_start_line(self.caseId+ ":"+ self.caseName)
def test_registerUsingPOST(self):
u"""eps_云超市用户注册"""
self.re=myHttp.post(self.url,self.params,package)
checkResult().ck(self.caseId,self.caseName,self.assertKey,self.assertValue,self.params,self.url,self.re)
def tearDown(self):
self.log.build_end_line(self.caseId +":"+ self.caseName)
if __name__ == "__main__":
unittest.main()
|
[
"dongchunyi@idscloud.cn"
] |
dongchunyi@idscloud.cn
|
22e90031b1c24c768d56487151773a8de59326f2
|
88e03e66109adb6325ccace96f37b31e15c5e86c
|
/docopt/example1.py
|
91b2d16a7304e135fafe8a1dc1496d307bbc53e8
|
[] |
no_license
|
abevieiramota/learning-python
|
53ee5d158af33f627c65a7d3960083a1242713ed
|
c9dfa37e5dd547ab03d1ff67932ff28be70bfbeb
|
refs/heads/master
| 2021-01-10T21:05:48.021976
| 2014-07-11T20:28:03
| 2014-07-11T20:28:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
"""Naval Fate.
Usage:
naval_fate.py ship new <name>...
naval_fate.py ship <name> move <x> <y> [--speed=<kn>]
naval_fate.py ship shoot <x> <y>
naval_fate.py mine (set|remove) <x> <y> [--moored|--drifting]
naval_fate.py -h | --help
naval_fate.py --version
Options:
-h --help Show this screen.
--version Show version.
--speed=<kn> Speed in knots [default: 10].
--moored Moored (anchored) mine.
--drifting Drifting mine."""
from docopt import docopt
args = docopt(__doc__, version='1.2.3.4')
print args
|
[
"abevieiramota@gmail.com"
] |
abevieiramota@gmail.com
|
a01ada960931c5a2d450ca04cd612c3e4ced1d26
|
4d2a789ccc9fac1380b80b558569e29a71980c0f
|
/backend/model/open_chat/chatterbot_chat/chattrbot_chat.py
|
ef256414eafbdae32a642ceff4bfb9c5f2f4a4f0
|
[] |
no_license
|
zhuxianzhong/libot
|
d493853ca847498b7ea12fc22b62c8a4f831dd62
|
59f70c4e41e36f3743bd659d0fb038bedb40be72
|
refs/heads/master
| 2020-07-13T12:53:11.850328
| 2019-08-28T12:41:16
| 2019-08-28T12:41:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
# -*- coding: utf-8 -*-
# File: chatterbot_chat.py
# Author: Hualong Zhang <nankaizhl@gmail.com>
# CreateDate: 19-03-07
import os
import sys
# 模块路径引用统一回退到Libbot目录下
project_path = os.path.abspath(os.path.join(os.getcwd(), "../.."))
sys.path.append(project_path)
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
import logging
# logging.basicConfig(level=logging.INFO)
class ChatterbotChat():
@classmethod
def create_chatterbot(cls):
"""
用语料训练一个chatbot
:return:
"""
cn_chatter = ChatBot("National Lib Chatter",
storage_adapter='chatterbot.storage.SQLStorageAdapter',
input_adapter='chatterbot.input.TerminalAdapter',
output_adapter='chatterbot.output.TerminalAdapter',
logic_adapters=[
'chatterbot.logic.BestMatch',
'chatterbot.logic.MathematicalEvaluation',
],
database='./db.sqlite3'
)
trainer = ChatterBotCorpusTrainer(cn_chatter)
trainer.train('./xiaohuangji_chatterbot_corpus.json')
# trainer.export_for_training('./my_export.json')
return cn_chatter
@classmethod
def load_chatterbot(cls):
"""
加载训练好的bot
:return:
"""
cn_chatterbot = ChatBot('National Lib Chatter',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
input_adapter = 'chatterbot.input.TerminalAdapter',
output_adapter = 'chatterbot.output.TerminalAdapter',
logic_adapters = [
'chatterbot.logic.BestMatch',
'chatterbot.logic.MathematicalEvaluation',
],
database = './db.sqlite3'
)
return cn_chatterbot
if __name__ == '__main__':
test_chatter = ChatterbotChat.create_chatterbot()
test_chatter = ChatterbotChat.load_chatterbot()
while True:
try:
user_input = input('USER:')
response = test_chatter.get_response(user_input)
print('BOT:', response)
# 直到按ctrl-c 或者 ctrl-d 才会退出
except (KeyboardInterrupt, EOFError, SystemExit):
break
|
[
"nankaizhl@gmail.com"
] |
nankaizhl@gmail.com
|
190a82839258ab6b37fcae6033559509b366b1d2
|
ea57ef44636ce151b3ef5322466cdfcb02482515
|
/tests/formatting/test_formatter.py
|
14106396206bd4fd77b1e2c26fe6f06344ede303
|
[
"MIT"
] |
permissive
|
Sn3akyP3t3/pendulum
|
acb3dc5067576c4569a08b1d8a8ecfce918b4724
|
7ce170bdc64199d74e09e347402983f1bb015f63
|
refs/heads/master
| 2020-03-22T01:15:01.160870
| 2018-07-01T15:49:09
| 2018-07-01T15:49:09
| 139,292,657
| 0
| 0
|
MIT
| 2018-07-01T01:46:00
| 2018-07-01T01:46:00
| null |
UTF-8
|
Python
| false
| false
| 7,274
|
py
|
# -*- coding: utf-8 -*-
import pytest
import pendulum
from pendulum.formatting import Formatter
from pendulum.locales.locale import Locale
@pytest.fixture(autouse=True)
def setup():
Locale._cache['dummy'] = {}
yield
del Locale._cache['dummy']
def test_year_tokens():
d = pendulum.datetime(2009, 1, 14, 15, 25, 50, 123456)
f = Formatter()
assert f.format(d, 'YYYY') == '2009'
assert f.format(d, 'YY') == '09'
assert f.format(d, 'Y') == '2009'
def test_quarter_tokens():
f = Formatter()
d = pendulum.datetime(1985, 1, 4)
assert f.format(d, 'Q') == '1'
d = pendulum.datetime(2029, 8, 1)
assert f.format(d, 'Q') == '3'
d = pendulum.datetime(1985, 1, 4)
assert f.format(d, 'Qo') == '1st'
d = pendulum.datetime(2029, 8, 1)
assert f.format(d, 'Qo') == '3rd'
d = pendulum.datetime(1985, 1, 4)
assert f.format(d, 'Qo', locale='fr') == '1er'
d = pendulum.datetime(2029, 8, 1)
assert f.format(d, 'Qo', locale='fr') == '3e'
def test_month_tokens():
f = Formatter()
d = pendulum.datetime(2016, 3, 24)
assert f.format(d, 'MM') == '03'
assert f.format(d, 'M') == '3'
assert f.format(d, 'MMM') == 'Mar'
assert f.format(d, 'MMMM') == 'March'
assert f.format(d, 'Mo') == '3rd'
assert f.format(d, 'MMM', locale='fr') == 'mars'
assert f.format(d, 'MMMM', locale='fr') == 'mars'
assert f.format(d, 'Mo', locale='fr') == '3e'
def test_day_tokens():
f = Formatter()
d = pendulum.datetime(2016, 3, 7)
assert f.format(d, 'DD') == '07'
assert f.format(d, 'D') == '7'
assert f.format(d, 'Do') == '7th'
assert f.format(d.first_of('month'), 'Do') == '1st'
assert f.format(d, 'Do', locale='fr') == '7e'
assert f.format(d.first_of('month'), 'Do', locale='fr') == '1er'
def test_day_of_year():
f = Formatter()
d = pendulum.datetime(2016, 8, 28)
assert f.format(d, 'DDDD') == '241'
assert f.format(d, 'DDD') == '241'
assert f.format(d.start_of('year'), 'DDDD') == '001'
assert f.format(d.start_of('year'), 'DDD') == '1'
assert f.format(d, 'DDDo') == '241st'
assert f.format(d.add(days=3), 'DDDo') == '244th'
assert f.format(d, 'DDDo', locale='fr') == '241e'
assert f.format(d.add(days=3), 'DDDo', locale='fr') == '244e'
def test_week_of_year():
f = Formatter()
d = pendulum.datetime(2016, 8, 28)
assert f.format(d, 'wo') == '34th'
def test_day_of_week():
f = Formatter()
d = pendulum.datetime(2016, 8, 28)
assert f.format(d, 'd') == '0'
assert f.format(d, 'dd') == 'Sun'
assert f.format(d, 'ddd') == 'Sun'
assert f.format(d, 'dddd') == 'Sunday'
assert f.format(d, 'dd', locale='fr') == 'dim.'
assert f.format(d, 'ddd', locale='fr') == 'dim.'
assert f.format(d, 'dddd', locale='fr') == 'dimanche'
assert f.format(d, 'do') == '0th'
def test_am_pm():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 23)
assert f.format(d, 'A') == 'PM'
assert f.format(d.set(hour=11), 'A') == 'AM'
def test_hour():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7)
assert f.format(d, 'H') == '7'
assert f.format(d, 'HH') == '07'
d = pendulum.datetime(2016, 8, 28, 0)
assert f.format(d, 'h') == '12'
assert f.format(d, 'hh') == '12'
def test_minute():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3)
assert f.format(d, 'm') == '3'
assert f.format(d, 'mm') == '03'
def test_second():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6)
assert f.format(d, 's') == '6'
assert f.format(d, 'ss') == '06'
def test_fractional_second():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
assert f.format(d, 'S') == '1'
assert f.format(d, 'SS') == '12'
assert f.format(d, 'SSS') == '123'
assert f.format(d, 'SSSS') == '1234'
assert f.format(d, 'SSSSS') == '12345'
assert f.format(d, 'SSSSSS') == '123456'
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 0)
assert f.format(d, 'S') == '0'
assert f.format(d, 'SS') == '00'
assert f.format(d, 'SSS') == '000'
assert f.format(d, 'SSSS') == '0000'
assert f.format(d, 'SSSSS') == '00000'
assert f.format(d, 'SSSSSS') == '000000'
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123)
assert f.format(d, 'S') == '0'
assert f.format(d, 'SS') == '00'
assert f.format(d, 'SSS') == '000'
assert f.format(d, 'SSSS') == '0001'
assert f.format(d, 'SSSSS') == '00012'
assert f.format(d, 'SSSSSS') == '000123'
def test_timezone():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456, tz='Europe/Paris')
assert f.format(d, 'zz') == 'CEST'
assert f.format(d, 'z') == 'Europe/Paris'
d = pendulum.datetime(2016, 1, 28, 7, 3, 6, 123456, tz='Europe/Paris')
assert f.format(d, 'zz') == 'CET'
assert f.format(d, 'z') == 'Europe/Paris'
def test_timezone_offset():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456, tz='Europe/Paris')
assert f.format(d, 'ZZ') == '+0200'
assert f.format(d, 'Z') == '+02:00'
d = pendulum.datetime(2016, 1, 28, 7, 3, 6, 123456, tz='Europe/Paris')
assert f.format(d, 'ZZ') == '+0100'
assert f.format(d, 'Z') == '+01:00'
d = pendulum.datetime(2016, 1, 28, 7, 3, 6, 123456, tz='America/Guayaquil')
assert f.format(d, 'ZZ') == '-0500'
assert f.format(d, 'Z') == '-05:00'
def test_timestamp():
f = Formatter()
d = pendulum.datetime(1970, 1, 1)
assert f.format(d, 'X') == '0'
assert f.format(d.add(days=1), 'X') == '86400'
def test_date_formats():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
assert f.format(d, 'LT') == '7:03 AM'
assert f.format(d, 'LTS') == '7:03:06 AM'
assert f.format(d, 'L') == '08/28/2016'
assert f.format(d, 'LL') == 'August 28, 2016'
assert f.format(d, 'LLL') == 'August 28, 2016 7:03 AM'
assert f.format(d, 'LLLL') == 'Sunday, August 28, 2016 7:03 AM'
assert f.format(d, 'LT', locale='fr') == '07:03'
assert f.format(d, 'LTS', locale='fr') == '07:03:06'
assert f.format(d, 'L', locale='fr') == '28/08/2016'
assert f.format(d, 'LL', locale='fr') == u'28 août 2016'
assert f.format(d, 'LLL', locale='fr') == u'28 août 2016 07:03'
assert f.format(d, 'LLLL', locale='fr') == u'dimanche 28 août 2016 07:03'
def test_escape():
f = Formatter()
d = pendulum.datetime(2016, 8, 28)
assert f.format(d, '[YYYY] YYYY \[YYYY\]') == 'YYYY 2016 [2016]'
assert f.format(d, '\D D \\\D') == 'D 28 \\28'
def test_date_formats_missing():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
assert f.format(d, 'LT', locale='dummy') == '7:03 AM'
assert f.format(d, 'LTS', locale='dummy') == '7:03:06 AM'
assert f.format(d, 'L', locale='dummy') == '08/28/2016'
assert f.format(d, 'LL', locale='dummy') == 'August 28, 2016'
assert f.format(d, 'LLL', locale='dummy') == 'August 28, 2016 7:03 AM'
assert f.format(d, 'LLLL', locale='dummy') == 'Sunday, August 28, 2016 7:03 AM'
def test_unknown_token():
f = Formatter()
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
assert f.format(d, 'J') == 'J'
|
[
"sebastien@eustace.io"
] |
sebastien@eustace.io
|
8bb167bb9814265161c1b8eeddb4dcacc76b6bbf
|
87879af0c48875acc14d7b31842f403cb29d93e3
|
/isitfit/cli/core.py
|
74f848a438e7cb075a711fcbda411d875f9bc413
|
[
"Apache-2.0"
] |
permissive
|
pgupta35/isitfit
|
bfcec5e03778837c7040a45fbc710c7d8dcc5366
|
db69f0501486346e3794f1013ed0c57ce3895b8c
|
refs/heads/master
| 2020-09-14T16:50:23.454724
| 2019-11-20T17:39:40
| 2019-11-20T17:39:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
# RuntimeError: Click will abort further execution because Python 3 was configured to use ASCII as encoding for the environment.
# Consult https://click.palletsprojects.com/en/7.x/python3/ for mitigation steps.
#
# Edit 2019-10-08: whatsapp's wadebug uses "click.disable_unicode_literals_warning = True"
# Ref: https://github.com/WhatsApp/WADebug/blob/958ac37be804cc732ae514d4872b93d19d197a5c/wadebug/cli.py#L23
from ..utils import mysetlocale
mysetlocale()
import logging
logger = logging.getLogger('isitfit')
import click
from .. import isitfit_version
# For the --share-email "multiple options"
# https://click.palletsprojects.com/en/7.x/options/#multiple-options
@click.group(invoke_without_command=True)
@click.option('--debug', is_flag=True, help='Display more details to help with debugging')
@click.option('--optimize', is_flag=True, help='DEPRECATED: use "isitfit cost optimize" instead')
@click.option('--version', is_flag=True, help='DEPRECATED: use "isitfit version" instead')
@click.option('--share-email', multiple=True, help='Share result to email address')
@click.option('--skip-check-upgrade', is_flag=True, help='Skip step for checking for upgrade of isitfit')
@click.pass_context
def cli_core(ctx, debug, optimize, version, share_email, skip_check_upgrade):
logLevel = logging.DEBUG if debug else logging.INFO
ch = logging.StreamHandler()
ch.setLevel(logLevel)
logger.addHandler(ch)
logger.setLevel(logLevel)
if debug:
logger.debug("Enabled debug level")
logger.debug("-------------------")
# After adding the separate command for "cost" (i.e. `isitfit cost analyze`)
# putting a note here to notify user of new usage
# Ideally, this code would be deprecated though
if ctx.invoked_subcommand is None:
# if still used without subcommands, notify user of new usage
#from .cost import analyze as cost_analyze, optimize as cost_optimize
#if optimize:
# ctx.invoke(cost_optimize, filter_tags=filter_tags, n=n)
#else:
# ctx.invoke(cost_analyze, filter_tags=filter_tags)
from click.exceptions import UsageError
if optimize:
raise UsageError("As of version 0.11, please use `isitfit cost optimize` instead of `isitfit --optimize`.")
elif version:
# ctx.invoke(cli_version)
raise UsageError("As of version 0.11, please use `isitfit version` instead of `isitfit --version`.")
else:
raise UsageError("As of version 0.11, please use `isitfit cost analyze` instead of `isitfit` to calculate the cost-weighted utilization.")
# make sure that context is a dict
ctx.ensure_object(dict)
# check if emailing requested
if share_email is not None:
max_n_recipients = 3
if len(share_email) > max_n_recipients:
from click.exceptions import BadParameter
raise BadParameter("Maximum allowed number of email recipients is %i. Received %i"%(max_n_recipients, len(share_email)), param_hint="--share-email")
ctx.obj['share_email'] = share_email
# check if current version is out-of-date
if not skip_check_upgrade:
from ..utils import prompt_upgrade
is_outdated = prompt_upgrade('isitfit', isitfit_version)
ctx.obj['is_outdated'] = is_outdated
from .tags import tags as cli_tags
from .cost import cost as cli_cost
from .version import version as cli_version
cli_core.add_command(cli_version)
cli_core.add_command(cli_cost)
cli_core.add_command(cli_tags)
#-----------------------
if __name__ == '__main__':
cli_core()
|
[
"shadiakiki1986@gmail.com"
] |
shadiakiki1986@gmail.com
|
4f1e75369d5f0420fdc4f28eb1188153e869a7d9
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/B_HashTable/Basic/L0_1282_Group_the_People_Given_the_Group_Size_They_Belong_To.py
|
92ea13493ced4cb225285f580300bf6fa20a0bcb
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 440
|
py
|
""" https://leetcode.com/problems/group-the-people-given-the-group-size-they-belong-to/
"""
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
cnt = defaultdict(list)
for i, x in enumerate(groupSizes):
cnt[x].append(i)
ans = []
for k, v in cnt.items():
for i in range(0, len(v), k):
ans.append(v[i:i+k])
return ans
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
4a59b66d1aab3d4e8c93955f5bad4b91c49bb18f
|
b0a350df2e6eef86b753e2eff9e57b21bd113019
|
/users/models.py
|
9ea4638742a4c278f8a25a672944e04bdd4c7bab
|
[] |
no_license
|
fazer1929/Go-Collab
|
8ba92ce926bb163e50ffcf3eb4eecb29d8e0a94b
|
75efb243ee6143603026dee45ca73aabf8d9ddff
|
refs/heads/main
| 2023-02-24T01:08:25.440449
| 2021-01-30T21:50:08
| 2021-01-30T21:50:08
| 334,199,821
| 0
| 0
| null | 2021-01-30T21:30:58
| 2021-01-29T16:24:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE,null=True,blank=True)
bio = models.TextField(max_length=500, blank=True)
skills = models.CharField(max_length=40,blank=True)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
|
[
"abhishekagrawal8888@gmail.com"
] |
abhishekagrawal8888@gmail.com
|
69ac7f2c16d34374a622fb6417163aa64a013b79
|
1df82fa8ef888b74fb9095c9ade89e16895366b1
|
/14.Lambdas and Buit In Functions - Exercise/06. Unique Numbers.py
|
6980fd7dc051a806142c78f6ce701771e8ffbacc
|
[] |
no_license
|
filipov73/python_advanced_january_2020
|
868eb4bc365f7774c373183760e7ac584e1bd20c
|
a5e24190ee08bd1a0534dc04f91a5ba1927d1b19
|
refs/heads/master
| 2020-11-26T14:07:12.423309
| 2020-02-23T15:20:13
| 2020-02-23T15:20:13
| 229,097,988
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
numbers = [float(x) for x in input().split()]
round_numbers = sorted(set(map(round, numbers)))
print(min(round_numbers))
print(max(round_numbers))
mul_x_3 = [x * 3 for x in round_numbers]
print(" ".join(map(str, mul_x_3)))
# min and max and multiply the numbers by 3.
# Print only the unique numbers in ascending order separated by space
|
[
"m_filipov@yahoo.com"
] |
m_filipov@yahoo.com
|
73ce286191945ea0d7b3750e084b44c850201d99
|
bbf7787d94e97d4e0c9bceb46203c08939e6e67d
|
/python_test_case/Data.py
|
de2ca81bcab87030d2f956c769b3e395ba99f41f
|
[] |
no_license
|
llanoxdewa/python
|
076e6fa3ed2128c21cdd26c1be6bc82ee6917f9c
|
6586170c5f48827a5e1bcb35656870b5e4eed732
|
refs/heads/main
| 2023-06-16T05:31:52.494796
| 2021-07-09T09:04:30
| 2021-07-09T09:04:30
| 362,782,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
class Siswa:
def __init__(self,nama,kelas,nilai):
self.__nama = nama
self.__kelas = kelas
self.__nilai = nilai
def hasil(self):
return {
'nama':self.__nama,
'kelas':self.__kelas,
'nilai':self.__nilai
}
def getNama(self):
return self.__nama
def getKelas(self):
return self.__kelas
def getNilai(self):
return self.__nilai
if __name__=='__main__':
siswa1 = Siswa('llano','11 elektro 1',100)
siswa2 = Siswa('ujang','10 Las 3',95)
print(siswa1.hasil())
print(siswa2.hasil())
|
[
"llanoxdew4@gmail.com"
] |
llanoxdew4@gmail.com
|
8bd5f7297672cd1bcc6f7bac4d676841b7226b48
|
f5c3fde6a1f0825ef03c73c659ad61a9ca15eac9
|
/backend/course/api/v1/urls.py
|
d3fda365b5a60402915d8df5400dd7132d425b80
|
[] |
no_license
|
crowdbotics-apps/meira-20558
|
654cf2c71b7c1bb22a419dc4eb20f41391ca20bb
|
2ff22fb39ca7c380389524805c21b70bf4c53611
|
refs/heads/master
| 2022-12-25T00:22:10.427658
| 2020-09-22T12:23:05
| 2020-09-22T12:23:05
| 297,630,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
RecordingViewSet,
EventViewSet,
SubscriptionViewSet,
CourseViewSet,
GroupViewSet,
ModuleViewSet,
PaymentMethodViewSet,
SubscriptionTypeViewSet,
EnrollmentViewSet,
LessonViewSet,
CategoryViewSet,
)
router = DefaultRouter()
router.register("paymentmethod", PaymentMethodViewSet)
router.register("category", CategoryViewSet)
router.register("module", ModuleViewSet)
router.register("lesson", LessonViewSet)
router.register("subscriptiontype", SubscriptionTypeViewSet)
router.register("enrollment", EnrollmentViewSet)
router.register("event", EventViewSet)
router.register("subscription", SubscriptionViewSet)
router.register("recording", RecordingViewSet)
router.register("group", GroupViewSet)
router.register("course", CourseViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f030a759d8cdbb9ea7ca151b2e0defbfaa9762d6
|
48894ae68f0234e263d325470178d67ab313c73e
|
/sa/profiles/DLink/DGS3100/get_interface_status.py
|
c9e8246ac2d799ba19927cd9ff87b7157fcd836d
|
[
"BSD-3-Clause"
] |
permissive
|
DreamerDDL/noc
|
7f949f55bb2c02c15ac2cc46bc62d957aee43a86
|
2ab0ab7718bb7116da2c3953efd466757e11d9ce
|
refs/heads/master
| 2021-05-10T18:22:53.678588
| 2015-06-29T12:28:20
| 2015-06-29T12:28:20
| 118,628,133
| 0
| 0
| null | 2018-01-23T15:19:51
| 2018-01-23T15:19:51
| null |
UTF-8
|
Python
| false
| false
| 2,088
|
py
|
# -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## DLink.DGS3100.get_interface_status
##----------------------------------------------------------------------
## Copyright (C) 2007-2014 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
"""
"""
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces import IGetInterfaceStatus
import re
class Script(NOCScript):
name = "DLink.DGS3100.get_interface_status"
implements = [IGetInterfaceStatus]
rx_line = re.compile(
r"^\s*(?P<interface>\S+)\s+(Enabled|Disabled)\s+\S+\s+"
r"(?P<status>.+)\s+(Enabled|Disabled)\s*$",
re.IGNORECASE | re.MULTILINE)
def execute(self, interface=None):
# Not tested. Must be identical in different vendors
if self.snmp and self.access_profile.snmp_ro:
try:
# Get interface status
r = []
# IF-MIB::ifName, IF-MIB::ifOperStatus
for n, s in self.snmp.join_tables(
"1.3.6.1.2.1.31.1.1.1.1",
"1.3.6.1.2.1.2.2.1.8", bulk=True):
if not n.startswith("802.1Q Encapsulation Tag"):
if interface is not None and interface == n:
r += [{"interface": n, "status": int(s) == 1}]
else:
r += [{"interface": n, "status": int(s) == 1}]
return r
except self.snmp.TimeOutError:
pass
# Fallback to CLI
if interface is None:
interface = "all"
try:
s = self.cli("show ports %s" % interface)
except self.CLISyntaxError:
raise self.NotSupportedError()
r = []
for match in self.rx_line.finditer(s):
r += [{
"interface": match.group("interface"),
"status": match.group("status").strip() != "Link Down"
}]
return r
|
[
"dmitryluhtionov@gmail.com"
] |
dmitryluhtionov@gmail.com
|
72d33eb12312e3ec1ce19ff5b0ab6f6eae075df6
|
1d01149498a26385eb0e47b35a1045d37a1584b4
|
/pyntcloud/io/ply.py
|
938cdcd52aabd2ce9263e5e594e767cec8d040fe
|
[
"Unlicense"
] |
permissive
|
iindovina/pyntcloud
|
f54c0dc528385bbeb1d37952abbfe631594e7060
|
e2499f7f1a897d3ff0b526a12b88f3914a663067
|
refs/heads/master
| 2021-07-21T07:35:19.129049
| 2017-10-31T14:34:23
| 2017-10-31T14:34:23
| 107,396,333
| 0
| 0
| null | 2017-10-18T11:03:46
| 2017-10-18T11:03:46
| null |
UTF-8
|
Python
| false
| false
| 7,055
|
py
|
# HAKUNA MATATA
import sys
import numpy as np
import pandas as pd
from collections import defaultdict
ply_dtypes = dict([
(b'int8', 'i1'),
(b'char', 'i1'),
(b'uint8', 'u1'),
(b'uchar', 'b1'),
(b'uchar', 'u1'),
(b'int16', 'i2'),
(b'short', 'i2'),
(b'uint16', 'u2'),
(b'ushort', 'u2'),
(b'int32', 'i4'),
(b'int', 'i4'),
(b'uint32', 'u4'),
(b'uint', 'u4'),
(b'float32', 'f4'),
(b'float', 'f4'),
(b'float64', 'f8'),
(b'double', 'f8')
])
valid_formats = {'ascii': '', 'binary_big_endian': '>',
'binary_little_endian': '<'}
def read_ply(filename):
    """Read a .ply (binary or ascii) file into pandas DataFrames.

    Parameters
    ----------
    filename: str
        Path to the file.

    Returns
    -------
    data: dict
        "points" DataFrame (one row per vertex) and, when faces are
        present, a "mesh" DataFrame with columns v1, v2, v3.
    """
    with open(filename, 'rb') as ply:
        # FIX: error message typo ("whith" -> "with")
        if b'ply' not in ply.readline():
            raise ValueError('The file does not start with the word ply')
        # get binary_little/big or ascii
        fmt = ply.readline().split()[1].decode()
        # byte-order prefix for building the numpy dtypes
        ext = valid_formats[fmt]

        line = []
        dtypes = defaultdict(list)
        # "ply" and "format" lines are already consumed
        count = 2
        points_size = None
        mesh_size = None
        while b'end_header' not in line and line != b'':
            line = ply.readline()

            if b'element' in line:
                line = line.split()
                name = line[1].decode()
                size = int(line[2])
                if name == "vertex":
                    points_size = size
                elif name == "face":
                    mesh_size = size

            elif b'property' in line:
                line = line.split()
                # element mesh: "property list <count-type> <index-type> ..."
                if b'list' in line:
                    mesh_names = ['n_points', 'v1', 'v2', 'v3']
                    if fmt == "ascii":
                        # the first number has different dtype than the list
                        dtypes[name].append(
                            (mesh_names[0], ply_dtypes[line[2]]))
                        # rest of the numbers have the same dtype
                        dt = ply_dtypes[line[3]]
                    else:
                        # the first number has different dtype than the list
                        dtypes[name].append(
                            (mesh_names[0], ext + ply_dtypes[line[2]]))
                        # rest of the numbers have the same dtype
                        dt = ext + ply_dtypes[line[3]]
                    for j in range(1, 4):
                        dtypes[name].append((mesh_names[j], dt))
                else:
                    # plain scalar property: "property <type> <name>"
                    if fmt == "ascii":
                        dtypes[name].append(
                            (line[2].decode(), ply_dtypes[line[1]]))
                    else:
                        dtypes[name].append(
                            (line[2].decode(), ext + ply_dtypes[line[1]]))
            count += 1

        # remember where the binary payload starts
        end_header = ply.tell()

    data = {}

    if fmt == 'ascii':
        # header occupies `count` lines; faces (if any) trail the vertices
        top = count
        bottom = 0 if mesh_size is None else mesh_size

        names = [x[0] for x in dtypes["vertex"]]

        data["points"] = pd.read_csv(filename, sep=" ", header=None, engine="python",
                                     skiprows=top, skipfooter=bottom, usecols=names, names=names)

        for n, col in enumerate(data["points"].columns):
            data["points"][col] = data["points"][col].astype(
                dtypes["vertex"][n][1])

        if mesh_size is not None:
            top = count + points_size

            # skip column 0 (the per-face vertex count, always the same)
            names = [x[0] for x in dtypes["face"]][1:]
            usecols = [1, 2, 3]

            data["mesh"] = pd.read_csv(
                filename, sep=" ", header=None, engine="python", skiprows=top, usecols=usecols, names=names)

            for n, col in enumerate(data["mesh"].columns):
                data["mesh"][col] = data["mesh"][col].astype(
                    dtypes["face"][n + 1][1])

    else:
        # binary: read structured records straight from the payload
        with open(filename, 'rb') as ply:
            ply.seek(end_header)
            data["points"] = pd.DataFrame(np.fromfile(
                ply, dtype=dtypes["vertex"], count=points_size))
            if mesh_size is not None:
                data["mesh"] = pd.DataFrame(np.fromfile(
                    ply, dtype=dtypes["face"], count=mesh_size))
                # the per-face vertex count carries no information
                data["mesh"].drop('n_points', axis=1, inplace=True)

    return data
def write_ply(filename, points=None, mesh=None, as_text=False):
    """Write points and/or a triangular mesh to a .ply file.

    Parameters
    ----------
    filename: str
        The created file will be named with this (".ply" is appended
        when missing).
    points: pandas.DataFrame or None
        One row per vertex; columns become ply properties.
    mesh: pandas.DataFrame or None
        One row per triangular face (three vertex-index columns).
    as_text: boolean
        Set the write mode of the file. Default: binary.

    Returns
    -------
    boolean
        True if no problems.
    """
    if not filename.endswith('ply'):
        filename += '.ply'
    # open in text mode to write the header
    with open(filename, 'w') as ply:
        header = ['ply']
        if as_text:
            header.append('format ascii 1.0')
        else:
            # binary layout follows this machine's native byte order
            header.append('format binary_' + sys.byteorder + '_endian 1.0')
        if points is not None:
            header.extend(describe_element('vertex', points))
        if mesh is not None:
            # copy so the caller's DataFrame is not mutated; every face is
            # a triangle, hence the constant n_points == 3 stored as uint8
            mesh = mesh.copy()
            mesh.insert(loc=0, column="n_points", value=3)
            mesh["n_points"] = mesh["n_points"].astype("u1")
            header.extend(describe_element('face', mesh))
        header.append('end_header')
        for line in header:
            ply.write("%s\n" % line)
    if as_text:
        # append the data rows after the header, space-separated
        if points is not None:
            points.to_csv(filename, sep=" ", index=False, header=False, mode='a',
                          encoding='ascii')
        if mesh is not None:
            mesh.to_csv(filename, sep=" ", index=False, header=False, mode='a',
                        encoding='ascii')
    else:
        # open in binary/append to use tofile
        with open(filename, 'ab') as ply:
            if points is not None:
                points.to_records(index=False).tofile(ply)
            if mesh is not None:
                mesh.to_records(index=False).tofile(ply)
    return True
def describe_element(name, df):
    """Build the ply header lines describing one element.

    Parameters
    ----------
    name: str
        Element name ("vertex", "face", ...).
    df: pandas DataFrame
        Data for the element; its columns/dtypes drive the description.

    Returns
    -------
    element: list[str]
    """
    property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
    element = ['element ' + name + ' ' + str(len(df))]

    if name == 'face':
        # faces are always a fixed-layout list property
        element.append("property list uchar int vertex_indices")
        return element

    for column, dtype in zip(df.columns.values, df.dtypes):
        # first letter of the dtype kind selects the ply scalar type
        element.append('property ' + property_formats[str(dtype)[0]] + ' ' + column)
    return element
|
[
"daviddelaiglesiacastro@gmail.com"
] |
daviddelaiglesiacastro@gmail.com
|
e913c4515484c39cb110b8eeb50f63d53599c617
|
cec916f882afbd09fe68f6b88879e68eaea976f6
|
/bigmler/options/multilabel.py
|
c7cad984064814fe938c6fa487987c77e151eef4
|
[
"Apache-2.0"
] |
permissive
|
jaor/bigmler
|
d86db6d7950768d7ba3e21b5f29bc265467f4cad
|
bbf221e41ef04e8d37a511a35a63216b64689449
|
refs/heads/master
| 2023-04-26T12:07:49.428263
| 2023-04-12T15:22:20
| 2023-04-12T15:22:20
| 15,663,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,530
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2023 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer multi-label processing
"""
def get_multi_label_options(defaults=None):
    """Build the argparse option definitions for multi-label processing.

    `defaults` optionally supplies per-option default values keyed by the
    option's `dest` name.
    """
    if defaults is None:
        defaults = {}

    options = {}

    # Multi-label labels. If set, only the given labels are expanded.
    options['--labels'] = {
        'action': 'store',
        'dest': 'labels',
        'default': defaults.get('labels', None),
        'help': ("Comma-separated list of the labels"
                 " to be expanded from a multi-label field.")}

    # Separator used when splitting labels in the objective field.
    options['--label-separator'] = {
        'action': 'store',
        'dest': 'label_separator',
        'default': defaults.get('label_separator', None),
        'help': ("Separator used when splitting labels in the"
                 " objective field.")}

    # Fields (name or column number) to treat as multi-label.
    options['--multi-label-fields'] = {
        'action': 'store',
        'dest': 'multi_label_fields',
        'default': defaults.get('multi_label_fields', None),
        'help': ("Comma-separated list of the fields"
                 " to be expanded as being multi-label. Name"
                 " or column number.")}

    # Aggregation functions applied over the multi-label field labels.
    options['--label-aggregates'] = {
        'action': 'store',
        'dest': 'label_aggregates',
        'default': defaults.get('label_aggregates', None),
        'help': ("Comma-separated list of aggregation functions "
                 "for the multi-label field labels."
                 " Allowed aggregates: count, first and last")}

    return options
|
[
"merce@bigml.com"
] |
merce@bigml.com
|
e4f471f5de8a7768f739889d5f7bdae7996aecb4
|
de75304d96e433f67dba3438f2456dd3dbb2ce08
|
/scriptsLinAlg/06_matrix-properties.py
|
dd6fc05f9695dbbf6cadf91808dc93466ce05c6f
|
[] |
no_license
|
dalerxli/slides_linear-algebra-intro
|
ef7486a2779d5cd6633222662c629eae0ee59997
|
9bdbafeecd620a13e2c152bc3eb331543a5d7674
|
refs/heads/master
| 2023-07-14T14:35:24.395828
| 2021-08-10T17:55:04
| 2021-08-10T17:55:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
# Demonstrations of basic matrix-product properties with numpy.
import numpy as np

A = np.array([
    [7, 1],
    [2, -3],
    [4, 8],
])
B = np.array([
    [ 1, 6 ],
    [-2, 3 ],
])
C = np.array([
    [4, 1],
    [7, 3],
])

# Associative: (AB)C == A(BC)
ABC = A.dot(B.dot(C))
AB_C = A.dot(B).dot(C)

# Distributive: A(B + C) == AB + AC
D = A.dot(B + C)
E = A.dot(B) + A.dot(C)

# Commutative: matrix multiplication is NOT commutative in general, so
# the element-wise comparison below prints mostly False.
print('\n', B.dot(C))
print('\n', C.dot(B))
print('\n', B.dot(C) == C.dot(B))

# Dot product of column vectors: v1^T v2 == v2^T v1 (a 1x1 result)
v1 = np.array([[3],
               [8],
               [1],
               ])
v2 = np.array([[4],
               [8],
               [3],
               ])
print('\n', v1.T.dot(v2))
print('\n', v2.T.dot(v1))
|
[
"torresc.rafael@gmail.com"
] |
torresc.rafael@gmail.com
|
da6b586298b34518a6dec9ed0a3e1211f0ea97b6
|
cda2c95ee6167a34ce9ba3ea25707469a2c357ca
|
/neural_augmented_simulator/old-code/simple_joints_lstm/dataset_ergoreachersimple_v2.py
|
ef2f878e4c98cdf6b58abc25f8daf9dec3c33b7a
|
[] |
no_license
|
fgolemo/neural-augmented-simulator
|
f43d9a88187fbef478aba9b4399eaa59d8795746
|
eb02f20d92e6775824dbac221771f8b8c6dda582
|
refs/heads/master
| 2020-06-23T15:50:35.957495
| 2020-03-06T21:16:34
| 2020-03-06T21:16:34
| 198,666,041
| 0
| 1
| null | 2019-12-04T22:19:27
| 2019-07-24T15:49:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
import os
from torch.utils.data import Dataset
import numpy as np
class DatasetErgoreachersimpleV2(Dataset):
    """Sim-to-real correction dataset for the ErgoReacherSimple-v2 task.

    Each sample pairs the simulator's next state, the current real state
    and the action (stacked into "x") with the correction the model must
    learn ("y"): real-next minus sim-next, or — when `nosim` — the raw
    real state delta.
    """

    def __init__(self, path="~/data/sim2real/data-ergoreachersimple-v2.npz", train=True, nosim=False):
        super().__init__()
        archive = np.load(os.path.expanduser(path))
        self.nosim = nosim
        # First 900 recordings form the training split, the rest the
        # test split.
        split = slice(None, 900) if train else slice(900, None)
        self.curr_real = archive["state_current_real"][split]
        self.next_real = archive["state_next_real"][split]
        self.next_sim = archive["state_next_sim"][split]
        self.action = archive["actions"][split]

    def __len__(self):
        return len(self.curr_real)

    def format_data(self, idx):
        # Learning target: the residual between reality and simulation
        # (or between consecutive real states in nosim mode).
        if self.nosim:
            target = self.next_real[idx] - self.curr_real[idx]
        else:
            target = self.next_real[idx] - self.next_sim[idx]
        features = np.hstack(
            (self.next_sim[idx], self.curr_real[idx], self.action[idx]))
        return features, target

    def __getitem__(self, idx):
        features, target = self.format_data(idx)
        return {"x": features, "y": target}
if __name__ == '__main__':
    # Smoke test: print split sizes, a few samples, and value ranges.
    dsr = DatasetErgoreachersimpleV2(train=False)
    print("len test", len(dsr))
    print(dsr[10])
    dsr = DatasetErgoreachersimpleV2(train=True)
    print("len train", len(dsr))
    print(dsr[10]["x"].shape, dsr[10]["y"].shape)
    # Show that the stored delta reconstructs the next real state:
    # real(t2) == sim(t2) + delta
    for i in range(10, 20):
        print("real t1:", dsr[0]["x"][i, 8:16].round(2))
        print("sim_ t2:", dsr[0]["x"][i, :8].round(2))
        print("action_:", dsr[0]["x"][i, 16:].round(2))
        print("real t2:", (dsr[0]["x"][i, :8] + dsr[0]["y"][i]).round(2))
        print("delta__:", dsr[0]["y"][i].round(2))
        print("===")
    # Scan the whole training split for min/max of inputs and targets
    # (useful for picking normalization constants).
    max_x = -np.inf
    min_x = +np.inf
    max_y = -np.inf
    min_y = +np.inf
    for item in dsr:
        if item["x"].max() > max_x:
            max_x = item["x"].max()
        if item["y"].max() > max_y:
            max_y = item["y"].max()
        if item["x"].min() < min_x:
            min_x = item["x"].min()
        if item["y"].min() < min_y:
            min_y = item["y"].min()
    print("min x {}, max x {}\n"
          "min y {}, max y {}".format(min_x, max_x, min_y, max_y))
|
[
"fgolemo@gmail.com"
] |
fgolemo@gmail.com
|
bc7b26f53ee58f08c0235b1f7d00ab1788620dcd
|
3899dd3debab668ef0c4b91c12127e714bdf3d6d
|
/venv/Lib/site-packages/tensorflow/python/util/decorator_utils.py
|
4b4ce90e1210574d10a44de4deb88c68fb84c884
|
[] |
no_license
|
SphericalPotatoInVacuum/CNNDDDD
|
b2f79521581a15d522d8bb52f81b731a3c6a4db4
|
03c5c0e7cb922f53f31025b7dd78287a19392824
|
refs/heads/master
| 2020-04-21T16:10:25.909319
| 2019-02-08T06:04:42
| 2019-02-08T06:04:42
| 169,691,960
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,741
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for writing decorators (which modify docstrings)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
def get_qualified_name(function):
  """Return the best available dotted name for `function`."""
  # Python 3 exposes the dotted path directly.
  qualname = getattr(function, '__qualname__', None)
  if qualname is not None:
    return qualname
  # Python 2: reconstruct "Class.method" for methods.
  im_class = getattr(function, 'im_class', None)
  if im_class is not None:
    return im_class.__name__ + '.' + function.__name__
  return function.__name__
def _normalize_docstring(docstring):
"""Normalizes the docstring.
Replaces tabs with spaces, removes leading and trailing blanks lines, and
removes any indentation.
Copied from PEP-257:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
Args:
docstring: the docstring to normalize
Returns:
The normalized docstring
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
# (we use sys.maxsize because sys.maxint doesn't exist in Python 3)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def add_notice_to_docstring(
    doc, instructions, no_doc_str, suffix_str, notice):
  """Adds a deprecation notice to a docstring.

  The suffix is appended to the summary line; the notice (plus optional
  instructions) is inserted between the summary and the body, or appended
  when there is no body.
  """
  if doc:
    lines = _normalize_docstring(doc).splitlines()
    lines[0] = lines[0] + ' ' + suffix_str
  else:
    lines = [no_doc_str]

  notice_block = [''] + notice + ([instructions] if instructions else [])

  if len(lines) <= 1:
    return '\n'.join(lines + notice_block)
  if lines[1].strip():
    # Make sure that we keep our distance from the main body.
    notice_block.append('')
  return '\n'.join(lines[:1] + notice_block + lines[1:])
def validate_callable(func, decorator_name):
  """Raise ValueError when `func` is not callable.

  A common cause is stacking a decorator after @property, which hands
  the decorator a `property` object instead of the getter function.
  """
  if hasattr(func, '__call__'):
    return
  raise ValueError(
      '%s is not a function. If this is a property, make sure'
      ' @property appears before @%s in your source code:'
      '\n\n@property\n@%s\ndef method(...)' % (
          func, decorator_name, decorator_name))
class classproperty(object):  # pylint: disable=invalid-name
  """Class property decorator.

  Exposes a zero-argument classmethod-like getter as a read-only
  attribute on the class itself.

  Example usage:

  class MyClass(object):

    @classproperty
    def value(cls):
      return '123'

  > print MyClass.value
  123
  """

  def __init__(self, func):
    # The wrapped getter; called with the owning class, not an instance.
    self._getter = func

  def __get__(self, unused_instance, owner):
    return self._getter(owner)
|
[
"a@bogdanov.co"
] |
a@bogdanov.co
|
4abad0338b29831728ac721fc4205a6c61efa18c
|
48c4bb95c2d49ca9dca1e6356e61784d6f36a01d
|
/analysis/summary.py
|
b14e5896cb708693292283e6c2a96ccbab90bb1e
|
[] |
no_license
|
afcarl/SpatialMoney
|
2a5e5caf91ed7b0925317d4ec949d9ca4752f416
|
6c9339ae556c842f228cd1cce65842b5b346f060
|
refs/heads/master
| 2020-03-16T16:11:36.835280
| 2018-03-19T20:31:31
| 2018-03-19T20:31:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,032
|
py
|
import numpy as np
import enum
import os
import matplotlib.pyplot as plt
class MoneyAnalysis:
    """Plain result container for a monetary-state analysis."""

    def __init__(self, m0, m1, m2, interruptions):
        # Times the economy dropped out of any monetary state.
        self.interruptions = interruptions
        # Time steps during which good 0 / 1 / 2 acted as money.
        self.m0 = m0
        self.m1 = m1
        self.m2 = m2
class MoneyAnalyst(object):
    """Detects which good (if any) serves as money in a simulation run."""

    # Minimum exchange-strategy proportion for a type to count as committed.
    money_threshold = .75

    @classmethod
    def _test_for_money_state(cls, direct_exchange, indirect_exchange):
        """Return the index of the good acting as money, or -1 if none.

        Good `m` is money when agent type (m+1) % 3 trades indirectly
        while the two other types trade directly, all three proportions
        exceeding `money_threshold`. Candidates are checked in order
        0, 1, 2 and the first match wins (as in the original nested ifs).
        """
        for candidate in range(3):
            indirect_type = (candidate + 1) % 3
            committed = True
            for agent_type in range(3):
                source = (indirect_exchange if agent_type == indirect_type
                          else direct_exchange)
                if not source[agent_type] > cls.money_threshold:
                    committed = False
                    break
            if committed:
                return candidate
        return -1

    @classmethod
    def run(cls, direct_exchange, indirect_exchange, t_max):
        """Scan `t_max` time steps and tally the monetary states."""
        timeline = np.zeros(t_max)
        counts = {-1: 0, 0: 0, 1: 0, 2: 0}
        interruptions = 0

        for step in range(t_max):
            state = cls._test_for_money_state(
                direct_exchange=direct_exchange[step],
                indirect_exchange=indirect_exchange[step])
            timeline[step] = state
            counts[state] += 1

            # Count transitions from a monetary state into "no money".
            if step > 0 and state == -1 and timeline[step - 1] != -1:
                interruptions += 1

        return MoneyAnalysis(
            m0=counts[0],
            m1=counts[1],
            m2=counts[2],
            interruptions=interruptions)
def plot(data):
    """Scatter the number of monetary states against four parameters.

    `data` is expected to expose `.data` (per-run results), `.parameters`
    (including `t_max`) and `.file_name` — assumed from usage below;
    confirm against the caller.
    """
    # Local enum used purely to key the per-parameter x-value lists.
    class X(enum.Enum):
        alpha = enum.auto()
        tau = enum.auto()
        vision_area = enum.auto()
        x = enum.auto()

    x = {
        X.alpha: [],
        X.tau: [],
        X.vision_area: [],
        X.x: []
    }
    y = []

    # One point per run: parameter values on x, total monetary steps on y.
    for d in data.data:
        a = MoneyAnalyst.run(
            t_max=data.parameters.t_max,
            direct_exchange=d.direct_exchanges_proportions,
            indirect_exchange=d.indirect_exchanges_proportions
        )
        x[X.vision_area].append(d.parameters.vision_area)
        x[X.tau].append(d.parameters.tau)
        x[X.alpha].append(d.parameters.alpha)
        # total number of agents across the three types
        x[X.x].append(d.parameters.x0 + d.parameters.x1 + d.parameters.x2)
        y.append(
            a.m0 + a.m1 + a.m2
        )

    # 2x2 grid: tau, alpha, vision area, number of agents.
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(221)
    ax.scatter(x[X.tau], y, c="black", alpha=0.4, s=15)
    ax.set_ylabel("n monetary states")
    ax.set_xlabel(r"$\tau$")
    ax = fig.add_subplot(222)
    ax.scatter(x[X.alpha], y, c="black", alpha=0.4, s=15)
    ax.set_ylabel("n monetary states")
    ax.set_xlabel(r"$\alpha$")
    ax = fig.add_subplot(223)
    ax.scatter(x[X.vision_area], y, c="black", alpha=0.4, s=15)
    ax.set_ylabel("n monetary states")
    ax.set_xlabel(r"vision area")
    ax = fig.add_subplot(224)
    ax.scatter(x[X.x], y, c="black", alpha=0.4, s=15)
    ax.set_ylabel("n monetary states")
    ax.set_xlabel(r"n agents")
    # Stamp the source file name in the figure corner for traceability.
    ax.text(0.005, 0.005, data.file_name, transform=fig.transFigure,
            fontsize='x-small', color='0.5')
    plt.tight_layout()
    file_path = "figures/{}/{}_summary.pdf".format(
        data.file_name,
        data.file_name)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    plt.savefig(file_path)
    plt.close(fig)
|
[
"nioche.aurelien@gmail.com"
] |
nioche.aurelien@gmail.com
|
b042fdcacc6b466da8f278faceace2ce9a4f2584
|
ae81b16cf4242d329dfcb055e85fafe87262cc7f
|
/leetcode/509斐波那切数列.py
|
ec74e05e99dfed630718815437601a4785cd97d8
|
[] |
no_license
|
coquelin77/PyProject
|
3d2d3870b085c4b7ff41bd200fe025630969ab8e
|
58e84ed8b3748c6e0f78184ab27af7bff3778cb8
|
refs/heads/master
| 2023-03-18T19:14:36.441967
| 2019-06-19T02:44:22
| 2019-06-19T02:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
'''斐波那契数,通常用 F(n) 表示,形成的序列称为斐波那契数列。该数列由 0 和 1 开始,后面的每一项数字都是前面两项数字的和。也就是:
F(0) = 0, F(1) = 1
F(N) = F(N - 1) + F(N - 2), 其中 N > 1.
给定 N,计算 F(N)。
示例 1:
输入:2
输出:1
解释:F(2) = F(1) + F(0) = 1 + 0 = 1.
示例 2:
输入:3
输出:2
解释:F(3) = F(2) + F(1) = 1 + 1 = 2.
示例 3:
输入:4
输出:3
解释:F(4) = F(3) + F(2) = 2 + 1 = 3.'''
class Solution(object):
    """LeetCode 509: Fibonacci number."""

    def fib(self, N):
        """Return the N-th Fibonacci number (F(0)=0, F(1)=1).

        :type N: int
        :rtype: int

        Iterative two-variable scan: O(N) time, O(1) space. The original
        built a full list (shadowing the `list` builtin) and special-cased
        N < 2 awkwardly; results for N >= 0 are unchanged.
        """
        if N == 0:
            return 0
        previous, current = 0, 1
        for _ in range(N - 1):
            previous, current = current, previous + current
        return current
if __name__ == '__main__':
    # Quick manual check: F(0) should print 0.
    a=Solution()
    p=a.fib(0)
    print(p)
|
[
"zhangyiming748@users.noreply.github.com"
] |
zhangyiming748@users.noreply.github.com
|
31e4b5de766ec1ab842af725b94e2c8ce8339e03
|
7c04a4dd2159284fe1a1dfef1e88a53757abf843
|
/orm/sqlalchemy复习/s17.py
|
bce14ab6016578d3fbc969a6a6996fd70150afbe
|
[] |
no_license
|
RelaxedDong/flask_advance
|
f6c24c38e4f3143bb49f3c461011bcad15ea03ae
|
1cc42cb6c7607f35903f1a2edc3d11a34a5046e1
|
refs/heads/master
| 2022-01-07T18:26:00.010508
| 2019-04-13T09:51:17
| 2019-04-13T09:51:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
from sqlalchemy import create_engine, Column, Integer, String, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Engine pointing at the local MySQL "ormreview" schema (utf8).
engine = create_engine("mysql+pymysql://root:root@127.0.0.1/ormreview?charset=utf8",
                       encoding='utf8')
# Session factory bound to the engine, instantiated immediately as a
# module-level session.
session = sessionmaker(engine)()
# Declarative base bound to the engine so metadata calls need no bind arg.
Base = declarative_base(engine)
class Aticle(Base):
    # NOTE(review): "Aticle" looks like a typo for "Article", but renaming
    # would change the public interface; the table name is "article".
    __tablename__ = "article"

    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(100), nullable=False)
def reset():
    """Drop and recreate every table registered on Base."""
    Base.metadata.drop_all()
    Base.metadata.create_all()
    print('---重置成功---')
def query():
    """Time a COUNT over Aticle.id (comparing indexed vs. unindexed runs)."""
    # query distinct titles
    #article = session.query(Aticle.title).distinct().all()
    #update
    #session.query(Aticle).filter(Aticle.id==1).update({"title":'updated title'})
    #delete
    # session.query(Aticle).filter(Aticle.id==1).delete()
    import time
    start = time.time()
    articles = session.query(func.count(Aticle.id)).all()
    end = time.time()
    print('%f'%(end-start)) # with index:    0.028020
                            # without index: 0.388275
    print(articles)
if __name__ == '__main__':
    # reset()  # uncomment to rebuild the schema first
    query()
|
[
"1417766861@qq.com"
] |
1417766861@qq.com
|
079d5a036fab015463d7d3b7689f744e6925af06
|
2781ffdb7dd131c43d5777d33ee002643c839c28
|
/WebScraping/DatasetScriping.py
|
949a2f2d7b0afe8876a8d4464e6de15688ba91fb
|
[] |
no_license
|
AbuBakkar32/Python-Essential-Practice
|
b5e820d2e27e557b04848b5ec63dd78ae5b554c4
|
8659cf5652441d32476dfe1a8d90184a9ae92b3b
|
refs/heads/master
| 2022-11-13T23:07:16.829075
| 2020-06-27T18:46:52
| 2020-06-27T18:46:52
| 275,432,093
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# Scrape data.gov and print the text of the first link inside each
# <small> tag.
import requests
from bs4 import BeautifulSoup

# NOTE(review): no status-code check; assumes the request succeeded.
link = requests.get('https://www.data.gov/')
soup = BeautifulSoup(link.text, 'html.parser')
data = soup.find_all('small')
for i in data:
    # .text[0:] is a full-slice copy — the slice is effectively a no-op
    a = i.find('a').text[0:]
    print('This website has :{}'.format(a))
|
[
"abu35-1994@diu.edu.bd"
] |
abu35-1994@diu.edu.bd
|
74de4d362f296692eacb457593fecbd74f4e209d
|
214bfc26b3982c5c662b50a35756d7070e1664a2
|
/initial/lm/trainAttention/ARCHIVE_September_2021/matchData_EYE.py
|
dc65bdc172a3dff3c1273fc2132c6cade23b6b2c
|
[] |
no_license
|
m-hahn/forgetting-model
|
cf3f51e40fb53a49e9dfa0c648cd296803eec7ed
|
4ee0f06c6cd01ffff579cfbca67287cc32fac66c
|
refs/heads/master
| 2022-09-03T02:29:56.607656
| 2022-08-27T13:51:36
| 2022-08-27T13:51:36
| 252,456,546
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
import os
import sys
import random
from collections import defaultdict

# Read the tokenized Dundee eye-tracking treebank (tab-separated, with a
# header line giving column names).
with open("/u/scr/mhahn/Dundee/DundeeTreebankTokenized.csv", "r") as inFile:
    dundee = [x.split("\t") for x in inFile.read().strip().split("\n")]
header = dundee[0]
# Map column name -> column index
header = dict(zip(header, list(range(len(header)))))
dundee = dundee[1:]

# Group rows into sentences; consecutive rows with the same
# (SentenceID, ID) are collapsed to one token.
calibrationSentences = []
for i in range(len(dundee)):
    line = dundee[i]
    Itemno, WNUM, SentenceID, ID, WORD, Token = line
    SentenceID = (SentenceID)  # NOTE(review): no-op assignment
    # start a new sentence whenever the SentenceID changes
    if i == 0 or SentenceID != dundee[i-1][header["SentenceID"]]:
        calibrationSentences.append([])
        print(SentenceID, dundee[i-1][header["SentenceID"]])
    if i > 0 and SentenceID == dundee[i-1][header["SentenceID"]] and ID == dundee[i-1][header["ID"]]:
        continue
    else:
        # normalized word (punctuation stripped, curly apostrophe replaced,
        # lower-cased) paired with the raw TSV row
        calibrationSentences[-1].append((WORD.strip(".").strip(",").strip("?").strip(":").strip(";").replace("’", "'").strip("!").lower(), line))
# else:
if True:
    # NOTE(review): `numberOfSamples` is unused and `if True:` looks like a
    # leftover from an edited conditional.
    numberOfSamples = 12
    with open("analyze_EYE/"+__file__+".tsv", "w") as outFile:
        #   print("\t".join([str(w) for w in [sentenceID, regions[i], remainingInput[i][0]] + remainingInput[i][1] ]), file=outFile) #, file=outFile)
        #  Itemno, WNUM, SentenceID, ID, WORD, Token = line
        print("\t".join(["Sentence", "Region", "Word", "Itemno", "WNUM", "SentenceID", "ID", "WORD", "Token"]), file=outFile)
        for sentenceID in range(len(calibrationSentences)):
            sentence = calibrationSentences[sentenceID] #.lower().replace(".", "").replace(",", "").replace("n't", " n't").split(" ")
            print(sentence)
            # first token is treated as context; the rest are the regions
            context = sentence[0]
            remainingInput = sentence[1:]
            regions = range(len(sentence))
            print("INPUT", context, remainingInput)
            # skip one-token sentences (no regions beyond the context)
            if len(sentence) < 2:
                continue
            assert len(remainingInput) > 0
            # one output row per region: indices, normalized word, raw row
            for i in range(len(remainingInput)):
                print("\t".join([str(w) for w in [sentenceID, regions[i], remainingInput[i][0]] + remainingInput[i][1] ]), file=outFile) #, file=outFile)
|
[
"mhahn29@gmail.com"
] |
mhahn29@gmail.com
|
9648e10ec3c1b48598944069a8af287b4bf97fe1
|
2a1cbd7570ade00e6efb5c39ca9246f05c7f500b
|
/21/00/0.py
|
9db06c49841196fdd6336ffd86da7b9713ae110a
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201707
|
dd5faf446cb0cc3d95f7f9db30f47e0f15400258
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
refs/heads/master
| 2020-12-03T02:19:42.341198
| 2017-07-31T00:11:35
| 2017-07-31T00:11:35
| 95,927,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from abc import ABCMeta

# Demonstrate "virtual subclassing": register() makes tuple count as a
# subclass/instance of MyABC without any inheritance relationship.
class MyABC(metaclass=ABCMeta): pass
MyABC.register(tuple)
assert issubclass(tuple, MyABC)
assert isinstance((), MyABC)
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
ccf77c6dbb63b159be375a0549f4330ed4b77ada
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_phalanges.py
|
95a07958093f090cc6658575ecaed636093c7c0e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from xai.brain.wordbase.nouns._phalanx import _PHALANX

# class header
# Generated wordbase entry: the plural noun "phalanges", inheriting the
# behaviour of its singular base class _PHALANX.
class _PHALANGES(_PHALANX, ):

    def __init__(self,):
        _PHALANX.__init__(self)
        self.name = "PHALANGES"  # surface form of this entry
        self.specie = 'nouns'  # part-of-speech bucket ("specie" sic)
        self.basic = "phalanx"  # singular/basic form
        self.jsondata = {}  # placeholder for lexical metadata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
df3851986643d89986f27702969b422b0c5e4585
|
17d05858112c2aa73feab0c054457ce9808797df
|
/setup.py
|
6bcb2919589ac0e9e4988473f5290cdcb93a9395
|
[] |
no_license
|
invisibleroads/invisibleroads-macros-descriptor
|
ffad4b56c1b9191131a1d85e16a82ea19801ca30
|
de428ead5cea757ea2f800ace556ded069e53586
|
refs/heads/master
| 2023-08-11T17:33:00.458008
| 2020-11-30T03:48:54
| 2020-11-30T03:48:54
| 248,628,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
from os.path import abspath, dirname, join
from setuptools import find_packages, setup

# Console-script entry points (none for this package).
ENTRY_POINTS = '''
'''
APP_CLASSIFIERS = [
    'Programming Language :: Python',
    'License :: OSI Approved :: MIT License',
]
# Runtime dependencies (none).
APP_REQUIREMENTS = [
]
TEST_REQUIREMENTS = [
    'pytest',
    'pytest-cov',
]
FOLDER = dirname(abspath(__file__))
# PyPI long description = README followed by the changelog.
DESCRIPTION = '\n\n'.join(open(join(FOLDER, x)).read().strip() for x in [
    'README.md', 'CHANGES.md'])


setup(
    name='invisibleroads-macros-descriptor',
    version='1.0.2',
    description='Shortcut functions for descriptor operations',
    long_description=DESCRIPTION,
    long_description_content_type='text/markdown',
    classifiers=APP_CLASSIFIERS,
    author='Roy Hyunjin Han',
    author_email='rhh@crosscompute.com',
    url=(
        'https://github.com/invisibleroads/'
        'invisibleroads-macros-descriptor'),
    keywords='invisibleroads',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=True,
    extras_require={'test': TEST_REQUIREMENTS},
    install_requires=APP_REQUIREMENTS,
    entry_points=ENTRY_POINTS)
|
[
"rhh@crosscompute.com"
] |
rhh@crosscompute.com
|
d62ec5ded84e385a5a6f52da4618333cd3e5cec7
|
037877a31670a85fa78b61df9ceabe981cfdfbf6
|
/sympy/concrete/gosper.py
|
76f439ce5d72929e5d79906a18fcaefc30adc036
|
[] |
no_license
|
certik/sympy_gamma
|
6343b02e5d6d1c7d511a3329bbbd27cd11cd7ec8
|
b0e555ca03f8476533cb1c19575f4461533837de
|
refs/heads/master
| 2020-12-25T03:52:40.132034
| 2010-02-15T08:02:31
| 2010-02-15T08:02:31
| 344,391
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
"""
"""
from sympy.core.basic import Basic, S
from sympy.core.symbol import Symbol
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core import sympify
from sympy.polys import gcd, quo, roots, resultant
def normal(f, g, n=None):
    """Given relatively prime univariate polynomials 'f' and 'g',
    rewrite their quotient to a normal form defined as follows:

        f(n)       A(n) C(n+1)
        ---- = Z * -----------
        g(n)       B(n)  C(n)

    where Z is an arbitrary constant and A, B, C are monic
    polynomials in 'n' with the following properties:

    (1) gcd(A(n), B(n+h)) = 1 for all 'h' in N
    (2) gcd(B(n), C(n+1)) = 1
    (3) gcd(A(n), C(n)) = 1

    This normal form, or rational factorization in other words,
    is a crucial step in Gosper's algorithm and in difference
    equations solving. It can be also used to decide if two
    hypergeometric terms are similar or not.

    This procedure will return a triple containing elements
    of this factorization in the form (Z*A, B, C). For example:

    >>> from sympy import Symbol
    >>> n = Symbol('n', integer=True)

    >>> normal(4*n+5, 2*(4*n+1)*(2*n+3), n)
    (1/4, 3/2 + n, 1/4 + n)

    """
    f, g = map(sympify, (f, g))

    p = f.as_poly(n)
    q = g.as_poly(n)

    # Z carries the ratio of leading coefficients; A and B start as the
    # monic numerator and denominator.
    a, p = p.LC, p.as_monic()
    b, q = q.LC, q.as_monic()

    A = p.as_basic()
    B = q.as_basic()

    C, Z = S.One, a / b

    h = Symbol('h', dummy=True)

    # Non-negative integer roots of Res(A(n), B(n+h)) give the shifts at
    # which A and B share a common factor.
    res = resultant(A, B.subs(n, n+h), n)

    nni_roots = roots(res, h, domain='Z',
        predicate=lambda r: r >= 0).keys()

    if not nni_roots:
        return (f, g, S.One)
    else:
        # Peel the shared (shifted) factors out of A and B and fold them
        # into the telescoping product C.
        for i in sorted(nni_roots):
            d = gcd(A, B.subs(n, n+i), n)

            A = quo(A, d, n)
            B = quo(B, d.subs(n, n-i), n)

            # NOTE(review): xrange is Python 2 only; this module predates
            # Python 3.
            C *= Mul(*[ d.subs(n, n-j) for j in xrange(1, i+1) ])

        return (Z*A, B, C)
def gosper(term, k, a, n):
    """Attempt Gosper's algorithm for summing `term` over k = a..n.

    NOTE(review): this function is visibly incomplete/broken as written:
    `hyper` and `expr` are used without being defined, `simplify` is not
    imported in this module, and `symbol` is always the empty list so the
    branch guarded by `symbol != []` is dead.
    """
    from sympy.solvers import rsolve_poly

    if not hyper:  # NOTE(review): `hyper` is undefined in this scope
        return None
    else:
        p, q = expr.as_numer_denom()  # NOTE(review): `expr` is undefined

        A, B, C = normal(p, q, k)
        B = B.subs(k, k-1)

        # Solve A(k)*y(k+1) - B(k-1)*y(k) = C(k) for polynomial y.
        R = rsolve_poly([-B, A], C, k)

        symbol = []  # NOTE(review): never populated; branch below is dead

        if not (R is None or R is S.Zero):
            if symbol != []:
                symbol = symbol[0]

                W = R.subs(symbol, S.Zero)

                if W is S.Zero:
                    R = R.subs(symbol, S.One)
                else:
                    R = W

            # Telescoping antidifference; the sum is its difference at
            # the endpoints.
            Z = B*R*term/C

            # NOTE(review): `simplify` is not imported in this module
            return simplify(Z.subs(k, n+1) - Z.subs(k, a))
        else:
            return None
|
[
"ondrej@certik.cz"
] |
ondrej@certik.cz
|
81efde228ef6de285080c846e3a219b46e2322e7
|
d5a4fe6a8d466dce3c131c9678e86398b17b27c5
|
/Finite Difference Computing with Exponential Decay_HPL/SF_FDCED/softeng/decay_flat.py
|
26557b62d82049ce8043028498525738a4bb08ef
|
[] |
no_license
|
AaronCHH/B_PYTHON_Numerical
|
d8fe3ec843dc1b1b35a407134980027ac5516721
|
d369d73f443cc9afdb0a10912b539bc8c48f7d4a
|
refs/heads/master
| 2021-01-19T10:14:57.903055
| 2017-06-21T05:21:21
| 2017-06-21T05:21:21
| 87,845,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
# Solve the decay ODE u' = -a*u, u(0) = I, with the theta-rule and
# compare against the exact solution u(t) = I*exp(-a*t).
from numpy import *
from matplotlib.pyplot import *

I = 1                     # initial condition
a = 2                     # decay rate
T = 4                     # end time
dt = 0.2                  # time step
Nt = int(round(T/dt))     # no of time intervals
u = zeros(Nt+1)           # array of u[n] values
t = linspace(0, T, Nt+1)  # time mesh
theta = 1                 # Backward Euler method

u[0] = I                  # assign initial condition
# theta-rule update (theta=0: Forward Euler, 1: Backward Euler, 0.5: CN)
for n in range(0, Nt):    # n=0,1,...,Nt-1
    u[n+1] = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)*u[n]

# Compute norm of the error.
# FIX: u_e previously read "I*exp(-a*t) - u", which made `error` equal to
# u_e - 2*u; u_e must be the exact solution at the mesh points.
u_e = I*exp(-a*t)         # exact u at the mesh points
error = u_e - u
E = sqrt(dt*sum(error**2))
# FIX: parenthesized print works under both Python 2 and 3 (the original
# used the Python-2-only print statement).
print('Norm of the error: %.3E' % E)

# Compare numerical (u) and exact solution (u_e) in a plot
plot(t, u, 'r--o')
t_e = linspace(0, T, 1001)       # very fine mesh for u_e
u_e = I*exp(-a*t_e)
plot(t_e, u_e, 'b-')
legend(['numerical, theta=%g' % theta, 'exact'])
xlabel('t')
ylabel('u')
show()
|
[
"aaronhsu219@gmail.com"
] |
aaronhsu219@gmail.com
|
460b60c4a8cdb5b8753f5e3e2789318b3fea73b4
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/jmZe7R4ibXkrQbogr_0.py
|
46152937d3897c2b43d87c5e4b43b440156a655f
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
"""
Write a **regular expression** that checks to see if a password is valid. For
a password to be valid, it must meet the following requirments:
1. The password must contain at least one uppercase character.
2. The password must contain at least one lowercase character.
3. The password must contain at least one number.
4. The password must contain at least one special character `! ? * #`
5. The password must be at least 8 characters in length.
### Examples
"Password*12" ➞ True
"passWORD12!" ➞ True
"Pass" ➞ False
### Notes
* The lowercase char, uppercase char, special char, and number can appear at any part of the password.
* **You will only be writing a regular expression; do not write a function.**
"""
import re
r="^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[!*#?])[A-Za-z\d!#*?]{8,}$"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fa23bb740b0242e213687ec80b61153c25233105
|
1cadec31e3f5c71407a67b4676fdc9e1ab9891bf
|
/modoboa/extensions/admin/forms/__init__.py
|
5bfbed71114c50c36aac8fb32d3ac51b92833677
|
[
"ISC"
] |
permissive
|
SonRiab/modoboa
|
9b840b639e6e05a8e6145bffd45ddd1ce90a2b8b
|
97db0811c089aa477b21f28f318ab631d20cf108
|
refs/heads/master
| 2020-12-25T09:09:27.286185
| 2014-08-13T09:58:22
| 2014-08-13T09:58:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Package facade: re-export the admin form classes so callers can import
# them directly from ``modoboa.extensions.admin.forms`` instead of the
# individual submodules.
from .domain import DomainFormGeneral, DomainFormOptions, DomainForm
from .account import (
    AccountFormGeneral, AccountFormMail, AccountPermissionsForm,
    AccountForm
)
from .alias import AliasForm
from .forward import ForwardForm
from .import_ import ImportDataForm, ImportIdentitiesForm
from .export import ExportDataForm, ExportDomainsForm, ExportIdentitiesForm

# Explicit public API for ``from ... import *``.
__all__ = [
    'DomainFormGeneral', 'DomainFormOptions', 'DomainForm',
    'AccountFormGeneral', 'AccountFormMail', 'AccountPermissionsForm',
    'AccountForm', 'AliasForm', 'ImportDataForm', 'ImportIdentitiesForm',
    'ExportDataForm', 'ExportDomainsForm', 'ExportIdentitiesForm',
    'ForwardForm'
]
|
[
"tonio@ngyn.org"
] |
tonio@ngyn.org
|
7e2e79ddfb16ee42977e799c3cf15480e2b87a9f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04046/s526215955.py
|
321044d84612a39406e8d438749ed1e56e37746f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# Modular lattice-path count with a forbidden sub-rectangle.
# NOTE(review): looks like AtCoder "Iroha and a Grid" — count paths from
# the top-left to the bottom-right of an h x w grid that avoid the
# bottom-left a x b block; confirm against the original problem statement.
m=10**9+7                       # prime modulus; enables Fermat inverses
h,w,a,b=map(int,input().split())
# c walks a binomial coefficient incrementally; pow(x, m-2, m) is the
# modular inverse of x (Fermat's little theorem, m prime).  After the
# first loop d == c == the path count through the first allowed crossing.
d=c=1
for i in range(h-1):
    d=c=c*(w+h-b-2-i)*pow(i+1,m-2,m)%m
# Slide the crossing row down one step at a time, updating the product of
# binomials by an O(1) ratio instead of recomputing factorials.
for i in range(1,h-a):
    c=c*(b-1+i)*(h-i)*pow(i*(w+h-b-1-i),m-2,m)%m
    d+=c
print(d%m)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
471f4a97647808b279d4134c0ea9d027ab9c2229
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/place/different_eye/new_time.py
|
c6abfe47f36c42f4f14c519a34724907fd677864
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
#! /usr/bin/env python


def week(str_arg):
    """Write the supplied string to stdout."""
    print(str_arg)


def way_or_child(str_arg):
    """Echo *str_arg* via week(), then print the fixed marker line."""
    week(str_arg)
    print('case_or_week')


if __name__ == '__main__':
    way_or_child('last_government_and_next_fact')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
ca7a2aa1310b5cf5fe2bc79a782b7d9bba4daaed
|
46ac378de81b13359ceff567b8ec1b04cda1d4d5
|
/luddite/app.py
|
dc79b95750eafccdf985da0f64d4caa7ea8b20f0
|
[
"MIT"
] |
permissive
|
ntoll/luddite
|
f54f8c4564590e57bdc97b7f3ee9847f7e940849
|
e234a955db16391d9cd9b4cd97c214c954e5697d
|
refs/heads/master
| 2021-01-11T22:10:52.120319
| 2017-01-23T09:53:29
| 2017-01-23T09:53:29
| 78,933,221
| 7
| 2
| null | 2017-01-20T20:46:05
| 2017-01-14T10:15:20
|
Python
|
UTF-8
|
Python
| false
| false
| 871
|
py
|
"""
Luddite - the anti-browser. :-)
"""
import os
import logging
from luddite import __version__
from luddite.browser import Browser
def setup_logging():
    """
    Configure root logging to write DEBUG records to ~/luddite/luddite.log.

    Creates the log directory on first run and announces the log file path
    on stdout.  This could probably be more elegant.
    """
    home = os.path.expanduser('~')
    log_dir = os.path.join(home, 'luddite')
    log_file = os.path.join(log_dir, 'luddite.log')
    # exist_ok avoids the check-then-create race the old exists() guard
    # had (another process could create the directory between the two
    # calls and makedirs would then raise).
    os.makedirs(log_dir, exist_ok=True)
    log_fmt = '%(asctime)s - %(name)s(%(funcName)s) %(levelname)s: %(message)s'
    # filemode='w' truncates the previous session's log on every start.
    logging.basicConfig(filename=log_file, filemode='w', format=log_fmt,
                        level=logging.DEBUG)
    print('Logging to {}'.format(log_file))
def run(urls):
    """Set up logging, open one browser tab per URL, then enter the UI loop."""
    setup_logging()
    logging.info('Starting Luddite {}'.format(__version__))
    logging.info(urls)
    window = Browser()
    for address in urls:
        window.create_tab(address)
    window.mainloop()
|
[
"ntoll@ntoll.org"
] |
ntoll@ntoll.org
|
8f181a38345a7cd75b67289817a44b5871fe08f4
|
000d7a149b59b9e23ca31a34cd07ce62bb7bcae4
|
/models/linktracker.py
|
83e9e4d6130876b20bb212924e6914ad9e00e321
|
[
"MIT"
] |
permissive
|
Lumiare1/tyggbot
|
37ce0f2d2a697b0e673b83b02eb032c43deced3a
|
5a1e09504304f966f73344ca6ee27d7f4c20fc6e
|
refs/heads/master
| 2021-01-16T17:59:14.656659
| 2015-10-04T11:03:45
| 2015-10-04T11:03:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
import logging
import datetime
import pymysql
from urllib.parse import urlsplit
log = logging.getLogger('tyggbot')
class LinkTrackerLink:
    """In-memory mirror of one `tb_link_data` row: a URL plus its link stats.

    `needs_sync` marks instances whose counters changed since the last
    database write; `id == -1` marks rows that have never been INSERTed.
    """

    @classmethod
    def load(cls, cursor, url):
        """Load the row for *url* from the DB, or build a fresh unsaved one.

        *cursor* is expected to be a DictCursor (rows accessed by column
        name) — see LinkTracker.add, which passes one in.
        """
        link = cls()
        cursor.execute('SELECT * FROM `tb_link_data` WHERE `url`=%s', [url])
        row = cursor.fetchone()
        if row:
            # We found a link matching this URL in the database!
            link.id = row['id']
            link.url = row['url']
            link.times_linked = row['times_linked']
            link.first_linked = row['first_linked']
            link.last_linked = row['last_linked']
            link.needs_sync = False
        else:
            # No link was found with this URL, create a new one!
            link.id = -1   # sentinel: sync() will INSERT instead of UPDATE
            link.url = url
            link.times_linked = 0
            link.first_linked = datetime.datetime.now()
            link.last_linked = datetime.datetime.now()
            link.needs_sync = False
        return link

    def increment(self):
        """Bump the link counter and mark the row dirty for the next sync."""
        self.times_linked += 1
        self.last_linked = datetime.datetime.now()
        self.needs_sync = True

    def sync(self, cursor):
        """Write this row through *cursor*: INSERT when id == -1, else UPDATE.

        NOTE(review): needs_sync is never reset to False here, so an
        already-synced row is re-written on every later sync() pass —
        harmless but wasteful; confirm whether that is intentional.
        """
        _first_linked = self.first_linked.strftime('%Y-%m-%d %H:%M:%S')
        _last_linked = self.last_linked.strftime('%Y-%m-%d %H:%M:%S')
        if self.id == -1:
            cursor.execute('INSERT INTO `tb_link_data` (`url`, `times_linked`, `first_linked`, `last_linked`) VALUES (%s, %s, %s, %s)',
                           [self.url, self.times_linked, _first_linked, _last_linked])
            self.id = cursor.lastrowid
        else:
            cursor.execute('UPDATE `tb_link_data` SET `times_linked`=%s, `last_linked`=%s WHERE `id`=%s',
                           [self.times_linked, _last_linked, self.id])
class LinkTracker:
    """Counts how often URLs are posted, batching writes to `tb_link_data`."""

    def __init__(self, sqlconn):
        # sqlconn: live pymysql connection shared with the caller.
        self.sqlconn = sqlconn
        # normalized URL -> LinkTrackerLink cache for this process
        self.links = {}

    def add(self, url):
        """Normalize *url* and increment its counter (loading it on first use).

        Normalization: strip a leading "www." from the host, strip a
        trailing "/" from the path, keep the query string (with its "?")
        when present, and drop the scheme entirely.
        """
        url_data = urlsplit(url)
        if url_data.netloc[:4] == 'www.':
            netloc = url_data.netloc[4:]
        else:
            netloc = url_data.netloc
        if url_data.path.endswith('/'):
            path = url_data.path[:-1]
        else:
            path = url_data.path
        if len(url_data.query) > 0:
            query = '?' + url_data.query
        else:
            query = ''
        url = netloc + path + query
        if url not in self.links:
            self.links[url] = LinkTrackerLink.load(self.sqlconn.cursor(pymysql.cursors.DictCursor), url)
        self.links[url].increment()

    def sync(self):
        """Flush every dirty link to the database in one batch.

        NOTE(review): no explicit commit() — this appears to rely on
        re-enabling autocommit to commit the pending transaction; confirm
        against the pymysql documentation for the deployed version.
        """
        self.sqlconn.autocommit(False)
        cursor = self.sqlconn.cursor()
        for link in [link for k, link in self.links.items() if link.needs_sync]:
            link.sync(cursor)
        cursor.close()
        self.sqlconn.autocommit(True)
|
[
"pajlada@bithack.se"
] |
pajlada@bithack.se
|
518672544e321b6bed89cfb0a084186f20f524b8
|
38422c3edeb269926502fed31a0761aff8dd3d3b
|
/Swanepoel_analysis/Swanepoel_analysis/config_files/ek036-4_config.py
|
79e82e0ebd6416bc263e9bebbc4bb486c1ccf30f
|
[] |
no_license
|
vfurtula/Alle-projekter
|
2dab3ccbf7ddb6be3ee09f9f5e87085f354dd84a
|
da3d7c9611088043e2aea5d844f1ae6056215e04
|
refs/heads/master
| 2022-06-07T05:17:35.327228
| 2020-04-30T10:28:48
| 2020-04-30T10:28:48
| 260,180,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# Run configuration for the Swanepoel thin-film analysis, sample ek036-4.
# Each load* entry is a [file path, enabled flag] pair; "Sub" files are the
# bare substrate reference, "SubFilm" files are substrate + deposited film.
loadSubOlis=["/home/vfurtula/Documents/Projects/Swanepoel_analysis/data/sapphirejune16.asc",True]
loadSubFilmOlis=["/home/vfurtula/Documents/Projects/Swanepoel_analysis/data/ek036-4.asc",True]
loadSubFTIR=["/home/vfurtula/Documents/Projects/Swanepoel_analysis/data/SAPPHIREJUNE16.DPT",True]
loadSubFilmFTIR=["/home/vfurtula/Documents/Projects/Swanepoel_analysis/data/EK036-4.DPT",True]
# Envelope-fitting mode: "spline" or linear — consumed by the analysis code.
fit_linear_spline="spline"
# Gaussian smoothing: factors per region, with region borders below.
# NOTE(review): units of the borders appear to be eV (plot_X is "eV");
# confirm against the analysis module.
gaussian_factors=[7.25, 1.0, 0.75, 0.0]
gaussian_borders=[0.225, 0.6, 0.8, 1.6, 3.25]
ignore_data_pts=45          # leading data points to skip
corr_slit=1                 # slit-width correction flag/width
fit_poly_order=4
fit_poly_ranges=[[0.6, 0.95, 1.5, 2.5],True]   # [range edges, enabled]
filename="save_to_file"
folder="save_to_folde"      # NOTE(review): probable typo for "save_to_folder" — but other code may depend on this exact name, so left as-is
timestr="180223-1627"
save_figs=True
plot_X="eV"                 # x-axis unit for the generated plots
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
49ad7fbd531626b902b58ec3a7df9d440d97e36b
|
a518141ca3ba2b6fa63a7961b51936d9438ff022
|
/401 - Palindromes.py
|
c489ec9ee8ebc9e83818d1be9f90a8116897be74
|
[] |
no_license
|
jlhung/UVA-Python
|
ec93b2c98e04c753e8356f3e4825584fae4a8663
|
7a0db4fecffd7ac4f377f93da41291a8e998ee9b
|
refs/heads/master
| 2022-11-28T04:47:49.270187
| 2020-08-10T13:19:58
| 2020-08-10T13:19:58
| 116,969,745
| 19
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
'''
20180203 jlhung v1.0
'''

# Mirror table for UVA 401: a[cal(ch)] is the mirror image of character ch,
# or '0' when ch has no mirror.  Indices 0-25 cover 'A'-'Z', 26-34 cover
# '1'-'9'.  BUG FIX: index 22 ('W') was 'M' in the original; per the UVA
# 401 statement W mirrors to itself.
a = "A000300HIL0JM0O0002TUVWXY51SE0Z0080"


def cal(z):
    """Map ord(ch) to an index into the mirror table `a`.

    Handles 'A'-'Z' and '1'-'9'; the digit '0' does not occur in valid
    UVA 401 input.
    """
    if z >= 65:            # 'A'..'Z'
        return z - 65
    else:                  # '1'..'9'
        return z - 49 + 26


def classify(n):
    """Return the UVA 401 verdict line for the string *n*."""
    # p: is n an ordinary (character-reversal) palindrome?
    if n == n[::-1]:
        p = 1
    else:
        p = 0
    # m: is n a mirrored string, i.e. equal to its mirrored reversal?
    # Checking just past the midpoint covers the middle character too.
    m = 1
    for i in range(len(n)//2 + 1):
        x = cal(ord(n[i]))
        if a[x] != n[len(n)-1-i]:
            m = 0
            break
    if m == 1 and p == 1:
        return "{} -- is a mirrored palindrome.".format(n)
    elif m == 0 and p == 1:
        return "{} -- is a regular palindrome.".format(n)
    elif m == 1 and p == 0:
        return "{} -- is a mirrored string.".format(n)
    else:
        return "{} -- is not a palindrome.".format(n)


if __name__ == '__main__':
    # Guarded so importing this module does not consume stdin: read lines
    # until EOF, printing each verdict followed by a blank line.
    while True:
        try:
            n = input()
        except EOFError:
            break
        print(classify(n))
        print()
|
[
"35291112+jlhung@users.noreply.github.com"
] |
35291112+jlhung@users.noreply.github.com
|
d172f7350f6aaeb3f780cbf04830ffcac3896f82
|
deae8bc2da1936c4f9afbd8e9412af8df39e96a2
|
/src/spaceone/inventory/connector/__init__.py
|
c89ed55465d6f21041cb30667531f80727c4f47b
|
[
"Apache-2.0"
] |
permissive
|
choonho/inventory
|
11a3ef0ec327214cc26260ace049305021c892cf
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
refs/heads/master
| 2023-04-17T05:51:20.373738
| 2020-06-16T07:14:27
| 2020-06-16T07:14:27
| 273,175,186
| 0
| 0
| null | 2020-06-18T07:49:19
| 2020-06-18T07:49:18
| null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
from spaceone.inventory.connector.identity_connector import IdentityConnector
from spaceone.inventory.connector.plugin_connector import PluginConnector
from spaceone.inventory.connector.secret_connector import SecretConnector
from spaceone.inventory.connector.collector_connector import CollectorPluginConnector
from spaceone.inventory.connector.repository_connector import RepositoryConnector
|
[
"whdalsrnt@megazone.com"
] |
whdalsrnt@megazone.com
|
87c88e8b5fe16b76cb62ec4af876bdc38c0526de
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q1348_Tweet_Counts_Per_Frequency.py
|
0ebd6ba5dc2c6918e1155392e2fb98e3ccab9546
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875
| 2023-03-26T12:22:42
| 2023-03-26T12:22:42
| 204,069,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
from typing import List
from collections import Counter,defaultdict
from math import *
from functools import reduce,lru_cache
import numpy as np
from heapq import *
from bisect import bisect_left
class TweetCounts:
    """LeetCode 1348: record tweet timestamps and report per-name counts
    bucketed into minute/hour/day chunks over an inclusive time window."""

    # Chunk width in seconds for each supported frequency keyword.
    _FREQ_SECONDS = {"minute": 60, "hour": 3600, "day": 86400}

    def __init__(self):
        # (tweetName, time) -> number of tweets recorded at that second.
        self.counter = Counter()

    def recordTweet(self, tweetName: str, time: int) -> None:
        """Register one tweet by *tweetName* at second *time*."""
        self.counter[(tweetName, time)] += 1

    def getTweetCountsPerFrequency(self, freq: str, tweetName: str, startTime: int, endTime: int) -> List[int]:
        """Return per-chunk tweet counts for *tweetName* in [startTime, endTime].

        The last chunk may be shorter than a full interval.  Iterates the
        recorded (name, time) pairs instead of every second of the window
        as the original did — up to 86400 iterations per chunk for "day" —
        so this runs in O(#recorded entries) regardless of the time span.
        """
        width = self._FREQ_SECONDS[freq]
        buckets = [0] * ((endTime - startTime) // width + 1)
        for (name, time), count in self.counter.items():
            if name == tweetName and startTime <= time <= endTime:
                buckets[(time - startTime) // width] += count
        return buckets
# Your TweetCounts object will be instantiated and called as such:
# obj = TweetCounts()
# obj.recordTweet(tweetName,time)
# param_2 = obj.getTweetCountsPerFrequency(freq,tweetName,startTime,endTime)


def demo():
    """Replay the problem statement's sample operations against TweetCounts.

    BUG FIX: the original tail instantiated an undefined ``Solution`` class
    and called a nonexistent ``sol.func()`` at import time, crashing the
    module.  This problem is driven through TweetCounts instead.
    """
    obj = TweetCounts()
    obj.recordTweet("tweet3", 0)
    obj.recordTweet("tweet3", 60)
    obj.recordTweet("tweet3", 10)
    output = [
        obj.getTweetCountsPerFrequency("minute", "tweet3", 0, 59),
        obj.getTweetCountsPerFrequency("minute", "tweet3", 0, 60),
    ]
    obj.recordTweet("tweet3", 120)
    output.append(obj.getTweetCountsPerFrequency("hour", "tweet3", 0, 210))
    # Expected answer per the problem statement.
    answer = [[2], [2, 1], [4]]
    print(output, answer, answer == output)


if __name__ == "__main__":
    demo()
|
[
"444980834@qq.com"
] |
444980834@qq.com
|
d1a69c770860c71d4ea8cbe1f0d096cc33ed146f
|
9e36b3a0a609f862aa2894a1473896c8465c41a1
|
/arelle/ViewWinGrid.py
|
b2cea677258415d91d3d4774b0643e666a8e869e
|
[
"Apache-2.0"
] |
permissive
|
marado/Arelle
|
7cd74a66d19be174c9f1fe66f788dd53447bffac
|
7ca2bf09c852787cd7a38d68b13c11d5e33e72a2
|
refs/heads/master
| 2020-04-08T00:27:54.212337
| 2011-10-28T17:49:49
| 2011-10-28T17:49:49
| 2,658,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,703
|
py
|
'''
Created on Oct 9, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from tkinter import *
from tkinter.ttk import *
from arelle.CntlrWinTooltip import ToolTip
from arelle.UiUtil import (scrolledHeaderedFrame, scrolledFrame)
class ViewGrid:
    """Base grid view hosted in a tab of the Arelle main window.

    Builds a scrolledHeaderedFrame with four panes (table header, column
    header, row header, body), wires context-menu and optional tooltip
    handling, and registers itself with the model's view list.
    """

    def __init__(self, modelXbrl, tabWin, tabTitle, hasToolTip=False, lang=None):
        # tabWin: the ttk Notebook this view lives in.
        self.tabWin = tabWin
        #self.viewFrame = Frame(tabWin)
        #self.viewFrame.grid(row=0, column=0, sticky=(N, S, E, W))
        '''
        paneWin = PanedWindow(self.viewFrame, orient=VERTICAL)
        paneWin.grid(row=1, column=0, sticky=(N, S, E, W))
        self.zGrid = scrollgrid(paneWin)
        self.zGrid.grid(row=0, column=0, sticky=(N, S, E, W))
        self.xyGrid = scrollgrid(paneWin)
        self.xyGrid.grid(row=1, column=0, sticky=(N, S, E, W))
        '''
        '''
        self.gridBody = scrollgrid(self.viewFrame)
        self.gridBody.grid(row=0, column=0, sticky=(N, S, E, W))
        '''
        # Current layout: one headered scrolling frame whose four interior
        # widgets are exposed as the table/column/row headers and the body.
        self.viewFrame = scrolledHeaderedFrame(tabWin)
        self.gridTblHdr = self.viewFrame.tblHdrInterior
        self.gridColHdr = self.viewFrame.colHdrInterior
        self.gridRowHdr = self.viewFrame.rowHdrInterior
        self.gridBody = self.viewFrame.bodyInterior
        '''
        self.viewFrame = scrolledFrame(tabWin)
        self.gridTblHdr = self.gridRowHdr = self.gridColHdr = self.gridBody = self.viewFrame.interior
        '''
        tabWin.add(self.viewFrame,text=tabTitle)
        self.modelXbrl = modelXbrl
        self.hasToolTip = hasToolTip
        # NOTE(review): toolTipText is assigned again inside the hasToolTip
        # branch below; the duplicate looks redundant but is kept as-is.
        self.toolTipText = StringVar()
        if hasToolTip:
            # '+' appends these bindings instead of replacing existing ones.
            self.gridBody.bind("<Motion>", self.motion, '+')
            self.gridBody.bind("<Leave>", self.leave, '+')
            self.toolTipText = StringVar()
            # Tooltip starts disabled; setToolTip enables it per cell.
            self.toolTip = ToolTip(self.gridBody,
                                   textvariable=self.toolTipText,
                                   wraplength=480,
                                   follow_mouse=True,
                                   state="disabled")
            self.toolTipColId = None
            self.toolTipRowId = None
        # NOTE(review): modelXbrl was already assigned above; duplicate kept.
        self.modelXbrl = modelXbrl
        # Platform-specific right-click event name comes from the controller.
        self.contextMenuClick = self.modelXbrl.modelManager.cntlr.contextMenuClick
        self.gridTblHdr.contextMenuClick = self.contextMenuClick
        self.gridColHdr.contextMenuClick = self.contextMenuClick
        self.gridRowHdr.contextMenuClick = self.contextMenuClick
        self.gridBody.contextMenuClick = self.contextMenuClick
        self.lang = lang
        if modelXbrl:
            modelXbrl.views.append(self)
            if not lang:
                self.lang = modelXbrl.modelManager.defaultLang

    def close(self):
        """Remove the tab and deregister this view from the model."""
        self.tabWin.forget(self.viewFrame)
        self.modelXbrl.views.remove(self)
        self.modelXbrl = None

    def select(self):
        """Bring this view's tab to the front."""
        self.tabWin.select(self.viewFrame)

    def leave(self, *args):
        # Mouse left the grid: forget the last hovered cell so the next
        # motion event re-evaluates the tooltip.
        self.toolTipColId = None
        self.toolTipRowId = None

    def motion(self, *args):
        # Tooltip-on-hover logic is currently disabled (treeview-style cell
        # identification does not apply to this grid implementation).
        '''
        tvColId = self.gridBody.identify_column(args[0].x)
        tvRowId = self.gridBody.identify_row(args[0].y)
        if tvColId != self.toolTipColId or tvRowId != self.toolTipRowId:
            self.toolTipColId = tvColId
            self.toolTipRowId = tvRowId
            newValue = None
            if tvRowId and len(tvRowId) > 0:
                try:
                    col = int(tvColId[1:])
                    if col == 0:
                        newValue = self.gridBody.item(tvRowId,"text")
                    else:
                        values = self.gridBody.item(tvRowId,"values")
                        if col <= len(values):
                            newValue = values[col - 1]
                except ValueError:
                    pass
            self.setToolTip(newValue, tvColId)
        '''

    def setToolTip(self, text, colId="#0"):
        """Show *text* as the tooltip when it would not fit in column *colId*.

        The 8-pixels-per-character estimate decides whether the text
        overflows the column width; multi-line text always gets a tooltip.
        """
        self.toolTip._hide()
        if isinstance(text,str) and len(text) > 0:
            width = self.gridBody.column(colId,"width")
            if len(text) * 8 > width or '\n' in text:
                self.toolTipText.set(text)
                self.toolTip.configure(state="normal")
                self.toolTip._schedule()
            else:
                self.toolTipText.set("")
                self.toolTip.configure(state="disabled")
        else:
            self.toolTipText.set("")
            self.toolTip.configure(state="disabled")

    def contextMenu(self):
        """Lazily build the right-click menu and bind it to all four panes."""
        try:
            return self.menu
        except AttributeError:
            # First call: create the menu and attach the popup handler,
            # taking care not to clobber bindings subclasses already made.
            self.menu = Menu( self.viewFrame, tearoff = 0 )
            self.gridBody.bind( self.contextMenuClick, self.popUpMenu )
            if not self.gridTblHdr.bind(self.contextMenuClick):
                self.gridTblHdr.bind( self.contextMenuClick, self.popUpMenu )
            if not self.gridColHdr.bind(self.contextMenuClick):
                self.gridColHdr.bind( self.contextMenuClick, self.popUpMenu )
            if not self.gridRowHdr.bind(self.contextMenuClick):
                self.gridRowHdr.bind( self.contextMenuClick, self.popUpMenu )
            return self.menu

    def popUpMenu(self, event):
        """Post the context menu at the pointer's screen position."""
        self.menu.post( event.x_root, event.y_root )

    def menuAddLangs(self):
        """Add a Language submenu listing every language found in the model."""
        langsMenu = Menu(self.viewFrame, tearoff=0)
        self.menu.add_cascade(label=_("Language"), menu=langsMenu, underline=0)
        for lang in sorted(self.modelXbrl.langs):
            # Default-argument binding (l=lang) freezes each menu entry's
            # language; a bare closure would late-bind to the last value.
            langsMenu.add_cascade(label=lang, underline=0, command=lambda l=lang: self.setLang(l))

    def setLang(self, lang):
        """Switch the display language and re-render (view() is supplied by subclasses)."""
        self.lang = lang
        self.view()
|
[
"fischer@markv.com"
] |
fischer@markv.com
|
0b5a6d6033e0072d7b2378fe1573de7aa7606581
|
9673db0d489c0cfa0a304844b9ff5ba8c6024621
|
/cwmipermanentevents/win_cwmieventlogconsumer.py
|
319871554f23f8852e9a806d3457eb0744bf007b
|
[] |
no_license
|
daveres/Ansible-Auto-Generated-Modules
|
a9ec1ad2f9ff9c741b77f5d411b927f1a48c6ce3
|
a91b484171be12b9e2bc7c0a9c23bdd767877e38
|
refs/heads/master
| 2020-12-25T03:29:37.751104
| 2015-12-22T19:24:01
| 2015-12-22T19:24:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_cwmieventlogconsumer
version_added:
short_description: Generated from DSC module cwmipermanentevents version 1.1 at 22.12.2015 20.11.12
description:
- DSC Resources for managing WMI permanent events
options:
EventID:
description:
-
required: True
default:
aliases: []
Name:
description:
-
required: True
default:
aliases: []
Category:
description:
-
required: False
default:
aliases: []
Ensure:
description:
-
required: False
default:
aliases: []
choices:
- Absent
- Present
EventType:
description:
-
required: False
default:
aliases: []
choices:
- AuditFailure
- AuditSuccess
- Error
- Information
- Success
- Warning
InsertionStringTemplates:
description:
-
required: False
default:
aliases: []
NumberOfInsertionStrings:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
SourceName:
description:
-
required: False
default:
aliases: []
UNCServerName:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
|
[
"trond@hindenes.com"
] |
trond@hindenes.com
|
de2ae8f2273970279ea9cb452dd78cf5bfdf252b
|
9645bdfbb15742e0d94e3327f94471663f32061a
|
/Python/394 - Decode String/394_decode-string.py
|
1f0ffd16417ab8710b0220c574306888c52a0085
|
[] |
no_license
|
aptend/leetcode-rua
|
f81c080b2260adb2da677612e5c437eda256781d
|
80e44f4e9d3a5b592fdebe0bf16d1df54e99991e
|
refs/heads/master
| 2023-06-22T00:40:05.533424
| 2021-03-17T13:51:28
| 2021-03-17T13:51:28
| 186,434,133
| 2
| 0
| null | 2023-06-21T22:12:51
| 2019-05-13T14:17:27
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
from leezy import Solution, solution
class Q394(Solution):
    """LeetCode 394 — Decode String: expand patterns like 3[a2[c]] -> accaccacc."""

    @solution
    def decodeString(self, s):
        """Recursively decode: leading literal, one k[inner] group, then the rest.

        Assumes well-formed input (balanced brackets, digits immediately
        before each '['), as the inline comments note.
        """
        # 20ms 49.53%
        # Consume the leading run of plain letters.
        i = 0
        while i < len(s) and s[i].isalpha():
            i += 1
        if i == len(s): # pure string
            return s
        lead_str = s[:i]
        # s[i] is digit or [
        # Parse the repeat count.
        cnt = 0
        while s[i].isdigit(): # no boundry check when we have trusted input
            cnt = cnt * 10 + int(s[i])
            i += 1
        # now s[i] is '['
        # Find the matching ']' by tracking bracket depth.
        open_ = 1
        j = i
        while j < len(s) and open_:
            j += 1
            if s[j] == '[':
                open_ += 1
            elif s[j] == ']':
                open_ -= 1
        # Expand this group recursively, then decode whatever follows it.
        return lead_str + cnt * self.decodeString(s[i+1:j]) + self.decodeString(s[j+1:])

    @solution
    def decode_str(self, s):
        """Placeholder for an alternative (e.g. stack-based) implementation."""
        pass
def main():
    """Run Q394 against the three sample cases from the problem statement."""
    prob = Q394()
    for case in ('3[a]2[bc]', '3[a2[c]]', '2[abc]3[cd]ef'):
        prob.add_args(case)
    prob.run()


if __name__ == "__main__":
    main()
|
[
"crescentwhale@hotmail.com"
] |
crescentwhale@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.