blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
faeb98b87b529091b354db981aaab9a7664aa97d
|
a59d20b9918df7cc17cfaf4bd1abaa73a086eb2a
|
/tfcnn_retraining.py
|
bc37e62d3d31befff38348e8f53c5da0d95d24cf
|
[] |
no_license
|
yanqinghao/TFdeeplearning
|
5da5e7f4462c539ae25f29a42f2c5bc3b4abd171
|
8ec4f99b6e73b9d0866bc3706e2807cfa3229c58
|
refs/heads/master
| 2018-12-10T13:45:35.552852
| 2018-09-13T09:52:49
| 2018-09-13T09:52:49
| 118,085,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,866
|
py
|
import os
import tarfile
import _pickle as cPickle
import numpy as np
import urllib.request
import scipy.misc
# CIFAR-10 download location and local working directory.
cifar_link = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
data_dir = 'temp'
if not os.path.isdir(data_dir):
    os.makedirs(data_dir)
# The ten CIFAR-10 class names; list position doubles as the numeric label id.
objects = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
target_file = os.path.join(data_dir, 'cifar-10-python.tar.gz')
if not os.path.isfile(target_file):
    print('CIFAR-10 file not found. Downloading CIFAR data (Size = 163MB)')
    print('This may take a few minutes, please wait.')
    filename, headers = urllib.request.urlretrieve(cifar_link, target_file)
# Extract into memory
tar = tarfile.open(target_file)
tar.extractall(path=data_dir)
tar.close()
# Create train image folders (one sub-folder per class name).
train_folder = 'train_dir'
if not os.path.isdir(os.path.join(data_dir, train_folder)):
    for i in range(10):
        folder = os.path.join(data_dir, train_folder, objects[i])
        os.makedirs(folder)
# Create test image folders
test_folder = 'validation_dir'
if not os.path.isdir(os.path.join(data_dir, test_folder)):
    for i in range(10):
        folder = os.path.join(data_dir, test_folder, objects[i])
        os.makedirs(folder)
def load_batch_from_file(file):
    """Load one CIFAR-10 batch file and return its unpickled dictionary.

    The CIFAR batch files were pickled under Python 2, so 'latin1'
    decoding is required to read them under Python 3.

    :param file: path to a 'data_batch_*' or 'test_batch' file
    :return: dict with (at least) 'labels', 'filenames' and 'data' keys
    """
    # 'with' guarantees the handle is closed even if unpickling raises;
    # the original left the file open on error.
    with open(file, 'rb') as file_conn:
        return cPickle.load(file_conn, encoding='latin1')
def save_images_from_dict(image_dict, folder='data_dir'):
    """Write every image of one CIFAR batch to <data_dir>/<folder>/<class>/.

    Relies on the module-level ``data_dir`` and ``objects`` globals.
    ``image_dict`` comes from load_batch_from_file and carries parallel
    'labels', 'filenames' and 'data' entries.
    """
    for ix, label in enumerate(image_dict['labels']):
        folder_path = os.path.join(data_dir, folder, objects[label])
        filename = image_dict['filenames'][ix]
        # Transform image data: flat 3072-byte row -> (channels, rows, cols).
        image_array = image_dict['data'][ix]
        image_array.resize([3, 32, 32])
        # Save image; transpose() flips (3, 32, 32) -> (32, 32, 3) for saving.
        # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2, so this
        # requires an old scipy (or porting to imageio.imwrite).
        output_location = os.path.join(folder_path, filename)
        scipy.misc.imsave(output_location,image_array.transpose())
# Location of the extracted batch files and their names.
data_location = os.path.join(data_dir, 'cifar-10-batches-py')
train_names = ['data_batch_' + str(x) for x in range(1,6)]
test_names = ['test_batch']
# Sort train images
for file in train_names:
    print('Saving images from file: {}'.format(file))
    file_location = os.path.join(data_dir, 'cifar-10-batches-py', file)
    image_dict = load_batch_from_file(file_location)
    save_images_from_dict(image_dict, folder=train_folder)
# Sort test images
for file in test_names:
    print('Saving images from file: {}'.format(file))
    file_location = os.path.join(data_dir, 'cifar-10-batches-py', file)
    image_dict = load_batch_from_file(file_location)
    save_images_from_dict(image_dict, folder=test_folder)
# Write one class name per line so retraining code can map label ids to names.
cifar_labels_file = os.path.join(data_dir,'cifar10_labels.txt')
print('Writing labels file, {}'.format(cifar_labels_file))
with open(cifar_labels_file, 'w') as labels_file:
    for item in objects:
        labels_file.write("{}\n".format(item))
|
[
"187280967@qq.com"
] |
187280967@qq.com
|
f57e92ca341a4ef719218a9cc8d1a392d5f8ed20
|
f14f48e50efb50cfe7078c68f0d61015ae2d646b
|
/Stock/Select/Ui/Other/DyStockSelectTestedStocksDlg.py
|
2b6d429fd49050e93adc805172ad3f4f682f0fbe
|
[
"MIT"
] |
permissive
|
stockcode/DevilYuan
|
17a23da68954714cacae29f428c3005444e0e3a2
|
163d06cb7fd30a8f24b3f2e06206c1fd024353c3
|
refs/heads/master
| 2020-05-03T14:40:08.420822
| 2019-03-29T13:16:42
| 2019-03-29T13:16:42
| 178,683,886
| 2
| 1
|
MIT
| 2019-03-31T12:17:49
| 2019-03-31T12:17:49
| null |
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
from datetime import *
import os
import re
from PyQt5.QtWidgets import QDialog, QGridLayout, QLabel, QTextEdit, QPushButton, QApplication
from DyCommon.DyCommon import *
class DyStockSelectTestedStocksDlg(QDialog):
    """Dialog for entering the stock codes to be debugged/tested.

    The entered codes persist in a config file; on OK they are written into
    the caller-supplied ``data`` dict under 'codes', with exchange suffixes
    ('.SH'/'.SZ') appended.
    """
    def __init__(self, data, parent=None):
        super().__init__(parent)
        # Out-parameter: the selected codes are stored into this dict on OK.
        self._data = data
        self._init()
        self._initUi()
    def _init(self):
        # Config file that persists the codes between sessions.
        path = DyCommon.createPath('Stock/User/Config/Testing')
        self._file = os.path.join(path, 'DyStockSelectTestedStocks.dy')
    def _read(self):
        # Return previously saved codes, or "" on first use.
        if os.path.exists(self._file):
            with open(self._file) as f:
                codes = f.read()
        else:
            codes = ""
        return codes
    def _save(self):
        # Persist the raw text of the edit box.
        with open(self._file, 'w') as f:
            f.write(self._codesTextEdit.toPlainText())
    def _initUi(self):
        self.setWindowTitle('要调试的股票')
        # Widgets
        descriptionLabel = QLabel('要调试的股票代码')
        self._codesTextEdit = QTextEdit()
        self._codesTextEdit.setPlainText(self._read())
        cancelPushButton = QPushButton('Cancel')
        okPushButton = QPushButton('OK')
        cancelPushButton.clicked.connect(self._cancel)
        okPushButton.clicked.connect(self._ok)
        # Layout
        grid = QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(descriptionLabel, 0, 0)
        grid.addWidget(self._codesTextEdit, 1, 0, 20, 10)
        grid.addWidget(okPushButton, 1, 11)
        grid.addWidget(cancelPushButton, 2, 11)
        self.setLayout(grid)
        # Size the dialog relative to the desktop: 1/3 width, 1/2 height.
        self.resize(QApplication.desktop().size().width()//3, QApplication.desktop().size().height()//2)
    def _ok(self):
        # save
        self._save()
        # set out data: split on comma, newline or space, then de-duplicate
        # while preserving first-seen order.
        codes = re.split(',|\n| ', self._codesTextEdit.toPlainText())
        temp = []
        for x in codes:
            if x and x not in temp: temp.append(x)
        # Codes starting with '6'/'5' trade on Shanghai, others on Shenzhen.
        codes = [x + '.SH' if x[0] in ['6', '5'] else x + '.SZ' for x in temp]
        self._data['codes'] = codes
        self.accept()
    def _cancel(self):
        self.reject()
|
[
"louis_chu@163.com"
] |
louis_chu@163.com
|
8b7a7bf8e469f8575e9b8af31593301da9b21c06
|
e57af4a840d1126e22363bd9611a40fe18093c92
|
/wrappers/s2i/python/test/router-template-app/MyRouter.py
|
10aa8ee91177f1fa9145641815baeec07abb43aa
|
[
"Apache-2.0"
] |
permissive
|
holdenk/seldon-core
|
ca05e12c8e568700487f9b9e4b79a900200bd03c
|
de08406883850e5566b4e08af7d6484c1960bdd6
|
refs/heads/master
| 2020-03-07T12:47:07.530623
| 2018-03-30T10:08:42
| 2018-03-30T10:08:42
| 127,485,585
| 1
| 0
|
Apache-2.0
| 2018-03-31T00:37:40
| 2018-03-31T00:37:40
| null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
class MyRouter(object):
    """Seldon router template that always routes requests to child 0."""

    def __init__(self):
        """Set up any runtime parameters (none for this template).

        Parameters would arrive from the graph definition in the
        seldondeployment kubernetes resource manifest.
        """
        print("Initializing")

    def route(self, features, feature_names):
        """Pick a child model for the incoming request.

        Parameters
        ----------
        features : array-like
        feature_names : array of feature names (optional)

        Returns
        -------
        int
            Index of the chosen child; this template always returns 0.
        """
        return 0

    def send_feedback(self, features, feature_names, routing, reward, truth):
        """Optional hook invoked with feedback about earlier routings."""
        print("Received feedback")
|
[
"cc@seldon.io"
] |
cc@seldon.io
|
bbf67b6d8b12d59dd77937c8c5df126c1dd64d82
|
ba198225eceb33df214400c8a19a46292c051fe2
|
/conf/settings/settings_production.py
|
3e2f6aff61ff45d7a83f5a749ef78ecc26392a48
|
[] |
no_license
|
puntonim/inspire-read-api
|
64e39261b41c6610f8775645eab18252bbeeb6c2
|
876a053ae5ad2642911bab5387ed6139bc9b09ec
|
refs/heads/master
| 2020-07-11T12:36:41.358661
| 2019-04-04T12:22:38
| 2019-08-26T19:07:21
| 204,540,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
"""
Production settings.
"""
from .settings_base import *
DEBUG = False
ALLOWED_HOSTS = ['mysite.com']
SECRET_KEY = 'mysecretkey'
ORCID_TOKENS_ENCRYPTION_KEY = 'mykey'
|
[
"puntonim@gmail.com"
] |
puntonim@gmail.com
|
a5af53f0ae3c8b6d8628a1c09137e61e88a8ea9d
|
156b77dc620d47fa76baf9361b4ccac04a7f7995
|
/FSND/projects/02_trivia_api/starter/backend/test_flaskr.py
|
5adf46e7e178d309d8bf8069f3ee65a3d2663468
|
[] |
no_license
|
zyfsju/fullstack-study
|
cc43b18a47bdf277be3828d351624063d965723a
|
1172d62689e8edf60636d548cfc89c5168296d9c
|
refs/heads/master
| 2023-03-25T03:03:04.021634
| 2021-03-22T17:56:04
| 2021-03-22T17:56:04
| 350,438,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,503
|
py
|
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
    """This class represents the trivia test case"""
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "trivia_test"
        # NOTE(review): assumes a local postgres on 5432 with this database.
        self.database_path = "postgres://{}/{}".format(
            "localhost:5432", self.database_name
        )
        # Fixture payload used by the POST /questions insert test below.
        self.new_question = {
            "question": "How far is the mooon away from the earth?",
            "answer": "238,900 mi",
            "category": 3,
            "difficulty": 4,
        }
        setup_db(self.app, self.database_path)
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()
    def tearDown(self):
        """Executed after each test"""
        pass
    """
    TODO
    Write at least one test for each test for successful operation and for expected errors.
    """
    def test_get_categories(self):
        # GET /categories returns a non-empty category map.
        res = self.client().get("/categories")
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(len(data["categories"]))
    def test_get_questions_success(self):
        res = self.client().get("/questions?page=1")
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(len(data["questions"]))
    def test_get_questions_404(self):
        # A page far beyond the data set must 404.
        res = self.client().get("/questions?page=30")
        self.assertEqual(res.status_code, 404)
    def test_delete_question_success(self):
        # NOTE(review): depends on seed data containing question id 9.
        res = self.client().delete("/questions/9")
        self.assertEqual(res.status_code, 200)
    def test_delete_question_404(self):
        res = self.client().delete("/questions/100")
        self.assertEqual(res.status_code, 404)
    def test_post_question_search(self):
        res = self.client().post("/questions", json={"searchTerm": "box"})
        self.assertEqual(res.status_code, 200)
    def test_post_question_insert(self):
        res = self.client().post("/questions", json=self.new_question)
        self.assertEqual(res.status_code, 200)
    def test_post_question_422(self):
        # A payload with only unknown keys is unprocessable.
        res = self.client().post("/questions", json={"test": True})
        self.assertEqual(res.status_code, 422)
    def test_post_question_400(self):
        res = self.client().post("/questions", json={})
        self.assertEqual(res.status_code, 400)
    def test_get_questions_by_category(self):
        res = self.client().get("/categories/2/questions")
        self.assertEqual(res.status_code, 200)
    def test_get_questions_by_category_404(self):
        # Non-numeric category id must 404.
        res = self.client().get("/categories/tt/questions")
        self.assertEqual(res.status_code, 404)
    def test_get_quiz_question(self):
        res = self.client().post(
            "/quizzes",
            json={
                "previous_questions": [],
                "quiz_category": {"type": "History", "id": "4"},
            },
        )
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertTrue("question" in data.keys())
# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
|
[
"yifan.zhou@aptiv.com"
] |
yifan.zhou@aptiv.com
|
5d2db857d942cc972b50c1533898446b0879cf70
|
53dee1a74618e5ac343c5abe0a7a46fdf287e51e
|
/modules/callbacks.py
|
24e779a197aeef08e887e7541a2d795fa423a938
|
[] |
no_license
|
SpencerRaw/GNCA
|
250e8bc0351d121ba4c752ad07d02975f9e15e17
|
f82f24925acb26c141ed02b0b0fe23ebb0878450
|
refs/heads/master
| 2023-08-29T10:27:08.590713
| 2021-11-08T07:52:12
| 2021-11-08T07:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import numpy as np
from tensorflow.keras.callbacks import Callback
from boids.evaluate_boids import evaluate_complexity
from boids.forward import forward
class ComplexityCallback(Callback):
    """Keras callback that periodically scores boid-trajectory complexity.

    Every ``test_every`` epochs the current model is rolled out with the
    module-level ``evaluate_complexity``; all scores are saved to
    ``complexities.npz`` when training ends.  ``test_every`` <= 0 disables
    the callback entirely.
    """
    def __init__(
        self, test_every=10, n_trajectories=1, trajectory_len=1000, n_boids=100
    ):
        super().__init__()
        self.test_every = test_every          # evaluation period, in epochs
        self.n_trajectories = n_trajectories  # rollouts per evaluation
        self.trajectory_len = trajectory_len  # steps per rollout
        self.n_boids = n_boids                # boids simulated per rollout
        self.complexities = []                # accumulated scores
    def on_epoch_begin(self, epoch, logs=None):
        # Evaluate once before any training (at the start of epoch 0 only).
        if self.test_every > 0 and epoch == 0:
            self.evaluate_complexity()
    def on_epoch_end(self, epoch, logs=None):
        # Then evaluate every `test_every` epochs.
        if self.test_every > 0 and epoch > 0 and epoch % self.test_every == 0:
            self.evaluate_complexity()
    def on_train_end(self, logs=None):
        # Persist all collected scores for offline analysis.
        if self.test_every > 0:
            self.complexities = np.array(self.complexities)
            np.savez("complexities.npz", complexities=self.complexities)
    def evaluate_complexity(self):
        """Roll out the current model and append its complexity score."""
        # Calls the module-level evaluate_complexity function (same name
        # as this method; the global is resolved inside the call).
        out = evaluate_complexity(
            self.model,
            forward,
            self.n_trajectories,
            self.trajectory_len,
            self.n_boids,
        )
        self.complexities.append(out)
|
[
"daniele.grattarola@gmail.com"
] |
daniele.grattarola@gmail.com
|
bc041e4c5efd26ad99e6caa4c17e001f6f83401a
|
b5daf9d5525971be607e87e140c6e7575ac4a9e2
|
/service-mgmt-api/sm-api/sm_api/openstack/common/cliutils.py
|
4e37ff8fcfa5fda508427ff57b265d4cf6653e80
|
[
"Apache-2.0"
] |
permissive
|
starlingx/ha
|
9befeac4e934a22fdeadfc35163feb37967189e0
|
e35510e1cc54e83a158af9a5da3fb75ed4dd8601
|
refs/heads/master
| 2023-08-16T18:36:29.902446
| 2023-08-09T14:31:50
| 2023-08-10T14:52:02
| 237,515,014
| 3
| 3
|
Apache-2.0
| 2021-01-14T02:49:45
| 2020-01-31T20:58:31
|
C
|
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
import inspect
class MissingArgs(Exception):
    """Raised when a callable is invoked without all required arguments."""

    def __init__(self, missing):
        # Names of the required parameters that were not supplied.
        self.missing = missing

    def __str__(self):
        count = len(self.missing)
        if count == 1:
            return "An argument is missing"
        return "%(num)d arguments are missing" % dict(num=count)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
    ...
    MissingArgs: An argument is missing
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
    ...
    MissingArgs: 2 arguments are missing

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required parameter is left unfilled
    """
    # inspect.getargspec() was removed in Python 3.11; getfullargspec()
    # exposes the same args/defaults fields and exists since Python 3.3.
    argspec = inspect.getfullargspec(fn)
    num_defaults = len(argspec.defaults or [])
    # Positional parameters that have no default value.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        # Python 3 bound methods expose __self__; the original checked the
        # Python-2-only 'im_self', so it never detected bound methods here.
        return getattr(method, '__self__', None) is not None

    if isbound(fn):
        # 'self' is supplied implicitly by the method binding.
        required_args.pop(0)
    # Drop names satisfied by keywords, then names covered by positionals
    # (which fill the parameter list left to right).
    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
|
[
"dtroyer@gmail.com"
] |
dtroyer@gmail.com
|
61bffabbd45a33440250f55fb45cacc40dd6a16c
|
19a32440205b2caeec67c73c10d917b5fb30a86a
|
/test/test_cloud_account_create_params.py
|
729711d8628bae2fcbff63c14301d27e5882ccab
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
marrotte/isilon_sdk_python
|
480e84312f5924a506aeb09c9c7cae79a2b9b7f4
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
refs/heads/master
| 2020-03-23T07:31:40.376316
| 2016-06-07T23:44:31
| 2016-06-07T23:44:31
| 141,277,076
| 1
| 0
|
MIT
| 2018-07-17T11:02:08
| 2018-07-17T11:02:08
| null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.cloud_account_create_params import CloudAccountCreateParams
class TestCloudAccountCreateParams(unittest.TestCase):
    """ CloudAccountCreateParams unit test stubs """
    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        pass
    def testCloudAccountCreateParams(self):
        """
        Test CloudAccountCreateParams
        """
        # Smoke test: constructing the generated model must not raise.
        model = swagger_client.models.cloud_account_create_params.CloudAccountCreateParams()
if __name__ == '__main__':
    unittest.main()
|
[
"Alex.Pecoraro@isilon.com"
] |
Alex.Pecoraro@isilon.com
|
f747c7fc30189608f2665557c00fa27eb5312c27
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_092/ch68_2020_05_04_19_40_02_713532.py
|
94edce737f60b07ea6de9d86db9b8fbf6ef19a20
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
def separa_trios(x):
    """Split the sequence ``x`` into consecutive groups of three.

    When ``len(x)`` is not a multiple of 3, the final group holds the
    remaining one or two elements.

    >>> separa_trios([1, 2, 3, 4, 5])
    [[1, 2, 3], [4, 5]]

    The original hand-rolled version crashed on the leftover elements:
    ``x[len(x)]`` is always out of range and ``len(X)`` referenced an
    undefined name.  Slicing handles all three remainder cases uniformly
    and returns the result those branches clearly intended.
    """
    return [list(x[i:i + 3]) for i in range(0, len(x), 3)]
|
[
"you@example.com"
] |
you@example.com
|
ddff7707ddf45d323fdbddc7a9da8e6465da5e1d
|
491c1e520a64e3ebd5349130f35047aaed1e70ec
|
/contest/maxAscendingSum.py
|
854c5fb405b93f90d906324748c19480c1d1e40d
|
[] |
no_license
|
pangyouzhen/data-structure
|
33a7bd7790c8db3e018114d85a137f5f3d6b92f8
|
cd46cf08a580c418cc40a68bf9b32371fc69a803
|
refs/heads/master
| 2023-05-26T12:02:30.800301
| 2023-05-21T08:07:57
| 2023-05-21T08:07:57
| 189,315,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
class Solution(object):
    # Question to ponder: how would this change for subsequences
    # instead of contiguous subarrays?
    def maxAscendingSum(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # best[i] = largest sum of a strictly-ascending run ending at i.
        best = [0] * len(nums)
        best[0] = nums[0]
        for idx in range(1, len(nums)):
            if nums[idx] > nums[idx - 1]:
                # Run continues: extend the previous run's sum.
                best[idx] = best[idx - 1] + nums[idx]
            else:
                # Run breaks: start a fresh run at this element.
                best[idx] = nums[idx]
        return max(best)
if __name__ == '__main__':
    # Quick manual check; expected output: 65 (from the run 5, 10, 50).
    # nums = [10, 30, 60, 60, 60, 65]
    nums = [10, 20, 30, 5, 10, 50]
    sol = Solution()
    print(sol.maxAscendingSum(nums))
|
[
"pangyouzhen@live.com"
] |
pangyouzhen@live.com
|
b91c102509d6cfc53f4dfb4a2f6fad0aa1418164
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_encouraging.py
|
80132252a0c761c502ceab036d3d7ee9fc288904
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
#calss header
class _ENCOURAGING():
def __init__(self,):
self.name = "ENCOURAGING"
self.definitions = [u'making you feel more confidence or hope: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
66e2b39f73d65ebeac8acb7ee4270d2bf488b601
|
5c37dfaac3a02cc592c154bd9ed1a3fbcf5855cd
|
/python/cron/numpy.py
|
6a5307cbf874d543abfc54dfb176cdaf496672e7
|
[] |
no_license
|
thejohnfreeman/cron
|
5a91884e5ce8448e6f16b4d0823a05cc429ee37f
|
c2dbbb50ea8852e0f947b075e9e2f663a450fdcb
|
refs/heads/master
| 2020-12-29T00:41:24.952795
| 2016-03-30T02:29:48
| 2016-03-30T02:30:47
| 55,026,110
| 0
| 0
| null | 2016-03-30T02:31:40
| 2016-03-30T02:31:40
| null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
"""
Numpy support for Cron.
Numpy support is provided in a submodule so that users who do not need it aren't
forced to import numpy.
After this module is imported, Cron types that can be used as numpy array types
will have a `dtype` attribute. For example:
>>> import numpy as np
>>> from cron import Date
>>> import cron.numpy
>>> array = np.zeroes(8, dtype=Date.dtype)
"""
#-------------------------------------------------------------------------------
import numpy
from .ext import set_up_numpy as _set_up_numpy
# Add all the numpy stuff to the extension module.
_set_up_numpy()
# FIXME: Should we put this all in a submodule?
from .ext import get_day, get_month, get_year, get_ymd, get_ymdi
from .ext import date_from_ymdi
|
[
"alex@alexsamuel.net"
] |
alex@alexsamuel.net
|
1057b2013d6364200744efd5a4918a410b79b9bf
|
03a70d422855fcf2b488c5070c5ef12001143230
|
/virtual/bin/django-admin
|
c98dbb340f1fcdb0df6f4f85e1453a849a12ae31
|
[
"MIT"
] |
permissive
|
bellahOchola/Rater
|
0b5f1004552a8d966bff4092bd242834cd68564e
|
0251fdc6886eb1bcbad82a7aefc61e0ecf6a0738
|
refs/heads/master
| 2022-12-11T01:21:39.542482
| 2020-01-17T00:33:18
| 2020-01-17T00:33:18
| 232,781,927
| 0
| 0
| null | 2022-12-08T03:26:23
| 2020-01-09T10:22:29
|
Python
|
UTF-8
|
Python
| false
| false
| 309
|
#!/home/moringa/Desktop/projects/Django/rater/virtual/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console entry point for django-admin in this virtualenv.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Normalize argv[0]: strip Windows '-script.pyw' / '.exe' suffixes.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
|
[
"bellahkenya@gmail.com"
] |
bellahkenya@gmail.com
|
|
1e30765ae403bdd8a4a0a002c1a6768b0b8fafa0
|
269f18999a158db1a8a736655b84704780439cab
|
/shots/migrations/0009_remove_image_tags.py
|
cf21e8bde8d3c97021b08bf3ed43d7db2113b507
|
[] |
no_license
|
raul-jr3/fantastic-spoon
|
6c6d1a323f4d3ff1dc157c8bfd63e0cd8da41ffd
|
557cb3e85d2c385b080564bdf2571bd09c3b6655
|
refs/heads/master
| 2021-01-02T08:37:20.087398
| 2017-09-14T09:05:13
| 2017-09-14T09:05:13
| 99,033,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Reverses 0008 by dropping the ``tags`` field from the ``Image`` model."""

    dependencies = [
        ('shots', '0008_image_tags'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='image',
            name='tags',
        ),
    ]
|
[
"rahul.srivatsav1995@gmail.com"
] |
rahul.srivatsav1995@gmail.com
|
c964f1ce9a0d56b63720d461b59ad92d73476c3b
|
9192a0bf95b54fcbe76573cc250e590d64328422
|
/ibanking_web/ibanking/views.py
|
5e90f4bacf087dda72646b04d0de445acae9a6d1
|
[] |
no_license
|
ihfazhillah/explore-scrapy
|
ca07b31ce4c285bef1e1a7e0f5d97da1e3018217
|
74751f687062b47e56a1466836306b84724f094e
|
refs/heads/master
| 2022-12-10T03:00:54.083201
| 2019-02-15T00:41:38
| 2019-02-15T00:41:38
| 166,724,277
| 0
| 0
| null | 2022-12-08T01:37:37
| 2019-01-21T00:22:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
from uuid import uuid4
from django.shortcuts import render
from django.http import JsonResponse
from scrapyd_api import ScrapydAPI
from .models import ScrapyItem
# Create your views here.
# Module-level client for the local scrapyd daemon.
scrapyd = ScrapydAPI('http://localhost:6800')
def get_statements(request):
    """Kick off the 'ibmandiri' spider and return its job/tracking ids.

    A fresh UUID correlates the scrapyd job with the ScrapyItem row the
    spider will eventually write; the returned 'url' can be polled via
    check_job_get_statements.
    """
    unique_id = str(uuid4())
    settings = {
        'unique_id': unique_id
    }
    # NOTE(review): 'to_crawl' looks like a placeholder/test value -- verify
    # what the spider expects here.
    task = scrapyd.schedule('default', 'ibmandiri', settings=settings, to_crawl='otjlsjflask')
    return JsonResponse({
        'task_id': task,
        'unique_id': unique_id,
        'status': 'started',
        'url': '/check_job?unique_id={}&task_id={}'.format(unique_id, task)
    })
def check_job_get_statements(request):
    """Poll a previously scheduled scrape job; return its item once finished.

    Expects 'task_id' and 'unique_id' query parameters (from the URL
    returned by get_statements).
    """
    task_id = request.GET.get('task_id')
    unique_id = request.GET.get('unique_id')
    status = scrapyd.job_status('default', task_id)
    if status == 'finished':
        # The spider stored its result keyed by unique_id.
        item = ScrapyItem.objects.get(unique_id=unique_id)
        return JsonResponse(item.to_dict())
    # Still pending/running (or unknown): report the raw scrapyd status.
    return JsonResponse({'status': status})
|
[
"mihfazhillah@gmail.com"
] |
mihfazhillah@gmail.com
|
b5f279e0b892ad03556863e8883bdf0635bb56f0
|
4b86ebac6e2273bec07e8f0f1275a9f4c4700491
|
/Sort/2947_나무조각.py
|
b14eb7a5c7ba76ac4a95e165b8923bbaf58b19a0
|
[] |
no_license
|
chohan3036/algo_study
|
999d8a9d44b27100009246dcf913e07f36787295
|
64abbc8a401f9e555692f01917eb78b0fd37d7fb
|
refs/heads/master
| 2023-04-07T06:13:16.059638
| 2021-04-20T04:32:35
| 2021-04-20T04:32:35
| 307,859,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
# BOJ 2947: bubble-sort five numbers, printing the sequence after each swap.
import sys
reading = lambda :sys.stdin.readline().strip()
N = list(map(int, reading().split()))
n = 5
for i in range(n):
    for j in range(1, n):
        if N[j - 1] > N[j]:
            # Swap the adjacent out-of-order pair.
            temp = N[j - 1]
            N[j - 1] = N[j]
            N[j] = temp
            # NOTE(review): indentation was lost in this dump; the problem
            # statement requires printing the sequence after every swap, so
            # the print loop is reconstructed inside the swap branch -- confirm.
            for k in N:
                print(k, end=' ')
            print()
|
[
"chohan3036@gmail.com"
] |
chohan3036@gmail.com
|
2f07383bfc2b9984072cbf5c3ab0ad171fc409ae
|
f92fb9b9abe021d5604d3b5fb2ade0fbe6d85e3e
|
/robot/logging.py
|
30ffb6e8cc9edea7f47e51458319b66961d6af58
|
[
"MIT"
] |
permissive
|
chuchiring/wukong-robot
|
a9f4656db45656962614451ebb170b4ca4573871
|
f31982738df5b5c3929c713415aee04d13f2f4c8
|
refs/heads/master
| 2020-04-23T06:45:10.085706
| 2019-02-16T05:51:26
| 2019-02-16T05:51:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
import logging
import sys
import os
from robot import config, constants
from logging.handlers import RotatingFileHandler, HTTPHandler
# Read granularity for tail(): grow the read window one page at a time.
PAGE = 4096
# Re-exported log levels so callers need not import `logging` directly.
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
def tail(filepath, n=10):
    """
    Implement ``tail -n``: return the last *n* lines of *filepath* as one
    UTF-8 string (lines keep their trailing newlines).
    """
    res = ""
    with open(filepath, 'rb') as f:
        # File size; seek(0, 2) positions at EOF and returns the offset.
        f_len = f.seek(0, 2)
        rem = f_len % PAGE
        page_n = f_len // PAGE
        # Start by reading the final partial page (or one full page).
        r_len = rem if rem else PAGE
        while True:
            # If the read window covers the whole file, just read it all.
            if r_len >= f_len:
                f.seek(0)
                lines = f.readlines()[::-1]
                break
            f.seek(-r_len, 2)
            # print('f_len: {}, rem: {}, page_n: {}, r_len: {}'.format(f_len, rem, page_n, r_len))
            lines = f.readlines()[::-1]
            # The first line in the window may be cut off mid-line, so it is
            # excluded from the count.
            count = len(lines) -1
            if count >= n:
                # Enough complete lines captured; stop growing the window.
                break
            else:
                # Not enough lines yet: widen the window by one more page.
                r_len += PAGE
                page_n -= 1
    # Reassemble the last n lines back in file order.
    for line in lines[:n][::-1]:
        res += line.decode('utf-8')
    return res
def getLogger(name):
    """Build a logger that writes INFO+ to stdout and DEBUG+ to a rotating file.

    NOTE(review): handlers are appended on every call, so calling this twice
    with the same name will duplicate output lines.
    """
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(name)
    # StreamHandler
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(level=logging.INFO)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    # FileHandler: rotate wukong.log at 1 MB, keeping 5 backups.
    file_handler = RotatingFileHandler(os.path.join(constants.TEMP_PATH, 'wukong.log'), maxBytes=1024*1024,backupCount=5)
    file_handler.setLevel(level=logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
def readLog(lines=200):
    """Return the last ``lines`` lines of wukong.log, or '' if it is absent."""
    log_path = os.path.join(constants.TEMP_PATH, 'wukong.log')
    if not os.path.exists(log_path):
        return ''
    return tail(log_path, lines)
|
[
"m@hahack.com"
] |
m@hahack.com
|
7dca2733f4c7e5b4178a1a284fd2614650b0b984
|
e522c1606e06cd0a2ea82c57b72aac2e065ad429
|
/programming-team/First Semester/UCB/boxes.py
|
9d349284cd7c37e4a74902d185a11f42dbe71df4
|
[] |
no_license
|
Nate8888/programming-contest-practice
|
80d17d7ccea9491f42ec0cccab7e0cfe80849f17
|
4c2496260c209190103d7bbef82d866ae4be09b6
|
refs/heads/master
| 2023-05-04T16:20:36.136598
| 2021-05-27T22:52:04
| 2021-05-27T22:52:04
| 293,109,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
import sys
def k_min_max_partitions(my_list, size, k):
    """Minimum possible value of the largest block sum when the first
    ``size`` items of ``my_list`` are split into ``k`` contiguous blocks."""
    # Base case: a single block must absorb every remaining item.
    if k == 1:
        return sum(my_list[0:size])
    # Base case: a single item forms a block on its own.
    if size == 1:
        return my_list[0]
    best = 1e27  # sentinel larger than any achievable block sum
    for split in range(1, size + 1):
        # Left part: items [0, split) into k-1 blocks; right part: the rest.
        left = k_min_max_partitions(my_list, split, k - 1)
        right = sum(my_list[split:size])
        best = min(best, max(left, right))
    #print(best)
    return best
# Input: first line is "<items> <partitions>", second line the item values.
line = input().rstrip().split(" ")
partitions = int(line[1])
items = int(line[0])
all_nums = input().rstrip().split(" ")
nums = [int(x) for x in all_nums]
# Print the minimized maximum partition sum.
print(k_min_max_partitions(nums, items, partitions))
|
[
"wilk.nathan@gmail.com"
] |
wilk.nathan@gmail.com
|
ff3b0522f795040dd1eea2c2af80b0748c4a76eb
|
151e4ab8bdcff37ded920f32250331c1edc1772d
|
/tlmshop/serializers/__init__.py
|
87773f17382c4d432810508bc9149e2b62673837
|
[] |
no_license
|
LegionMarket/django-cms-base
|
79069ee67628ff7d10338b48b154fe087863e1ea
|
1b6fc3423e3d0b2165552cc980432befb496f3e0
|
refs/heads/master
| 2020-12-30T17:10:33.866390
| 2017-05-25T12:49:41
| 2017-05-25T12:49:41
| 91,057,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,797
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.safestring import mark_safe
from rest_framework import serializers
from shop.serializers.bases import ProductSerializer
from shop.search.serializers import ProductSearchSerializer as BaseProductSearchSerializer
from tlmshop.search_indexes import tlmshop_search_index_classes
# Public API of this module; conditionally extended further down depending
# on the configured SHOP_TYPE.
__all__ = ['ProductSummarySerializer', 'ProductSearchSerializer', 'CatalogSearchSerializer']
class ProductSummarySerializer(ProductSerializer):
    """Compact product representation used in catalog list views."""
    media = serializers.SerializerMethodField()
    class Meta(ProductSerializer.Meta):
        fields = ['id', 'product_name', 'product_url', 'product_model', 'price', 'media', 'caption']
    def get_media(self, product):
        # Render the product's 'media' snippet to HTML for the response.
        return self.render_html(product, 'media')
# Define a shop-type-specific detail serializer.  Exactly one branch runs,
# selected by the SHOP_TYPE setting at import time.
if settings.SHOP_TYPE in ['commodity', 'i18n_commodity']:
    class ProductDetailSerializer(ProductSerializer):
        class Meta(ProductSerializer.Meta):
            fields = ['product_name', 'slug', 'unit_price', 'product_code']
    __all__.append('ProductDetailSerializer')
elif settings.SHOP_TYPE in ['smartcard', 'i18n_smartcard']:
    class ProductDetailSerializer(ProductSerializer):
        class Meta(ProductSerializer.Meta):
            fields = ['product_name', 'slug', 'unit_price', 'manufacturer', 'card_type', 'speed',
                      'product_code', 'storage']
    __all__.append('ProductDetailSerializer')
elif settings.SHOP_TYPE in ['i18n_polymorphic', 'polymorphic']:
    # Polymorphic shops reuse the serializers defined in .polymorphic.
    from .polymorphic import (SmartCardSerializer, SmartPhoneSerializer, AddSmartPhoneToCartSerializer)
    __all__.extend(['SmartCardSerializer', 'SmartPhoneSerializer', 'AddSmartPhoneToCartSerializer'])
class ProductSearchSerializer(BaseProductSearchSerializer):
    """
    Serializer to search over all products in this shop
    """
    media = serializers.SerializerMethodField()
    class Meta(BaseProductSearchSerializer.Meta):
        fields = BaseProductSearchSerializer.Meta.fields + ['media', 'caption']
        # Full-text search: map the 'q' query parameter to the 'text' field.
        field_aliases = {'q': 'text'}
        search_fields = ['text']
        index_classes = tlmshop_search_index_classes
    def get_media(self, search_result):
        # search_media holds pre-rendered HTML stored in the search index.
        return mark_safe(search_result.search_media)
class CatalogSearchSerializer(BaseProductSearchSerializer):
    """
    Serializer to restrict products in the catalog
    """
    media = serializers.SerializerMethodField()
    class Meta(BaseProductSearchSerializer.Meta):
        fields = BaseProductSearchSerializer.Meta.fields + ['media', 'caption']
        # Catalog filtering goes through the 'autocomplete' index field.
        field_aliases = {'q': 'autocomplete'}
        search_fields = ['autocomplete']
        index_classes = tlmshop_search_index_classes
    def get_media(self, search_result):
        # catalog_media holds pre-rendered HTML stored in the search index.
        return mark_safe(search_result.catalog_media)
|
[
"spivegin@txtsme.com"
] |
spivegin@txtsme.com
|
d0c6e4689bcc224f4fde752a7459a16adbb45cf1
|
2e263bb909bb706957990f23d4d07a33fe031a61
|
/curiosity_debug.py
|
dffbbe8768dbdc709baeb1ed25319c6dda169585
|
[] |
no_license
|
liziniu/SuperMario
|
3e9b776d4c490275b1684b4fd3b3471914811b7d
|
64a33901c61591348dd4b9c878e396901dea27b6
|
refs/heads/master
| 2020-05-16T02:32:10.694180
| 2019-06-02T10:08:11
| 2019-06-02T10:08:11
| 182,632,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,193
|
py
|
import matplotlib
import numpy as np
import tensorflow as tf
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import os
import argparse
import pickle
import json
from baselines.common import set_global_seeds
from curiosity.dynamics import Dynamics
from run import build_env
from scipy.stats import pearsonr
from common.util import DataRecorder
from baselines import logger
class Model:
    """Offline trainer for a curiosity `Dynamics` model.

    Builds the auxiliary-task + forward-dynamics losses, optimizes them with
    RMSProp over recorded rollouts, and tracks how well the learned novelty
    signal correlates (Pearson) with the agent's x-position.
    """

    def __init__(self, sess, env, aux_task, feat_dim, lr):
        self.sess = sess or tf.Session()
        self.dynamics = Dynamics(sess=self.sess, env=env, auxiliary_task=aux_task, feat_dim=feat_dim,
                                 queue_size=1000, normalize_novelty=True)
        self.obs_shape = env.observation_space.shape
        self.ac_shape = env.action_space.shape
        # the env is only needed to read the space shapes above
        del env
        self.opt = tf.train.RMSPropOptimizer(lr, decay=0.99)
        self.aux_loss = self.dynamics.aux_loss
        self.dyna_loss = self.dynamics.dyna_loss
        self.loss = self.aux_loss + self.dyna_loss
        params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        gradsandvars = self.opt.compute_gradients(self.loss, params)
        self.train_op = self.opt.apply_gradients(gradsandvars)
        self.train_history = []

    def train(self, data, rollout_size, online=True, save_path=None):
        """Train on recorded episodes and log novelty/x-position scatter plots.

        :param data: list of dict, one per episode, with keys
            "obs", "act", "next_obs", "x_pos" (as built by preprocess_data).
        :param rollout_size: minibatch length used per optimization step.
        :param online: when False, each episode is shuffled before training.
        :param save_path: directory receiving scatter plots and pickled records.
        """
        self.recoder = DataRecorder(os.path.join(save_path, "training"))
        for episode, episode_data in enumerate(data):
            episode_length = len(episode_data["obs"])
            obs, act, next_obs, x_pos = episode_data["obs"], episode_data["act"], episode_data["next_obs"], episode_data["x_pos"]
            episode_novelty = []
            if not online:
                # offline mode: break temporal order within the episode
                ind = np.random.permutation(episode_length)
                obs, act, next_obs, x_pos = obs[ind], act[ind], next_obs[ind], x_pos[ind]
            for start in range(0, episode_length, rollout_size):
                end = start + rollout_size
                batch_obs, batch_act, batch_next_obs, batch_x_pos = obs[start:end], act[start:end], next_obs[start:end], x_pos[start:end]
                # NOTE(review): novelty is evaluated on the FULL episode here
                # (obs/act/next_obs, not the batch_* slices) while training
                # uses only the minibatch — confirm this is intentional.
                novelty = self.sess.run(self.dynamics.novelty, feed_dict={self.dynamics.obs: obs,
                                                                          self.dynamics.ac: act,
                                                                          self.dynamics.next_obs: next_obs})
                self.sess.run(self.train_op, feed_dict={self.dynamics.obs: batch_obs, self.dynamics.ac: batch_act,
                                                        self.dynamics.next_obs: batch_next_obs})
                # Pearson correlation between distance travelled and novelty
                p = pearsonr(x_pos, novelty)[0]
                logger.info("Episode:{}|Epoch:{}|P:{}".format(episode, start//rollout_size, p))
                episode_novelty.append(novelty)
                self.recoder.store({"x_pos": x_pos, "novelty": novelty, "episode": episode, "epoch": start//rollout_size,
                                    "p": p})
                plt.figure()
                plt.scatter(x_pos, novelty)
                # plt.yscale("log")
                plt.savefig(os.path.join(save_path, "{}_{}.png".format(episode, start//rollout_size)))
                plt.close()
        self.recoder.dump()
def preprocess_data(data):
    """Convert raw per-step rollout records into per-episode training arrays.

    Each input episode is a list of step dicts holding single-env batches
    (index [0]); the output is a list of dicts with float32 arrays "obs",
    "act", "x_pos" (all but the last step) and "next_obs" (all but the first).
    """
    episodes = []
    for raw_episode in data:
        observations = np.asarray([step["obs"][0] for step in raw_episode], dtype=np.float32)
        actions = np.asarray([step["act"][0] for step in raw_episode], dtype=np.float32)
        positions = np.asarray([step["info"][0]["x_pos"] for step in raw_episode], dtype=np.float32)
        episodes.append({
            "obs": observations[:-1],
            "act": actions[:-1],
            "x_pos": positions[:-1],
            # successor observation for every retained step
            "next_obs": np.copy(observations[1:]),
        })
    return episodes
def visualize_p(path):
    """Plot per-episode Pearson-correlation curves from a pickled record file.

    ``path`` points at a file containing a stream of pickled dicts with keys
    "episode" and "p" (as written by Model.train). One PNG per episode is
    written into the directory containing the pickle file.

    Fixes over the original implementation:
      * the first record of a new episode stored the episode *index* in the
        curve instead of its correlation value (``p.append(ep)``);
      * the final episode's curve was never plotted;
      * PNGs were written under the pickle *file* path instead of its
        directory, and figures were never closed;
      * the loader swallowed every exception instead of only EOFError and
        leaked the open file handle.
    """
    records = []
    with open(path, "rb") as f:
        while True:
            try:
                records.append(pickle.load(f))
            except EOFError:
                break

    out_dir = os.path.dirname(path)

    def _flush(curve, ep_idx):
        # Write the collected correlation curve for one episode.
        plt.figure()
        plt.plot(curve)
        plt.savefig(os.path.join(out_dir, "p_{}.png".format(ep_idx)))
        plt.close()

    p = []
    episode = None
    for record in records:
        ep = record["episode"]
        if episode is None:
            episode = ep
        if ep == episode:
            p.append(record["p"])
        else:
            _flush(p, episode)
            p = [record["p"]]  # start the new episode with its own correlation
            episode = ep
            print("Epoch:{} done".format(ep))
    if p:
        # flush the last (or only) episode, which the loop never reaches
        _flush(p, episode)
def main(args):
    """Load recorded rollouts, build the model and env, and train offline."""
    # Read every pickled episode appended to data.pkl until EOF.
    f = open("{}/data.pkl".format(args.load_path), "rb")
    data = []
    while True:
        try:
            data.append(pickle.load(f))
        except:
            break
    print("Episode:", len(data))
    set_global_seeds(args.seed)
    # Cap GPU memory so several experiments can share one card.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    sess = tf.Session(config=config, )
    env = build_env(env_id=args.env, num_env=1, alg="acer", reward_scale=1.0, env_type=args.env_type,
                    gamestate=None, seed=None, prefix="")
    model = Model(
        sess=sess,
        env=env,
        aux_task=args.aux_task,
        feat_dim=args.feat_dim,
        lr=args.lr
    )
    sess.run(tf.global_variables_initializer())
    save_path = "{}/plots-{}-{}-{}".format(
        args.load_path,
        args.memo,
        args.online,
        args.aux_task,
    )
    # NOTE(review): logger.configure runs before save_path is created —
    # confirm the baselines logger creates missing dirs, else swap the order.
    logger.configure(dir=save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Persist the full CLI configuration next to the outputs.
    with open(os.path.join(save_path, "config.json"), "w") as f:
        json.dump(args.__dict__, f, indent=4)
    model.train(
        preprocess_data(data),
        rollout_size=args.rollout_size*args.num_env,
        save_path=save_path,
    )
if __name__ == "__main__":
    # CLI entry point: training hyper-parameters, auxiliary-task choice,
    # and the location of the recorded rollouts to train on.
    parser = argparse.ArgumentParser()
    parser.add_argument("--rollout_size", type=int, default=20)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--nb_opt", type=int, default=5)
    parser.add_argument("--lr", type=float, default=7e-4)
    parser.add_argument("--memo", type=str, default="")
    parser.add_argument("--online", action="store_true", default=False)
    parser.add_argument("--aux_task", type=str, default="RF", choices=["RF", "RND", "IDF"])
    parser.add_argument("--feat_dim", type=int, default=512)
    parser.add_argument("--dyna_dim", type=int, default=512)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--load_path", type=str, default="logs")
    parser.add_argument('--gamestate', default=None)
    parser.add_argument("--alg", type=str, default="ppo2")
    parser.add_argument("--env_type", type=str, default="atari")
    parser.add_argument("--env", type=str)
    parser.add_argument("--num_env", type=int, default=1)
    parser.add_argument("--reward_scale", type=float, default=1.0, choices=[1.0])
    args = parser.parse_args()
    main(args)
|
[
"374387855@qq.com"
] |
374387855@qq.com
|
8eafd7e316029d9719d71cf80f9817845735177a
|
eb85c96c3783be407b396956c13448d89f5e5fee
|
/design_patterns_with_python/2-python-design-patterns-m2-exercise-files/Strategy/StrategyVariation/strategy_variation/shipping_cost.py
|
39dd4ddcb717c4d486e40cddd906eba548af071d
|
[] |
no_license
|
ForeverDreamer/python_learning
|
83c2c290271dbf060ee1718140b8dfd128b82b20
|
ff905c4811ddb688f8ee44aed8c4d8067db6168b
|
refs/heads/master
| 2022-04-30T03:23:45.162498
| 2019-07-05T07:55:01
| 2019-07-05T07:55:01
| 181,037,513
| 1
| 0
| null | 2022-04-22T21:12:46
| 2019-04-12T15:41:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
class ShippingCost(object):
    """Strategy-pattern wrapper that defers cost calculation to a callable."""

    def __init__(self, strategy):
        # `strategy` is any callable that accepts an order and returns its cost.
        self._strategy = strategy

    def shipping_cost(self, order):
        """Compute the shipping cost of `order` via the injected strategy."""
        compute = self._strategy
        return compute(order)
|
[
"499361328@qq.com"
] |
499361328@qq.com
|
8390cd4854229dfee6b142cd9d5bbad654885cf3
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_223/ch87_2020_04_29_13_42_03_524040.py
|
51d2d5546b1465b3bf43fb948f7677c962d2a02a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# Total the cost of a barbecue shopping list. Each line of churras.txt is
# expected to be "item,unit_price,quantity".
with open('churras.txt', 'r') as churrastxt:
    linhas = churrastxt.readlines()

print(linhas)

custo = 0
for linha in linhas:
    separa = linha.split(',')
    # Fix: the original iterated over the *fields* and indexed characters
    # (float(e[1])), which crashes on any non-numeric character. Use the
    # price and quantity fields of each line instead.
    preco = float(separa[1])
    quantidade = float(separa[2])
    custo += preco * quantidade
print(custo)
|
[
"you@example.com"
] |
you@example.com
|
b4a637358f8fd61fff627c1eb8da57d3effb6445
|
8b942cbd6a0da0a61f68c468956ba318c7f1603d
|
/sortings/0056_merge_intervals.py
|
9ebfcf815fa3d6974a6efdde33f440b51204ad67
|
[
"MIT"
] |
permissive
|
MartinMa28/Algorithms_review
|
080bd608b0e0c6f39c45f28402e5181791af4766
|
3f2297038c00f5a560941360ca702e6868530f34
|
refs/heads/master
| 2022-04-13T03:56:56.932788
| 2020-04-06T03:41:33
| 2020-04-06T03:41:33
| 203,349,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
class Solution:
    def merge(self, intervals: list) -> list:
        """Merge all overlapping intervals and return the disjoint result.

        Fixes over the original: the repeated ``list.pop(idx)`` made the
        loop O(n^2), and merged ranges were stored as tuples while
        untouched ranges stayed lists, yielding a mixed-type result. This
        version is a single O(n log n) sort plus one linear pass and
        always returns lists.

        :param intervals: list of [start, end] pairs (may be empty).
        :return: list of merged, non-overlapping [start, end] lists,
                 sorted by start.
        """
        if not intervals:
            return []
        ordered = sorted(intervals)
        merged = [list(ordered[0])]
        for start, end in ordered[1:]:
            last = merged[-1]
            if start <= last[1]:
                # overlapping (or touching) -> extend the last merged range
                last[1] = max(last[1], end)
            else:
                merged.append([start, end])
        return merged
if __name__ == "__main__":
    # quick manual check against the classic LeetCode 56 example
    solu = Solution()
    print(solu.merge([[1,3],[2,6],[8,10],[15,18]]))
|
[
"1135485226@qq.com"
] |
1135485226@qq.com
|
5261b1fa72f0205b52d000597183839bd1f223ff
|
2c7e967b5cb7c911245463ae1cd152b25b5e0b89
|
/steps/google.py
|
54cefc195ca13018e3b6ebd16626e1f29e1f6c1f
|
[
"MIT"
] |
permissive
|
antonckoenig/citeas-api
|
902d725c59dad9292c68f873d3a3512c77ceb06e
|
9a0da10fad95b49363ef43c4d02be1dcb17169d6
|
refs/heads/master
| 2023-07-30T13:08:39.967004
| 2021-09-19T20:08:13
| 2021-09-19T20:08:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
from googlesearch import get_random_user_agent, search
from steps.arxiv import ArxivResponseStep
from steps.bitbucket import BitbucketRepoStep
from steps.cran import CranLibraryStep
from steps.core import Step
from steps.github import GithubRepoStep
from steps.pypi import PypiLibraryStep
from steps.webpage import WebpageStep
class GoogleStep(Step):
    """Resolution step that googles a project name to find its citation page."""

    step_intro = "Use Google to find the software citation."
    step_more = "This project webpage often includes attribution information like an associated DOI, GitHub repository, and/or project title."

    @property
    def starting_children(self):
        # Candidate follow-up steps applied to whatever URL the search found.
        children = [
            ArxivResponseStep,
            GithubRepoStep,
            BitbucketRepoStep,
            CranLibraryStep,
            PypiLibraryStep,
            WebpageStep,
        ]
        return children

    def set_content_url(self, input):
        # Inputs that already look like URLs are handled by other steps.
        if "http" in input:
            return None
        self.content_url = self.google_search(input)

    def set_content(self, input):
        self.content = self.content_url

    @staticmethod
    def google_search(input):
        user_agent = get_random_user_agent()
        if len(input) == 8 and input.isdigit():
            # eight digits: treat the input as a PMID and search it verbatim
            query = input
        elif "scipy" in input:
            query = "scipy citation"
        else:
            query = "{} software citation".format(input)
        # Return the first hit that is neither a citebay page nor a PDF.
        for hit in search(query, stop=3, user_agent=user_agent):
            if "citebay.com" not in hit and not hit.endswith(".pdf"):
                return hit
|
[
"caseym@gmail.com"
] |
caseym@gmail.com
|
b7e68212cd709d9a98d2c452268db90ad47392ae
|
871d2a367e45164f21ecdbefe52bf442b563b33c
|
/tests/tests/correctness/EPLAnalytics/Utilities/DataSimulator/sim_cor_030/run.py
|
df3cb6f159e3069ad3abecc6e54e13c8a54fb953
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
SoftwareAG/apama-industry-analytics-kit
|
c0f6c30badf31411a29bc6daa4a7125b76f4e737
|
a3f6039915501d41251b6f7ec41b0cb8111baf7b
|
refs/heads/master
| 2022-02-19T20:47:27.180233
| 2022-02-02T12:58:23
| 2022-02-02T12:58:23
| 185,572,282
| 3
| 2
|
Apache-2.0
| 2022-02-02T12:58:24
| 2019-05-08T09:14:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,894
|
py
|
# $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
    """Correctness test for the DataSimulator analytic in 'sawfalling' mode.

    Drives ~60 seconds of simulated time and verifies the emitted values
    follow a falling sawtooth from 100 down in steps of 10/3 before
    wrapping back to 100.
    """

    def execute(self):
        # Start the correlator
        correlator = self.startTest()
        self.injectAnalytic(correlator)
        self.injectDataSimulator(correlator)
        self.ready(correlator)
        correlator.receive(filename='RawOutputData.evt', channels=['OutputData'])
        correlator.receive(filename='OutputDataOnly.evt', channels=['OUTPUT_DATA_ONLY'])
        correlator.injectMonitorscript(['test.mon'], self.input)
        # Run the simulator for just over 60 seconds so that we get 60 data points generated
        correlator.incrementTime(60.1)
        self.waitForSignal('OutputDataOnly.evt', expr='Received Data:', condition='>=59', timeout=5)

    def validate(self):
        # Ensure the test output was correct
        # One full sawtooth period: each tick falls by 10/3 (values reflect
        # accumulated float rounding), then wraps back to 100.
        exprList=[]
        exprList.append('Received Data: 100')
        exprList.append('Received Data: 96.66666666666667')
        exprList.append('Received Data: 93.33333333333333')
        exprList.append('Received Data: 90')
        exprList.append('Received Data: 86.66666666666667')
        exprList.append('Received Data: 83.33333333333334')
        exprList.append('Received Data: 80')
        exprList.append('Received Data: 76.66666666666667')
        exprList.append('Received Data: 73.33333333333334')
        exprList.append('Received Data: 70')
        exprList.append('Received Data: 66.66666666666667')
        exprList.append('Received Data: 63.33333333333334')
        exprList.append('Received Data: 60')
        exprList.append('Received Data: 56.66666666666667')
        exprList.append('Received Data: 53.33333333333334')
        exprList.append('Received Data: 50')
        exprList.append('Received Data: 46.66666666666667')
        exprList.append('Received Data: 43.33333333333334')
        exprList.append('Received Data: 40.00000000000001')
        exprList.append('Received Data: 36.66666666666667')
        exprList.append('Received Data: 33.33333333333334')
        exprList.append('Received Data: 30.00000000000001')
        exprList.append('Received Data: 26.66666666666667')
        exprList.append('Received Data: 23.33333333333334')
        exprList.append('Received Data: 20.00000000000001')
        exprList.append('Received Data: 16.66666666666668')
        exprList.append('Received Data: 13.33333333333334')
        exprList.append('Received Data: 10.00000000000001')
        exprList.append('Received Data: 6.66666666666668')
        exprList.append('Received Data: 3.33333333333334')
        exprList.append('Received Data: 100')
        self.assertOrderedGrep("OutputDataOnly.evt", exprList=exprList)
        self.assertLineCount('OutputDataOnly.evt', expr='Received Data:', condition='>=59')
        # Check for invalid data values
        self.assertLineCount('OutputDataOnly.evt', expr='INVALID DATA RECEIVED!', condition='==0')
        # Ensure the test output was correct
        exprList=[]
        exprList.append('Validating com.industry.analytics.Analytic\("DataSimulator",\[\],\["OutputData"\],{"simulationType":"sawfalling"}\)')
        exprList.append('Analytic DataSimulator started for inputDataNames \[\]')
        self.assertOrderedGrep("correlator.out", exprList=exprList)
        # Make sure that the we got the right number of analytics created
        self.assertLineCount('correlator.out', expr='Validating com.industry.analytics.Analytic', condition='==1')
        self.assertLineCount('correlator.out', expr='Analytic DataSimulator started', condition='==1')
        # Basic sanity checks
        self.checkSanity()
|
[
"Richard.Peach@softwareag.com"
] |
Richard.Peach@softwareag.com
|
6576c4f0d44e7715aa4ad36675fbe3c92075e2db
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02381/s751837364.py
|
bd7cb0f49528872efae67b22b1fb8ff06610b153
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# Standard Deviation
# Repeatedly read a dataset size followed by that many integer scores on
# one line; a size of zero terminates the input. For each dataset print
# its population standard deviation.
while True:
    studentAmount = int(input())
    if studentAmount == 0:
        break
    scoreData = [int(i) for i in input().rstrip().split()]
    averageScore = sum(scoreData) / studentAmount
    # population variance: mean squared distance from the average
    variance = sum((score - averageScore) ** 2 for score in scoreData) / studentAmount
    standardDeviation = variance ** 0.5
    print(str(standardDeviation))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
78a893873241271aad40cdcd58e8cd782cbe62e3
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/hvy_chainer-gan-improvements/chainer-gan-improvements-master/sample.py
|
8326637b1ad23d6efabde2e6c8394804eb7dfa71
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
import argparse
import numpy as np
from chainer import serializers
from models import Generator
import plot
# Resize the MNIST dataset to 32x32 images for convenience
# since the generator will create images with dimensions
# of powers of 2 (doubling upsampling in each deconvolution).
im_shape = (32, 32)
def parse_args():
    """Build and evaluate the command-line options for the sampling script."""
    parser = argparse.ArgumentParser()
    # flag -> argparse keyword arguments, registered in declaration order
    options = [
        ('--n-z', dict(type=int, default=10)),
        ('--n-samples', dict(type=int, default=128)),
        ('--in-generator-filename', dict(type=str, default='generator.model')),
        ('--out-filename', dict(type=str, default='sample.png')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    # Restore a trained generator and render n_samples images drawn from a
    # uniform latent prior z ~ U(-1, 1)^n_z into a single PNG grid.
    args = parse_args()
    n_z = args.n_z
    n_samples = args.n_samples
    in_generator_filename = args.in_generator_filename
    out_filename = args.out_filename
    generator = Generator(n_z, im_shape)
    serializers.load_hdf5(in_generator_filename, generator)
    zs = np.random.uniform(-1, 1, (n_samples, n_z)).astype(np.float32)
    x = generator(zs)
    plot.save_ims(out_filename, x.data)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
ecfc3bac0bddf6b0310970297e5c76cc50d20103
|
e33e414418be93aa0fb19c38b82b221ed8826460
|
/intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/tests/test_base.py
|
41e0c22a674faf17892e0df2b8aceaa3fafe3569
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
wisererik/proposals
|
69e4caaf89d7838c14b18328dc261b6c914748bf
|
9db7413983df9341d1796f2acba7202d36f31278
|
refs/heads/master
| 2021-05-03T23:12:39.496346
| 2018-12-22T04:02:46
| 2018-12-22T04:02:46
| 120,399,059
| 0
| 0
| null | 2018-02-06T03:54:13
| 2018-02-06T03:54:13
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
from vsmclient import base
from vsmclient import exceptions
from vsmclient.v1 import vsms
from tests import utils
from tests.v1 import fakes
cs = fakes.FakeClient()
class BaseTest(utils.TestCase):
    """Unit tests for the vsmclient base Resource/Manager helpers."""

    def test_resource_repr(self):
        # repr lists the resource's attributes sorted alphabetically
        r = base.Resource(None, dict(foo="bar", baz="spam"))
        self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")

    def test_getid(self):
        # getid passes plain values through and unwraps objects with .id
        self.assertEqual(base.getid(4), 4)

        class TmpObject(object):
            id = 4
        self.assertEqual(base.getid(TmpObject), 4)

    def test_eq(self):
        # Two resources of the same type with the same id: equal
        r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
        r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
        self.assertEqual(r1, r2)

        # Two resources of different types: never equal
        r1 = base.Resource(None, {'id': 1})
        r2 = vsms.Volume(None, {'id': 1})
        self.assertNotEqual(r1, r2)

        # Two resources with no ID: equal if their info is equal
        r1 = base.Resource(None, {'name': 'joe', 'age': 12})
        r2 = base.Resource(None, {'name': 'joe', 'age': 12})
        self.assertEqual(r1, r2)

    def test_findall_invalid_attribute(self):
        # Make sure findall with an invalid attribute doesn't cause errors.
        # The following should not raise an exception.
        cs.vsms.findall(vegetable='carrot')

        # However, find() should raise an error
        self.assertRaises(exceptions.NotFound,
                          cs.vsms.find,
                          vegetable='carrot')
|
[
"huangzhipeng@huawei.com"
] |
huangzhipeng@huawei.com
|
08260b91313501752e2e3f0567b2f9abe58b6278
|
9d1ef7993bf0df9967b1e7a79d5913fbc3e3a7e1
|
/tests/teststatistics.py
|
a34c3440109210c279d13e5d7aa17a063019754c
|
[
"BSD-2-Clause"
] |
permissive
|
mitmedialab/WhatWeWatch-Analysis
|
f6f4fbd8fba4ef6a58f4961c7f3d9b9519dae3a4
|
cc01dee4e77155c8aec7638e4275172053db3247
|
refs/heads/master
| 2021-05-28T05:40:36.678808
| 2014-11-03T01:22:26
| 2014-11-03T01:22:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
import inspect
import os
import sys
import unittest
import numpy as np
import numpy.testing as nptest
# Make the package under test importable when this file is run directly
# from the tests/ directory: prepend the repository root to sys.path.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import statistics
import stubs
import util
class StatisticsTest(unittest.TestCase):
    """Checks SpreadSpan's per-video spread and span against stub fixtures."""

    def setUp(self):
        # build the statistics object from the shared stub dataset
        self.video_data = util.VideoData(stubs.raw)
        self.spread_span = statistics.SpreadSpan(self.video_data)

    def test_span(self):
        self.assertEqual(self.spread_span.span_by_vid, stubs.span_by_vid)

    def test_spread(self):
        self.assertEqual(self.spread_span.spread_by_vid, stubs.spread_by_vid)

if __name__ == '__main__':
    unittest.main()
|
[
"ed@elplatt.com"
] |
ed@elplatt.com
|
3eca6fa93c5db8360edf1d7504cead97383393d2
|
5a8214b3a452c574e6c883bf5d90ba58ba87c461
|
/leetcode/549.py
|
bdd217ec4dc7b3604cda9930f55824e25cf201fb
|
[] |
no_license
|
phlalx/algorithms
|
69a3c8519687816e3c6333ec12b40659d3e3167f
|
f4da5a5dbda640b9bcbe14cb60a72c422b5d6240
|
refs/heads/master
| 2023-02-03T10:30:30.181735
| 2020-12-26T09:47:38
| 2020-12-26T09:47:38
| 129,254,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
#TAG tree
# yet another tree problem
# do it on paper first to see if we use synth/inherited attributed and how
# to deal with None
#
# If we don't recurse to None, don't forget to deal with corner case where
# root = None.
#
# In this example, we don't recurse through None because we need
# node.left, node.right value
#
class Solution:
    # NOTE(review): `TreeNode` and `Tuple` are assumed to be provided by the
    # judge environment (typing is not imported here) — confirm when reusing.
    def longestConsecutive(self, root: TreeNode) -> int:
        """Return the length of the longest consecutive path in the tree.

        The path may bend through a node: at each node the best increasing
        run through one child is combined with the best decreasing run
        through the other (consecutive means values differ by exactly 1).
        """
        res = float('-inf')
        def f(node) -> Tuple[int, int]: # longest increasing sequence starting/end
            nonlocal res
            v = node.val
            # (inc, dec) run lengths starting at this node, per child side
            linc, rinc, ldec, rdec = (1,) * 4
            if node.left is not None:
                linc, ldec = f(node.left)
                # child value one greater -> extends an increasing run
                if v == node.left.val - 1:
                    linc += 1
                else:
                    linc = 1
                # child value one smaller -> extends a decreasing run
                if v == node.left.val + 1:
                    ldec += 1
                else:
                    ldec = 1
            if node.right is not None:
                rinc, rdec = f(node.right)
                if v == node.right.val - 1:
                    rinc += 1
                else:
                    rinc = 1
                if v == node.right.val + 1:
                    rdec += 1
                else:
                    rdec = 1
            # best path through this node: increasing on one side joined
            # with decreasing on the other (node counted once)
            res = max(res, linc + rdec - 1, rinc + ldec - 1)
            return max(linc, rinc), max(ldec, rdec)
        if root is None:
            return 0
        f(root)
        return res
|
[
"phlalx@users.noreply.github.com"
] |
phlalx@users.noreply.github.com
|
6e7e9e7be12b4e755ac17174ef8c25c82594321e
|
3293dc42e15e956be202e39db196eed9912dcc01
|
/estimation/causal_inference/I_and_R_treatment effect evaluation/stratified_randomized_experiments/fep_stats.py
|
5f2e7f9659245edb8a169bbd0e9f13888824119d
|
[] |
no_license
|
bthowe/data_science
|
c372e5364f24dc29e3de1fca3504211cb93b62fb
|
63291df8084e5f62f9ba226e87db2242bb31ac94
|
refs/heads/master
| 2021-11-24T10:49:00.800890
| 2021-11-02T16:10:16
| 2021-11-02T16:10:16
| 106,839,857
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
import sys
import numpy as np
import pandas as pd
pd.set_option('max_columns', 1000)
pd.set_option('max_info_columns', 1000)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 30000)
pd.set_option('max_colwidth', 4000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
def T_diff_lambda(y_t, y_c, lam='RSS'):
    """Stratum-weighted difference in mean outcomes (treated minus control).

    Parameters
    ----------
    y_t, y_c : pandas.DataFrame
        Treated / control observations with columns 'y' (outcome) and
        'stratum' (stratum label).
    lam : {'RSS', 'OPT'}
        Weighting scheme: 'RSS' weights each stratum by its share of the
        pooled sample; 'OPT' weights by N_j * p_j * (1 - p_j), where p_j is
        the within-stratum treated share, normalized to sum to one.

    Returns
    -------
    float
        Weighted sum over strata of (mean treated y - mean control y).

    Raises
    ------
    ValueError
        If `lam` is not a supported scheme (the original implementation
        silently returned None in that case).
    """
    Y_t = y_t['y'].groupby(y_t['stratum']).mean()
    N_t = y_t['y'].groupby(y_t['stratum']).count()
    Y_c = y_c['y'].groupby(y_c['stratum']).mean()
    N_c = y_c['y'].groupby(y_c['stratum']).count()
    N = N_t + N_c
    if lam == 'RSS':
        l = N / (N_t.sum() + N_c.sum())
    elif lam == 'OPT':
        # N_j * p_j * (1 - p_j), normalized across strata
        w = N * (N_t / N) * (N_c / N)
        l = w / w.sum()
    else:
        raise ValueError("lam must be 'RSS' or 'OPT', got {!r}".format(lam))
    return (l * (Y_t - Y_c)).sum()
def T_rank_stratum(y):
    """Absolute difference in mean within-stratum normalized ranks.

    Ranks outcomes within each stratum, centers them at the stratum's
    average rank ((n + 1) / 2), and returns |mean(treated) - mean(control)|
    of the centered ranks.

    Parameters
    ----------
    y : pandas.DataFrame
        Must contain columns 'y', 'stratum', and 'treatment' (0/1).
        The caller's frame is NOT modified (the original implementation
        added 'rank'/'norm'/'normalized_rank' columns in place).

    Returns
    -------
    float
    """
    y = y.copy()  # avoid mutating the caller's DataFrame
    y['rank'] = y['y'].groupby(y['stratum']).rank()
    y['norm'] = (y['y'].groupby(y['stratum']).transform('count') + 1) / 2
    y['normalized_rank'] = y['rank'] - y['norm']
    return np.abs(y.query('treatment == 1')['normalized_rank'].mean()
                  - y.query('treatment == 0')['normalized_rank'].mean())
def T_range(y):
    """Stratum-share-weighted difference of within-group ranges (max - min).

    For each stratum, computes range(treated y) - range(control y) and
    averages across strata, weighting each stratum by its share of the
    pooled sample. Expects columns 'y', 'stratum', 'treatment' (0/1).
    """
    treated = y.query('treatment == 1')
    control = y.query('treatment == 0')

    grouped_t = treated['y'].groupby(treated['stratum'])
    grouped_c = control['y'].groupby(control['stratum'])

    range_t = grouped_t.max() - grouped_t.min()
    range_c = grouped_c.max() - grouped_c.min()
    n_t = grouped_t.count()
    n_c = grouped_c.count()

    weights = (n_t + n_c) / (n_t.sum() + n_c.sum())
    return (weights * (range_t - range_c)).sum()
if __name__ == '__main__':
    # Smoke-test the three statistics on a small random dataset with three
    # strata and random treatment assignment.
    np.random.seed(seed=2)
    N = 10  # observations
    J = 3   # strata
    df = pd.DataFrame(np.random.uniform(-1, 1, size=(N, 3)), columns=['one', 'two', 'three'])
    df['stratum'] = np.random.choice(range(J), size=(N, 1))
    df['treatment'] = np.random.choice([0, 1], size=(N, 1))
    df['y'] = np.random.uniform(0, 1, size=(N, 1))

    print(
        T_diff_lambda(
            df.query('treatment == 1')[['y', 'stratum']],
            df.query('treatment == 0')[['y', 'stratum']],
            lam='OPT'
            # lam='RSS'
        )
    )
    print(
        T_rank_stratum(
            df[['y', 'stratum', 'treatment']]
        )
    )
    print(
        T_range(
            df[['y', 'stratum', 'treatment']]
        )
    )
|
[
"b.travis.howe@gmail.com"
] |
b.travis.howe@gmail.com
|
64165e3ab97aeeeb15eedc35d813c6e5e60e29c1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03086/s435473021.py
|
48cf74090e9644bcb577dece1ddb37330a8fc1a4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
ans = 0
c = 1
li = ["A","C","G","T"]
s = input()
lens = len(s)
for i in range(lens):
for j in range(lens-(i)):
a = s[j:j+i+1]
for k in range(i+1):
if a[k] not in li:
c = 1
break
else:
c = 0
continue
if c == 0:
ans = i+1
else:
c = 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b334814b90ea86220c975382e7dfc5b5e03db363
|
281c0694b9c6e394a0329a34d1b9ec564811fd3a
|
/test/test_errors.py
|
d6dabec0f52bb01937c19c6e0a1e1f051da688a5
|
[
"Apache-2.0"
] |
permissive
|
fridex/json2sql
|
35448d1011a53c55641ef928cbcdc2c40d55fb65
|
a0851dd79827a684319b03fb899e129f81ff2d3a
|
refs/heads/master
| 2021-01-01T19:09:30.394871
| 2018-04-01T12:12:43
| 2018-04-01T12:12:43
| 98,528,002
| 0
| 0
|
Apache-2.0
| 2018-04-01T12:20:36
| 2017-07-27T11:26:43
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/env python3
# ######################################################################
# Copyright (C) 2017 Fridolin Pokorny, fridolin.pokorny@gmail.com
# This file is part of json2sql package.
# ######################################################################
"""Tests for error handling."""
from json2sql import select2sql
from json2sql import ParsingInputError
import pytest
from .base import TestBase
class TestErrors(TestBase):
    """Tests for error handling."""

    def test_unknown_subquery_key(self):
        # A nested query containing a key the parser does not recognize
        # must be rejected with ParsingInputError rather than ignored.
        wrong_nested_query = {'$filter': {'table': 'BarTable'}, 'wrong_key': 'baz'}
        with pytest.raises(ParsingInputError):
            select2sql(table='FooTable', where={'something in': wrong_nested_query})
|
[
"fridolin@redhat.com"
] |
fridolin@redhat.com
|
18963044c0b542d8438fa4a3f06fcece9a5724c2
|
19fb0eb26f5a6d2180a323cf242ce00f5e4e1c6d
|
/test/functional/rpc_net.py
|
1edc734577b514756d5995d5ae4866c5961a14bd
|
[
"MIT"
] |
permissive
|
j00v/NestEGG
|
bd4c9555f6473cc655e203531c6ab4d0dc795b61
|
8c507974a5d49f5ffa7000fa8b864a528dcb9c3e
|
refs/heads/master
| 2022-12-03T09:16:14.732378
| 2020-08-12T15:25:31
| 2020-08-12T15:25:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,111
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from test_framework.test_framework import NestEggTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
disconnect_nodes,
p2p_port,
wait_until,
)
class NetTest(NestEggTestFramework):
    """Functional tests for the net-related RPC calls (rpc/net.cpp)."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()
        # NOTE(review): the getpeerinfo subtest is intentionally disabled
        # here — confirm whether it should be re-enabled.
        #self._test_getpeerinfo()

    def _test_connection_count(self):
        # connect_nodes_bi connects each node to the other
        assert_equal(self.nodes[0].getconnectioncount(), 2)

    def _test_getnettotals(self):
        # getnettotals totalbytesrecv and totalbytessent should be
        # consistent with getpeerinfo. Since the RPC calls are not atomic,
        # and messages might have been recvd or sent between RPC calls, call
        # getnettotals before and after and verify that the returned values
        # from getpeerinfo are bounded by those values.
        net_totals_before = self.nodes[0].getnettotals()
        peer_info = self.nodes[0].getpeerinfo()
        net_totals_after = self.nodes[0].getnettotals()
        assert_equal(len(peer_info), 2)
        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
        peers_sent = sum([peer['bytessent'] for peer in peer_info])

        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)

        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)

        # NOTE(review): the value below is never asserted against — confirm
        # whether a check was intended here.
        peer_info_after_ping = self.nodes[0].getpeerinfo()

    def _test_getnetworkinginfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        disconnect_nodes(self.nodes[0], 1)
        # Wait a bit for all sockets to close
        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)

        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(True), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(True, ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, True, '1.1.1.1')

    def _test_getpeerinfo(self):
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])

if __name__ == '__main__':
    NetTest().main()
|
[
"shamim.ice.ewu@gmail.com"
] |
shamim.ice.ewu@gmail.com
|
30cb251586599dc04ef74e664fb104b60b42d8e1
|
4a9dada02c749e9e5277fe1e35357d7b2b28ad5c
|
/高艺航2018012950/操作系统实验/作业3 创建多进程.py
|
6c0b31386f1a4c6f1d792b15aa9fb29d96d4ee90
|
[] |
no_license
|
wanghan79/2020_Option_System
|
631cc80f52829390a128a86677de527472470348
|
f37b870614edf7d85320da197d932df2f25a5720
|
refs/heads/master
| 2021-01-09T13:10:05.630685
| 2020-07-10T03:30:39
| 2020-07-10T03:30:39
| 242,312,271
| 13
| 9
| null | 2020-07-04T16:13:11
| 2020-02-22T09:12:56
|
Python
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
"""
Author : Yihang.Gao 高艺航
StuNumber: 2018012950
Purpose : Set up multiprocess by python.
Created : 1/7/2020
"""
from multiprocessing import Process
def setup_pro(i):
print('process',i)
if __name__ == '__main__':
list_pro = []
for i in range(3):
k = Process(target=setup_pro, args=(i+1,))
list_pro.append(k)
list_pro[0].start()
list_pro[1].start()
list_pro[2].start()
|
[
"noreply@github.com"
] |
wanghan79.noreply@github.com
|
ed5eb4fea9abb6d9e7d56595dc9603d9abd22bf4
|
77c641fd0708b279dddbe01f6af32a8531b93185
|
/marketsim/gen/_intrinsic/observable/minmax.py
|
734d5a8a443fbbb0acaf22a6f90bff1435caa3bc
|
[] |
no_license
|
abensrhir/marketsimulator
|
aea286afd2bb2e0c8a547bfa879601aef21c0cd5
|
f9f55c72fb34cdbec42b96737ca20839f26c6299
|
refs/heads/master
| 2020-12-13T20:55:55.795344
| 2014-02-24T22:52:24
| 2014-02-24T22:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
from marketsim import event, _
import fold
class Min_Impl(fold.Last):
def __init__(self):
event.subscribe(self.source, _(self)._update, self)
self.reset()
def reset(self):
import blist
self._levels = blist.sorteddict()
self._x = None
def at(self, t):
p = self._levels.keys()[0] if len(self._levels) > 0 else None
x = self._x
if p is not None:
if x is not None:
return min(p,x)
return p
return x
def _remove(self, x):
self._levels[x] -= 1
if self._levels[x] == 0:
del self._levels[x]
self.fire(self)
def update(self, t, x):
if x is not None and (self._x is None or x < self._x):
if x not in self._levels:
self._levels[x] = 0
self._levels[x] += 1
self._scheduler.scheduleAfter(self.timeframe, _(self, x)._remove)
self._x = x
self.fire(self)
class Max_Impl(fold.Last):
def __init__(self):
event.subscribe(self.source, _(self)._update, self)
self.reset()
def reset(self):
import blist
self._levels = blist.sorteddict()
self._x = None
def at(self, t):
p = -self._levels.keys()[0] if len(self._levels) > 0 else None
x = self._x
if p is not None:
if x is not None:
return max(p,x)
return p
return x
def _remove(self, x):
self._levels[-x] -= 1
if self._levels[-x] == 0:
del self._levels[-x]
self.fire(self)
def update(self, t, x):
if x is not None and (self._x is None or x > self._x):
if -x not in self._levels:
self._levels[-x] = 0
self._levels[-x] += 1
self._scheduler.scheduleAfter(self.timeframe, _(self, x)._remove)
self._x = x
self.fire(self)
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
20427a03af6d5566c36d42e5d8ea4bcbfd11aa93
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2015_02_19_strain_stress_vtk/old_version/Calculate_Stress_from_Strain_ORIGINAL.py
|
46c772ad37f6fbc4da3a37d15695c54b8772e00f
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,842
|
py
|
#
# Calculate_Stress_from_Strain_MKS.py
#
# Written by Matthew Priddy on February 19, 2015
#
from sys import *
from string import *
from math import *
from pylab import *
from random import *
from numpy import *
import itertools
from numpy import tensordot as td
import matplotlib
#matplotlib.use('PDF')
import linecache
import time
from matplotlib import pyplot as plt
from scipy import optimize
def Gmatrix(phi1, phi0, phi2):
g = zeros((3,3))
g[0,0] = ( cos(phi1) * cos(phi2) ) - ( sin(phi1) * sin(phi2) * cos(phi0) )
g[0,1] = ( sin(phi1) * cos(phi2) ) + ( cos(phi1) * sin(phi2) * cos(phi0) )
g[0,2] = ( sin(phi2) * sin(phi0) )
g[1,0] = -( cos(phi1) * sin(phi2) ) - ( sin(phi1) * cos(phi2) * cos(phi0) )
g[1,1] = -( sin(phi1) * sin(phi2) ) + ( cos(phi1) * cos(phi2) * cos(phi0) )
g[1,2] = ( cos(phi2) * sin(phi0) )
g[2,0] = ( sin(phi1) * sin(phi0) )
g[2,1] = -( cos(phi1) * sin(phi0) )
g[2,2] = ( cos(phi0) )
return g
def Bmatrix(p00, p11, p22, p01, p02, p12):
B = zeros((3,3))
B[0,0] = p00
B[0,1] = p01
B[0,2] = p02
B[1,0] = p01
B[1,1] = p11
B[1,2] = p12
B[2,0] = p02
B[2,1] = p12
B[2,2] = p22
return B
def Cijkl_dot_dot_Skl(C,S):
# Vij = Cijkl*Skl
# Technically written as Vij = Cijkl*Slk, but Skl is symmetric in this work
value = zeros((3,3))
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
value[i,j] = value[i,j] + C[i,j,k,l] * S[k,l]
return value
def calc_Cijkl_from_Cij(Cij):
## ij or kl: 11 22 33 23 31 12 32 13 21
## m or n : 1 2 3 4 5 6 7 8 9
# Theory of dislocations, pg. 34.
Cijkl = zeros((3,3,3,3))
ia = 0
ib = 0
for i in range(3):
for j in range(3):
ia = i
if (i != j):
ia = 6-i-j
for k in range(3):
for l in range(3):
ib = k
if (k != l):
ib = 6-k-l
Cijkl[i,j,k,l] = Cij[ia,ib]
return Cijkl
elements = 21*21*21
# (1c) Extract the material constants from the .inp files
# This should be automated, but for now we can hard code the input parameters
C11 = 172832.50
C12 = 97910.060
C13 = 73432.550
C33 = 192308.10
C44 = 49700.000
C66 = 0.5 * (C11 - C12)
shear_mod = (C44 * C66) ** 0.5
# For HCP crystal structures (e.g. Titanium)
Cij = zeros((6,6))
Cij[0,0] = C11; Cij[1,1] = C11
Cij[0,1] = C12; Cij[1,0] = C12
Cij[0,2] = C13; Cij[1,2] = C13; Cij[2,0] = C13; Cij[2,1] = C13
Cij[2,2] = C33
Cij[3,3] = C44; Cij[4,4] = C44
Cij[5,5] = C66
# Determine the 3x3x3x3 Stiffness matrix
Cijkl = calc_Cijkl_from_Cij(Cij)
# (1) Extract various values for use in this code
# (a) Extract the Euler angles for each element
euler_file = open(f6_EulerAngles,'r')
file_contents = euler_file.readlines()
euler = zeros((elements, 3))
for i in range(1+2,elements+3):
data1 = linecache.getline(f6_EulerAngles,i,module_globals=None)
data1 = data1.split()
euler[i-3,0] = float(data1[1])
euler[i-3,1] = float(data1[2])
euler[i-3,2] = float(data1[3])
# Total Strain
NumCycles = 3
strn_t00_el = zeros((elements, 2*NumCycles + 1)); strn_t11_el = zeros((elements, 2*NumCycles + 1)); strn_t22_el = zeros((elements, 2*NumCycles + 1))
for i in range(0,elements):
R = Gmatrix(euler[i,0], euler[i,1], euler[i,2]).T
for count in range(0,2 * NumCycles):
strain_0 = Bmatrix(strn_t00_el[i,count], strn_t11_el[i,count], strn_t22_el[i,count], strn_t01_el[i,count], strn_t02_el[i,count], strn_t12_el[i,count])
stress_0_from_strain = Cijkl_dot_dot_Skl(rotT(R.T, Cijkl),strain_0)
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
4d6a69101128cf2a501beb4695b10e4e6725b142
|
da130508b04587493424d6c95ab05a55dd70170f
|
/math/0x00-linear_algebra/2-size_me_please.py
|
e49b8b8ce7b567c07f70d7815294e0e636d80882
|
[] |
no_license
|
AndrewKalil/holbertonschool-machine_learning
|
ea38c0d1ef6ce2206da5f3903fcc22730404af9c
|
bb980395b146c9f4e0d4e9766c4a36f67de70d2e
|
refs/heads/master
| 2023-07-09T04:09:24.271069
| 2021-08-11T02:29:54
| 2021-08-11T02:29:54
| 317,371,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
#!/usr/bin/env python3
""" 0x00. Linear Algebra """
def matrix_shape(matrix):
""" calculates shape of a matrix """
if matrix:
shape = [len(matrix)]
while type(matrix[0]) == list:
shape.append(len(matrix[0]))
matrix = matrix[0]
return shape
else:
return [0]
|
[
"1541@holbertonschool.com"
] |
1541@holbertonschool.com
|
d4088c4ab77b083f972d428a0ce87909942c2d89
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02399/s168551365.py
|
01908c0ef89d62a27267c675c1736b6a0a60212f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
l=raw_input()
k=l.split()
a=0.0
b=0.0
a=int(k[0])
b=int(k[1])
#
print int((a-(a%b))/b),
print int(a%b),
a*=1.0
b*=1.0
if b==1:
print a*1.0
else:
if a/b>0.0000002:
print a/b
else: print "0.00000001.99999982"
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
afb7b1c1f7b6530a07d7f811577e8294e4a176a6
|
9dba8607dce414f9905700d7a4ac44668de5e1f1
|
/Brelaz/rough_calculations/dir1_reinforcement.py
|
3f25e1b2285ceab6ba6be10343ff326ed6f62365
|
[] |
no_license
|
anaiortega/XCmodels
|
c0463ffe38531578aee281456e88528882255cd7
|
e9b8c2f996a21b8aa3314242f3cc12b0e391b5df
|
refs/heads/master
| 2023-08-16T22:44:01.168775
| 2023-08-14T18:15:10
| 2023-08-14T18:15:10
| 141,140,177
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
# -*- coding: utf-8 -*-
from rough_calculations import ng_cantilever
from rough_calculations import ng_simple_bending_reinforcement
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from materials.sia262 import SIA262_limit_state_checking
beam= ng_cantilever.Cantilever()
beam.l= 2.45+0.5
#Loads
Qa= -2*135e3/2.5 #N/m
qa= -0.25*2500*9.81-2.35e3-8.1e3 # N/m2
Qd= 1.5*Qa # N
qd= 1.35*qa # N/m
Md= beam.getBendingMomentUnderUniformLoad(qd,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,0.0)
Ma= beam.getBendingMomentUnderUniformLoad(qa,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qa,2.45-0.2,0.0)
MdMax= min(Md,Ma)
print 'Md= ', Md/1e3, ' kN m/m Ma= ', Ma/1e3, 'kN m/m MdMax= ', MdMax/1e3, ' kN m/m'
Vd= beam.getShearUnderUniformLoad(qd,0.25)+beam.getShearUnderConcentratedLoad(Qd,1.0-0.2,0.25)
Va= beam.getShearUnderUniformLoad(qa,0.25)+beam.getShearUnderConcentratedLoad(Qa,2.45-0.2,0.25)
MVRd= beam.getBendingMomentUnderUniformLoad(qd,0.25)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,0.25)
VdMax= max(Vd,Va)
print 'Vd= ', Vd/1e3, ' kN/m MVRd= ', MVRd/1e3, ' kN m/m Va= ', Va/1e3, 'kN/m VdMax= ', VdMax/1e3, ' kN/m'
#Reinforcement
from materials.sia262 import SIA262_materials
concrete= SIA262_materials.c50_60
reinfSteel= SIA262_materials.B500A
d= 0.25-0.035-20e-3/2.0
As= ng_simple_bending_reinforcement.AsSimpleBending(-MdMax,-concrete.fcd(),reinfSteel.fyd(),1.0,d)
print 'As= ', As*1e6, ' mm2'
VRd= SIA262_limit_state_checking.VuNoShearRebars(concrete,reinfSteel,0.0,-MVRd,As,2.5/2.0,d)
print 'VRd= ', VRd/1e3, ' kN VdMax= ', VdMax/1e3, ' kN'
#Reinforcement 2
Md2= beam.getBendingMomentUnderUniformLoad(qd,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,1.2)
Ma2= beam.getBendingMomentUnderUniformLoad(qa,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qa,2.45-0.2,1.2)
MdMax2= min(Md2,Ma2)
print 'Md2= ', Md2/1e3, ' kN m/m Ma2= ', Ma2/1e3, 'kN m/m MdMax2= ', MdMax2/1e3, ' kN m/m'
As2= ng_simple_bending_reinforcement.AsSimpleBending(-MdMax2,-concrete.fcd(),reinfSteel.fyd(),1.0,d)
print 'As2= ', As2*1e6, ' mm2'
#Fatigue
Mf= beam.getBendingMomentUnderConcentratedLoad(Qa,0.5,0.0)
print 'Mf= ', Mf/1e3, ' kN m/m'
|
[
"ana.Ortega.Ort@gmail.com"
] |
ana.Ortega.Ort@gmail.com
|
43a3de2ecfead616819fe5e028bf38e44a50baa1
|
1b78ca7f3250ebed418717c6ea28b5a77367f1b8
|
/318.maximum-product-of-word-lengths/maximum-product-of-word-lengths.py
|
4bb2a28a8a6aa610056989f8cf03d7720cd87fd1
|
[] |
no_license
|
JaniceLC/lc-all-solutions
|
ced854f31b94f44c0b03a0677988805e3b9ee718
|
3f2a4ee8c09a8890423c6a22c73f470eccf979a2
|
refs/heads/master
| 2020-04-05T19:53:31.307528
| 2018-11-12T04:18:45
| 2018-11-12T04:18:45
| 157,155,285
| 0
| 2
| null | 2018-11-12T04:13:22
| 2018-11-12T04:13:22
| null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
class Solution(object):
def maxProduct(self, words):
"""
:type words: List[str]
:rtype: int
"""
bitmap = [0] * len(words)
mask = 0x01
ans = 0
for i in xrange(0, len(words)):
word = words[i]
for c in word:
bitmap[i] |= (mask << (ord(c) - ord('a')))
for i in xrange(0, len(words)):
for j in xrange(0, i):
if bitmap[i] & bitmap[j] == 0:
ans = max(ans, len(words[i]) * len(words[j]))
return ans
|
[
"jedihy@yis-macbook-pro.local"
] |
jedihy@yis-macbook-pro.local
|
45ce1099bc5ffdfa946930b8766b76fde1714949
|
3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1
|
/pytools/utilities/python/readonly_shelf.py
|
6e1644dd96a518813865861187fe91557dd97f85
|
[] |
no_license
|
stefie10/slu_hri
|
a76f79094bd1740676fec5d889411ba3b1d9dc26
|
50753379953e1ff822162eeab094cffe4a30f3e1
|
refs/heads/master
| 2022-12-14T01:07:51.522258
| 2020-08-31T00:50:12
| 2020-08-31T00:50:12
| 291,386,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import shelve
def open(filename, writeback=True):
return DbfilenameReadonlyShelf(filename, writeback=writeback)
class DbfilenameReadonlyShelf(shelve.Shelf):
"""
Shelf implementation using the "anydbm" generic dbm interface,
read only. Gets rid of annoying error message on shutdown when it
tries to write back.
"""
def __init__(self, filename, writeback):
import anydbm
shelve.Shelf.__init__(self, anydbm.open(filename, flag='r'), protocol=2, writeback=writeback)
def __del__(self):
self.dict.close()
|
[
"stefie10@alum.mit.edu"
] |
stefie10@alum.mit.edu
|
5d1cb88fa75275344805c8e438afef54358aaf7b
|
0b1b92e30893d4428b0e04342490da8aef121a65
|
/Python/find_peak_element.py
|
162886f43b6c6d0a6de053c773e2447a4969f82c
|
[] |
no_license
|
miaojiang1987/LeetCode
|
280dc892f8adbf8c18f30c180e76b045b3797f8c
|
d8f96b0ec1a85abeef1ce8c0cc409ed501ce088b
|
refs/heads/master
| 2021-06-09T14:46:43.858955
| 2021-05-20T09:06:24
| 2021-05-20T09:06:24
| 155,755,065
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return None
l,r=0,len(nums)-1
while l<r:
mid=l+(r-l)//2
if nums[mid]>nums[mid+1]:
r=mid
else:
l=mid+1
return l
|
[
"miaojiang1987@gmail.com"
] |
miaojiang1987@gmail.com
|
280ef9746c4ed0ee0513728f4b5d8d3ab3d2d34f
|
1eb382ad4712721f646bf478fea747c928f47177
|
/plot_perturbation_analysis.py
|
a542f264f5da6c4a9dff770b207998ac2e7f6258
|
[
"MIT"
] |
permissive
|
rmodi6/sentence-representations
|
ad8f03c6f4588020c1fcf6a691fc3b83ebae8e0f
|
4124b8705002ce7188a0473c9840fef6befae0b5
|
refs/heads/master
| 2023-04-09T23:10:11.873093
| 2023-03-26T18:55:40
| 2023-03-26T18:55:40
| 215,374,321
| 0
| 1
|
MIT
| 2023-03-26T18:55:41
| 2019-10-15T18:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,430
|
py
|
import os
import json
import copy
# external libs imports
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# project imports
from data import load_vocabulary, index_instances, generate_batches
from util import load_pretrained_model
if __name__ == '__main__':
training_commands = []
choices = {"dan": range(1, 4+1), "gru": range(1, 4+1)}
models = {"dan": None, "gru": None}
vocabs = {"dan": None, "gru": None}
for seq2vec_name, _ in choices.items():
serialization_dir = os.path.join("serialization_dirs", f"main_{seq2vec_name}_5k_with_emb")
vocab_path = os.path.join(serialization_dir, "vocab.txt")
config_path = os.path.join(serialization_dir, "config.json")
weights_path = os.path.join(serialization_dir, "model.ckpt.index")
model_files_present = all([os.path.exists(path)
for path in [vocab_path, config_path, weights_path]])
if not model_files_present:
epochs = 8 if seq2vec_name == "dan" else 4 # gru is slow, use only 4 epochs
training_command = (f"python train.py main "
f"data/imdb_sentiment_train_5k.jsonl "
f"data/imdb_sentiment_dev.jsonl "
f"--seq2vec-choice {seq2vec_name} "
f"--embedding-dim 50 "
f"--num-layers 4 "
f"--num-epochs {epochs} "
f"--suffix-name _{seq2vec_name}_5k_with_emb "
f"--pretrained-embedding-file data/glove.6B.50d.txt ")
training_commands.append(training_command)
continue
model = load_pretrained_model(serialization_dir)
models[seq2vec_name] = model
vocab, _ = load_vocabulary(vocab_path)
vocabs[seq2vec_name] = vocab
if training_commands:
print("\nFirst, please finish the missing model training using the following commands:")
print("\n".join(training_commands))
exit()
original_instance = {"text_tokens": "the film performances were awesome".split()}
updates = ["worst", "okay", "cool"]
updated_instances = []
for update in updates:
updated_instance = copy.deepcopy(original_instance)
updated_instance["text_tokens"][4] = update
updated_instances.append(updated_instance)
all_instances = [original_instance]+updated_instances
layer_representations = {}
for seq2vec_name in choices.keys():
model = models[seq2vec_name]
vocab = vocabs[seq2vec_name]
all_indexed_instances = index_instances(copy.deepcopy(all_instances), vocab)
batches = generate_batches(all_indexed_instances, 4)
layer_representations[seq2vec_name] = model(**batches[0],
training=False)["layer_representations"]
for seq2vec_name, representations in layer_representations.items():
representations = np.asarray(representations)
differences_across_layers = {"worst": [], "okay": [], "cool": []}
for layer_num in choices[seq2vec_name]:
original_representation = representations[0, layer_num-1, :]
updated_representations = representations[1:, layer_num-1,:]
differences = [sum(np.abs(original_representation-updated_representation))
for updated_representation in updated_representations]
differences_across_layers["worst"].append(float(differences[0]))
differences_across_layers["okay"].append(float(differences[1]))
differences_across_layers["cool"].append(float(differences[2]))
# Make the plots
plt.style.use('seaborn-whitegrid')
plt.plot(choices[seq2vec_name], differences_across_layers["worst"], label="worst")
plt.plot(choices[seq2vec_name], differences_across_layers["okay"], label="okay")
plt.plot(choices[seq2vec_name], differences_across_layers["cool"], label="cool")
plt.xlabel("Layer")
plt.ylabel("Perturbation Response")
plt.legend()
title = f"{seq2vec_name}: Perturbation Response vs Layer"
plt.title(title)
plt.savefig(os.path.join("plots", f"perturbation_response_{seq2vec_name}.png"))
plt.clf()
|
[
"modi.ruchit6@gmail.com"
] |
modi.ruchit6@gmail.com
|
b26baf5d67b85a511a6b942d78af2cb10ca51f02
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/weather__yahoo.py
|
6c01d6686eaff44c12a67e3dd687dc0531713f44
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 973
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import requests
city = "Магнитогорск"
# # OR:
# city = 'Magnitogorsk'
url = (
"https://query.yahooapis.com/v1/public/yql?q=select item from weather.forecast where woeid in "
"(select woeid from geo.places(1) where text='{city}') and u='c'"
"&format=json&diagnostics=true".format(city=city)
)
rs = requests.get(url)
item = rs.json()["query"]["results"]["channel"]["item"]
# Если нужна иконка для погоды:
# https://developer.yahoo.com/weather/documentation.html in Condition Codes
# code = condition['code']
#
# Weather image: http://l.yimg.com/a/i/us/we/52/' + code + '.gif
# Example: http://l.yimg.com/a/i/us/we/52/26.gif
#
condition = item["condition"]
print("Current: {temp} °C, {text}".format(**condition))
print()
print("Forecast:")
for forecast in item["forecast"]:
print("{date}: {low} - {high} °C. {day}: {text}".format(**forecast))
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
75769b77e4341042b1aebeac586a2b46403aecd8
|
307939a14bd837b67ebf18b28a99aa2ad9755d18
|
/app_user_login/migrations/0002_users_birthdate.py
|
dcc3e72027f14118d5113f6a0934585363a9068b
|
[] |
no_license
|
raqueloropeza/Django_LoginAndRegistration
|
3c4d37742c09f5f4f442154251cf81ddf159cba6
|
89b63ded8c60a5b21cd3de0f84c1aed662e29d3b
|
refs/heads/master
| 2023-05-07T19:37:00.592985
| 2021-06-03T16:55:30
| 2021-06-03T16:55:30
| 373,557,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Generated by Django 2.2.4 on 2021-02-25 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_user_login', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='users',
name='birthdate',
field=models.DateField(default=1976),
preserve_default=False,
),
]
|
[
"rockuzaki@gmail.com"
] |
rockuzaki@gmail.com
|
611d4762679b0fa7bc528a0bf1515549c0bb6062
|
1b48dcc9b7a4dc7debff78e65d55617d04b74495
|
/chapter10_multThreading/thread_condition.py
|
aa54f3e62df175f4a546fb383c91a2b5a0544aeb
|
[] |
no_license
|
beenlyons/python_supreme
|
15ad7baabfab57e55ea1521b173e507420a18b9e
|
3279aa8db52c7c8a1295f24a39d228df7f77ce43
|
refs/heads/master
| 2020-03-21T17:30:37.150525
| 2018-07-24T03:54:46
| 2018-07-24T03:54:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
from threading import Condition, Lock
import threading
# 条件变量, 用于复杂的线程间同步
# class XiaoAi(threading.Thread):
# def __init__(self, lock):
# self.lock = lock
# super().__init__(name="xiaoAi")
# def run(self):
# self.lock.acquire()
# print("{} : 在".format(self.name))
# self.lock.release()
#
# self.lock.acquire()
# print("{} : 好啊".format(self.name))
# self.lock.release()
#
# class TianMao(threading.Thread):
# def __init__(self, lock):
# self.lock = lock
# super().__init__(name="tianMao")
# def run(self):
# self.lock.acquire()
# print("{} : 小爱同学".format(self.name))
# self.lock.release()
#
# self.lock.acquire()
# print("{} : 我们来对古诗吧".format(self.name))
# self.lock.release()
class XiaoAi(threading.Thread):
def __init__(self, cond):
self.cond = cond
super().__init__(name="xiaoAi")
def run(self):
with self.cond:
self.cond.wait()
print("{} : 在".format(self.name))
self.cond.notify()
print("{} : 好啊".format(self.name))
class TianMao(threading.Thread):
def __init__(self, cond):
self.cond = cond
super().__init__(name="tianMao")
def run(self):
with self.cond:
print("{} : 小爱同学".format(self.name))
self.cond.notify()
self.cond.wait()
print("{} : 我们来对古诗吧".format(self.name))
self.cond.release()
if __name__ == '__main__':
cond = Condition()
xiaoai = XiaoAi(cond)
tianmao = TianMao(cond)
xiaoai.start()
tianmao.start()
# 启动顺序很重要, wait要先启动
# wait和notify一定要在with condition之后才能调用
# condition有两把锁, 底层锁会在wait的时候释放,上面的锁,会在每次调用wait时候分配一把
# 并放入到condition的等待队列中,等待notify方法的唤醒
|
[
"1335682725@qq.com"
] |
1335682725@qq.com
|
40c671b4b964f8a4f970e54ae2518d818bd8c8bd
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/CodeChef/HAND01.py
|
6bd4e91e18b0489271a1a5f90c80e17e34ed505e
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719
| 2020-08-08T06:54:14
| 2020-08-08T06:54:14
| 138,744,302
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
# Credits: Sir Isaac Newton is standing over a circle of radius 0.5641895835477563.
# Input
# The first line of the input contains an integer T denoting the number of test cases. The next T lines contain a string containing numbers and one or more special charters (?, !, %, $)
# Output
# For each test case, output a single line containing a single real number.
# Constraints
# Should contain all the constraints on the input data that you may have. Format it like:
# 1 ≤ T ≤ 100
# Example
# Input:
# 2
# 3!2?1
# 5%2$2
# Output:
# 5
# 6.0
for t in range (int(input())):
str1=input()
a=len(str1)
b=""
for i in range (a):
if(str1[i]=='!'):
b=b+'*'
elif(str1[i]=='?'):
b=b+'-'
elif(str1[i]=='%'):
b=b+'+'
elif(str1[i]=='$'):
b=b+'/'
else:
b=b+str1[i]
print(eval(b))
|
[
"rastogiritvik99@gmail.com"
] |
rastogiritvik99@gmail.com
|
2e4f86b82f95b90c80d1725615e2996fdb2cb164
|
9c3765dba0b249eb0a8da92076d2ae01291fc0e7
|
/not_done/py_not_started/euler_392.py
|
498d71d4902d7af330f192be8c5b3f979bc95492
|
[] |
no_license
|
saetar/pyEuler
|
3a021f95a1856775bef87b38c753049b04282b80
|
f0af7092e16c2109028b4b1aa5bed7a0057d3fe9
|
refs/heads/master
| 2020-03-21T12:05:15.430454
| 2018-06-15T03:50:50
| 2018-06-15T03:50:50
| 138,535,115
| 0
| 0
| null | 2018-06-25T02:40:43
| 2018-06-25T02:40:42
| null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~ Jesse Rubin ~ project Euler ~
"""
Enmeshed unit circle
http://projecteuler.net/problem=392
A rectilinear grid is an orthogonal grid where the spacing between the gridlines does not have to be equidistant.
An example of such grid is logarithmic graph paper.
Consider rectilinear grids in the Cartesian coordinate system with the following properties:The gridlines are parallel to the axes of the Cartesian coordinate system.There are N+2 vertical and N+2 horizontal gridlines. Hence there are (N+1) x (N+1) rectangular cells.The equations of the two outer vertical gridlines are x = -1 and x = 1.The equations of the two outer horizontal gridlines are y = -1 and y = 1.The grid cells are colored red if they overlap with the unit circle, black otherwise.For this problem we would like you to find the positions of the remaining N inner horizontal and N inner vertical gridlines so that the area occupied by the red cells is minimized.
E.g. here is a picture of the solution for N = 10:
The area occupied by the red cells for N = 10 rounded to 10 digits behind the decimal point is 3.3469640797.
Find the positions for N = 400.
Give as your answer the area occupied by the red cells rounded to 10 digits behind the decimal point.
"""
def p392():
pass
if __name__ == '__main__':
p392()
|
[
"jessekrubin@gmail.com"
] |
jessekrubin@gmail.com
|
8e559598071a54f18e8b5b49476b09087984d51e
|
dfe4d9f6b09109b22e76069b0f6e56993165e91d
|
/camp-python-2021-find-me-develop/apps/users/apps.py
|
00812dc9a1174718ab09f30466d4af9874f10952
|
[] |
no_license
|
rhanmar/oi_projects_summer_2021
|
ef6b06e003a1c54583df4b006d141df924adeafb
|
0879ade24685b628624dce06698f8a0afd042000
|
refs/heads/main
| 2023-08-25T04:02:46.284756
| 2021-09-17T03:29:16
| 2021-09-17T03:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
from watson import search as watson
class UsersAppConfig(AppConfig):
"""Default configuration for Users app."""
name = "apps.users"
verbose_name = _("Users")
def ready(self):
# pylint: disable=unused-import,invalid-name
from .api.auth import scheme # noqa
from .signals import check_avatar, copy_default_avatar_to_media # noqa
User = self.get_model("User")
watson.register(
User,
fields=("first_name", "last_name", "email"),
)
|
[
"dimontura@yandex.ru"
] |
dimontura@yandex.ru
|
85a51ad7dc8750ab6d9a66bec400b2433c202821
|
368be25e37bafa8cc795f7c9f34e4585e017091f
|
/.history/app_fav_books/views_20201114172332.py
|
ae26a428bcd7b78a9862ed83d5fbd6fd90c986e0
|
[] |
no_license
|
steven-halla/fav_books_proj
|
ebcfbfda0e7f3cdc49d592c86c633b1d331da513
|
512005deb84ac906c9f24d4ab0939bd0db096716
|
refs/heads/master
| 2023-03-30T09:37:38.016063
| 2021-04-02T20:27:22
| 2021-04-02T20:27:22
| 354,125,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_New_User(request):
errors = User.objects.basic_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/register/success')
def books_info(request):
user = User.objects.get(id=request.session['user_id'])
context = {
"user":user
}
return render(request, "books_user_page.html", context)
def log_user_out(request):
request.session.clear()
return redirect("/")
def log_in(request):
email_from_post = request.POST['email']
password_from_post = request.POST['password']
user = User.objects.filter(email=email_from_post)
if len(user) > 0:
logged_user = user[0]
print(logged_user.first_name)
print(logged_user.password)
request.session['user_id'] = logged_user.id
return redirect("/register/success")
else:
messages.error(request, "email/password does not exist")
return redirect("register/success")
def add_new_book(request):
errors = Books.objects.basic_validator(request.POST)
|
[
"69405488+steven-halla@users.noreply.github.com"
] |
69405488+steven-halla@users.noreply.github.com
|
47e588e7c429cf9a9c1d10db3d2ef25f983ed712
|
2850d9adba96bc4e73185de5d6adebf363a5c534
|
/tce/tcloud/cvm/AssociateSecurityGroups.py
|
11d8e5ca52a09b8e0e312396bb00ee9dfb7bb036
|
[
"Apache-2.0"
] |
permissive
|
FatAnker/tencentcloud-sdk-python
|
d8f757b12ad336e78a06b68a789ecc3c86d1d331
|
d6f75a41dc7053cb51f9091f4d41b8cb7a837559
|
refs/heads/master
| 2020-04-30T22:34:16.740484
| 2019-04-28T11:14:11
| 2019-04-28T11:14:11
| 177,122,691
| 0
| 1
| null | 2019-03-22T10:46:01
| 2019-03-22T10:46:01
| null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
# -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# 导入对应产品模块的client models。
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
# 导入可选配置类
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
from tce.tcloud.utils.config import global_config
ssl._create_default_https_context = ssl._create_unverified_context
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain = params['domain']
try:
# 实例化一个认证对象,入参需要传入腾讯云账户secretId,secretKey
cred = credential.Credential(secretId, secretKey)
httpProfile = HttpProfile()
httpProfile.endpoint = "cvm."+domain
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
# 实例化要请求产品(以cvm为例)的client对象,clientProfile是可选的。
client = cvm_client.CvmClient(cred, region, clientProfile)
# 实例化一个cvm实例信息查询请求对象,每个接口都会对应一个request对象。
req = models.AssociateSecurityGroupsRequest()
# 这里还支持以标准json格式的string来赋值请求参数的方式。下面的代码跟上面的参数赋值是等效的。
params = '{"InstanceIds":["ins-i4ekkudx","ins-gwggvy39"],"SecurityGroupIds":["sg-poxp7nok"]}'
req.from_json_string(params)
resp = client.AssociateSecurityGroups(req)
# 输出json格式的字符串回包
print(resp.to_json_string())
# 也可以取出单个值。
# 你可以通过官网接口文档或跳转到response对象的定义处查看返回字段的定义。
# print(resp.TotalCount)
except TencentCloudSDKException as err:
print(err)
|
[
"1113452717@qq.com"
] |
1113452717@qq.com
|
5fa589d9a038fcbb13e019a6129a02a94b582d64
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2751486_0/Python/axr123/a.py
|
49363e53c0937f442e2efd0decfa7c6a90f1f400
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
import sys
# Characters treated as vowels; everything else counts as a consonant.
vowels = 'aeiou'


def concons(s):
    """Return the length of the longest run of consecutive consonants in *s*.

    A consonant is any character not in ``vowels`` (lowercase a, e, i, o, u).
    Fixes the original parameter name, which shadowed the builtin ``str``.
    """
    max_run = 0
    run = 0
    for ch in s:
        if ch in vowels:
            # Vowel ends the current run; keep the best seen so far.
            max_run = max(max_run, run)
            run = 0
        else:
            run += 1
    # Account for a run that extends to the end of the string.
    return max(max_run, run)
def solve(name, n):
    """Count substrings of *name* whose longest consonant run is >= *n*."""
    total = 0
    length = len(name)
    # Enumerate every substring name[start:end] (end exclusive).
    for start in range(length):
        for end in range(start + 1, length + 1):
            if concons(name[start:end]) >= n:
                total += 1
    return total
# Read the number of test cases, then for each case read "name n" from
# stdin and report how many substrings of name contain at least n
# consecutive consonants (Code Jam-style output).
numcases = int(sys.stdin.readline())
for c in range(numcases):
    name, n = sys.stdin.readline().split()
    print("Case #%d: %d" % (c+1, solve(name, int(n))))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
31be72f47f8097ba9df421e724f210ac08707884
|
602a4e86499841fbae43d84fc92908c533106aea
|
/core/forms.py
|
92edcd63936e84acef0f101ba7523413660abe6f
|
[] |
no_license
|
vden/TsoguNG
|
b187ccf1bef387417ec73467c51458d6f1443239
|
f8d5e7ab9d85559aa163c232c9f28a24a2b7c2a4
|
refs/heads/master
| 2021-01-02T08:52:03.914218
| 2011-04-26T07:01:57
| 2011-04-26T07:01:57
| 1,663,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
# -*- coding: utf-8 -*-
from django import forms
from core.fields import CalendarDateField
from django.contrib.admin import widgets as admin_widgets
from core import models
from datetime import datetime
def extra_form_factory(type):
    """Build a ModelForm subclass with publishing extras for *type*.

    The generated form adds slug, publication-date, publication-hour and
    view-template fields, and on save folds the selected hour into
    ``date_published``.
    NOTE(review): the parameter name shadows the builtin ``type``.
    """
    class BaseExtraForm(forms.ModelForm):
        # 'None' means "no specific hour"; otherwise hours 0..24.
        time_choices = [('None',u'не задано')] + [('%s'%x,'%2d:00'%x) for x in xrange(0,25)]
        slug = forms.SlugField(label=u'Адрес', required=False)
        date_published = CalendarDateField(label=u'Дата публикации', required=False)
        time = forms.ChoiceField(label = u'Время публикации', required=False, choices=time_choices)
        view_template = forms.ModelChoiceField(label=u"Шаблон",queryset=type.available_templates(),empty_label="(стандартный шаблон)",required=False)
        def __init__(self, *args, **kw):
            # Pre-select the hour from the instance's current publication
            # date; the bare except falls back to 'None' when there is no
            # instance kwarg or no date set (it also hides any other error).
            try:
                kw['initial'] = {'time':str(kw['instance'].date_published.hour)}
            except:
                kw['initial'] = {'time':'None'}
            super(BaseExtraForm, self).__init__(*args, **kw)
        def save(self):
            s = super(BaseExtraForm, self).save()
            # Rebuild date_published with the chosen hour (minutes/seconds
            # are discarded).
            if self['time'].data != 'None':
                d = s.date_published
                s.date_published = datetime(d.year, d.month, d.day, int(self['time'].data))
                s.save()
        class Meta:
            model = type
            fields = ('slug','view_template','not_browse','block','date_published')
    return BaseExtraForm
class BaseConfigletForm(forms.ModelForm):
    """Form that creates, updates or deletes a Configlet for an object.

    ``bid`` is the id of the owning BaseObject (hidden); ``remove`` set to
    the string 'True' deletes an existing configlet instead of saving.
    """
    bid = models.Configlet._meta.get_field("bid").formfield(widget=forms.HiddenInput())
    value = forms.CharField(label = u'Значение')
    remove = forms.CharField(label = u'Удалить')
    class Meta:
        model = models.Configlet
        fields = ('predicate','value','bid')
    def is_valid(self):
        # The form is considered valid as soon as a predicate was submitted;
        # NOTE(review): this bypasses the normal ModelForm validation.
        if self['predicate'].data:
            return True
        return False
    def save(self):
        # Look for an existing configlet for this (object, predicate) pair.
        conf = models.Configlet.objects.filter(bid = self['bid'].data,
                        predicate = self['predicate'].data)
        if conf:
            conf = conf[0]
            # Deletion request: remove the configlet and stop.
            if str(self['remove'].data) == 'True':
                conf.delete()
                return True
        else:
            # No existing configlet: create a fresh one for this predicate.
            conf = models.Configlet()
            conf.predicate = self['predicate'].data
        conf.value = self['value'].data
        conf.bid = models.BaseObject.objects.get(id=self['bid'].data)
        conf.save()
|
[
"denis.voskvitsov@gmail.com"
] |
denis.voskvitsov@gmail.com
|
3571215df13d920412f8c5912d86fde82b88a82e
|
fe4b49f22cd851ee4f7639bef720d774fbfb1248
|
/src/comments/models.py
|
cc5f4cbe71dfee3814480f8a9c14549170dde5f7
|
[] |
no_license
|
tyagow/servicos-paraguai
|
b2c20a48651e7e046f4e86b0e94f026589dbc545
|
a71078174a175f86f2a4f49fcaf26b8f91ea778d
|
refs/heads/master
| 2020-12-24T05:39:57.243158
| 2017-05-23T14:14:28
| 2017-05-23T14:14:28
| 73,492,227
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class CommentManager(models.Manager):
    """Manager that fetches approved comments for an arbitrary model instance."""
    def filter_by_instance(self, instance):
        # Resolve the generic-FK target: match by the instance's ContentType
        # and primary key, then keep only approved (aprovado=True) comments.
        content_type = ContentType.objects.get_for_model(instance.__class__)
        obj_id = instance.id
        qs = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(aprovado=True)
        return qs
class Comment(models.Model):
    """A moderated comment attachable to any model via a generic relation."""
    nome = models.CharField(max_length=60)  # commenter's name
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    conteudo = models.TextField()  # comment body
    timestamp = models.DateTimeField(auto_now_add=True)
    aprovado = models.BooleanField(default=False)  # moderation flag; hidden until approved
    objects = CommentManager()
    def __str__(self):
        return self.nome
|
[
"tyagow@hotmail.com.br"
] |
tyagow@hotmail.com.br
|
c12d8a3717648e007e9abc3dc9d63a711c4d0582
|
b864232c0133738e329e61ca74188c8eafe74108
|
/misc/experiment/distortion/main.py
|
80b7d04914341bb72e17df1ec3410fc3387e5877
|
[] |
no_license
|
QilinGu/tf-face-recognizer
|
0b16af8225d4e3bd67b0bd2df3005b5f1a3a7f35
|
d1092b72d01f08a7bbfb2f30072a60b8d8409804
|
refs/heads/master
| 2021-01-09T06:40:30.678375
| 2017-02-03T17:57:21
| 2017-02-03T17:57:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
import os
import tensorflow as tf
def main(argv=None):
    """Visual experiment: apply random crop/resize/color distortions to
    face.png and write a batch of 20 distorted samples as an image summary
    for TensorBoard inspection. (TensorFlow 1.x graph-mode API.)
    """
    with open(os.path.join(os.path.dirname(__file__), 'face.png'), 'rb') as f:
        png = f.read()
    image = tf.image.decode_png(png, channels=3)
    # Sample a random crop box that keeps >= 90% of the target box
    # (an 8px inset of an assumed 112x112 face image — TODO confirm size).
    begin, size, _ = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        [[[8.0/112.0, 8.0/112.0, (112.0-8.0)/112.0, (112.0-8.0)/112.0]]],
        min_object_covered=0.9)
    image = tf.slice(image, begin, size)
    # Resize to a random intermediate size, then back to 96x96
    # (method=2 is area interpolation), degrading resolution randomly.
    resize = tf.random_uniform([2], minval=48, maxval=144, dtype=tf.int32)
    image = tf.image.resize_images(image, resize, method=2)
    image = tf.image.resize_images(image, [96, 96], method=2)
    # Random photometric distortions.
    image = tf.image.random_brightness(image, max_delta=0.4)
    image = tf.image.random_contrast(image, lower=0.6, upper=1.4)
    image = tf.image.random_hue(image, max_delta=0.04)
    image = tf.image.random_saturation(image, lower=0.6, upper=1.4)
    # Batch 20 standardized samples and emit them as one image summary.
    images = tf.train.batch([tf.image.per_image_standardization(image)], 20)
    summary = tf.summary.image('images', images, max_outputs=20)
    writer = tf.summary.FileWriter(os.path.dirname(__file__))
    with tf.Session() as sess:
        tf.train.start_queue_runners(sess=sess)
        summary_value, begin_value, size_value, resize_value = sess.run([summary, begin, size, resize])
        print(begin_value, size_value, resize_value)
        writer.add_summary(summary_value)
if __name__ == '__main__':
    tf.app.run()
|
[
"sugi1982@gmail.com"
] |
sugi1982@gmail.com
|
8cfa28a1b18e8ac19215d87eebf5216525b2160e
|
25fa5fdc9f67738332bd6f95a1e4f038cd286890
|
/BOJ/단계별로 풀어보기/ch07_문자열/1152_단어의 개수.py
|
0e051399bc66d888bae95cbbf9345f6aa8eeb91a
|
[] |
no_license
|
mandos1995/online_judge
|
b0cfd56e3391495f22b9832895cddcea70334349
|
9b90bffdcbfb5369e8dd5dafbb07f8e9e7050617
|
refs/heads/main
| 2023-08-02T19:29:03.716295
| 2021-10-04T15:10:34
| 2021-10-04T15:10:34
| 329,517,747
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
'''
문제
영어 대소문자와 띄어쓰기만으로 이루어진 문자열이 주어진다. 이 문자열에는 몇 개의 단어가 있을까?
이를 구하는 프로그램을 작성하시오. 단, 한 단어가 여러 번 등장하면 등장한 횟수만큼 모두 세어야 한다.
'''
# solution
string = list(map(str,input().strip().split()))
print(len(string))
|
[
"mandos19950620@gmail.com"
] |
mandos19950620@gmail.com
|
9040402abc555921137b96e21d59ea69a4a65b3f
|
ebbc32882b1bbb6e6935695a400e8ffd8c0fb7f6
|
/ROAR/planning_module/mission_planner/waypoint_following_mission_planner.py
|
81d8b4b89955d40a27fae6e422993f7a437d1132
|
[
"Apache-2.0"
] |
permissive
|
augcog/ROAR
|
25fc3555934854871883de84bae49d84f86fbd7b
|
b7cd322f451ceccf5c53b331e15f5025f2e63bac
|
refs/heads/main
| 2023-08-31T16:51:00.857463
| 2023-08-12T00:08:06
| 2023-08-12T00:08:06
| 302,450,761
| 29
| 222
|
Apache-2.0
| 2023-08-26T07:04:26
| 2020-10-08T20:10:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,333
|
py
|
from ROAR.planning_module.mission_planner.mission_planner import (
MissionPlanner,
)
from pathlib import Path
import logging
from typing import List, Optional
from ROAR.utilities_module.data_structures_models import Transform, Location, Rotation
from collections import deque
from ROAR.agent_module.agent import Agent
import numpy as np
class WaypointFollowingMissionPlanner(MissionPlanner):
    """
    A mission planner that reads x,y,z (or x,y,z,roll,pitch,yaw) rows from a
    waypoint file and formulates them into a deque of Transform waypoints.
    """
    def run_in_series(self) -> deque:
        """
        Regenerate the mission plan from the waypoint file.

        Returns:
            deque of Transform waypoints covering all configured laps
        """
        super(WaypointFollowingMissionPlanner, self).run_in_series()
        return self.produce_mission_plan()
    def __init__(self, agent: Agent):
        super().__init__(agent=agent)
        self.logger = logging.getLogger(__name__)
        # Waypoint file path comes from the agent's settings.
        self.file_path: Path = Path(self.agent.agent_settings.waypoint_file_path)
        self.mission_plan = self.produce_mission_plan()
        # Pristine copy so restart() can rewind the (mutable) plan.
        self._mission_plan_backup = self.mission_plan.copy()
        self.logger.debug("Path Following Mission Planner Initiated.")
    def produce_mission_plan(self) -> deque:
        """
        Generate the full mission plan: the single-lap path from the file,
        tiled num_laps times.
        :return a deque of Transform waypoints
        """
        raw_path: List[List[float]] = self._read_data_file()
        length = self.agent.agent_settings.num_laps * len(raw_path)
        mission_plan = deque(maxlen=length)
        # np.tile repeats the one-lap path once per lap.
        for coord in np.tile(raw_path, (self.agent.agent_settings.num_laps, 1)):
            # Only 3-element (x,y,z) and 6-element (x,y,z,roll,pitch,yaw)
            # rows are valid; others are skipped by _raw_coord_to_transform.
            if len(coord) == 3 or len(coord) == 6:
                mission_plan.append(self._raw_coord_to_transform(coord))
        self.logger.debug(f"Computed Mission path of length [{len(mission_plan)}]")
        return mission_plan
    def produce_single_lap_mission_plan(self):
        """Generate a mission plan for exactly one lap of the waypoint file."""
        raw_path: List[List[float]] = self._read_data_file()
        mission_plan = deque(maxlen=len(raw_path))
        for coord in raw_path:
            if len(coord) == 3 or len(coord) == 6:
                mission_plan.append(self._raw_coord_to_transform(coord))
        self.logger.debug(f"Computed Mission path of length [{len(mission_plan)}]")
        return mission_plan
    def _read_data_file(self) -> List[List[float]]:
        """
        Read data file and generate a list of (x, y, z) where each of x, y, z is of type float
        Returns:
            List of waypoints in format of [x, y, z]
        """
        result = []
        with open(self.file_path.as_posix(), "r") as f:
            for line in f:
                result.append(self._read_line(line=line))
        return result
    def _raw_coord_to_transform(self, raw: List[float]) -> Optional[Transform]:
        """
        transform coordinate to Transform instance
        Args:
            raw: coordinate in form of [x, y, z] or [x, y, z, roll, pitch, yaw]
        Returns:
            Transform instance, or None if the row has an invalid length
        """
        if len(raw) == 3:
            # Position-only row: orientation defaults to zero.
            return Transform(
                location=Location(x=raw[0], y=raw[1], z=raw[2]),
                rotation=Rotation(pitch=0, yaw=0, roll=0),
            )
        elif len(raw) == 6:
            return Transform(
                location=Location(x=raw[0], y=raw[1], z=raw[2]),
                rotation=Rotation(roll=raw[3], pitch=raw[4], yaw=raw[5]),
            )
        else:
            self.logger.error(f"Point {raw} is invalid, skipping")
            return None
    def _read_line(self, line: str) -> List[float]:
        """
        parse a line of string of "x,y,z" into [x,y,z]
        Args:
            line: comma delimetered line
        Returns:
            [x, y, z] or [x, y, z, roll, pitch, yaw]
        """
        # 3-field parse first; on any failure assume the 6-field format.
        # NOTE(review): the bare except means a truly malformed line raises
        # a ValueError from the 6-field branch instead of a clear message.
        try:
            x, y, z = line.split(",")
            x, y, z = float(x), float(y), float(z)
            return [x, y, z]
        except:
            x, y, z, roll, pitch, yaw = line.split(",")
            return [float(x), float(y), float(z), float(roll), float(pitch), float(yaw)]
    def restart(self):
        # Restore the plan captured at construction time.
        self.mission_plan = self._mission_plan_backup.copy()
|
[
"wuxiaohua1011@berkeley.edu"
] |
wuxiaohua1011@berkeley.edu
|
479a24ebea460d630afb100cbe446f2b98ea29c5
|
49819aef1336ddb8511e6dec53311777c0b41a7e
|
/apps/authentication/middleware.py
|
b2dd6834f09fcecbc03130d84405085fa4a502c7
|
[] |
no_license
|
devmaster54/pslam
|
d91f80340a89820ee596068f13fe6a628a93aab6
|
e81316677a1db24bbedce70bf59f85d30583742d
|
refs/heads/master
| 2022-12-24T15:20:39.219597
| 2020-10-02T15:46:20
| 2020-10-02T15:46:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
import re
from psalm import settings
from django.contrib.auth.decorators import login_required
class AuthVerificationMiddleware(object):
    """Placeholder middleware hook for authentication verification."""
    def process_exception(self, request, exception):
        # Returning None lets Django continue with its default exception
        # handling; no verification logic is implemented yet.
        return None
|
[
"soft.expert32@gmail.com"
] |
soft.expert32@gmail.com
|
b0a4d0f17558cbe4b82cc607cb5c64a7b2238ed4
|
ae381913c23385f004b82161624097645ba8c4c8
|
/Xianyang_modwt/projects/ensemble_models.py
|
505bd0dc6a0bcac8eb2a2a3565d5e738a7deca6b
|
[
"MIT"
] |
permissive
|
zjy8006/MonthlyRunoffForecastByAutoReg
|
aa37910fdc66276d0df9d30af6885209d4a4ebfc
|
661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2
|
refs/heads/master
| 2020-12-12T05:25:48.768993
| 2020-08-20T07:21:12
| 2020-08-20T07:21:12
| 259,588,564
| 7
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
# Ensemble the hybrid forecasting models for the Xianyang/modwt project:
# runs tools.ensembler.ensemble once per lead time (1, 3, 5, 7 months).
import matplotlib.pyplot as plt
import os
# NOTE(review): os.path.abspath('__file__') resolves the *string*
# '__file__' relative to the CWD, not the script location — presumably
# intentional only when run from the repo root; verify.
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from tools.ensembler import ensemble
from Xianyang_modwt.projects.variables import variables
# Set the project parameters
ORIGINAL = 'XianyangRunoff1951-2018(1953-2018).xlsx'
STATION = 'Xianyang'
DECOMPOSER = 'modwt'
PREDICTOR = 'esvr' # esvr or gbrt or lstm
wavelet_level='db1-4'
# Earlier single-run invocation, kept for reference:
# ensemble(
#     root_path=root_path,
#     original_series=ORIGINAL,
#     station=STATION,
#     decomposer=DECOMPOSER,
#     variables = variables,
#     predictor=PREDICTOR,
#     predict_pattern='single_hybrid_1_ahead',
#     wavelet_level=wavelet_level,
#     )
# Ensemble each forecast lead time of interest.
for lead_time in [1,3,5,7]:
    ensemble(
        root_path=root_path,
        original_series=ORIGINAL,
        station=STATION,
        decomposer=DECOMPOSER,
        variables = variables,
        predictor=PREDICTOR,
        predict_pattern='single_hybrid_'+str(lead_time)+'_ahead_lag12_mi_ts0.1',
        wavelet_level=wavelet_level,
    )
|
[
"zuojianyi@outlook.com"
] |
zuojianyi@outlook.com
|
f024ccaadc2065d52a7c473fe13a3620d3f75af1
|
6d913683be43f459b6e29dd84f09c05234efeb4d
|
/single_cell_differentiation_cuomo_data/merge_parallelized_covariate_modulated_eqtls.py
|
3cea2a67cb603b0c7c42cf93e49bc86f36b4b564
|
[] |
no_license
|
BennyStrobes/eqtl_factorization
|
4f94d8e2e00cf1830fd008f3264d1f9c57f6b2a0
|
e555485e40e44c51e86f67761e5200b370673910
|
refs/heads/master
| 2021-06-20T15:54:34.906395
| 2021-04-05T14:11:46
| 2021-04-05T14:11:46
| 198,705,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
import numpy as np
import os
import sys
import pdb
def bf_fdr_multiple_testing_correction(variant_gene_pairs_eqtl_results_file, multple_testing_correction_results_file, fdr_thresh):
    """Two-stage multiple-testing correction for eQTL results.

    Stage 1 (per gene): pick the lead (smallest-p) variant and Bonferroni-
    correct its p-value by the number of variants tested at that gene.
    Stage 2 (across genes): Benjamini-Hochberg-style pass over the sorted
    Bonferroni p-values; genes passing fdr_thresh are written out with the
    variant count and FDR appended to their original result line.
    Column assumptions from the merged file: col 0 = gene id,
    col 1 = variant id, col 7 = covariate-modulated eQTL p-value.
    """
    f = open(variant_gene_pairs_eqtl_results_file)
    t = open(multple_testing_correction_results_file, 'w')
    head_count = 0
    # gene_id -> (lead_variant, lead_pvalue, num_variants, lead_result_line)
    genes = {}
    for line in f:
        line = line.rstrip()
        data = line.split()
        if head_count == 0:
            # Copy the header through, with the two new columns appended.
            head_count = head_count + 1
            t.write(line + '\tnum_snps_in_gene\tfdr\n')
            continue
        gene_id = data[0]
        variant_id = data[1]
        pvalue = float(data[7])
        if gene_id not in genes:
            genes[gene_id] = (variant_id, pvalue, 1, line)
        else:
            old_pvalue = genes[gene_id][1]
            old_count = genes[gene_id][2]
            # Keep the smaller p-value as lead; always bump the variant count.
            if pvalue <= old_pvalue:
                genes[gene_id] = (variant_id, pvalue, old_count+1, line)
            else:
                genes[gene_id] = (genes[gene_id][0], genes[gene_id][1], old_count+1, genes[gene_id][3])
    f.close()
    # Loop through genes and do BF (Bonferroni) correction, capped at 1.0.
    bf_gene_array = []
    for gene in genes.keys():
        lead_variant = genes[gene][0]
        lead_nominal_pvalue = genes[gene][1]
        num_variants_at_gene = genes[gene][2]
        test_line = genes[gene][3]
        bf_corrected_pvalue = lead_nominal_pvalue*num_variants_at_gene
        if bf_corrected_pvalue > 1.0:
            bf_corrected_pvalue = 1.0
        bf_gene_array.append((bf_corrected_pvalue, lead_variant, gene, num_variants_at_gene, test_line))
    sorted_bf_gene_array = sorted(bf_gene_array, key=lambda tup: tup[0])
    # BH correction
    # NOTE(review): this stops reporting at the *first* rank whose FDR
    # exceeds the threshold (step-down), which is more conservative than
    # the textbook BH step-up rule — confirm this is intended.
    kk = 1
    num_genes = len(sorted_bf_gene_array)
    sig = True
    for gene_tuple in sorted_bf_gene_array:
        bf_pvalue = gene_tuple[0]
        fdr = num_genes*bf_pvalue/kk
        kk = kk + 1
        if fdr > fdr_thresh:
            sig = False
        if sig == True:
            t.write(gene_tuple[4] + '\t' + str(gene_tuple[3]) + '\t' + str(fdr) + '\n')
    t.close()
def make_sure_files_exist(output_root, total_jobs, suffix):
    """Check that every per-job result file exists.

    Expected files are named "<output_root><job>_<total_jobs><suffix>".
    Every missing file name is printed so the caller sees the full list.

    Returns:
        True if all files exist, False otherwise.
    """
    all_present = True
    for job_number in range(total_jobs):
        file_name = output_root + str(job_number) + '_' + str(total_jobs) + suffix
        if not os.path.isfile(file_name):
            print(file_name)
            all_present = False
    return all_present


def merge_parallelized_results(output_root, suffix, total_jobs):
    """Concatenate per-job result files into "<output_root>merged<suffix>".

    Writes a single header line, appends every line of every job file in
    job order, and deletes each job file after it has been merged. Aborts
    with a message (and no merged file) if any expected job file is missing.
    """
    if not make_sure_files_exist(output_root, total_jobs, suffix):
        print('Missing required input files. Please re-evaluate :)')
        return
    # Open output (merged result) file handle.
    t = open(output_root + 'merged' + suffix, 'w')
    t.write('Gene_id\tvariant_id\tchrom_num\tgene_tss\tvariant_position\tmaf\tcell_maf\tcovariate_modulated_eqtl_pvalue\teqtl_pvalue\n')
    # Stream each parallelized job's output into the merged file.
    for job_number in range(total_jobs):
        file_name = output_root + str(job_number) + '_' + str(total_jobs) + suffix
        # `with` guarantees the handle closes even if a read fails
        # (the original left the file open on exceptions).
        with open(file_name) as f:
            for line in f:
                t.write(line.rstrip() + '\n')
        # os.remove instead of shelling out to `rm`: portable and not
        # vulnerable to shell-special characters in the path.
        os.remove(file_name)
    t.close()
# Command-line arguments: <output_root> <total_jobs>
output_root = sys.argv[1]
total_jobs = int(sys.argv[2])
# Concatenate the per-job ".txt" result files into <output_root>merged.txt.
merge_parallelized_results(output_root, ".txt", total_jobs)
merged_file = output_root + 'merged.txt'
####################
# Multiple-testing correction
####################
# Run Bonferroni (per gene) + BH (across genes) at three FDR thresholds.
# Output file
fdr_thresh=.01
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform bonferonni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
# Output file
fdr_thresh=.05
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform bonferonni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
# Output file
fdr_thresh=.1
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform bonferonni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
|
[
"bstrobe1@jhu.edu"
] |
bstrobe1@jhu.edu
|
3d522fc5eb54ef2e47e6a569e0ad572dc5690bf3
|
6c9d41994937733dc84e54359f5789ac945724a2
|
/echo.py
|
863cb8941b7dd620f6dacb81c28f14c916781f13
|
[
"MIT"
] |
permissive
|
cider-load-test/pygadu
|
2268f38b4612cb32236c687ef355c6dc1d4cae33
|
d4c1b25908ae6facd89d3509ea8bc2ec6b4eb11c
|
refs/heads/master
| 2021-12-02T06:28:02.877479
| 2008-11-30T13:01:41
| 2008-11-30T13:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
import time
from pygadu.session import PyGadu
from pygadu.util import query_hub
class Echo(PyGadu):
    """Minimal Gadu-Gadu bot (Python 2) that echoes every received message
    back to its sender. Each on* method is a protocol event callback."""
    def __init__(self):
        super(Echo, self).__init__()
    def onWelcome(self, packet):
        print repr(packet)
        print "Welcome!"
    def onLoginOk(self, packet):
        print repr(packet)
        print "Login Ok!"
        # Send an empty contact list — required by the protocol to go online.
        self.sendFriendList([])
    def onConnect(self):
        print "Connect"
    def onClose(self):
        print "Close"
    def onSendMessage(self, packet):
        print repr(packet)
        print "on send message?"
    def onLoginError(self, packet):
        print repr(packet)
        print "Error!"
    def onRecvMessage(self, packet):
        # Echo the incoming text straight back to the sender.
        print repr(packet)
        print packet.sender, packet.text
        self.sendMessage(packet.sender, packet.text)
    def onUnknown(self, packet):
        # Fallback for packet types without a dedicated handler.
        print repr(packet)
if __name__ == '__main__':
    try:
        # Ask the GG hub which server hosts this account, then log in.
        host = query_hub(10533767)
        print host
        gg = Echo()
        gg.login(10533767, "123456", host=host)
        # Keep-alive loop: ping the server every 5 minutes until Ctrl-C.
        while True:
            time.sleep(300)
            gg.ping()
    except KeyboardInterrupt:
        # NOTE(review): if interrupted before `gg` is assigned (e.g. during
        # query_hub) this raises NameError instead of exiting cleanly.
        gg.logout()
|
[
"devnull@localhost"
] |
devnull@localhost
|
68c366cf3fe05227f6edc4c1f969590059582f5e
|
e3fd35a8443aaf2f293ae03a5f6c819046a4dd21
|
/leetcode-java/leetcode.py
|
9eecc06a263da457fb4f2cbe087d8d5b6c42ee2f
|
[] |
no_license
|
hieutran106/leetcode-ht
|
2223ea6bcd459c2cdbc33344c0ff69df7f8a3c7f
|
8332eb20e613f82cda2e326218154c7803a32403
|
refs/heads/main
| 2023-08-09T02:52:41.360360
| 2023-07-27T10:12:28
| 2023-07-27T10:12:28
| 234,890,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import sys
import os
# Scaffold a new LeetCode solution: creates
# src/test/java/<difficulty>/<problem>/ with an empty README.md and a
# JUnit SolutionTest.java template. Usage: leetcode.py <difficulty> <problem>
if __name__ == "__main__":
    difficulty = sys.argv[1]
    problem = sys.argv[2]
    cwd = os.getcwd()
    path = os.path.join(cwd, "src", "test", "java", difficulty, problem)
    # Early exit: never overwrite an existing solution directory.
    if os.path.exists(path):
        print("Solution existed. Exit ...")
        sys.exit()
    print(f'Create {difficulty}/{problem}')
    os.mkdir(path)
    # create README.md (empty placeholder)
    file = os.path.join(path, 'README.md')
    open(file, 'a').close()
    # create SolutionTest.java from the template below; {{ }} are escaped
    # braces inside the f-string, producing literal Java braces.
    file = os.path.join(path, 'SolutionTest.java')
    with open(file, 'w+') as f:
        test_content = f"""package {difficulty}.{problem};
import org.junit.Assert;
import org.junit.Test;
public class SolutionTest {{
    public static class Solution {{
    }}
    @Test
    public void testCase1() {{
        var s = new Solution();
    }}
    @Test
    public void testCase0() {{
        var s = new Solution();
    }}
}}"""
        f.writelines(test_content)
|
[
"hieutran106@gmail.com"
] |
hieutran106@gmail.com
|
7a5dcf48ff36cbc9f82e9329b3f7faec2ce88438
|
8eafb73fdab3e422aa717bac9af338dcba5e3c1e
|
/bbp/tests/test_gp_gof.py
|
bfb3309cb2b062ef6e9a0ed863563eb5f5da02ec
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
LevyForchh/bbp
|
6dae4ce3577a73f5cef9b9b5507753a1381ec870
|
3cc389fb956ea14ef827af0f437ce37e8291afac
|
refs/heads/master
| 2020-06-03T05:10:35.751009
| 2019-06-11T21:38:18
| 2019-06-11T21:38:18
| 191,453,945
| 0
| 0
| null | 2019-06-11T21:38:16
| 2019-06-11T21:38:15
| null |
UTF-8
|
Python
| false
| false
| 7,314
|
py
|
#! /usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import unittest
# Import Broadband modules
import cmp_bbp
import seqnum
import bband_utils
from install_cfg import InstallCfg
from gp_gof_cfg import GPGofCfg
from gp_gof import GPGof
class TestGPGof(unittest.TestCase):
    """
    Unit test for gp_gof.py
    """
    def setUp(self):
        # Build a fresh simulation workspace and populate it with the
        # reference seismograms/response files for 5 stations.
        self.install = InstallCfg()
        self.gp_gof_cfg = GPGofCfg()
        os.chdir(self.install.A_INSTALL_ROOT)
        self.srcfile = "test_wh.src"
        self.stations = "test_stat.txt"
        # Unique simulation id so parallel test runs don't collide.
        self.sim_id = int(seqnum.get_seq_num())
        sta_base = os.path.basename(os.path.splitext(self.stations)[0])
        # Set up paths
        refdir = os.path.join(self.install.A_TEST_REF_DIR, "gp")
        a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
        a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
        a_outdir_seis = os.path.join(self.install.A_OUT_DATA_DIR,
                                     str(self.sim_id),
                                     "obs_seis_%s" % (sta_base))
        a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
        a_logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))
        # Create directories
        bband_utils.mkdirs([a_indir, a_tmpdir, a_outdir_seis,
                            a_outdir, a_logdir],
                           print_cmd=False)
        # Copy stations
        cmd = "cp %s %s" % (os.path.join(refdir, self.stations), a_indir)
        bband_utils.runprog(cmd, print_cmd=False)
        # Copy src file
        cmd = "cp %s %s" % (os.path.join(refdir, self.srcfile), a_indir)
        bband_utils.runprog(cmd, print_cmd=False)
        for i in range(1, 6):
            # Copy sample calculated seismograms and response files
            cmd = "cp %s %s" % (os.path.join(refdir,
                                             "s%02d.merged.bbp" % (i)),
                                os.path.join(a_outdir,
                                             "%d.s%02d.vel.bbp" %
                                             (self.sim_id, i)))
            bband_utils.runprog(cmd, print_cmd=False)
            cmd = "cp %s %s" % (os.path.join(refdir,
                                             "s%02d.rd50" % (i)),
                                os.path.join(a_outdir,
                                             "%d.s%02d.rd50" %
                                             (self.sim_id, i)))
            bband_utils.runprog(cmd, print_cmd=False)
            # Copy sample observed seismograms and response files
            cmd = "cp %s %s" % (os.path.join(refdir,
                                             "s%02d.merged.bbp" % (i)),
                                os.path.join(a_outdir_seis,
                                             "s%02d.bbp" % (i)))
            bband_utils.runprog(cmd, print_cmd=False)
            cmd = "cp %s %s" % (os.path.join(refdir,
                                             "s%02d.rd50" % (i)),
                                os.path.join(a_outdir_seis,
                                             "s%02d.rd50" % (i)))
            bband_utils.runprog(cmd, print_cmd=False)
    def test_gof(self):
        """
        Test GP GOF Code: run GPGof for event "NR" and compare every
        generated resid/bias/m90/p90/sigma/sigma0 file against the
        checked-in reference outputs.
        NOTE(review): self.failIf is a long-deprecated alias of
        assertFalse (removed in newer Pythons) — consider migrating.
        """
        gof_obj = GPGof(self.srcfile, self.stations,
                        "NR", 25, sim_id=self.sim_id)
        gof_obj.run()
        resid_ref_file = os.path.join(self.install.A_TEST_REF_DIR,
                                      "gp", "GoF", "nr-rd50-resid.txt")
        resid_file = os.path.join(self.install.A_OUT_DATA_DIR,
                                  str(self.sim_id),
                                  "NR-%d.rd50-resid.txt" % (self.sim_id))
        self.failIf(cmp_bbp.cmp_resid(resid_ref_file,
                                      resid_file,
                                      tolerance=0.005) != 0,
                    "output resid file %s does not match reference resid file %s" %
                    (resid_file, resid_ref_file))
        a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
        # Compare the GoF statistics files for each spectral component.
        for comp in ['psa5e', 'psa5n', 'rotd50']:
            bias_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
                                         "GoF", "nr_r0-25-rd50-%s.bias" % (comp))
            m90_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
                                        "GoF", "nr_r0-25-rd50-%s.m90" % (comp))
            p90_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
                                        "GoF", "nr_r0-25-rd50-%s.p90" % (comp))
            sigma_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
                                          "GoF", "nr_r0-25-rd50-%s.sigma" % (comp))
            sigma0_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
                                           "GoF", "nr_r0-25-rd50-%s.sigma0" % (comp))
            bias_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.bias" % (self.sim_id, comp))
            m90_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.m90" % (self.sim_id, comp))
            p90_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.p90" % (self.sim_id, comp))
            sigma_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.sigma" % (self.sim_id, comp))
            sigma0_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.sigma0" % (self.sim_id, comp))
            self.failIf(cmp_bbp.cmp_bias(bias_ref_file, bias_file) != 0,
                        "output bias file %s does not match reference bias file %s" %
                        (bias_file, bias_ref_file))
            self.failIf(cmp_bbp.cmp_bias(m90_ref_file, m90_file) != 0,
                        "output m90 file %s does not match reference m90 file %s" %
                        (m90_file, m90_ref_file))
            self.failIf(cmp_bbp.cmp_bias(p90_ref_file, p90_file, tolerance=0.0025) != 0,
                        "output p90 file %s does not match reference p90 file %s" %
                        (p90_file, p90_ref_file))
            self.failIf(cmp_bbp.cmp_bias(sigma_ref_file, sigma_file) != 0,
                        "output sigma file %s does not match reference sigma file %s" %
                        (sigma_file, sigma_ref_file))
            self.failIf(cmp_bbp.cmp_bias(sigma0_ref_file, sigma0_file) != 0,
                        "output sigma0 file %s does not match reference sigma0 file %s" %
                        (sigma0_file, sigma0_ref_file))
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(TestGPGof)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
[
"fsilva@usc.edu"
] |
fsilva@usc.edu
|
b55110e630e67fe860d2037fa0094925bbf45e28
|
64a8dcfa0a98c665b8b2ac796ed7231135977d9a
|
/fluent_contents/plugins/googledocsviewer/content_plugins.py
|
cf330dfc9bf3f2783a245dbbb6a29a3f46c583a2
|
[
"Apache-2.0"
] |
permissive
|
jpotterm/django-fluent-contents
|
8bc70c9f0309bfeeb3c1e7a96c0687c7070e48ed
|
e617815874c936af1e00a8bfb79a4c8fc9a40cbb
|
refs/heads/master
| 2021-01-18T06:56:15.737257
| 2015-09-09T15:40:43
| 2015-09-09T15:40:43
| 30,092,032
| 0
| 0
| null | 2015-01-30T21:42:42
| 2015-01-30T21:42:42
| null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
"""
Google apps widgets for your site.
"""
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.googledocsviewer.models import GoogleDocsViewerItem
@plugin_pool.register
class GoogleDocsViewerPlugin(ContentPlugin):
    """
    Plugin to add a Google Docs viewer to the page.
    This can be used to display a PDF file inline.
    Note then when using the Google Docs viewer on your site,
    Google assumes you agree with the Terms of Service,
    see: https://docs.google.com/viewer/TOS
    """
    model = GoogleDocsViewerItem
    category = _('Media')
    def render(self, request, instance, **kwargs):
        """Render the item as an <iframe> embedding the Google Docs viewer."""
        # urlquote with safe='' escapes every reserved character so the
        # document URL survives as a query-string value.
        url = 'http://docs.google.com/viewer?url={url}&embedded=true'.format(url=urlquote(instance.url, ''))
        # src is escape()d; width/height come straight from the model —
        # NOTE(review): presumably numeric fields (not escaped here), verify.
        return mark_safe(u'<iframe class="googledocsviewer" src="{src}" width="{width}" height="{height}"></iframe>'.format(
            src=escape(url),
            width=instance.width,
            height=instance.height
        ))
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
5aec7d812d592d9d37ec207d0ed43b8a3d21710a
|
58e51a01846176168bcd83175f63b240cd6db916
|
/apps/news/models.py
|
070e6feb4346a30d22e857ca08a87b62ba33f7b9
|
[] |
no_license
|
reddytocode/projectX-backend
|
7e79795cd0c6951ca39f93e316af7a61b631940d
|
05fb5f6e14889ecac94ad52c13796eb14c59814d
|
refs/heads/main
| 2023-07-18T06:37:16.298857
| 2021-06-30T20:29:33
| 2021-06-30T20:29:33
| 380,374,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from django.db import models
class News(models.Model):
    """A news item; guid/link fields suggest entries ingested from a feed
    (confirm against the ingestion code)."""
    # Todo: inherit
    guid = models.TextField()  # source-provided unique identifier
    title = models.CharField(max_length=100, blank=False)
    link = models.CharField(max_length=200, blank=False)
    content = models.TextField()
    author = models.CharField(max_length=100, blank=False)
|
[
"aaabeeelooon@gmail.com"
] |
aaabeeelooon@gmail.com
|
21df715483d4461c101e7b8501691258ca4d040d
|
4ae34a5179d7adf1037eb9a3cb249f9a5c06684e
|
/examples/v1beta1/trial-images/darts-cnn-cifar10/model.py
|
dea7d43f84c6d761b0697b84b5ffd2adc18d9249
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/katib
|
367373c0452d49a7a115b86893f4dab9e1f278ea
|
e3e0aa24aeea1edfab0fd42f55392af651d2b3ae
|
refs/heads/master
| 2023-09-04T05:02:05.752156
| 2023-08-24T22:40:54
| 2023-08-24T22:40:54
| 127,941,481
| 1,385
| 422
|
Apache-2.0
| 2023-09-14T13:17:29
| 2018-04-03T17:07:12
|
Go
|
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
# Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import FactorizedReduce, StdConv, MixedOp
class Cell(nn.Module):
    """ Cell for search
    Each edge is mixed and continuous relaxed.
    """
    def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):
        """
        Args:
            num_nodes: Number of intermediate cell nodes
            c_prev_prev: channels_out[k-2]
            c_prev : Channels_out[k-1]
            c_cur : Channels_in[k] (current)
            reduction_prev: flag for whether the previous cell is reduction cell or not
            reduction_cur: flag for whether the current cell is reduction cell or not
            search_space: provides the candidate operations for each MixedOp
        """
        super(Cell, self).__init__()
        self.reduction_cur = reduction_cur
        self.num_nodes = num_nodes
        # If previous cell is reduction cell, current input size does not match with
        # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)
        else:
            self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)
        self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)
        # Generate dag from mixed operations: dag_ops[i] holds the edges
        # feeding node i from the 2 inputs and all earlier nodes.
        self.dag_ops = nn.ModuleList()
        for i in range(self.num_nodes):
            self.dag_ops.append(nn.ModuleList())
            # Include 2 input nodes
            for j in range(2+i):
                # Reduction with stride = 2 must be only for the input node
                stride = 2 if reduction_cur and j < 2 else 1
                op = MixedOp(c_cur, stride, search_space)
                self.dag_ops[i].append(op)
    def forward(self, s0, s1, w_dag):
        """Run the cell.

        Args:
            s0: output of cell k-2
            s1: output of cell k-1
            w_dag: per-node lists of per-edge operation weights (presumably
                softmaxed architecture alphas — confirm with the caller)
        Returns:
            Channel-wise concatenation of all intermediate node outputs.
        """
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        # Each node sums its weighted mixed-op edges over all prior states.
        for edges, w_list in zip(self.dag_ops, w_dag):
            state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))
            states.append(state_cur)
        state_out = torch.cat(states[2:], dim=1)
        return state_out
class NetworkCNN(nn.Module):
    """Search network: a stem convolution followed by a stack of Cells,
    with learnable architecture parameters (alphas) shared across cells
    of the same type (normal vs. reduction)."""

    def __init__(self, init_channels, input_channels, num_classes,
                 num_layers, criterion, search_space, num_nodes, stem_multiplier):
        """
        Args:
            init_channels: base channel count for the first cell
            input_channels: number of channels of the input images
            num_classes: number of output classes
            num_layers: number of stacked cells
            criterion: loss function used by :meth:`loss`
            search_space: provides candidate primitives and genotype parsing
            num_nodes: intermediate nodes per cell
            stem_multiplier: channel multiplier for the stem convolution
        """
        super(NetworkCNN, self).__init__()
        self.init_channels = init_channels
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.criterion = criterion
        self.num_nodes = num_nodes
        self.stem_multiplier = stem_multiplier
        c_cur = self.stem_multiplier*self.init_channels
        self.stem = nn.Sequential(
            nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),
            nn.BatchNorm2d(c_cur)
        )
        # In first Cell stem is used for s0 and s1
        # c_prev_prev and c_prev - output channels size
        # c_cur - init channels size
        c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(self.num_layers):
            # For Network with 1 layer: Only Normal Cell
            if self.num_layers == 1:
                reduction_cur = False
            else:
                # For Network with two layers: First layer - Normal, Second - Reduction
                # For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels
                # Others - Normal cell
                if ((self.num_layers == 2 and i == 1) or
                        (self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):
                    c_cur *= 2
                    reduction_cur = True
                else:
                    reduction_cur = False
            cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)
            reduction_prev = reduction_cur
            self.cells.append(cell)
            # A cell outputs num_nodes concatenated feature maps of c_cur channels.
            c_cur_out = c_cur * self.num_nodes
            c_prev_prev, c_prev = c_prev, c_cur_out
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(c_prev, self.num_classes)
        # Initialize alphas parameters: one (i+2, num_ops) matrix per
        # intermediate node; reduction alphas only exist for multi-layer nets.
        num_ops = len(search_space.primitives)
        self.alpha_normal = nn.ParameterList()
        self.alpha_reduce = nn.ParameterList()
        for i in range(self.num_nodes):
            self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
            if self.num_layers > 1:
                self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
        # Setup alphas list: (name, parameter) pairs selected by name.
        self.alphas = []
        for name, parameter in self.named_parameters():
            if "alpha" in name:
                self.alphas.append((name, parameter))

    def forward(self, x):
        """Classify `x`; alphas are softmax-relaxed into edge weights."""
        weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]
        weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]
        s0 = s1 = self.stem(x)
        for cell in self.cells:
            weights = weights_reduce if cell.reduction_cur else weights_normal
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.global_pooling(s1)
        # Make out flatten
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return logits

    def print_alphas(self):
        """Print the softmax-normalized architecture weights."""
        print("\n>>> Alphas Normal <<<")
        for alpha in self.alpha_normal:
            print(F.softmax(alpha, dim=-1))
        if self.num_layers > 1:
            print("\n>>> Alpha Reduce <<<")
            for alpha in self.alpha_reduce:
                print(F.softmax(alpha, dim=-1))
        print("\n")

    def getWeights(self):
        # All parameters, including alphas (used by the weight optimizer).
        return self.parameters()

    def getAlphas(self):
        # Yield only the architecture parameters.
        for _, parameter in self.alphas:
            yield parameter

    def loss(self, x, y):
        """Return criterion(forward(x), y)."""
        logits = self.forward(x)
        return self.criterion(logits, y)

    def genotype(self, search_space):
        """Discretize alphas into a genotype via the search space parser."""
        gene_normal = search_space.parse(self.alpha_normal, k=2)
        gene_reduce = search_space.parse(self.alpha_reduce, k=2)
        # concat all intermediate nodes
        concat = range(2, 2 + self.num_nodes)
        return search_space.genotype(normal=gene_normal, normal_concat=concat,
                                     reduce=gene_reduce, reduce_concat=concat)
|
[
"noreply@github.com"
] |
kubeflow.noreply@github.com
|
8d0fe3775c506e3d3501551bd2693ec99edb0d39
|
fe19d2fac4580d463132e61509bd6e3cc2cf958d
|
/toontown/parties/DistributedPartyFireworksActivity.py
|
cbe6cf69ed730e33a04c428825cbb5fd222fffe2
|
[] |
no_license
|
t00nt0wn1dk/c0d3
|
3e6db6dd42c3aa36ad77709cf9016176a3f3a44f
|
7de105d7f3de0f8704b020e32fd063ee2fad8d0d
|
refs/heads/master
| 2021-01-01T16:00:15.367822
| 2015-03-21T21:25:52
| 2015-03-21T21:25:55
| 32,647,654
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,941
|
py
|
# 2013.08.22 22:23:22 Pacific Daylight Time
# Embedded file name: toontown.parties.DistributedPartyFireworksActivity
from pandac.PandaModules import Vec3
from pandac.PandaModules import OmniBoundingVolume
from pandac.PandaModules import AlphaTestAttrib
from pandac.PandaModules import RenderAttrib
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from toontown.effects.FireworkShowMixin import FireworkShowMixin
from toontown.effects.RocketExplosion import RocketExplosion
from toontown.toonbase import TTLocalizer
from PartyGlobals import FireworkShows
from PartyGlobals import ActivityIds
from PartyGlobals import ActivityTypes
from PartyGlobals import FireworksStartedEvent
from PartyGlobals import FireworksFinishedEvent
from PartyGlobals import FireworksPostLaunchDelay
from PartyGlobals import RocketSoundDelay
from PartyGlobals import RocketDirectionDelay
from DistributedPartyActivity import DistributedPartyActivity
from activityFSMs import FireworksActivityFSM
import PartyGlobals
class DistributedPartyFireworksActivity(DistributedPartyActivity, FireworkShowMixin):
    """Client-side party activity: the party host pulls a lever to launch
    a rocket, which kicks off a fireworks show for everyone present.

    State is driven by a FireworksActivityFSM (Idle -> Active -> Disabled).
    """
    __module__ = __name__
    notify = directNotify.newCategory('DistributedPartyFireworksActivity')

    def __init__(self, cr):
        DistributedPartyFireworksActivity.notify.debug('__init__')
        DistributedPartyActivity.__init__(self, cr, ActivityIds.PartyFireworks, ActivityTypes.HostInitiated, wantLever=True)
        FireworkShowMixin.__init__(self, restorePlaygroundMusic=True, startDelay=FireworksPostLaunchDelay)

    def setEventId(self, eventId):
        # Distributed setter: which firework show event to play.
        DistributedPartyFireworksActivity.notify.debug('setEventId( %s )' % FireworkShows.getString(eventId))
        self.eventId = eventId

    def setShowStyle(self, showStyle):
        # Distributed setter: visual style index of the show.
        DistributedPartyFireworksActivity.notify.debug('setShowStyle( %d )' % showStyle)
        self.showStyle = showStyle

    def load(self):
        """Load models, sounds and the FSM; called once on activity setup."""
        DistributedPartyFireworksActivity.notify.debug('load')
        DistributedPartyActivity.load(self)
        self.eventId = PartyGlobals.FireworkShows.Summer
        self.launchPadModel = loader.loadModel('phase_13/models/parties/launchPad')
        self.launchPadModel.setH(90.0)
        self.launchPadModel.setPos(0.0, -18.0, 0.0)
        self.launchPadModel.reparentTo(self.root)
        # Alpha-test the railings so their cutout texture renders correctly.
        railingsCollection = self.launchPadModel.findAllMatches('**/launchPad_mesh/*railing*')
        for i in range(railingsCollection.getNumPaths()):
            railingsCollection[i].setAttrib(AlphaTestAttrib.make(RenderAttrib.MGreater, 0.75))
        # self.lever / self.sign are created by the base class (wantLever=True)
        # — presumably in DistributedPartyActivity.load; confirm.
        leverLocator = self.launchPadModel.find('**/RocketLever_locator')
        self.lever.setPosHpr(Vec3.zero(), Vec3.zero())
        self.lever.reparentTo(leverLocator)
        self.toonPullingLeverInterval = None
        self.sign.reparentTo(self.launchPadModel.find('**/launchPad_sign_locator'))
        self.rocketActor = Actor('phase_13/models/parties/rocket_model', {'launch': 'phase_13/models/parties/rocket_launch'})
        rocketLocator = self.launchPadModel.find('**/rocket_locator')
        self.rocketActor.reparentTo(rocketLocator)
        # Never cull the rocket; its animation can move it outside its bounds.
        self.rocketActor.node().setBound(OmniBoundingVolume())
        self.rocketActor.node().setFinal(True)
        effectsLocator = self.rocketActor.find('**/joint1')
        self.rocketExplosionEffect = RocketExplosion(effectsLocator, rocketLocator)
        self.rocketParticleSeq = None
        self.launchSound = base.loadSfx('phase_13/audio/sfx/rocket_launch.mp3')
        self.activityFSM = FireworksActivityFSM(self)
        self.activityFSM.request('Idle')
        return

    def unload(self):
        """Tear down everything created in load(); stop pending tasks."""
        DistributedPartyFireworksActivity.notify.debug('unload')
        taskMgr.remove(self.taskName('delayedStartShow'))
        if self.rocketParticleSeq:
            self.rocketParticleSeq.pause()
            self.rocketParticleSeq = None
        self.launchPadModel.removeNode()
        del self.launchPadModel
        del self.toonPullingLeverInterval
        self.rocketActor.delete()
        self.rocketExplosionEffect.destroy()
        self.activityFSM.request('Disabled')
        del self.rocketActor
        del self.launchSound
        del self.activityFSM
        del self.eventId
        del self.showStyle
        DistributedPartyActivity.unload(self)
        return

    def _leverPulled(self, collEntry):
        """Collision callback: only the host may start the show, and only
        from the Idle state; everyone else gets an explanatory message."""
        DistributedPartyFireworksActivity.notify.debug('_leverPulled')
        hostPulledLever = DistributedPartyActivity._leverPulled(self, collEntry)
        if self.activityFSM.getCurrentOrNextState() == 'Active':
            self.showMessage(TTLocalizer.PartyFireworksAlreadyActive)
        elif self.activityFSM.getCurrentOrNextState() == 'Disabled':
            self.showMessage(TTLocalizer.PartyFireworksAlreadyDone)
        elif self.activityFSM.getCurrentOrNextState() == 'Idle':
            if hostPulledLever:
                # Play the lever-pull animation, then request to join (which
                # triggers the server-side launch) and return control.
                base.cr.playGame.getPlace().fsm.request('activity')
                self.toonPullingLeverInterval = self.getToonPullingLeverInterval(base.localAvatar)
                self.toonPullingLeverInterval.append(Func(self.d_toonJoinRequest))
                self.toonPullingLeverInterval.append(Func(base.cr.playGame.getPlace().fsm.request, 'walk'))
                self.toonPullingLeverInterval.start()
            else:
                self.showMessage(TTLocalizer.PartyOnlyHostLeverPull)

    def setState(self, newState, timestamp):
        # Distributed state update; Active carries the show-start timestamp
        # so late joiners can sync into the middle of the show.
        DistributedPartyFireworksActivity.notify.debug('setState( newState=%s, ... )' % newState)
        DistributedPartyActivity.setState(self, newState, timestamp)
        if newState == 'Active':
            self.activityFSM.request(newState, timestamp)
        else:
            self.activityFSM.request(newState)

    def startIdle(self):
        # FSM enter: nothing to do while waiting for the host.
        DistributedPartyFireworksActivity.notify.debug('startIdle')

    def finishIdle(self):
        # FSM exit: nothing to clean up.
        DistributedPartyFireworksActivity.notify.debug('finishIdle')

    def startActive(self, showStartTimestamp):
        """FSM enter: play the rocket launch (or skip straight to the show
        if we joined after the launch animation already finished)."""
        DistributedPartyFireworksActivity.notify.debug('startActive')
        messenger.send(FireworksStartedEvent)
        timeSinceStart = globalClockDelta.localElapsedTime(showStartTimestamp)
        if timeSinceStart > self.rocketActor.getDuration('launch'):
            # Late joiner: rocket already gone, start the show directly.
            self.rocketActor.hide()
            self.startShow(self.eventId, self.showStyle, showStartTimestamp)
        else:
            self.rocketActor.play('launch')
            self.rocketParticleSeq = Sequence(Wait(RocketSoundDelay), Func(base.playSfx, self.launchSound), Func(self.rocketExplosionEffect.start), Wait(RocketDirectionDelay), LerpHprInterval(self.rocketActor, 4.0, Vec3(0, 0, -60)), Func(self.rocketExplosionEffect.end), Func(self.rocketActor.hide))
            self.rocketParticleSeq.start()
            taskMgr.doMethodLater(FireworksPostLaunchDelay, self.startShow, self.taskName('delayedStartShow'), extraArgs=[self.eventId,
             self.showStyle,
             showStartTimestamp,
             self.root])

    def finishActive(self):
        # FSM exit: stop the show and any pending delayed-start task.
        self.rocketParticleSeq = None
        DistributedPartyFireworksActivity.notify.debug('finishActive')
        messenger.send(FireworksFinishedEvent)
        taskMgr.remove(self.taskName('delayedStartShow'))
        FireworkShowMixin.disable(self)
        return

    def startDisabled(self):
        # FSM enter: show is over for good; hide the rocket if still loaded.
        DistributedPartyFireworksActivity.notify.debug('startDisabled')
        if not self.rocketActor.isEmpty():
            self.rocketActor.hide()

    def finishDisabled(self):
        DistributedPartyFireworksActivity.notify.debug('finishDisabled')

    def handleToonDisabled(self, toonId):
        # Stub: no per-toon cleanup implemented for this activity.
        self.notify.warning('handleToonDisabled no implementation yet')
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\parties\DistributedPartyFireworksActivity.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:23:22 Pacific Daylight Time
|
[
"anonymoustoontown@gmail.com"
] |
anonymoustoontown@gmail.com
|
eee3ed0cdfe623261a833b1da967a5cb2705933d
|
30a61c74d5108279af498a181ebc83151f13c033
|
/one_hundred_eighteen.py
|
30b365d21d51a1e5aa523f67b58d17a91ffc45e6
|
[] |
no_license
|
Yanl05/LeetCode
|
274267cb189813c96fff67d8cbfba4afebd5c2b2
|
c0807a7f31a265b3090ef3d32a0ad5a2b10579f7
|
refs/heads/master
| 2020-03-15T12:25:16.456742
| 2020-01-20T01:12:35
| 2020-01-20T01:12:35
| 132,143,417
| 0
| 0
| null | 2018-12-21T03:01:32
| 2018-05-04T13:25:24
|
Python
|
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding: UTF-8 -*-
"""
# @Time : 2019-08-28 16:33
# @Author : yanlei
# @FileName: one_hundred_eighteen.py
给定一个非负整数 numRows,生成杨辉三角的前 numRows 行。
在杨辉三角中,每个数是它左上方和右上方的数的和。
"""
class Solution(object):
    def generate(self, numRows):
        """
        :type numRows: int
        :rtype: List[List[int]]

        Build the first numRows rows of Pascal's triangle. Each row starts
        and ends with 1; every interior entry is the sum of the two entries
        above it in the previous row.
        """
        triangle = []
        for row_idx in range(numRows):
            # Start with all ones; only interior cells need recomputing.
            row = [1] * (row_idx + 1)
            for col in range(1, row_idx):
                prev = triangle[row_idx - 1]
                row[col] = prev[col - 1] + prev[col]
            triangle.append(row)
        return triangle
# Demo: print the first five rows of the triangle when the module runs.
print(Solution().generate(5))
|
[
"756593069@qq.com"
] |
756593069@qq.com
|
83f6803e4b1251b1ff5c7750399269d0197edd3d
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2449486_0/Python/ynasu/B.py
|
2540035aa29eb33019f7839e601b4e6d8c3a9332
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python
import sys
# Answer strings emitted in the judge's expected format.
yes = "YES"
no = "NO"
def solve(heights):
    """Return `yes` iff every cell of the grid is a maximum of its row
    or a maximum of its column, else `no`.

    (Python 2 code: uses xrange.) For each cell we first test whether any
    cell in the same column is strictly taller; if so, we require that no
    cell in the same row is strictly taller. A cell dominated in both its
    row and its column makes the grid infeasible.
    """
    N = len(heights)      # number of rows
    M = len(heights[0])   # number of columns (assumes at least one row)
    for y in xrange(N):
        for x in xrange(M):
            # Is heights[y][x] a maximum of its column?
            possible = True
            for y1 in xrange(N):
                if heights[y1][x] > heights[y][x]:
                    possible = False
            if possible:
                continue
            # Not a column maximum: must be a maximum of its row.
            possible = True
            for x1 in xrange(M):
                if heights[y][x1] > heights[y][x]:
                    possible = False
            if possible:
                continue
            # Dominated in both directions: infeasible.
            return no
    return yes
def readInts():
    """Read one whitespace-separated line of integers from stdin."""
    return [ int(s) for s in sys.stdin.readline().split() ]

# Driver (Python 2 print statement): first line is the number of test
# cases T; each case starts with a line whose first value is the row
# count N, followed by N rows of heights.
T = int(sys.stdin.readline())
for t in xrange(T):
    inputs = readInts()
    N = inputs[0]
    heights = []
    for i in xrange(N):
        heights.append(readInts())
    res = solve(heights)
    print "Case #%d: %s" % (t + 1, res)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
f102adefa4347ff2438caea310576abfc8cc8e52
|
27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f
|
/tests/unit/modules/network/iosxr/test_iosxr_netconf.py
|
fbb7819e173700b0821c2a564cbc459b04350fe6
|
[] |
no_license
|
coll-test/notstdlib.moveitallout
|
eb33a560070bbded5032385d0aea2f3cf60e690b
|
0987f099b783c6cf977db9233e1c3d9efcbcb3c7
|
refs/heads/master
| 2020-12-19T22:28:33.369557
| 2020-01-23T18:51:26
| 2020-01-23T18:51:26
| 235,865,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,632
|
py
|
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch
from ansible_collections.notstdlib.moveitallout.plugins.modules import iosxr_netconf
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import set_module_args
from ..iosxr_module import TestIosxrModule
class TestIosxrNetconfModule(TestIosxrModule):
    """Unit tests for the iosxr_netconf module: each test stubs the device
    config via get_config/load_config mocks and asserts the commands the
    module would push."""
    module = iosxr_netconf

    def setUp(self):
        # Patch the module's config I/O so no real device is contacted.
        super(TestIosxrNetconfModule, self).setUp()
        self.mock_get_config = patch('ansible_collections.notstdlib.moveitallout.plugins.modules.iosxr_netconf.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible_collections.notstdlib.moveitallout.plugins.modules.iosxr_netconf.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestIosxrNetconfModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def test_iosxr_disable_netconf_service(self):
        # Device currently has netconf enabled; state=absent must remove it.
        self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=830, netconf_vrf='default', state='absent'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['no netconf-yang agent ssh', 'no ssh server netconf port 830', 'no ssh server netconf vrf default'])

    def test_iosxr_enable_netconf_service(self):
        # Empty device config; state=present must add the full netconf setup.
        self.get_config.return_value = ''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=830, netconf_vrf='default', state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['netconf-yang agent ssh', 'ssh server netconf port 830', 'ssh server netconf vrf default'])

    def test_iosxr_change_netconf_port(self):
        # Netconf already enabled; only the port command should be emitted.
        self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=9000, state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['ssh server netconf port 9000'])

    def test_iosxr_change_netconf_vrf(self):
        # Netconf already enabled; only the vrf command should be emitted.
        self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_vrf='new_default', state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['ssh server netconf vrf new_default'])
|
[
"wk@sydorenko.org.ua"
] |
wk@sydorenko.org.ua
|
efe05d84e80b3c205eaad6cbb005cf317016866d
|
5f6cd57c692191acfea18c1af9d87e7db8e873f1
|
/devito/ir/iet/scheduler.py
|
7f2bf50f2985af84bbae530639c9d5aeae566821
|
[
"MIT"
] |
permissive
|
Antongk/devito
|
684b0f4928d47bb6acc6469cc0471b5122c34561
|
a50c0a8337497cf6d7603cf9beff4a3231e63bee
|
refs/heads/master
| 2020-04-19T17:00:18.380108
| 2019-01-30T09:16:38
| 2019-01-30T09:16:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,330
|
py
|
from collections import OrderedDict
from devito.cgen_utils import Allocator
from devito.ir.iet import (Expression, Increment, LocalExpression, Element, Iteration,
List, Conditional, Section, HaloSpot, ExpressionBundle,
MapExpressions, Transformer, FindNodes, FindSymbols, XSubs,
iet_analyze, filter_iterations)
from devito.symbolics import IntDiv, xreplace_indices
from devito.tools import as_mapper
from devito.types import ConditionalDimension
__all__ = ['iet_build', 'iet_insert_C_decls']
def iet_build(stree):
    """
    Create an Iteration/Expression tree (IET) from a :class:`ScheduleTree`.

    The nodes of the returned IET are decorated with properties derived
    from data dependence analysis, and all DerivedDimensions have been
    lowered to plain Dimensions or Symbols.
    """
    # Pipeline: schedule tree -> raw IET -> dependence-analyzed IET
    # -> IET with lowered dimensions.
    analyzed = iet_analyze(iet_make(stree))
    return iet_lower_dimensions(analyzed)
def iet_make(stree):
    """Create an IET from a :class:`ScheduleTree`.

    Visits the schedule tree bottom-up, translating each node kind into its
    IET counterpart and accumulating children in `queues`, keyed by parent.
    Returns from inside the loop when the root is reached.
    """
    nsections = 0
    # Maps each schedule-tree node to the IET nodes built for its children.
    queues = OrderedDict()
    for i in stree.visit():
        if i == stree:
            # We hit this handle at the very end of the visit
            return List(body=queues.pop(i))
        elif i.is_Exprs:
            exprs = [Increment(e) if e.is_Increment else Expression(e) for e in i.exprs]
            body = [ExpressionBundle(i.shape, i.ops, i.traffic, body=exprs)]
        elif i.is_Conditional:
            body = [Conditional(i.guard, queues.pop(i))]
        elif i.is_Iteration:
            # Order to ensure deterministic code generation
            uindices = sorted(i.sub_iterators, key=lambda d: d.name)
            # Generate Iteration
            body = [Iteration(queues.pop(i), i.dim, i.dim._limits, offsets=i.limits,
                              direction=i.direction, uindices=uindices)]
        elif i.is_Section:
            body = [Section('section%d' % nsections, body=queues.pop(i))]
            nsections += 1
        elif i.is_Halo:
            body = [HaloSpot(hs) for hs in i.halo_scheme.components] + queues.pop(i)
        # Hand the built nodes up to the parent's queue.
        queues.setdefault(i.parent, []).extend(body)
    # Unreachable: the visit always ends at the tree root, which returns above.
    assert False
def iet_lower_dimensions(iet):
    """
    Replace all :class:`DerivedDimension`s within the ``iet``'s expressions with
    lower-level symbolic objects (other :class:`Dimension`s, or :class:`sympy.Symbol`).

        * Array indices involving :class:`SteppingDimension`s are turned into
          :class:`ModuloDimension`s.
          Example: ``u[t+1, x] = u[t, x] + 1 >>> u[t1, x] = u[t0, x] + 1``

        * Array indices involving :class:`ConditionalDimension`s used are turned into
          integer-division expressions.
          Example: ``u[t_sub, x] = u[time, x] >>> u[time / 4, x] = u[time, x]``
    """
    # Lower SteppingDimensions
    for i in FindNodes(Iteration).visit(iet):
        if not i.uindices:
            # Be quick: avoid uselessy reconstructing nodes
            continue
        # In an expression, there could be `u[t+1, ...]` and `v[t+1, ...]`, where
        # `u` and `v` are TimeFunction with circular time buffers (save=None) *but*
        # different modulo extent. The `t+1` indices above are therefore conceptually
        # different, so they will be replaced with the proper ModuloDimension through
        # two different calls to `xreplace`
        groups = as_mapper(i.uindices, lambda d: d.modulo)
        for k, v in groups.items():
            # Substitute only indices belonging to TimeFunctions whose time
            # buffer size matches this group's modulo.
            mapper = {d.origin: d for d in v}
            rule = lambda i: i.function.is_TimeFunction and i.function._time_size == k
            replacer = lambda i: xreplace_indices(i, mapper, rule)
            iet = XSubs(replacer=replacer).visit(iet)

    # Lower ConditionalDimensions
    cdims = [d for d in FindSymbols('free-symbols').visit(iet)
             if isinstance(d, ConditionalDimension)]
    mapper = {d: IntDiv(d.index, d.factor) for d in cdims}
    iet = XSubs(mapper).visit(iet)

    return iet
def iet_insert_C_decls(iet, external=None):
    """
    Given an IET, build a new tree with the necessary symbol declarations.
    Declarations are placed as close as possible to the first symbol occurrence.

    Parameters
    ----------
    iet : Node
        The input Iteration/Expression tree.
    external : tuple, optional
        The symbols defined in some outer Callable, which therefore must not
        be re-defined.
    """
    external = external or []

    # Classify and then schedule declarations to stack/heap
    allocator = Allocator()
    mapper = OrderedDict()
    for k, v in MapExpressions().visit(iet).items():
        if k.is_Expression:
            if k.is_scalar_assign:
                # Inline declaration
                mapper[k] = LocalExpression(**k.args)
                continue
            objs = [k.write]
        elif k.is_Call:
            objs = k.params
        # NOTE(review): if `k` is neither an Expression nor a Call, `objs`
        # from the previous iteration is reused — presumably MapExpressions
        # only yields Expressions and Calls; confirm.
        for i in objs:
            try:
                if i.is_LocalObject:
                    # On the stack
                    site = v[-1] if v else iet
                    allocator.push_stack(site, i)
                elif i.is_Array:
                    if i in external:
                        # The Array is to be defined in some foreign IET
                        continue
                    elif i._mem_stack:
                        # On the stack, at the innermost non-parallel Iteration
                        key = lambda i: not i.is_Parallel
                        site = filter_iterations(v, key=key, stop='asap') or [iet]
                        allocator.push_stack(site[-1], i)
                    else:
                        # On the heap, as a tensor that must be globally accessible
                        allocator.push_heap(i)
            except AttributeError:
                # E.g., a generic SymPy expression
                pass

    # Introduce declarations on the stack
    for k, v in allocator.onstack:
        mapper[k] = tuple(Element(i) for i in v)
    iet = Transformer(mapper, nested=True).visit(iet)

    # Introduce declarations on the heap (if any)
    if allocator.onheap:
        decls, allocs, frees = zip(*allocator.onheap)
        iet = List(header=decls + allocs, body=iet, footer=frees)

    return iet
|
[
"f.luporini12@imperial.ac.uk"
] |
f.luporini12@imperial.ac.uk
|
e1816c2710b111f0d03effc960902ea50e60d696
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/experiments/murtaza/multiworld/camera_ready/pusher/offline_vae_twin_sac.py
|
621da28adc1a2950fe2cadb66df88816f479d492
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,361
|
py
|
import railrl.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v1
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import grill_her_td3_full_experiment, grill_her_twin_sac_full_experiment
if __name__ == "__main__":
    # Experiment configuration: RIG/HER with twin-SAC on a Sawyer pusher
    # environment, with a VAE trained offline on oracle-generated images.
    variant = dict(
        imsize=48,
        init_camera=sawyer_pusher_camera_upright_v1,
        env_id='SawyerPushAndReachEnvEasy-v0',
        grill_variant=dict(
            save_video=True,
            save_video_period=100,
            qf_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            policy_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            vf_kwargs=dict(
                hidden_sizes=[400, 300],
            ),
            algo_kwargs=dict(
                base_kwargs=dict(
                    num_epochs=1005,
                    num_steps_per_epoch=1000,
                    num_steps_per_eval=1000,
                    min_num_steps_before_training=4000,
                    batch_size=128,
                    max_path_length=100,
                    discount=0.99,
                    num_updates_per_env_step=1,
                    collection_mode='online-parallel',
                    parallel_env_params=dict(
                        num_workers=1,
                    ),
                    reward_scale=1,
                ),
                her_kwargs=dict(
                ),
                twin_sac_kwargs=dict(
                    train_policy_with_reparameterization=True,
                    soft_target_tau=1e-3,  # 1e-2
                    policy_update_period=1,
                    target_update_period=1,  # 1
                    use_automatic_entropy_tuning=True,
                ),
            ),
            replay_buffer_kwargs=dict(
                max_size=int(1e6),
                fraction_goals_are_rollout_goals=0.5,
                fraction_resampled_goals_are_env_goals=0.5,
            ),
            algorithm='RIG-HER-TD3',
            normalize=False,
            render=False,
            exploration_noise=0,
            exploration_type='ou',
            training_mode='train',
            testing_mode='test',
            reward_params=dict(
                type='latent_distance',
            ),
            observation_key='latent_observation',
            desired_goal_key='latent_desired_goal',
            vae_wrapped_env_kwargs=dict(
                sample_from_true_prior=True,
            )
        ),
        train_vae_variant=dict(
            vae_path=None,
            representation_size=16,
            beta=2.5,
            num_epochs=1000,
            dump_skew_debug_plots=False,
            generate_vae_dataset_kwargs=dict(
                test_p=.9,
                N=5000,
                oracle_dataset_using_set_to_goal=True,
                use_cached=True,
                vae_dataset_specific_kwargs=dict(
                ),
                show=False,
            ),
            vae_kwargs=dict(
                input_channels=3,
            ),
            algo_kwargs=dict(
                do_scatterplot=False,
                use_linear_dynamics=False,
                is_auto_encoder=False,
                batch_size=64,
                lr=1e-3,
            ),
            decoder_activation='sigmoid',
            save_period=100,
        ),
    )

    # Hyperparameter sweep: exploration noise x environment variant.
    # Note `env_id` here overrides the top-level default above.
    search_space = {
        'grill_variant.exploration_noise': [0, .1, .3],
        'env_id': ['SawyerPushAndReachSmallArenaEnv-v0', 'SawyerPushAndReachSmallArenaResetFreeEnv-v0', 'SawyerPushAndReachEnvEasy-v0']
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )

    # n_seeds = 1
    # mode = 'local'
    # exp_prefix = 'test'

    n_seeds = 3
    mode = 'gcp'
    exp_prefix = 'sawyer_pusher_offline_vae_twin_sac_easier_envs'

    # Launch every hyperparameter combination with n_seeds random seeds each.
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        for _ in range(n_seeds):
            run_experiment(
                grill_her_twin_sac_full_experiment,
                exp_prefix=exp_prefix,
                mode=mode,
                variant=variant,
                use_gpu=True,
                num_exps_per_instance=1,
                gcp_kwargs=dict(
                    zone='us-west2-b',
                )
            )
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
a86265864b4893259754738fe02624202d5d2073
|
eb166976684cf7c3e6ce613e17b270334bcdd837
|
/configs/DOTA/faster_rcnn_r50_fpn_1x_dota.py
|
7e905b2c0cc4fa43e1cd7846b589a845fdeb687c
|
[
"Apache-2.0"
] |
permissive
|
dlrudco/AerialDetection
|
d987c3a301737911e29197065da00c2b8230e423
|
e9381016113ddf77a09d07209341bb2208481850
|
refs/heads/master
| 2023-04-13T16:02:26.331775
| 2021-04-15T07:00:40
| 2021-04-15T07:00:40
| 290,456,718
| 0
| 0
|
Apache-2.0
| 2020-08-26T09:43:20
| 2020-08-26T09:43:19
| null |
UTF-8
|
Python
| false
| false
| 5,037
|
py
|
# model settings
# Faster R-CNN with ResNet-50 + FPN backbone (mmdetection-style dict config),
# trained on DOTA aerial imagery tiles.
model = dict(
    type='FasterRCNN',
    pretrained='modelzoo://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=16,  # presumably 15 DOTA categories + background — confirm
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssignerCy',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssignerCy',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        # score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=1000)
        # max_per_img raised to 2000: DOTA tiles can contain many small objects.
        score_thr = 0.05, nms = dict(type='nms', iou_thr=0.5), max_per_img = 2000)
    # soft-nms is also supported for rcnn testing
    # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
# DOTA images pre-cropped into 1024x1024 tiles (COCO-style annotations).
dataset_type = 'DOTADataset'
data_root = 'data/dota1_1024/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
        img_prefix=data_root + 'trainval1024/images/',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    # NOTE: val reuses the trainval split here (no held-out validation set).
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
        img_prefix=data_root + 'trainval1024/images',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'test1024/DOTA_test1024.json',
        img_prefix=data_root + 'test1024/images',
        img_scale=(1024, 1024),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: standard 1x schedule (12 epochs, steps at 8 and 11)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=12)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_dota'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"574258651@qq.com"
] |
574258651@qq.com
|
d2974097e777a4e406eabe78dffd4f18f27f2d4f
|
2500a2ab1f43c649fb0b4fe3b9e3420efa017efa
|
/MPK_mini/config.py
|
9bd08eee27ccd0fff60ef4fb0ba3349531d15488
|
[] |
no_license
|
cappytan3/AbletonLive9_RemoteScripts
|
0ce3e2d728190ba2ff5d2422cd03ae8a5df9d46f
|
65d08fd4ccdadd8366eca6f3c0fa7932516147bf
|
refs/heads/master
| 2021-01-15T11:50:14.152579
| 2014-04-11T17:37:22
| 2014-04-11T17:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
#Embedded file name: /Users/versonator/Hudson/live/Projects/AppLive/Resources/MIDI Remote Scripts/MPK_mini/config.py
from consts import *
# Ableton Live remote-script mapping tables for the Akai MPK mini.
# A value of -1 appears to mean "not mapped on this controller" — the MPK
# mini has no transport section; confirm against the consts module.
TRANSPORT_CONTROLS = {'STOP': -1,
 'PLAY': -1,
 'REC': -1,
 'LOOP': -1,
 'RWD': -1,
 'FFWD': -1}
# The eight endless encoders are assigned to device parameter control.
DEVICE_CONTROLS = (GENERIC_ENC1,
 GENERIC_ENC2,
 GENERIC_ENC3,
 GENERIC_ENC4,
 GENERIC_ENC5,
 GENERIC_ENC6,
 GENERIC_ENC7,
 GENERIC_ENC8)
VOLUME_CONTROLS = ((-1, -1),
 (-1, -1),
 (-1, -1),
 (-1, -1),
 (-1, -1),
 (-1, -1),
 (-1, -1),
 (-1, -1))
TRACKARM_CONTROLS = (-1, -1, -1, -1, -1, -1, -1, -1)
BANK_CONTROLS = {'TOGGLELOCK': -1,
 'BANKDIAL': -1,
 'NEXTBANK': -1,
 'PREVBANK': -1,
 'BANK1': -1,
 'BANK2': -1,
 'BANK3': -1,
 'BANK4': -1,
 'BANK5': -1,
 'BANK6': -1,
 'BANK7': -1,
 'BANK8': -1}
# Pad grid mapping: (column, row, MIDI note, channel) for the 4x4 pads.
PAD_TRANSLATION = ((0, 0, 36, 0),
 (1, 0, 37, 0),
 (2, 0, 38, 0),
 (3, 0, 39, 0),
 (0, 1, 32, 0),
 (1, 1, 33, 0),
 (2, 1, 34, 0),
 (3, 1, 35, 0),
 (0, 2, 48, 0),
 (1, 2, 49, 0),
 (2, 2, 50, 0),
 (3, 2, 51, 0),
 (0, 3, 44, 0),
 (1, 3, 45, 0),
 (2, 3, 46, 0),
 (3, 3, 47, 0))
# MIDI port names as exposed by the hardware; CHANNEL -1 presumably means
# "listen on all channels" — confirm against the framework.
CONTROLLER_DESCRIPTION = {'INPUTPORT': 'MPK mini',
 'OUTPUTPORT': 'MPK mini',
 'CHANNEL': -1,
 'PAD_TRANSLATION': PAD_TRANSLATION}
MIXER_OPTIONS = {'NUMSENDS': 2,
 'SEND1': (-1, -1, -1, -1, -1, -1, -1, -1),
 'SEND2': (-1, -1, -1, -1, -1, -1, -1, -1),
 'MASTERVOLUME': -1}
|
[
"julien@julienbayle.net"
] |
julien@julienbayle.net
|
745ce5c114180c8dfd4cfc9986795a097c916ddb
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200627213700.py
|
713340873e0307f55fcf3881bebea59a02cdfb52
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
# import xlml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
print(url)
yield scrapy.Request(url=url,callback=self.parse)
def parse(self, response):
soup = bs(response.text,'html.parser')
print(soup.text)
return soup
item = MaoyanspidersItem()
item['films_name'] = 'name'
item['release_time'] = "tiome"
return item
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
0eeceafe6472fa14ed5584e9b820ca5caf6bcd8f
|
f6af4dee160aed43afae42c6c7d92542265b26e7
|
/backend/views/product_views.py
|
9d4eaf4c842da2b6afc8b31ddc8c0a43395c8aa5
|
[] |
no_license
|
jasimdipu/django_1st_batch_final_project
|
8fcd0aad0fe9dc7922ea0692b24679546796748b
|
c75a80402967a6492433f75392dd0d8a20314601
|
refs/heads/main
| 2023-04-07T14:15:03.199353
| 2021-04-03T04:42:51
| 2021-04-03T04:42:51
| 353,403,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
from django.shortcuts import render
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from ..models import Product, Review
from ..serializers import ProductSerializer, ReviewSeralizer
from rest_framework import status
@api_view('GET')
def getProducts(request):
query = request.query_params.get('keyword')
if query == None:
query = ""
products = Product.objects.filter(product_name__icontains=query).order_by('-created_at')
page = request.query_params.get('page')
paginator = Paginator(products, 6)
try:
products = paginator.page((page))
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
if page == None:
page = 1
page = int(page)
print('Page', page)
serializers = ProductSerializer(products, many=True)
return Response({'products': serializers.data, 'page': page, "pages": paginator.num_pages})
@api_view(["POST"])
def createProduct(request):
pass
@api_view(["POST"])
def updateProduct(request, pk):
pass
@api_view(["Delete"])
def deleteProduct(request, pk):
pass
|
[
"dipu.j247@gmail.com"
] |
dipu.j247@gmail.com
|
037fc945b94f2fda69201f2375319345568518c0
|
38ac429d63369922e12e19cdda042b08b8123027
|
/test/test_saml_api.py
|
26846e6f2fcda7d9165535256227fcc7e7e394f6
|
[] |
no_license
|
aviv-julienjehannet/collibra_apiclient
|
0dfebe5df2eb929645b87eba42fab4c06ff0a6be
|
10a89e7acaf56ab8c7417698cd12616107706b6b
|
refs/heads/master
| 2021-09-12T16:52:19.803624
| 2018-04-19T01:35:20
| 2018-04-19T01:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
# coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.saml_api import SAMLApi # noqa: E501
from swagger_client.rest import ApiException
class TestSAMLApi(unittest.TestCase):
"""SAMLApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.saml_api.SAMLApi() # noqa: E501
def tearDown(self):
pass
def test_resource_saml_resource_get_sp_metadata_as_string_get(self):
"""Test case for resource_saml_resource_get_sp_metadata_as_string_get
Returns the SAML Service Provider metadata for a this instance. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"busworld08@gmail.com"
] |
busworld08@gmail.com
|
8b1820d27a95d40776ef0c21fe1b3fc18a519080
|
b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339
|
/Pandas_study/p25.py
|
a87f99f1a432299ee2ff02de0f557417bca76b3c
|
[] |
no_license
|
python-yc/pycharm_script
|
ae0e72898ef44a9de47e7548170a030c0a752eb5
|
c8947849090c71e131df5dc32173ebe9754df951
|
refs/heads/master
| 2023-01-05T06:16:33.857668
| 2020-10-31T08:09:53
| 2020-10-31T08:09:53
| 296,778,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
# -*- coding: utf-8 -*-
"""
pycharm无法显示条件格式的情况,jupyter是可以的,这里只写代码
"""
import pandas as pd
def low_score_red(s):
color = 'red' if s < 20 else 'black'
return f'color:{color}'
def highest_score_green(col):
return ['background-color:lime' if s == col.max()
else 'backgroud-color:white' for s in col]
students = pd.read_excel('./excel/Students-25-26.xlsx')
# print(students)
students.style.applymap(low_score_red, subset=['Test_1', 'Test_2', 'Test_3'])
students.style.applymap(low_score_red, subset=['Test_1', 'Test_2', 'Test_3'])\
.apply(highest_score_green, subset=['Test_1', 'Test_2', 'Test_3'])
|
[
"15655982512.com"
] |
15655982512.com
|
f63d3b091f50788337d5c3cb0719c39c23f0dfba
|
376b6933872b9110765154094d2c77713da2c853
|
/assemblies/bad.assemblies/abyss/3abyss.assembly.24.py
|
61ae7ee6df58578b2948a7ad2b7ef9fbaa8bf97b
|
[] |
no_license
|
markphuong/geographus-genome
|
46b037e7789641895f1a99b8bf6dee3418887600
|
a0ff439fbc0c350279359a51321e40e7778f5170
|
refs/heads/master
| 2020-03-19T07:21:29.297458
| 2018-06-05T04:15:18
| 2018-06-05T04:15:18
| 136,107,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
#!/usr/bin/env python
#flash manual: http://ccb.jhu.edu/software/FLASH/MANUAL
#this script cleans reads using trimmomatic, merges reads using flash, and creates a read1 file, read2 file (these represent paired files) and an unpaired file
import os
import sys
import argparse
import multiprocessing
# an arguments portion in the code represents necessary inputs to give to the script. I usually use this to give the program a file that contains all the unique sample IDs which should be in the read file names
def get_args():
parser = argparse.ArgumentParser(description="run blastx")
required = parser.add_argument_group("required arguments")
required.add_argument("--map", help="textfile with samples to run and what fasta file to match it to", required=True) #A map file with the sample ID and the fasta file it goes to
return parser.parse_args()
def align(element):
#the adapters file should have both forward and reverse, and the universal adapters
#this variables dict species the names for the input/out files
variables = dict(
sampleID = element) #name your output
commands = """
cp /pylon2/bi4s86p/phuong/geographus.genome/regular/2trim/*final*.fq ./
cp /pylon2/bi4s86p/phuong/geographus.genome/fosmids/4scaffolding/longreads.fa ./
cp /pylon2/bi4s86p/phuong/geographus.genome/matepair/2trim/*.fastq ./
abyss-pe np=60 k=24 name={sampleID}_kmer24 lib='pe1 pe2 pe3 pe4' mp='mp1 mp2 mp3 mp4' pe1='UU0018MY.final1.fq UU0018MY.final2.fq' pe2='10361X3.final1.fq 10361X3.final2.fq' pe3='10361X1.final1.fq 10361X1.final2.fq' pe4='9988X1.final1.fq 9988X1.final2.fq' se='UU0018MY.finalunpaired.fq 10361X3.finalunpaired.fq 10361X1.finalunpaired.fq 9988X1.finalunpaired.fq' mp1='11308X4_A_R1.fastq 11308X4_A_R2.fastq' mp2='11308X4_B_R1.fastq 11308X4_B_R2.fastq' mp3='11308X4_C_R1.fastq 11308X4_C_R2.fastq' mp4='11308X4_D_R1.fastq 11308X4_D_R2.fastq'
cp -r *{sampleID}_kmer* /pylon2/bi4s86p/phuong/geographus.genome/assemblies/abyss
""".format(**variables)
#this bit of code executes the command
cmd_list = commands.split("\n")
for cmd in cmd_list:
os.system(cmd)
mylist = []
args = get_args() #this is where the arguments from the def args code gets called upon
with open(args.map) as rfile:
for line in rfile:
line = line.strip()
mylist.append(line)
#this bit is really not necessary. I could have done this by not having 'def main()' and just starting with the args=get_args() line, but the following code follows the logic of what preceded it.
pool = multiprocessing.Pool(1)
pool.map(align, mylist)
|
[
"phuong@br006.pvt.bridges.psc.edu"
] |
phuong@br006.pvt.bridges.psc.edu
|
0e043652c0c6321e999e76558a46f1c3bb9d060d
|
98f7a31ee122cea4b9ed61300c8ee3be456b4850
|
/ws-tests/test_study_get.py
|
4a483310814437cf889f8e5e8a4583590d922dfe
|
[
"BSD-2-Clause"
] |
permissive
|
BioinformaticsArchive/phylesystem-api
|
4bd30bd32fba29497ca4c4df4a4cc5e85f0c7dfc
|
08a77e3f49a1607ec1bc5d835977b1d2c365e291
|
refs/heads/master
| 2021-01-16T21:38:30.651003
| 2015-04-09T18:08:18
| 2015-04-09T18:08:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
#!/usr/bin/env python
import sys, os
from opentreetesting import test_http_json_method, config
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/10'
data = {'output_nexml2json':'1.2'}
if test_http_json_method(SUBMIT_URI, 'GET', data=data, expected_status=200):
sys.exit(0)
sys.exit(1)
|
[
"mtholder@gmail.com"
] |
mtholder@gmail.com
|
8d7793e068d44fdf86b78fc3ce1096b6349256b3
|
df15792f5a82fcea25e62714b13e2d502485eb00
|
/peil/peil/migrations/0009_auto_20170507_2253.py
|
a180183bba7ba0b86a466b593a969a1c33e7bac5
|
[] |
no_license
|
acaciawater/peilstok
|
c6fcdab232d938a3f281e9843c81cfbf59cc4eea
|
179e1981a656e78a8ebdac6e50ef73f852df7630
|
refs/heads/master
| 2020-12-30T17:11:27.871514
| 2017-12-08T13:14:35
| 2017-12-08T13:14:35
| 91,066,224
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-07 20:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('peil', '0008_auto_20170507_2159'),
]
operations = [
migrations.RemoveField(
model_name='basemodule',
name='devid',
),
migrations.RemoveField(
model_name='basemodule',
name='serial',
),
]
|
[
"tkleinen@gmail.com"
] |
tkleinen@gmail.com
|
d9d098d9fb7eebef77cb4e80498df3812f35e9be
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/mail_bot/models/mail_channel.py
|
b3486e0f9c67d8c5f70a64944b335d665f4eec2a
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264
| 2019-04-30T14:43:46
| 2019-04-30T14:43:46
| 184,268,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class Channel(models.Model):
_inherit = 'mail.channel'
def _execute_command_help(self, **kwargs):
super(Channel, self)._execute_command_help(**kwargs)
self.env['mail.bot']._apply_logic(self, kwargs, command="help") # kwargs are not usefull but...
@api.model
def init_odoobot(self):
if self.env.user.odoobot_state == 'not_initialized':
partner = self.env.user.partner_id
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
channel = self.with_context({"mail_create_nosubscribe": True}).create({
'channel_partner_ids': [(4, partner.id), (4, odoobot_id)],
'public': 'private',
'channel_type': 'chat',
'email_send': False,
'name': 'ALWAFIBot'
})
message = _("Hello,<br/>ALWAFI's chat helps employees collaborate efficiently. I'm here to help you discover its features.<br/><b>Try to send me an emoji :)</b>")
channel.sudo().message_post(body=message, author_id=odoobot_id, message_type="comment", subtype="mail.mt_comment")
self.env.user.odoobot_state = 'onboarding_emoji'
return channel
|
[
"50145400+gilbertp7@users.noreply.github.com"
] |
50145400+gilbertp7@users.noreply.github.com
|
43daa355f596415d794bf815f82a5c47f5f64f4e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03268/s110149697.py
|
306d09930e8015c8cc774fe726aa8960b05be863
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
n, k = map(int, input().split())
if k % 2 == 0:
d = k//2
ans = ((n // d)-(n//k)) ** 3
else:
ans = 0
ans += (n // k) ** 3
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
acba81390f9885788faef08ef3a0b042ce1e6ced
|
f5cf699c09c4bf2fd94285004ebc7766b9464532
|
/scripts2/has_release.py
|
aebe6aa3b0abba1d3f9fea32914f2aa20c91398c
|
[] |
no_license
|
marcoacierno/autodeploy-tests
|
056f7351a94d3316d9a984e9307c4d42b0848be6
|
f3bc97235cc17481ecc9529149ac7b361fbbe701
|
refs/heads/master
| 2022-01-08T02:52:41.418328
| 2019-05-11T10:46:33
| 2019-05-11T10:46:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import sys
import os
import re
sys.path.append(os.path.dirname(__file__)) # noqa
from datetime import datetime
from base import run_process, RELEASE_FILE, CHANGELOG_FILE
if __name__ == "__main__":
if not os.path.exists(RELEASE_FILE):
print("Not releasing a new version because there isn't a RELEASE.md file.")
run_process(["circleci", "step", "halt"])
|
[
"marcoaciernoemail@gmail.com"
] |
marcoaciernoemail@gmail.com
|
e291e05c5bc7d1bb9602b61922781b54f61194f1
|
cc2a00ce7e05245327ce8da85d0e3aa01d9635b9
|
/P_controller/Tank_2/models/environment.py
|
4b71bb3680bde2d87dcab15ee3e7ea14fb3ac54f
|
[] |
no_license
|
puttak/Reinforcement-Learning-in-Process-Control
|
f7c05a0ed41826cb1d7248caffdb3c47bbe66df0
|
852967e97b2fb0b6c5022365c9ef62906c099832
|
refs/heads/master
| 2020-05-03T21:56:33.515929
| 2019-03-21T08:28:36
| 2019-03-21T08:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
from models.tank_model.tank import Tank
from visualize.window import Window
import matplotlib.pyplot as plt
from drawnow import drawnow
class Environment:
"Parameters are set in the params.py file"
def __init__(self, TANK_PARAMS_LIST, TANK_DIST_LIST, MAIN_PARAMS):
self.model = []
for i, TANK_PARAMS in enumerate(TANK_PARAMS_LIST):
tank = Tank(
height=TANK_PARAMS["height"],
radius=TANK_PARAMS["width"],
max_level=TANK_PARAMS["max_level"],
min_level=TANK_PARAMS["min_level"],
pipe_radius=TANK_PARAMS["pipe_radius"],
init_level=TANK_PARAMS["init_level"],
dist=TANK_DIST_LIST[i],
)
self.model.append(tank)
self.running = True
self.episode = 0
self.all_rewards = []
self.terminated = False
self.show_rendering = MAIN_PARAMS["RENDER"]
self.live_plot = MAIN_PARAMS["LIVE_REWARD_PLOT"]
if self.show_rendering:
self.window = Window(self.model)
if self.live_plot:
plt.ion() # enable interactivity
plt.figure(num="Rewards per episode") # make a figure
def get_next_state(self, z, i, t, q_out):
"""
Calculates the dynamics of the agents action and
gives back the next state
"""
dldt, q_out = self.model[i].get_dhdt(z, t, q_out)
self.model[i].change_level(dldt)
# Check terminate state
if self.model[i].level < self.model[i].min:
self.terminated = True
self.model[i].level = self.model[i].min
elif self.model[i].level > self.model[i].max:
self.terminated = True
self.model[i].level = self.model[i].max
return self.model[i].level, q_out
def render(self, action):
"Draw the water level of the tank in pygame"
if self.render:
running = self.window.Draw(action)
if not running:
self.running = False
def get_reward(self, h):
h = h / self.model.h
reward = (h - 0.5) ** 2
return reward
if h > 0.49 and h < 0.51:
return 5
if h > 0.45 and h < 0.55:
return 4
if h > 0.4 and h < 0.6:
return 3
if h > 0.3 and h < 0.7:
return 2
if h > 0.2 and h < 0.8:
return 1
else:
return 0
def plot_rewards(self):
"drawnow plot of the reward"
plt.plot(
self.all_rewards,
label="Exploration rate: {} %".format(self.epsilon * 100),
)
plt.legend()
def plot(self, all_rewards, epsilon):
"Live plot of the reward"
self.all_rewards = all_rewards
self.epsilon = round(epsilon, 4)
try:
drawnow(self.plot_rewards)
except KeyboardInterrupt:
print("Break")
|
[
"eskild.emedd33@gmail.com"
] |
eskild.emedd33@gmail.com
|
ba858d836612f5c7033f224fdde159303b0860e2
|
ef2e354ae06e9994b7bc65f9685f8769ec56dc28
|
/offerride/urls.py
|
62a8e3f206ab88fb18f3719babe863841c06b08e
|
[] |
no_license
|
akhilpatil123/FareShare
|
45e634b07749f507a40eeb08be710b2090844ab9
|
a0d89ba324ef5cf74fe5c54cf641f0d3625bd373
|
refs/heads/master
| 2020-04-30T00:21:56.041455
| 2019-03-19T11:55:56
| 2019-03-19T11:55:56
| 176,501,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
from django.conf.urls import url
from . import views
app_name = 'offerride'
urlpatterns = [
url(r'^$',views.index, name='index'),
url(r'^rides/$', views.rides, name='rides'),
url(r'^addride/$', views.addride, name='addride'),
url(r'^submitride/$', views.submitride, name='submitride'),
url(r'^join/$', views.join, name='join'),
url(r'^addcar/$', views.addcar, name='addcar'),
url(r'^submitcar/$', views.submitcar, name='submitcar'),
url(r'^seat/$', views.seat, name='seat'),
url(r'^maps/$', views.maps, name='maps'),
]
|
[
"you@example.com"
] |
you@example.com
|
70026a40fd339a0274b5f4b28aa1d9800b33d7aa
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/scipy/integrate/_ivp/dop853_coefficients.py
|
5cc5c9e4c41c8bc9fa2287554b1308777aaa5172
|
[
"GPL-3.0-or-later",
"BSD-3-Clause",
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"GCC-exception-3.1",
"Qhull",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684
| 2023-06-26T00:53:53
| 2023-06-26T00:53:53
| 94,152,032
| 14
| 0
|
MIT
| 2023-09-04T02:53:29
| 2017-06-13T00:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,303
|
py
|
from __future__ import absolute_import, division, print_function
import numpy as np
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7
C = np.array([0.0,
0.526001519587677318785587544488e-01,
0.789002279381515978178381316732e-01,
0.118350341907227396726757197510,
0.281649658092772603273242802490,
0.333333333333333333333333333333,
0.25,
0.307692307692307692307692307692,
0.651282051282051282051282051282,
0.6,
0.857142857142857142857142857142,
1.0,
1.0,
0.1,
0.2,
0.777777777777777777777777777778])
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2
A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2
A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2
A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1
A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1
A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2
A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3
A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1
A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2
A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022
A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1
A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2
A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3
A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1
A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138
B = A[N_STAGES, :N_STAGES]
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1
E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1
# First 3 coefficients are computed separately.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1
D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2
D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2
D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
|
[
"ulcamilo@gmail.com"
] |
ulcamilo@gmail.com
|
8a2e361d427501a46d5935226ee01779753093ca
|
eef3fd0eba25725aa045f4913304c4d2dd93ba7e
|
/deeplearning_tensorflow_p/p58_transpose_resnet.py
|
2463164107c0a9da57793cd2bb02a1bff48cb30e
|
[] |
no_license
|
provenclei/tensorflow_cv
|
c613e686ab6827a5eedcbaf00ef1317da0b94e81
|
c8827e74e0db42fa617c91f1d14b71abcff8780a
|
refs/heads/master
| 2022-12-01T05:52:31.365257
| 2020-08-16T00:24:11
| 2020-08-16T00:24:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,078
|
py
|
# -*- coding: utf-8 -*-
"""
@Author : LEITENG
@Version :
------------------------------------
@File : p56_ResNet.py
@Description :
@CreateTime : 2020/7/10 09:21
------------------------------------
@ModifyTime : 反 Resnet 框架
"""
import tensorflow as tf
RESNET18 = 'ResNet18'
RESNET34 = 'ResNet34'
RESNET50 = 'ResNet50'
RESNET101 = 'ResNet101'
RESNET152 = 'ResNet152'
SETTING = {
RESNET18: {"bottleneck": False, 'repeats': [2, 2, 2, 2]},
RESNET34: {"bottleneck": False, 'repeats': [3, 4, 6, 3]},
RESNET50: {"bottleneck": True, 'repeats': [3, 4, 6, 3]},
RESNET101: {"bottleneck": True, 'repeats': [3, 4, 23, 3]},
RESNET152: {"bottleneck": True, 'repeats': [3, 8, 36, 3]},
}
_name_id = 1
class TransposeResNet:
def __init__(self, name):
self.bottleneck = SETTING[name]['bottleneck']
self.repeats = SETTING[name]['repeats']
def __call__(self, x, size: int, training: bool, name=None):
'''
进行反卷积操作
:param x: 输入值 [-1, -1]
:param size: 输出大小,必须为 32 的倍数,默认为224
:param training:
:param name:
:return:
'''
height, width = _check(size)
if name is None:
global _name_id
name = 'transpose_resnet_%d' % _name_id
_name_id += 1
with tf.variable_scope(name):
filters = 2048 if self.bottleneck else 512
# [-1, 2048] 或 [-1, 512]
x = tf.layers.dense(x, filters, name='fc', activation=tf.nn.relu)
# [-1, 1, 1, 2048] 或 [-1, 1, 1, 512]
x = tf.reshape(x, [-1, 1, 1, filters])
# [-1, 7, 7, 2048] 或 [-1, 7, 7, 512]
x = tf.layers.conv2d_transpose(x, filters, (height // 32, width // 32), 1,
name='deconv1', activation=tf.nn.relu)
# -> [-1, 56, 56, 64]
x = self._repeat(x, training)
# 池化对应操作为反卷积
# x: [-1, 56, 56, 64] -> [-1, 112, 112, 64]
x = tf.layers.conv2d_transpose(x, 64, 3, 2, 'same', name='decov2', activation=tf.nn.relu)
# [-1, 112, 112, 64] -> [-1, 224, 224, 3]
x = tf.layers.conv2d_transpose(x, 3, (height // 32, width // 32), 2, 'same', name='decov3')
return x
def _repeat(self, x, training):
# [-1, 7, 7, 2048] 或 [-1, 7, 7, 512] -> [-1, 56, 56, 64]
filters = x.shape[-1].value
for num_i, num in zip(range(len(self.repeats) - 1, -1, -1), reversed(self.repeats)):
for i in range(num-1, -1, -1):
x = self._transpose_residual(x, num_i, i, filters, training)
filters //= 2
return x
def _transpose_residual(self, x, num_i, i, filters, training):
strides = 2 if num_i > 0 and i == 0 else 1
if self.bottleneck:
left = _my_deconv(x, filters, 1, 1, 'same', name='res_%d_%d_left_myconv1' % (num_i, i),
training=training)
filters //= 4
left = _my_deconv(left, filters, 3, 1, 'same', name='res_%d_%d_left_myconv2' % (num_i, i),
training=training)
left = _my_deconv(left, filters, 1, strides, 'same', name='res_%d_%d_left_myconv3' % (num_i, i),
training=training, active=False)
else:
left = _my_deconv(x, filters, 3, 1, 'same', name='res_%d_%d_left_myconv1' % (num_i, i),
training=training)
left = _my_deconv(left, filters, 3, strides, 'same', name='res_%d_%d_left_myconv2' % (num_i, i),
training=training)
if filters != x.shape[-1].value or strides > 1:
# 如果右侧通道数或图片大小不相等,则通过卷积
right = _my_deconv(x, filters, 1, strides, 'same', name='res_%d_%d_right_myconv1' % (num_i, i),
training=training, active=False)
else:
right = x
return tf.nn.relu(left + right)
def _my_deconv(x, filters, kernel_size, strides, padding, name, training, active: bool=True):
with tf.variable_scope(name):
x = tf.layers.conv2d_transpose(x, filters, kernel_size, strides, padding, name='deconv')
x = tf.layers.batch_normalization(x, [1, 2, 3], epsilon=1e-6, training=training, name='bn')
if active:
x = tf.nn.relu(x)
return x
def _check(size):
if type(size) == int:
size = (size, size)
height, width = size
assert height % 32 == 0
assert width % 32 == 0
return height, width
def main():
net = TransposeResNet(RESNET50)
# 调用 __call__ 函数
x = net(tf.random_normal([20, 123]), 224, True) # 使用 ()就可以调用魔法函数__call__'
print(x.shape)
net = TransposeResNet(RESNET101)
x = net(tf.random_normal([20, 123]), 224, True) # 使用 ()就可以调用魔法函数__call__'
print(x.shape)
if __name__ == '__main__':
main()
|
[
"18565663062@163.com"
] |
18565663062@163.com
|
2f05ffdb5318a803ae57b08a2a6dfa42c899af36
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03644/s583061806.py
|
4f4c5fa1513661aa6021ecf76f19f6b4993b52f6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
N = int(input())
result = []
for i in range(1, N+1):
bin_i = bin(i)
bin_i_c = bin_i.count("0")-1
result.append(bin_i_c)
sorted_result = sorted(result, reverse = True)
print(2 ** sorted_result[0])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3d67d667f7c635238b62f637bbb7bca5a7604a8d
|
dbd8180d9c02c22b42baa5227437714ff352fd8e
|
/1-100/L250.py
|
5488dcdc339fc4b78d91939d00a3f4171395ad11
|
[] |
no_license
|
k8godzilla/-Leetcode
|
92953dfffc0f06907fa7bd0beea7bc27b16f9efa
|
58d5384155f481b1d1b0a7ca69566245dd779554
|
refs/heads/master
| 2020-06-12T15:35:43.380979
| 2019-08-07T11:14:49
| 2019-08-07T11:14:49
| 194,348,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 28 17:29:57 2019
@author: sunyin
"""
'''
给定一个二叉树,统计该二叉树数值相同的子树个数。
同值子树是指该子树的所有节点都拥有相同的数值。
示例:
输入: root = [5,1,5,5,5,null,5]
5
/ \
1 5
/ \ \
5 5 5
输出: 4
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/count-univalue-subtrees
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def countUnivalSubtrees(self, root: TreeNode) -> int:
self.res = 0
b, v = self.helper(root)
return self.res
def helper(self, node: TreeNode):
if node.left is None and node.right is None:
self.res += 1
return True, node.val
elif node.left is None:
b, v = self.helper(node.right)
if b and v == node.val:
self.res += 1
return b, v
else:
return False, -1
elif node.right is None:
b, v = self.helper(node.left)
if b and v == node.val:
self.res += 1
return b, v
else:
return False, -1
else:
bRight, vRight = self.helper(node.right)
bLeft, vLeft = self.helper(node.left)
if bRight and bLeft and vRight == node.val and vLeft == node.val:
self.res += 1
return True, node.val
else:
return False, -1
|
[
"k8sunyin@126.com"
] |
k8sunyin@126.com
|
a32a139770dcea6321e2c857c8f1d62509740d59
|
dd1b38d6d953fae7ace7b9c5f86821ac24936b1d
|
/stutorial/items.py
|
2376d466616a39f112cdf068f151165ddf3d4b94
|
[] |
no_license
|
ranafge/scrapy_project
|
81e1345cc793e65061ba8a43afa78ec91c0680a9
|
8b921a64f9e499ac56cb985b6ccaf680258a2b2f
|
refs/heads/master
| 2020-04-11T04:58:28.982076
| 2018-12-12T19:22:12
| 2018-12-12T19:22:12
| 161,533,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class StutorialItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"ranafge@gmail.com"
] |
ranafge@gmail.com
|
81a10088604bb25b97c23ba1ef2a1604af597a9f
|
0c3cd4e933afef9dd15c2983d6b1a8413063ae80
|
/alpaca_paper/tests/test_screeners.py
|
f9d9c8afeb1f5fc0c0b87f8372bca7a71743e206
|
[] |
no_license
|
webclinic017/paper_trading_bot
|
405ae2cad7fd50b393509a90973d674b5f59ce8c
|
a172ce2dc150183be4ddb5b218dfcb7006027f69
|
refs/heads/master
| 2023-07-24T06:31:12.093572
| 2021-07-20T01:32:32
| 2021-07-20T01:32:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
import pytest
from alpaca_paper.screeners import MarketWatch
@pytest.fixture
def mw():
return MarketWatch()
def test_premarket(mw):
pre_market = mw.pre_market()
assert isinstance(pre_market, (dict,))
assert isinstance(pre_market['gainers'], (list,))
assert isinstance(pre_market['loosers'], (list,))
assert isinstance(pre_market['most_actives'], (list,))
def test_vol_to_float():
assert 221770 == MarketWatch.vol_to_float('221.77K')
assert 2189000 == MarketWatch.vol_to_float('2.189M')
assert 3316 == MarketWatch.vol_to_float('3,316')
|
[
"samuelperron@hotmail.fr"
] |
samuelperron@hotmail.fr
|
566b7fac7c1540a03e7ba419a8e74227534f307e
|
fb124e51024917d6479fa626d9607ff10f7a3aba
|
/storm-control/storm_control/hal4000/illumination/button_editor_ui.py
|
6340c37f39723a6c6e0a3e99238c3c19bfe496a8
|
[
"MIT"
] |
permissive
|
BehnamAbaie/storm-control
|
054bd7bbd903ed9635e4d1121c30544f58473c4f
|
0c686321142eccad62ce3365eae22c3b69229b0d
|
refs/heads/main
| 2023-06-18T08:04:01.108874
| 2021-07-14T00:51:15
| 2021-07-14T00:51:15
| 342,049,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'button_editor.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(493, 380)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.scrollArea = QtWidgets.QScrollArea(Dialog)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 473, 327))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Power Button Editor"))
|
[
"noreply@github.com"
] |
BehnamAbaie.noreply@github.com
|
eab876d87c22914ef3b369338b868419b7af5f42
|
cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3
|
/Learning_Python_Generators/Exercise_Files/Ch3/03_04/coroutine_decorator.py
|
a55630fb3f470ad6df497cd864cbf4893c44f0e7
|
[] |
no_license
|
sedstan/LinkedIn-Learning-Python-Course
|
2b936d0f00703a6e66a872220ed47572123dc7fd
|
b4584218355bf07aa3d2939b950911eae67adb0b
|
refs/heads/master
| 2021-10-11T10:19:13.675662
| 2019-01-24T17:55:20
| 2019-01-24T17:55:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
def coroutine_decorator(func):
def wrap(*args, **kwargs):
cr = func(*args, **kwargs)
cr.next()
return cr
return wrap
@coroutine_decorator
def coroutine_example():
while True:
x = yield
#do something with x
print (x)
|
[
"sed@wearewhy.co.uk"
] |
sed@wearewhy.co.uk
|
f87cd3733e9279862ac119d90af6bf4ea02c10ae
|
627cca9406c31ce30c493ff7502f79eb4c57eee3
|
/xcha/wallet/lineage_proof.py
|
ca16b4fed04cad1847319ac28818c391c418eb37
|
[
"Apache-2.0"
] |
permissive
|
blockchiansea/xcha-blockchain
|
40c6d36813f671e94316a522904238f495f39f6b
|
7de0ba89056236e30069aef12fe25843f6093bcf
|
refs/heads/master
| 2023-07-26T02:36:57.654196
| 2021-09-06T06:04:21
| 2021-09-06T06:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from dataclasses import dataclass
from typing import Optional
from xcha.types.blockchain_format.sized_bytes import bytes32
from xcha.util.ints import uint64
from xcha.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class LineageProof(Streamable):
parent_name: bytes32
inner_puzzle_hash: Optional[bytes32]
amount: uint64
|
[
"xchanet@gmail.com"
] |
xchanet@gmail.com
|
71dc32b622749cfff8398ad2dde76627857ea6a3
|
17e3234ab01fd93233cc453f1495d50424c3bd8f
|
/latte/dashboard/doctype/dashboard_data_slice/__init__.py
|
b3c7ec22aa81e9680d69eb7ddacef402ae0c4349
|
[
"MIT"
] |
permissive
|
sunnyakaxd/latte
|
8943dbf70ce934e04e51b147a54e6dd02dfe43db
|
de74065122a1f858bd75f8e1a36fca3b23981f4c
|
refs/heads/master
| 2023-06-11T10:25:31.217047
| 2021-07-06T06:40:19
| 2021-07-06T06:40:19
| 383,363,137
| 0
| 0
|
NOASSERTION
| 2021-07-06T06:26:49
| 2021-07-06T06:26:49
| null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
import frappe
import jwt
import time
from latte.utils.caching import cache_me_if_you_can
@frappe.whitelist()
def run(slice_name=None, data_source_name=None, filters=None):
return run_cached(slice_name, data_source_name, filters)
# @cache_me_if_you_can(expiry=20)
def run_cached(slice_name=None, data_source_name=None, filters=None):
if not slice_name:
frappe.throw('Dashboard Name Required')
dataslice_doc = frappe.get_doc('Dashboard Data Slice', slice_name)
response, status = dataslice_doc.execute(data_source_name, filters)
return frappe._dict({
'response': response,
'status': status
})
@frappe.whitelist()
def get_metabase_url(name, resource_type, metabase_site_url=None, metabase_secret_key=None):
if frappe.conf.metabase_site_url:
metabase_site_url = frappe.conf.metabase_site_url
if frappe.conf.metabase_secret_key:
metabase_secret_key = frappe.conf.metabase_secret_key
payload = {
'resource': {resource_type: int(name)},
'params': {},
'exp': round(time.time()) + (60 * 100) # 100 minute expiration
}
token = jwt.encode(payload, metabase_secret_key, algorithm='HS256')
iframeUrl = metabase_site_url + '/embed/'+ resource_type +'/' + token.decode('utf8') + '#bordered=true&titled=false'
return iframeUrl
@frappe.whitelist()
def save_chart_config(data_slice, config):
data_slice_doc = frappe.get_doc("Dashboard Data Slice", data_slice)
data_slice_doc.chart_default_config = config
data_slice_doc.save()
return "Success"
|
[
"himanshu.mishra@elastic.run"
] |
himanshu.mishra@elastic.run
|
77ee9b20a0817d31a073534a684b32b631dcca13
|
e41651d8f9b5d260b800136672c70cb85c3b80ff
|
/Notification_System/temboo/Library/Google/Gmail/InboxFeed.py
|
8764414118792dd2115f21eea4921bec6e6e5563
|
[] |
no_license
|
shriswissfed/GPS-tracking-system
|
43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c
|
1c5e90a483386bd2e5c5f48f7c5b306cd5f17965
|
refs/heads/master
| 2020-05-23T03:06:46.484473
| 2018-10-03T08:50:00
| 2018-10-03T08:50:00
| 55,578,217
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,883
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# InboxFeed
# Allows you to access a read-only Gmail feed that contains a list of unread emails.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InboxFeed(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InboxFeed Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InboxFeed, self).__init__(temboo_session, '/Library/Google/Gmail/InboxFeed')
def new_input_set(self):
return InboxFeedInputSet()
def _make_result_set(self, result, path):
return InboxFeedResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InboxFeedChoreographyExecution(session, exec_id, path)
class InboxFeedInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InboxFeed
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new Access Token.)
"""
super(InboxFeedInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('ClientSecret', value)
def set_Label(self, value):
"""
Set the value of the Label input for this Choreo. ((optional, string) The name of a Gmail Label to retrieve messages from (e.g., important, starred, sent, junk-e-mail, all).)
"""
super(InboxFeedInputSet, self)._set_input('Label', value)
def set_Mode(self, value):
"""
Set the value of the Mode input for this Choreo. ((optional, string) Used when an XPath query is provided. Valid values are "select" or "recursive". Select mode will return the first match of the query. In recursive mode, the XPath query will be applied within a loop.)
"""
super(InboxFeedInputSet, self)._set_input('Mode', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((optional, password) A Google App-specific password that you've generated after enabling 2-Step Verification (Note: authenticating with OAuth credentials is the preferred authentication method).)
"""
super(InboxFeedInputSet, self)._set_input('Password', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('RefreshToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format for the response. Valid values are JSON and XML. This will be ignored when providng an XPath query because results are returned as a string or JSON depending on the Mode specified.)
"""
super(InboxFeedInputSet, self)._set_input('ResponseFormat', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((optional, string) Your full Google email address e.g., martha.temboo@gmail.com (Note: authenticating with OAuth credentials is the preferred authentication method).)
"""
super(InboxFeedInputSet, self)._set_input('Username', value)
def set_XPath(self, value):
"""
Set the value of the XPath input for this Choreo. ((optional, string) An XPATH query to run.)
"""
super(InboxFeedInputSet, self)._set_input('XPath', value)
class InboxFeedResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InboxFeed Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_FullCount(self):
"""
Retrieve the value for the "FullCount" output from this Choreo execution. ((integer) The number of unread messages. This is parsed from the Google XML response. Note that when using the Label input to retrieve messages from a particular Gmail label, the full count element may be 0.)
"""
return self._output.get('FullCount', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Google. This will contain the data from the Gmail feed, or if the XPath input is provided, it will contain the result of the XPath query.)
"""
return self._output.get('Response', None)
class InboxFeedChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InboxFeedResultSet(response, path)
|
[
"shriswissfed@gmail.com"
] |
shriswissfed@gmail.com
|
97d98121e3aad07d0cc73ab82f9883743bfc3be7
|
556db265723b0cc30ad2917442ed6dad92fd9044
|
/tensorflow/python/ops/ragged/ragged_print_op_test.py
|
2b612d463d0eb70eebb13c4486f0d5d159173771
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
graphcore/tensorflow
|
c1669b489be0e045b3ec856b311b3139858de196
|
085b20a4b6287eff8c0b792425d52422ab8cbab3
|
refs/heads/r2.6/sdk-release-3.2
| 2023-07-06T06:23:53.857743
| 2023-03-14T13:04:04
| 2023-03-14T13:48:43
| 162,717,602
| 84
| 17
|
Apache-2.0
| 2023-03-25T01:13:37
| 2018-12-21T13:30:38
|
C++
|
UTF-8
|
Python
| false
| false
| 7,777
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.print with ragged tensors.
Note: ragged support for tf.print is implemented by RaggedPrintV2Dispatcher in
ragged_dispatch.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tempfile
from absl.testing import parameterized
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedPrintV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='2d_int_values',
inputs=lambda: [ragged_factory_ops.constant([[1, 2], [3]])],
expected='[[1, 2], [3]]\n'),
dict(
testcase_name='3d_int_values',
inputs=lambda: [ragged_factory_ops.constant([[[1, 2], [3]], [[4]]])],
expected='[[[1, 2], [3]], [[4]]]\n'),
dict(
testcase_name='2d_str_values',
inputs=lambda: [ragged_factory_ops.constant([['a', 'b'], ['c']])],
expected="[['a', 'b'], ['c']]\n"),
dict(
testcase_name='2d_str_values_with_escaping',
inputs=lambda: [ragged_factory_ops.constant([["a'b"], ['c"d']])],
expected="[['a\\'b'], ['c\"d']]\n"),
dict(
testcase_name='two_ragged_values',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
ragged_factory_ops.constant([[5], [], [6, 7, 8]])
],
expected='[[1, 2], [3]] [[5], [], [6, 7, 8]]\n'),
dict(
testcase_name='ragged_value_and_non_tensor_values',
inputs=lambda:
['a', 5, True,
ragged_factory_ops.constant([[1, 2], [3]]), 'c'],
expected='a 5 True [[1, 2], [3]] c\n'),
dict(
testcase_name='ragged_value_and_dense_value',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
constant_op.constant([[1, 2], [3, 4]])
],
expected='[[1, 2], [3]] [[1 2]\n [3 4]]\n'),
dict(
testcase_name='ragged_value_and_sparse_value',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
sparse_ops.from_dense([[1]])
],
expected=(
'[[1, 2], [3]] '
"'SparseTensor(indices=[[0 0]], values=[1], shape=[1 1])'\n")),
dict(
testcase_name='summarize_default',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
expected=('[[1, 2, 3, ..., 7, 8, 9], [10], [], '
'..., '
'[], [], [11, 12]]\n')),
dict(
testcase_name='summarize_2',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
summarize=2,
expected='[[1, 2, ..., 8, 9], [10], ..., [], [11, 12]]\n'),
dict(
testcase_name='summarize_neg1',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
summarize=-1,
expected=('[[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], '
'[], [], [], [], [11, 12]]\n')),
])
def testRaggedPrint(self, inputs, expected, summarize=None):
if callable(inputs):
inputs = inputs()
with tempfile.TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, 'print_output')
kwargs = {'output_stream': 'file://{}'.format(path)}
if summarize is not None:
kwargs.update(summarize=summarize)
self.evaluate(logging_ops.print_v2(*inputs, **kwargs))
actual = open(path, 'r').read()
self.assertEqual(repr(actual), repr(expected))
@test_util.run_all_in_graph_and_eager_modes
class RaggedToStringTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
('2d_int', [[1, 2], [], [3, 4, 5]], '[[1, 2], [], [3, 4, 5]]'),
('2d_str', [['a'], ['b'], ['c', 'd']], "[['a'], ['b'], ['c', 'd']]"),
('3d_int', [[[1, 2], []], [[3, 4, 5]]], '[[[1, 2], []], [[3, 4, 5]]]'),
('escape', [["a'b"], [r'c\d']], r"[['a\'b'], ['c\\d']]"),
dict(testcase_name='2d_empty', rt=[], ragged_rank=1, expected='[]'),
dict(testcase_name='3d_empty', rt=[], ragged_rank=2, expected='[]'),
dict(
testcase_name='3d_rrank1',
rt=[[[1, 2], [3, 4]], [], [[5, 6]]],
ragged_rank=1,
expected='[[[1, 2], [3, 4]], [], [[5, 6]]]'),
dict(
testcase_name='2d_empty_row', rt=[[]], ragged_rank=1,
expected='[[]]'),
dict(
testcase_name='3d_empty_row', rt=[[]], ragged_rank=2,
expected='[[]]'),
dict(
testcase_name='summarize_1',
rt=[[1, 2, 3, 4, 5], [], [6], [7], [8, 9]],
summarize=1,
expected='[[1, ..., 5], ..., [8, 9]]'),
dict(
testcase_name='summarize_2',
rt=[[1, 2, 3, 4, 5], [], [6], [7], [8, 9]],
summarize=2,
expected='[[1, 2, ..., 4, 5], [], ..., [7], [8, 9]]'),
])
def testRaggedToString(self, rt, expected, summarize=None, ragged_rank=None):
rt = ragged_factory_ops.constant(rt, ragged_rank=ragged_rank)
actual = ragged_string_ops.ragged_tensor_to_string(rt, summarize=summarize)
self.assertAllEqual(actual, expected)
@parameterized.named_parameters([
('maxelts_BadType', [[1]], "Expected summarize .*, got 'foo'", 'foo'),
('maxelts_0', [[1]], 'Expected summarize to be .*, got 0', 0),
('maxelts_Neg2', [[1]], 'Expected summarize to be .*, got -2', -2),
])
def testRaggedToStringErrors(self,
rt,
error,
summarize=None,
exception=ValueError):
rt = ragged_factory_ops.constant(rt)
with self.assertRaisesRegex(exception, error):
self.evaluate(
ragged_string_ops.ragged_tensor_to_string(rt, summarize=summarize))
def testRaggedToStringUnknownRank(self):
@def_function.function(
input_signature=[ragged_tensor.RaggedTensorSpec(ragged_rank=1)])
def f(rt):
return ragged_string_ops.ragged_tensor_to_string(rt)
with self.assertRaisesRegex(
ValueError, 'RaggedTensor to_string requires '
'that rt.shape.rank is not None'):
f(ragged_factory_ops.constant([[1, 2], [3]]))
if __name__ == '__main__':
googletest.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
723eeca76900ebe35f0b732286e7bd9845b4ffac
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/powerbi/v20200601/__init__.py
|
ce0d971ca28040740542ec1f335f01eed4ab4889
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_private_endpoint_connection import *
from .power_bi_resource import *
from .private_endpoint_connection import *
from ._inputs import *
from . import outputs
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.