hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
906fc90146a02fc91c29a4ca6a8d89955a76d227 | 1,542 | py | Python | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | 4 | 2020-09-05T00:17:27.000Z | 2022-01-25T19:44:32.000Z | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | null | null | null | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | 6 | 2020-11-20T15:42:03.000Z | 2022-02-10T02:43:29.000Z | from setuptools import setup
from distutils.util import convert_path
# Read the package version out of mudslide/version.py without importing
# the package itself (keeps setup.py runnable before dependencies exist).
main_ns = {}
ver_path = convert_path('mudslide/version.py')
with open(ver_path) as ver_file:
    exec(ver_file.read(), main_ns)
setup(
    name='mudslide',
    packages=['mudslide'],
    version=main_ns['__version__'],  # single-sourced from mudslide/version.py
    license='MIT',
    description='Package to simulate nonadiabatic molecular dynamics using trajectory methods',
    author='Shane M. Parker',
    author_email='shane.parker@case.edu',
    url='https://github.com/smparker/mudslide',
    # NOTE(review): pinned to v0.9 while `version` is dynamic -- confirm this
    # stays in sync with releases, or derive the URL from __version__.
    download_url='https://github.com/smparker/mudslide/archive/v0.9.tar.gz',
    keywords= ['science', 'chemistry', 'nonadiabatic dynamics'],
    install_requires=[
        'numpy>=1.19',
        'scipy',
        'typing_extensions'
    ],
    # NOTE(review): nose is unmaintained and does not run on Python 3.9+;
    # consider migrating the test suite to pytest.
    test_suite='nose.collector',
    tests_require=['nose'],
    entry_points={
        'console_scripts': [
            'mudslide = mudslide.__main__:main',
            'mudslide-surface = mudslide.surface:main'
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Physics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'
    ]
)
| 30.84 | 95 | 0.624514 |
906fe64b74d7a1e64be5829e3ead36fd43b1f23d | 1,361 | py | Python | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
] | null | null | null | import csv
import joblib
from sklearn.metrics import accuracy_score
# Raw rows, the model's feature matrix, the labels, and per-row user ids.
data = []
features = []
targets = []
feature_names = []
users = []
# Columns 2..393 of the header row are the model's feature names
# (columns 0/1 are presumably a row id and the user id -- TODO confirm).
with open('satisfaction_feature_names.csv') as name_file:
    column_name_file = csv.reader(name_file)
    feature_names = next(column_name_file)[2:394]
with open('cza_satisfaction_train_0922.csv') as data_file:
    csv_file = csv.reader(data_file)
    idx = 0
    # Evaluate on rows 10001..50000 only.
    for content in csv_file:
        idx = idx + 1
        if idx <= 10000:
            continue
        if idx > 50000:
            break
        # Everything past the first two columns is numeric.
        content = content[:2] + list(map(float, content[2:]))
        if len(content) != 0:
            data.append(content)
            features.append(content[2:394])  # model inputs
            targets.append(content[-1])      # label is the last column
            users.append(content[1])
# The pickle bundles the fitted classifier with its ranked feature scores.
clf, sorted_feature_scores = joblib.load("cza_rf.pkl")
predict_result = clf.predict(features)
print(sorted_feature_scores)
print(accuracy_score(predict_result, targets))
result = list(zip(users, predict_result))
print(result[:10])
print(sum(predict_result))  # count of positive (1) predictions
print(sum([flag[1] for flag in result]))
# Write the first few (date, user, prediction) rows out for inspection.
with open("rf_predict_result.csv", "w", encoding="UTF-8") as w_file:
    result_file = csv.writer(w_file)
    for idx, row in enumerate(result):
        if idx > 10:
            break
        row = list(row)
        row.insert(0, 20200928)  # presumably the run/snapshot date -- verify
        result_file.writerow(row)
| 27.22 | 68 | 0.648788 |
9070d5bf65f2cf491385a39c1e6e52e356fd0952 | 573 | py | Python | py/test.py | BEARUBC/grasp-kernel | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
] | 1 | 2021-05-31T22:05:10.000Z | 2021-05-31T22:05:10.000Z | py/test.py | BEARUBC/grasp-kernel-wrapper | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
] | null | null | null | py/test.py | BEARUBC/grasp-kernel-wrapper | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
] | 1 | 2021-05-31T18:54:55.000Z | 2021-05-31T18:54:55.000Z |
print("Hello World")
# test = TestClass()
| 18.483871 | 39 | 0.612565 |
9070ee6ae571936274c18044e8321cc9866dd425 | 2,836 | py | Python | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 3 | 2019-11-12T17:56:08.000Z | 2022-03-12T03:43:10.000Z | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 47 | 2018-12-10T04:08:58.000Z | 2022-03-20T14:54:36.000Z | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
] | 1 | 2019-11-12T17:55:17.000Z | 2019-11-12T17:55:17.000Z | """Utilities for managing child processes within a scope - this ensures
tests run cleanly even on failure and also gives us a mechanism to
get debug info for our children.
"""
import logging
import os
import sys
from contextlib import contextmanager
from typing import ContextManager, List
import psutil
import process_tracker
process_tracker.install()
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
__all__ = [
"active_children",
"contained_children",
"disable_child_tracking",
"kill_children",
]
def _get_create_time(create_time):
    """Given basic process create time, return one that would
    match psutil.

    `create_time` is expected in clock ticks since system boot
    (the raw /proc-style value) -- TODO confirm against what
    process_tracker.children() actually reports.
    """
    boot_time = psutil.boot_time()  # absolute epoch seconds of system boot
    clock_ticks = os.sysconf("SC_CLK_TCK")  # kernel clock ticks per second
    # ticks-since-boot -> absolute epoch seconds, matching psutil's units
    return boot_time + (create_time / clock_ticks)
def active_children() -> List[psutil.Process]:
    """Returns the active child processes.

    Cross-checks each tracked (pid, create_time) pair against the
    live process table; the create-time comparison guards against a
    recycled pid belonging to an unrelated process.
    """
    out = []
    children = process_tracker.children()
    for pid, create_time in children:
        try:
            process = psutil.Process(pid)
        except psutil.NoSuchProcess:
            # Child already exited -- not active.
            continue
        else:
            if process.create_time() == _get_create_time(create_time):
                out.append(process)
    return out
def kill_children(timeout=1) -> List[psutil.Process]:
    """
    Kill any active children, returning any that were not terminated within
    timeout.

    Args:
        timeout: time to wait before killing.

    Returns:
        list of processes that had to be killed forcefully.
    """
    procs = active_children()
    for p in procs:
        try:
            p.terminate()  # polite SIGTERM first
        except psutil.NoSuchProcess:
            # Process exited between enumeration and terminate().
            pass
    gone, alive = psutil.wait_procs(procs, timeout=timeout)
    for p in alive:
        logger.warning("Cleaning up child: %d", p.pid)
        p.kill()  # escalate to SIGKILL for anything still running
    return alive
| 26.504673 | 80 | 0.665374 |
9071096add8b5a4db338073c96e92750aa128c1f | 2,516 | py | Python | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
] | null | null | null | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
] | null | null | null | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 Carlos Segura. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import gzip
parents = {}
conversations = []
samples = {}
comentario_id = None
parent_id = []
with gzip.open(sys.argv[1]) as f:
for line in f:
try:
line = line.decode('utf-8').strip()
#print(line)
splitted_line = line.split()
if len(splitted_line) == 0:
continue
head = splitted_line[0]
rest = splitted_line[1:]
if head == 'comentario_id:':
comentario_id = rest[0]
parent_id = []
if head == 'parent_id:':
parent_id.append(rest[0])
if head == 'comentario:':
comentario = rest
if len(comentario) == 0:
comentario_id = None
parent_id = []
continue
#Store this comment in parents dictionary
if comentario_id is not None:
sample = Sample()
sample.comentario_id = comentario_id
sample.parent_id = parent_id
sample.comentario = comentario
samples[comentario_id] = sample
comentario_id = None
parent_id = []
except:
continue
for k in samples:
sample = samples[k]
for parent in sample.parent_id:
if parent in samples:
qa = [samples[parent].comentario, sample.comentario]
conversations.append(qa)
for conversation in conversations:
print('********************************************')
for frase in conversation:
print(*frase)
| 27.955556 | 79 | 0.534181 |
90740254e2ea619dbf9f847e862986ac065aaf0a | 4,087 | py | Python | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
] | null | null | null | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
] | null | null | null | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
] | null | null | null | import unittest
import pandas as pd
import git
import os
from dfstools import get_dataset_dtypes
from dfstools import find_related_cols_by_name
from dfstools import find_related_cols_by_content
from dfstools import find_parent_child_relationships
from dfstools import pecan_cookies_load_data
if __name__ == '__main__':
unittest.main()
| 49.841463 | 111 | 0.477857 |
907488d52d48e24b4d69fb2af57f6618dc2c3ce3 | 2,836 | py | Python | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout,QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_()) | 35.45 | 203 | 0.605783 |
9074ea5b2e3ca5610b7441955b3420b7ffce9518 | 1,446 | py | Python | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | [
"Apache-2.0"
] | 95 | 2020-01-20T08:51:20.000Z | 2022-03-31T23:27:28.000Z | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | [
"Apache-2.0"
] | 274 | 2020-07-11T11:10:10.000Z | 2022-03-31T14:03:39.000Z | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | [
"Apache-2.0"
] | 9 | 2020-07-15T07:04:20.000Z | 2022-03-27T17:11:58.000Z | # // ###########################################################################
# // Queries
# // ###########################################################################
# -> get a single cell of a df (use `iloc` with `row` + `col` as arguments)
df.iloc[0]['staticContextId']
# -> get one column as a list
allFunctionNames = staticContexts[['displayName']].to_numpy().flatten().tolist()
# -> get all rows that match a condition
callLinked = staticTraces[~staticTraces['callId'].isin([0])]
# -> exclude columns
df.drop(['A', 'B'], axis=1)
# -> complex queries
staticTraces.query(f'callId == {callId} or resultCallId == {callId}')
# -> join queries (several examples)
# https://stackoverflow.com/a/40869861
df.set_index('key').join(other.set_index('key'))
B.query('client_id not in @A.client_id')
B[~B.client_id.isin(A.client_id)]
# merging dfs
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html
pd.merge(df1, df2, on=['A', 'B'])
df1.merge(df2, left_on='lkey', right_on='rkey')
# // ###########################################################################
# // Display
# // ###########################################################################
# -> display a groupby object (https://stackoverflow.com/questions/22691010/how-to-print-a-groupby-object)
groups = df.groupby('A')
for key, item in groups:
group = groups.get_group(key)
display(group)
# .to_numpy().flatten().tolist() | 34.428571 | 106 | 0.540111 |
907638a652d8418902c98ee951701aa5ff8b7dc1 | 2,279 | py | Python | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/diff/UniversalDiff.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from v3.diff import Transaction_pb2 as v3_dot_diff_dot_Transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/diff/UniversalDiff.proto',
package='v3.diff',
syntax='proto3',
serialized_pb=_b('\n\x1bv3/diff/UniversalDiff.proto\x12\x07v3.diff\x1a\x19v3/diff/Transaction.proto\";\n\rUniversalDiff\x12*\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x14.v3.diff.Transactionb\x06proto3')
,
dependencies=[v3_dot_diff_dot_Transaction__pb2.DESCRIPTOR,])
_UNIVERSALDIFF = _descriptor.Descriptor(
name='UniversalDiff',
full_name='v3.diff.UniversalDiff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transactions', full_name='v3.diff.UniversalDiff.transactions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=126,
)
_UNIVERSALDIFF.fields_by_name['transactions'].message_type = v3_dot_diff_dot_Transaction__pb2._TRANSACTION
DESCRIPTOR.message_types_by_name['UniversalDiff'] = _UNIVERSALDIFF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UniversalDiff = _reflection.GeneratedProtocolMessageType('UniversalDiff', (_message.Message,), dict(
DESCRIPTOR = _UNIVERSALDIFF,
__module__ = 'v3.diff.UniversalDiff_pb2'
# @@protoc_insertion_point(class_scope:v3.diff.UniversalDiff)
))
_sym_db.RegisterMessage(UniversalDiff)
# @@protoc_insertion_point(module_scope)
| 31.219178 | 203 | 0.777095 |
9076fc2a93a37415e1783c15ba456852ac6cdab0 | 4,549 | py | Python | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import numpy as np
import torch
from torch import Tensor
from onevision.data.augment.base import BaseAugment
from onevision.data.augment.utils import apply_transform_op
from onevision.data.data_class import ObjectAnnotation
from onevision.factory import AUGMENTS
__all__ = [
"ImageBoxAugment",
]
# MARK: - Modules
| 32.726619 | 94 | 0.473071 |
907746020f32a1228d26593b0db9dbd5b8907c24 | 2,087 | py | Python | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | 119 | 2017-07-22T15:02:30.000Z | 2021-08-02T10:42:59.000Z | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | null | null | null | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | 28 | 2017-08-04T14:28:41.000Z | 2019-11-27T23:46:14.000Z | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
vote_arr = Image.from_array([
[box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
| 44.404255 | 148 | 0.684236 |
9078e83afbdbc37dbf8bc13a26fcecb893de7fcb | 6,264 | py | Python | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | [
"MIT"
] | 1 | 2019-09-19T23:20:57.000Z | 2019-09-19T23:20:57.000Z | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | [
"MIT"
] | 1 | 2019-09-19T23:24:38.000Z | 2019-09-19T23:24:38.000Z | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | [
"MIT"
] | 1 | 2019-09-19T20:12:10.000Z | 2019-09-19T20:12:10.000Z |
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
#################################################################
#################################################################
# [END]
| 25.463415 | 81 | 0.543423 |
907a8e9bf17e1ccce65533dabf9db7c106ceba56 | 1,088 | py | Python | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | [
"MIT"
] | 7 | 2019-02-16T02:52:12.000Z | 2021-11-08T13:10:46.000Z | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | [
"MIT"
] | null | null | null | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | [
"MIT"
] | 14 | 2018-11-18T04:33:38.000Z | 2021-08-14T03:29:18.000Z | import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(128, 128, 1)))
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(128, 128, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10000, activation='relu'))
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd')
model.fit(x_train, y_train,
epochs=100,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test)
| 31.085714 | 51 | 0.669118 |
907b2f51dc7dc8191cd5bf95004855d172a84d81 | 15,373 | py | Python | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | 1 | 2021-08-11T19:10:08.000Z | 2021-08-11T19:10:08.000Z | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | [
"MIT"
] | null | null | null | # AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This module is for selecting a subnetwork using CSS so that you can do special
things to them. Checkout the tutorial section for a walkthrough. This is exposed
automatically with::
from k1lib.imports import *
selector.select # exposed
"""
from torch import nn; import k1lib, re, torch
from typing import List, Tuple, Dict, Union, Any, Iterator, Callable
from contextlib import contextmanager; from functools import partial
__all__ = ["ModuleSelector", "preprocess", "select"]
def preprocess(selectors:str, defaultProp="*") -> List[str]:
    r"""Normalizes a css-style selector string into simple
    ``selector:props`` lines. Example::

        # returns ["a:f", "a:g,h", "b:g,h", "t:*"]
        selector.preprocess("a:f; a, b: g,h; t")

    :param selectors: single css selector string. Statements separated
        by "\\n" or ";"
    :param defaultProp: default property, if statement doesn't have one"""
    # split on both newlines and semicolons; drop empty statements
    statements = [part for chunk in selectors.split("\n")
                  for part in chunk.split(";") if part != ""]
    cleaned = []
    for stmt in statements:
        # collapse runs of whitespace, then trim both ends
        stmt = re.sub(r"(^\s+)|(\s+$)", "", re.sub(r"\s\s+", " ", stmt))
        # strip spaces around the structural characters >, :, ","
        for before, after in ((" >", ">"), ("> ", ">"), (" :", ":"), (": ", ":"),
                              (" ,", ","), (", ", ","), (";", "\n"),
                              (" \n", "\n"), ("\n ", "\n")):
            stmt = stmt.replace(before, after)
        cleaned.append(stmt)
    result = []
    for stmt in cleaned:
        # statements without an explicit property list get the default one
        if ":" not in stmt:
            stmt = f"{stmt}:{defaultProp}"
        head, prop = stmt.split(":")[0], stmt.split(":")[1]
        # expand comma-delimited selector lists into separate lines
        result.extend(f"{segment}:{prop}" for segment in head.split(","))
    return result
_idxAuto = k1lib.AutoIncrement()
def _strTensor(t): return "None" if t is None else f"{t.shape}"
from contextlib import ExitStack | 43.426554 | 280 | 0.656866 |
907cab399c56f59d773c9098dcb9ad23a5c47d44 | 3,482 | py | Python | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 11 | 2021-12-20T15:32:17.000Z | 2022-03-16T03:54:02.000Z | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 156 | 2021-12-31T21:01:31.000Z | 2022-03-20T21:57:31.000Z | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | [
"MIT"
] | 3 | 2022-01-21T14:52:43.000Z | 2022-02-12T05:32:19.000Z | '''
Created on 22 fvr. 2022
@author: slinux
'''
from .wxRavenGeneralDesign import wxRavenWebBrowser
from wxRavenGUI.application.wxcustom.CustomLoading import *
from wxRavenGUI.application.wxcustom import *
import wx.html2 as webview
import sys
import logging
from wxRavenGUI.application.wxcustom.CustomUserIO import UserAdvancedMessage
| 30.017241 | 269 | 0.601091 |
907d53bdf5f863a5b666758a3f35cfee8a3a43e9 | 4,097 | py | Python | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | [
"MIT"
] | 2 | 2017-11-16T15:02:43.000Z | 2017-11-20T17:41:16.000Z | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | [
"MIT"
] | 10 | 2020-01-28T22:12:06.000Z | 2022-03-11T23:16:53.000Z | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | [
"MIT"
] | 2 | 2017-11-16T14:59:03.000Z | 2017-11-16T23:52:13.000Z | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.objects.platform import PlatformVersion
from ..resources.strings import prompts
from ..resources.statics import namespaces, option_names
from ..core import io
from ..lib import elasticbeanstalk
from . import commonops
| 35.626087 | 78 | 0.661948 |
907e1b4a54a9e37e87ee07e0eb6f6b12a199f562 | 2,719 | py | Python | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | [
"MIT"
] | null | null | null | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | [
"MIT"
] | null | null | null | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | [
"MIT"
] | null | null | null | ''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
import logging
import boto3
from perimeterator.helper import aws_elb_arn
from perimeterator.helper import dns_lookup
| 37.246575 | 77 | 0.543582 |
907f3c024ac75afd4ff1f45c65ec5e6e22c38567 | 1,685 | py | Python | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | [
"MIT"
] | null | null | null | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | [
"MIT"
] | null | null | null | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | [
"MIT"
] | null | null | null | """Module to help guess whether a file is binary or text.
Requirements:
Python 2.7+
Recommended:
Python 3
"""
def is_binary_file(fname):
    """Attempt to guess if 'fname' is a binary file heuristically.

    Heuristic (flawed -- use with caution): inspect the first 1024
    bytes of the file and call it binary when either a NUL byte is
    present or control characters outnumber text characters. An
    ASCII compatible character set is assumed; an empty file is
    treated as text.

    Returns True if 'fname' appears to be a binary file.
    """
    with open(fname, 'rb') as handle:
        sample = handle.read(1024)
    if not sample:
        # Empty file: nothing to judge, treat as text.
        return False
    if b'\x00' in sample:
        # NUL bytes essentially never occur in text files.
        return True
    n_control = control_char_count(sample)
    # More control bytes than printable ones -> probably binary.
    return n_control > len(sample) - n_control
def is_control_char(c):
    """Return True if 'c' is a control character.

    A character counts as a control character when its code point is
    outside the extended ASCII range (above 255) or below 32, except
    for tab, newline, and carriage return, which are treated as
    ordinary text. An ASCII compatible character set is assumed.

    Accepts either a one-character string or an integer byte value
    (as produced by iterating over ``bytes`` in Python 3).
    """
    # ord() raises TypeError when 'c' is already an int (Python 3
    # bytes iteration, or Python 2.7 compatibility paths) -- in that
    # case the value is used directly.
    try:
        code = ord(c)
    except TypeError:
        code = c
    if code in (9, 10, 13):  # \t, \n, \r are allowed whitespace
        return False
    return code < 32 or code > 255
def control_char_count(data):
    """Return the count of control characters in 'data'."""
    return sum(1 for item in data if is_control_char(item))
| 25.923077 | 66 | 0.626113 |
9080c3b939a2c1af97171c5d7d2b2932cf209fec | 8,329 | py | Python | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null | import numpy as np
import spikemetrics.metrics as metrics
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
from spikemetrics.utils import Epoch, printProgressBar
from collections import OrderedDict
from .parameter_dictionaries import get_recording_gui_params, get_feature_gui_params
def _compute_template_SNR(template, channel_noise_levels, max_channel_idx):
"""
Computes SNR on the channel with largest amplitude
Parameters
----------
template: np.array
Template (n_elec, n_timepoints)
channel_noise_levels: list
Noise levels for the different channels
max_channel_idx: int
Index of channel with largest templaye
Returns
-------
snr: float
Signal-to-noise ratio for the template
"""
snr = (
np.max(np.abs(template[max_channel_idx]))
/ channel_noise_levels[max_channel_idx]
)
return snr
def _compute_channel_noise_levels(recording, mode, noise_duration, seed):
"""
Computes noise level channel-wise
Parameters
----------
recording: RecordingExtractor
The recording ectractor object
mode: str
'std' or 'mad' (default
noise_duration: float
Number of seconds to compute SNR from
Returns
-------
moise_levels: list
Noise levels for each channel
"""
M = recording.get_num_channels()
n_frames = int(noise_duration * recording.get_sampling_frequency())
if n_frames >= recording.get_num_frames():
start_frame = 0
end_frame = recording.get_num_frames()
else:
start_frame = np.random.RandomState(seed=seed).randint(
0, recording.get_num_frames() - n_frames
)
end_frame = start_frame + n_frames
X = recording.get_traces(start_frame=start_frame, end_frame=end_frame)
noise_levels = []
for ch in range(M):
if mode == "std":
noise_level = np.std(X[ch, :])
elif mode == "mad":
noise_level = np.median(np.abs(X[ch, :]) / 0.6745)
else:
raise Exception("'mode' can be 'std' or 'mad'")
noise_levels.append(noise_level)
return noise_levels | 46.792135 | 231 | 0.623724 |
90818fc965fccbf18cf4f96b17fab97a599e1aaa | 824 | py | Python | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | null | null | null | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | null | null | null | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 4 | 2020-12-19T17:12:13.000Z | 2021-01-07T20:29:53.000Z | # This is a sample Python script.
# Press Mays+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import Gramatica as g
import interprete as Inter
import ts as TS
import jsonMode as JSON_INGE
import jsonMode as json
import Instruccion as INST
import Interfaz.Interfaz as Gui
import os
import glob
from os import path
from os import remove
if __name__ == '__main__':
    # NOTE(review): attribute access without a call -- ``Gui.principal``
    # is referenced but never invoked; confirm whether ``Gui.principal()``
    # (launching the GUI) was intended.
    Gui.principal
    # ``cadena`` is assigned but never used afterwards in this block.
    cadena= "goto"
    # for n in cadena:
    # in
    print("ELIMINANDO...")
    # Delete every previously generated JSON file before a fresh run.
    files = glob.glob('data/json/*')
    for ele in files:
        os.remove(ele)
| 18.311111 | 98 | 0.694175 |
90825885fb1011eb6a66d72e387d9a860b8e8b3f | 19,132 | py | Python | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2017-07-18T20:02:34.000Z | 2022-03-10T06:46:22.000Z | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
] | 103 | 2016-05-26T03:40:24.000Z | 2021-12-29T23:03:13.000Z | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
] | 9 | 2016-12-14T12:56:18.000Z | 2021-09-11T22:50:01.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test spparser.py module, which uses spark.py.
.. note::
Only testing to see if the parser makes the right kind of
objects. Quality of the data is tested in other modules.
"""
# STDLIB
import os
# THIRD-PARTY
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.exceptions import AstropyUserWarning
from numpy.testing import assert_allclose
# SYNPHOT
from synphot import exceptions as synexceptions
from synphot import units
from synphot.models import (BlackBodyNorm1D, Box1D, ConstFlux1D, Empirical1D,
GaussianFlux1D, PowerLawFlux1D)
from synphot.reddening import ExtinctionCurve
from synphot.spectrum import SourceSpectrum, SpectralElement
# LOCAL
from .. import catalog, exceptions, observationmode, spectrum, spparser
from ..config import conf
from ..stio import resolve_filename
def _compare_spectra(sp1, sp2):
    """Test that two spectra are basically equivalent.

    Compares wavesets (when present), sampled flux, integrated flux,
    model class, and redshift ``z`` (when the spectrum has one).
    """
    if sp1.waveset is None:
        # Analytic spectra have no intrinsic waveset; sample both at a
        # fixed set of wavelengths instead.
        assert sp2.waveset is None
        w = [100, 5000, 11000] * u.AA
    else:
        w = sp1.waveset
        assert_quantity_allclose(w, sp2.waveset)
    assert_quantity_allclose(sp1(w), sp2(w))
    assert_quantity_allclose(sp1.integrate(wavelengths=w),
                             sp2.integrate(wavelengths=w))
    # NOTE(review): ``type(x.__class__)`` is the metaclass, so this line
    # compares metaclasses (usually both ``type``) rather than the model
    # classes themselves -- likely intended to compare the classes.
    assert type(sp1.model.__class__) == type(sp2.model.__class__)  # noqa
    if hasattr(sp1, 'z'):
        assert sp1.z == sp2.z
def test_z_null():
    """ETC junk spectrum results in flat spectrum with no redshift."""
    # Parsing ``z(null, 0.1)`` should yield a constant-flux source, not
    # a redshifted one.
    sp1 = spparser.parse_spec('z(null, 0.1)')
    _single_functioncall(sp1, SourceSpectrum, ConstFlux1D, 'z(null,0.1)')

    # Reference: flat 1-PHOTLAM spectrum.
    sp2 = SourceSpectrum(ConstFlux1D, amplitude=1 * units.PHOTLAM)
    _compare_spectra(sp1, sp2)
class TestTokens:
"""Test underlying parser engine."""
def teardown_module():
    """Clear all cache.

    Runs once after the tests in this module so cached catalogs,
    observation modes, and spectra do not leak into other test modules.
    """
    catalog.reset_cache()
    observationmode.reset_cache()
    spectrum.reset_cache()
| 33.447552 | 79 | 0.559011 |
9082f22e3410593d0f53f454a62bd2d756d1a9be | 554 | py | Python | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import os
from tornado.web import StaticFileHandler
from rsbroker.views import websocket
from rsbroker.views.error import NotFoundErrorHandler
# Tornado application settings: template/static dirs live next to this file.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static")
)

# URL routing table; order matters -- the catch-all error handler is last.
handlers = [
    # Http api
    # Events WebSocket API
    (r"/api/ws", websocket.BrokerServerHandler),
    # Static
    (r"/static/(.*)", StaticFileHandler),
    # Error
    (r".*", NotFoundErrorHandler)
]
| 20.518519 | 71 | 0.714801 |
9083f275a59b9bf245934e27e32ceb9469c2cb0d | 6,465 | py | Python | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
] | 4 | 2018-11-03T13:58:52.000Z | 2020-03-06T09:19:03.000Z | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
] | 92 | 2018-05-17T18:07:01.000Z | 2022-03-29T00:37:30.000Z | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
] | 4 | 2020-07-01T12:20:55.000Z | 2022-01-24T20:09:15.000Z | # -*- coding: utf-8 -*-
"""
Unit testing for command flags.
This tests the various command flags
and there helper methods.
"""
import argparse
import typing
import uuid
import pytest
from pheweb.load.command_flags import (
FLAG_CHROMOSOME,
add_chromosome_flag,
OUTPUT_COLUMN_CHROMOSOME,
FLAG_POSITION,
add_position_flag,
FLAG_REFERENCE,
add_reference_flag,
FLAG_ALTERNATIVE,
add_alternate_flag,
OUTPUT_COLUMN_REFERENCE,
OUTPUT_COLUMN_ALTERNATIVE,
FLAG_P_VALUE,
add_p_value_flag,
OUTPUT_COLUMN_P_VALUE,
FLAG_M_LOG_P_VALUE,
add_m_log_p_value_flag,
OUTPUT_COLUMN_M_LOG_P_VALUE,
add_beta_value_flag,
FLAG_BETA,
OUTPUT_COLUMN_BETA,
FLAG_SE_BETA,
add_se_beta_value_flag,
OUTPUT_COLUMN_SE_BETA,
OUTPUT_COLUMN_POSITION,
add_in_file_value_flag,
DEFAULT_IN_FILE,
add_out_file_value_flag,
DEFAULT_OUT_FILE,
add_rename_value_flag,
DEFAULT_RENAME,
add_exclude_value_flag,
FLAG_EXCLUDE,
FLAG_RENAME,
DEFAULT_EXCLUDE,
parse_exclude_args,
parse_rename_args,
)
def test_exclude_args() -> None:
    """
    Test exclude args.

    ``parse_exclude_args`` splits a comma-separated column list into a
    set; the empty string yields an empty set.

    @return: None
    """
    assert parse_exclude_args("") == set()
    assert parse_exclude_args("a") == {"a"}
    assert parse_exclude_args("a,b") == {"a", "b"}
    assert parse_exclude_args("a,b,c") == {"a", "b", "c"}
def test_rename_args() -> None:
    """
    Test rename args.

    ``parse_rename_args`` turns ``old:new`` pairs into a dict; an entry
    without a colon is invalid and raises ValueError.

    @return: None
    """
    assert not parse_rename_args("")
    assert parse_rename_args("a:b") == {"a": "b"}
    assert parse_rename_args("a:b,c:d") == {"a": "b", "c": "d"}
    with pytest.raises(ValueError):
        assert parse_rename_args("a")
def parse_harness(
    cli_argv: typing.List[str],
    parse_method: typing.Callable[[argparse.ArgumentParser], None],
):
    """
    Parse harness.

    Builds a fresh ArgumentParser, lets *parse_method* register its
    flags on it, then parses *cli_argv*.

    @param cli_argv: arguments to pass to parser
    @param parse_method: parse set up method
    @return: result of the parse
    """
    test_parser = argparse.ArgumentParser(description=f"test : {parse_method}")
    parse_method(test_parser)
    return test_parser.parse_args(cli_argv)
def test_add_chromosome() -> None:
"""
Test arguments for chromosome column.
@return: None
"""
chromosome = str(uuid.uuid4())
arguments = parse_harness([FLAG_CHROMOSOME, chromosome], add_chromosome_flag)
assert arguments.chromosome == chromosome
assert parse_harness([], add_chromosome_flag).chromosome is OUTPUT_COLUMN_CHROMOSOME
def test_add_position():
"""
Test arguments for position column.
@return: None
"""
position = str(uuid.uuid4())
arguments = parse_harness([FLAG_POSITION, position], add_position_flag)
assert arguments.position == position
assert parse_harness([], add_position_flag).position is OUTPUT_COLUMN_POSITION
def test_add_ref() -> None:
"""
Test arguments for alternative column.
@return: None
"""
reference = str(uuid.uuid4())
arguments = parse_harness([FLAG_REFERENCE, reference], add_reference_flag)
assert arguments.reference == reference
assert parse_harness([], add_reference_flag).reference is OUTPUT_COLUMN_REFERENCE
def test_add_alt() -> None:
"""
Test arguments for alternative column.
@return: None
"""
alternative = str(uuid.uuid4())
arguments = parse_harness([FLAG_ALTERNATIVE, alternative], add_alternate_flag)
assert arguments.alternative == alternative
assert (
parse_harness([], add_alternate_flag).alternative is OUTPUT_COLUMN_ALTERNATIVE
)
def test_add_p_value() -> None:
"""
Test arguments for p-value column.
@return: None
"""
p_value = str(uuid.uuid4())
arguments = parse_harness([FLAG_P_VALUE, p_value], add_p_value_flag)
assert arguments.p_value == p_value
assert parse_harness([], add_p_value_flag).p_value == OUTPUT_COLUMN_P_VALUE
def test_add_m_log_p_value() -> None:
"""
Test arguments for m log p value column.
@return: None
"""
m_log_p_value = str(uuid.uuid4())
arguments = parse_harness(
[FLAG_M_LOG_P_VALUE, m_log_p_value], add_m_log_p_value_flag
)
assert arguments.m_log_p_value == m_log_p_value
arguments = parse_harness([], add_m_log_p_value_flag)
assert arguments.m_log_p_value == OUTPUT_COLUMN_M_LOG_P_VALUE
def test_add_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_BETA, beta], add_beta_value_flag)
assert arguments.beta == beta
assert parse_harness([], add_beta_value_flag).beta == OUTPUT_COLUMN_BETA
def test_add_se_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
se_beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_SE_BETA, se_beta], add_se_beta_value_flag)
assert arguments.se_beta == se_beta
assert parse_harness([], add_se_beta_value_flag).se_beta == OUTPUT_COLUMN_SE_BETA
def test_add_exclude() -> None:
"""
Test argument for columns to exclude.
@return: None
"""
exclude = str(uuid.uuid4())
arguments = parse_harness([FLAG_EXCLUDE, exclude], add_exclude_value_flag)
assert arguments.exclude == exclude
assert parse_harness([], add_exclude_value_flag).exclude == DEFAULT_EXCLUDE
def test_add_rename() -> None:
"""
Test arguments for rename.
@return: None
"""
new_name = str(uuid.uuid4())
old_name = str(uuid.uuid4())
rename = f"{old_name}:{new_name}"
arguments = parse_harness([FLAG_RENAME, rename], add_rename_value_flag)
assert arguments.rename == rename
assert parse_harness([], add_rename_value_flag).rename == DEFAULT_RENAME
def test_parse_out_file() -> None:
"""
Test arguments for out file.
@return: None
"""
out_file = str(uuid.uuid4())
arguments = parse_harness(["--out-file", out_file], add_out_file_value_flag)
assert arguments.out_file == out_file
assert parse_harness([], add_out_file_value_flag).out_file == DEFAULT_OUT_FILE
def test_add_in_file() -> None:
"""
Test arguments for input file.
@return: None
"""
in_file = str(uuid.uuid4())
assert parse_harness([in_file], add_in_file_value_flag).in_file == in_file
assert parse_harness([], add_in_file_value_flag).in_file == DEFAULT_IN_FILE
| 26.174089 | 88 | 0.692653 |
9085232046fc5765251336d07c6534499f1401bb | 4,388 | py | Python | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
] | 558 | 2015-05-22T15:03:21.000Z | 2022-03-23T04:49:17.000Z | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
] | 1,057 | 2015-05-14T20:27:04.000Z | 2022-03-08T09:29:36.000Z | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
] | 193 | 2015-05-18T10:13:34.000Z | 2021-12-10T11:58:01.000Z | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
"""
Error correct reads based on a counting hash from a diginorm step.
Output sequences will be put in inputfile.corr.
% python scripts/error-correct-pass2 <counting.ct> <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
import sys
import os
import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType
DEFAULT_CUTOFF = 2
if __name__ == '__main__':
main()
| 35.104 | 78 | 0.66773 |
908535dac0f891e497250dce7197eb9409ed8be9 | 7,745 | py | Python | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 1 | 2021-11-16T03:45:33.000Z | 2021-11-16T03:45:33.000Z | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 4 | 2022-03-02T03:01:24.000Z | 2022-03-23T00:57:33.000Z | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 5 | 2021-07-26T08:37:42.000Z | 2021-11-16T05:41:02.000Z | import json
import pathlib
from unittest.mock import patch
from freezegun import freeze_time
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.identity.azure_ad import AzureADConfig
from tests.test_helpers import mce_helpers
FROZEN_TIME = "2021-08-24 09:00:00"
def load_test_resources(test_resources_dir):
    """Load the canned Azure AD users and groups JSON fixtures.

    Returns a ``(users, groups)`` tuple parsed from
    ``azure_ad_users.json`` and ``azure_ad_groups.json`` inside
    *test_resources_dir*.
    """
    users_path = test_resources_dir / "azure_ad_users.json"
    groups_path = test_resources_dir / "azure_ad_groups.json"

    with users_path.open() as fh:
        reference_users = json.load(fh)
    with groups_path.open() as fh:
        reference_groups = json.load(fh)

    return reference_users, reference_groups
def mocked_functions(
    test_resources_dir, mock_token, mock_users, mock_groups, mock_groups_users
):
    """Wire canned Azure AD fixture data into the patched client mocks."""
    # Fake the OAuth token so no real authentication happens.
    mock_token.return_value = "xxxxxxxx"

    # Serve the fixture users/groups as single-page iterators.
    users, groups = load_test_resources(test_resources_dir)
    mock_users.return_value = iter([users])
    mock_groups.return_value = iter([groups])

    # For simplicity, each user is placed in ALL groups: every group
    # membership query returns the full user list.
    mock_groups_users.return_value = iter([users for _ in groups])
| 39.314721 | 123 | 0.629438 |
9085eea801b451acd44298bd5d756b5655efe26d | 138 | py | Python | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 28 | 2021-03-23T09:00:33.000Z | 2022-03-10T03:55:00.000Z | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 2 | 2021-04-17T20:08:55.000Z | 2022-02-01T17:48:55.000Z | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 5 | 2021-05-19T07:35:56.000Z | 2022-01-13T02:11:50.000Z | from .builder import build_optimizers, MGE_OPTIMIZERS, build_gradmanagers
from .default_constructor import DefaultOptimizerConstructor
| 23 | 73 | 0.876812 |
908733eb70f6006bbe7cab4fd64970e3aec01842 | 8,352 | py | Python | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
] | null | null | null | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
] | null | null | null | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
from config.parser.parsing import Parser
| 53.538462 | 120 | 0.578544 |
9088061118cf617385915ed728847f4d1b206103 | 862 | py | Python | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
] | 5 | 2016-10-02T13:49:29.000Z | 2020-02-12T00:09:14.000Z | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
] | null | null | null | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
] | null | null | null | ## Requires Python v3 and pandas (pip install pandas)
## This script takes the newcastle membership csv and attempts
## to reduce the file size as much as possible through aggregation and lookups
## Two lookup files to provide library names and dates are also created.
import csv
import os
import re
from datetime import datetime
import pandas
MEMBERDATA = '..\\data\\dashboard_newcastle_members.csv'
run() | 30.785714 | 100 | 0.732019 |
9088b5572da41984c1697dbaf7d670a85f1c124c | 10,535 | py | Python | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
] | 3 | 2016-12-28T09:31:27.000Z | 2017-01-09T18:38:46.000Z | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
] | 1 | 2019-05-04T18:14:24.000Z | 2019-05-04T18:14:24.000Z | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
] | null | null | null | """Interface contract object"""
from __future__ import absolute_import
import six
import sys
import logging
from contracts.interface import ContractException, ContractNotRespected
from .extension import ID
from ..declarations import implementer
from ..verify import verifyObject
from ..interface import InterfaceClass
__all__ = (
'InterfaceContract', 'MethodContract',
'AttributeContract', 'ContractNotRespected')
def method_wrapper(element):
return func
def construct_class(iface, elements):
    """Dynamically build a ``<Iface>BoundContract`` class for *iface*.

    Attribute contracts become descriptors; everything else is wrapped
    as a contract-checked method.  The resulting class is declared to
    implement *iface*.
    """
    attrs = {'__module__': iface.__module__}
    # Slots always include the bound context and logger.
    slots = {'__context__', '__logger__'}
    for name, element in elements.items():
        slots.add(name)
        if isinstance(element, AttributeContract):
            attrs[name] = AttributeDescriptor(element)
        else:
            attrs[name] = method_wrapper(element)
    name = '%sBoundContract' % iface.__name__
    cls = type(name, (BoundInterfaceContract,), attrs)
    # NOTE(review): assigning ``__slots__`` after ``type()`` does not
    # actually enable slot storage; confirm whether this is intentional
    # (e.g. only used as documentation of expected attributes).
    cls.__slots__ = tuple(slots)
    return cls if False else implementer(iface)(cls)  # noqa: see below
| 31.541916 | 79 | 0.57608 |
908923bb1a1d3dddbedc40a59f1c9790842c688e | 3,979 | py | Python | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
] | null | null | null | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
] | null | null | null | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torchvision.transforms as transforms
from dataloader.dataloader_hourglass import heatmap_Dataloader
import os
from network import KFSGNet
import torchvision.transforms as transforms
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 200
learning_rate = 0.001
transform = transforms.Compose([
transforms.ToTensor()])
params = dict()
params['data_normalize_factor'] = 256
params['dataset_dir'] = "./"
params['rgb2gray'] = False
params['dataset'] = "heatmap_dataset_all"
params['train_batch_sz'] = 16
params['val_batch_sz'] = 1
params['sigma'] = 3
dataloaders, dataset_sizes = heatmap_Dataloader(params)
train_loader = dataloaders['train']
test_loader = dataloaders['val']
# Define your model
model = KFSGNet()
# model.load_state_dict(torch.load(
# '/media/home_bak/ziqi/park/hourglass/10heatmap5.ckpt'))
# move model to the right device
model.to(device)
model.train()
# Loss and optimizer
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#
#
# milestones[0. 200]
# [200, 300][300, 320].....[340, 400]
# gammagamma
# torch.optim.lr_scheduler.MultiStepLR(optimizer,
# milestones=[30, 60, 80, 100, 120, 140], gamma=0.5)
print(optimizer.state_dict()['param_groups'][0]['lr'])
# For updating learning rate
# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
print("start")
def calculate_mask(heatmaps_target):
    """
    Build a binary mask selecting the (sample, keypoint) heatmap planes
    that actually contain a target, i.e. whose max value is non-zero.

    :param heatmaps_target: Tensor (N, C, H, W) of target heatmaps
    :return: (mask, [N_idx, C_idx]) -- ``mask`` is a float tensor of the
        same shape, 1 on valid planes and 0 elsewhere, placed on the
        same device as the input; the index lists name those planes.
    """
    # Bug fix: the original body read the *global* ``heatmaps_targets``
    # from the training loop instead of its own parameter.
    N, C, _, _ = heatmaps_target.size()
    N_idx = []
    C_idx = []
    for n in range(N):
        for c in range(C):
            max_v = heatmaps_target[n, c, :, :].max().data
            if max_v != 0.0:
                N_idx.append(n)
                C_idx.append(c)
    mask = torch.zeros(heatmaps_target.size())
    mask[N_idx, C_idx, :, :] = 1.
    # Device-agnostic: follow the input tensor instead of hard-coding
    # ``.cuda()``, so the function also works on CPU-only machines.
    mask = mask.float().to(heatmaps_target.device)
    return mask, [N_idx, C_idx]
# def MSE(y_pred, gt):
# loss = 0
# loss += 0.5 * np.sum((y_pred - gt)**2)
# vec_gt = [[0]*3] * 5
# for w in range(4):
# vec_gt[w] = np.array([gt[w][0],
# gt[w][1]])
# vector_gt = vec_gt[1]-vec_gt[0]
# vec_pred = [[0]*3] * 5
# for v in range(4):
# vec_pred[w] = np.array([y_pred[w][0],
# y_pred[w][1]])
# vector_pred = vec_pred[1]-vec_pred[0]
# loss += (vector_gt[0]*vector_pred[1]-vector_pred[0]*vector_gt[1])**0.5
# Main training loop: MSE regression of predicted heatmaps against the
# ground-truth heatmaps, with invalid (all-zero) target planes masked out.
for epoch in range(num_epochs):
    # Running sum of per-batch losses, used for the average printed below.
    tmp = 0
    for i, (data, gt, mask, item, imgPath, heatmaps_targets) in enumerate(train_loader):
        # print(i)
        data = data.to(device)
        gt = gt.to(device)
        mask = mask.to(device)
        # 4 keypoints x (x, y) coordinates per sample.
        gt = gt.view(-1, 8)
        heatmaps_targets = heatmaps_targets.to(device)
        # NOTE(review): the dataloader-provided ``mask`` is overwritten
        # here by the heatmap-validity mask -- confirm that is intended.
        mask, indices_valid = calculate_mask(heatmaps_targets)
        # print(heatmaps_targets.shape)
        # Forward pass
        outputs = model(data)
        # Zero out planes with no target so they do not contribute to
        # the loss on either side of the comparison.
        outputs = outputs * mask
        heatmaps_targets = heatmaps_targets * mask
        # print(outputs.shape)
        loss = loss_fn(outputs, heatmaps_targets)
        tmp += loss.item()
        # exit()
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Progress report every 10 batches.
        if i % 10 == 0:
            print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}, average_loss: {:.4f}, learning_rate: {}".format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item(), tmp / (i+1), optimizer.state_dict()['param_groups'][0]['lr']))

    # Checkpoint every 10 epochs.
    if (epoch + 1) % 10 == 0:
        torch.save(model.state_dict(), '{}heatmap4.ckpt'.format(epoch + 1))
# card2 heatmap 26688
# card0 heatmap2 29009
| 27.631944 | 133 | 0.619754 |
9089cafc79c7a1e8e0abc38c3cabc190f618f305 | 1,648 | py | Python | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from getpass import getpass
from hashlib import pbkdf2_hmac
from signal import signal, SIGINT
# Install a SIGINT handler so Ctrl-C exits cleanly during the passphrase
# prompt.  NOTE(review): ``die`` is not defined in this chunk -- it is
# presumably defined earlier in the file; confirm.
signal = signal(SIGINT, die)

# Output template for iwd network config files.
iwd = """[Security]
PreSharedKey={psk}"""

# Output template for wpa_supplicant network blocks (doubled braces are
# literal braces in str.format).
supplicant = """network={{
    ssid={ssid}
    #psk={passphrase}
    psk={psk}
}}"""

parser = ArgumentParser(
    description="%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."
)
parser.add_argument("ssid", help="The SSID whose passphrase should be derived.")
parser.add_argument(
    "passphrase",
    help="The passphrase to use. If not included on the command line, passphrase will be read from standard input.",
    nargs="?",
)
parser.add_argument(
    "--iwd",
    "-i",
    dest="template",
    action="store_const",
    const=iwd,
    default=supplicant,
    help="Generate for iwd (default: generate for wpa_supplicant).",
)
args = parser.parse_args()

# Fall back to an echo-free prompt when the passphrase was not given on
# the command line.
if not args.passphrase:
    print("# reading passphrase from stdin", file=sys.stderr)
    args.passphrase = getpass(prompt="")

# WPA-PSK passphrases must be 8..63 printable ASCII characters.
if not 8 <= len(args.passphrase) <= 63:
    print("Passphrase must be 8..63 characters", file=sys.stderr)
    sys.exit(1)

passphrase = args.passphrase.encode()
# Reject control characters (codes < 32) and DEL (127).
if any(b < 32 or b == 127 for b in passphrase):
    print("Invalid passphrase character", file=sys.stderr)
    sys.exit(1)

ssid = args.ssid.encode()
# Standard WPA-PSK derivation: PBKDF2-HMAC-SHA1, 4096 rounds, 256-bit key
# with the SSID as salt.
psk = pbkdf2_hmac("sha1", passphrase, ssid, iterations=4096, dklen=32)
print(args.template.format(ssid=args.ssid, passphrase=args.passphrase, psk=psk.hex()))
| 28.912281 | 182 | 0.703277 |
908ab1d5d4950850ce0d224a0c7fe40fe59aa364 | 2,406 | py | Python | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | 2 | 2018-05-17T02:49:49.000Z | 2019-08-20T02:07:44.000Z | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | 2 | 2019-02-13T07:58:23.000Z | 2019-02-13T07:58:27.000Z | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from cms.api import copy_plugins_to_language
from cms.models import Title, Page
from cms.utils.i18n import get_language_list
| 37.59375 | 119 | 0.588944 |
908b0f1eabec4449e380288689a4979deb9e601d | 424 | py | Python | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | 1 | 2020-10-25T08:14:33.000Z | 2020-10-25T08:14:33.000Z | # Generated by Django 2.1.2 on 2018-10-14 18:37
from django.db import migrations
import picklefield.fields
| 21.2 | 72 | 0.629717 |
908bf756c481540c4c44d86144640fa2370be038 | 1,563 | py | Python | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
] | null | null | null | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
] | null | null | null | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from adsrefpipe.refparsers.CrossRefXML import CrossReftoREFs
from adsrefpipe.refparsers.ElsevierXML import ELSEVIERtoREFs
from adsrefpipe.refparsers.JATSxml import JATStoREFs
from adsrefpipe.refparsers.IOPxml import IOPtoREFs
from adsrefpipe.refparsers.SpringerXML import SPRINGERtoREFs
from adsrefpipe.refparsers.APSxml import APStoREFs
from adsrefpipe.refparsers.NatureXML import NATUREtoREFs
from adsrefpipe.refparsers.AIPxml import AIPtoREFs
from adsrefpipe.refparsers.WileyXML import WILEYtoREFs
from adsrefpipe.refparsers.NLM3xml import NLMtoREFs
from adsrefpipe.refparsers.AGUxml import AGUtoREFs
from adsrefpipe.refparsers.arXivTXT import ARXIVtoREFs
def verify(parser_name):
    """
    Map a parser name stored in the db to its reference-parser class.

    :param parser_name: parser name from db
    :return: the matching *toREFs class, or None when unrecognized
    """
    # Names are matched case-sensitively, exactly as stored in the db.
    if parser_name == 'CrossRef':
        return CrossReftoREFs
    elif parser_name == 'ELSEVIER':
        return ELSEVIERtoREFs
    elif parser_name == 'JATS':
        return JATStoREFs
    elif parser_name == 'IOP':
        return IOPtoREFs
    elif parser_name == 'SPRINGER':
        return SPRINGERtoREFs
    elif parser_name == 'APS':
        return APStoREFs
    elif parser_name == 'NATURE':
        return NATUREtoREFs
    elif parser_name == 'AIP':
        return AIPtoREFs
    elif parser_name == 'WILEY':
        return WILEYtoREFs
    elif parser_name == 'NLM':
        return NLMtoREFs
    elif parser_name == 'AGU':
        return AGUtoREFs
    elif parser_name == 'arXiv':
        return ARXIVtoREFs
    else:
        return None
| 32.5625 | 67 | 0.723608 |
908cafca02ccd9dbc79045504cbba8cbd1494065 | 12,221 | py | Python | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from cached_property import cached_property
from purl import URL
from onegov.translator_directory import _
from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler
from onegov.core.utils import linkify
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.translator_directory.collections.documents import \
TranslatorDocumentCollection
from onegov.translator_directory.collections.language import LanguageCollection
from onegov.translator_directory.collections.translator import \
TranslatorCollection
from onegov.translator_directory.constants import member_can_see, \
editor_can_see, GENDERS, ADMISSIONS, PROFESSIONAL_GUILDS, \
INTERPRETING_TYPES
def format_boolean(self, val):
    """Return the translated 'Yes'/'No' label for a boolean value."""
    # Only real booleans are accepted; truthy non-bools are a caller bug.
    assert isinstance(val, bool)
    return self.request.translate((_('Yes') if val else _('No')))
def format_admission(self, val):
    """Return the translated label for an admission state.

    ``val`` is presumably a key of the ADMISSIONS mapping -- a
    KeyError here means an unknown admission value; confirm upstream
    validation.
    """
    return self.request.translate(ADMISSIONS[val])
def show(self, attribute_name):
    """Some attributes on the translator are hidden for less privileged
    users"""
    request = self.request
    # Members and editors only see their respective whitelists; any
    # other (more privileged) role sees everything.
    if request.is_member:
        return attribute_name in member_can_see
    if request.is_editor:
        return attribute_name in editor_can_see
    return True
def color_class(self, count):
    """Return a CSS class reflecting how rarely a language is offered.

    Only the rare case is visible here: 5 or fewer translators maps to
    ``'text-orange'``.
    """
    if count <= 5:
        return 'text-orange'
    # NOTE(review): no branch for count > 5 is visible (implicitly returns
    # None); further thresholds may have been lost in extraction -- confirm
    # against the original file.
class TranslatorLayout(DefaultLayout):
class LanguageCollectionLayout(DefaultLayout):
| 31.17602 | 79 | 0.469274 |
908cc9c6b5ff8ca35a1dc06753afe50c50104b9d | 1,169 | py | Python | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
] | null | null | null | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
] | null | null | null | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import io | 33.4 | 100 | 0.544055 |
908ec7d6f30da870417cfcc9194599857d219fff | 5,861 | py | Python | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z |
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import re
import attr
from packageurl import PackageURL
import toml
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
"""
Handle Rust cargo crates
"""
TRACE = False
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def party_mapper(party, party_role):
    """
    Yield a person-type ``models.Party`` for every author-style string in
    *party*, each tagged with the given *party_role*.

    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    """
    for person_string in party:
        person_name, person_email = parse_person(person_string)
        yield models.Party(
            type=models.party_person,
            name=person_name,
            role=party_role,
            email=person_email,
        )
# Pre-compiled matchers for "Name <email>" author strings.  The first one
# requires a leading name; the second accepts a bare "<email>".
person_parser = re.compile(
    r'^(?P<name>[^\(<]+)'
    r'\s?'
    r'(?P<email><([^>]+)>)?'
).match

person_parser_no_name = re.compile(
    r'(?P<email><([^>]+)>)?'
).match


def parse_person(person):
    """
    Split an author string into a ``(name, email)`` pair, either part
    possibly ``None``.

    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    A "person" can be in the form:
      "author": "Isaac Z. Schlueter <i@izs.me>"

    For example:
    >>> p = parse_person('Barney Rubble <b@rubble.com>')
    >>> assert p == ('Barney Rubble', 'b@rubble.com')
    >>> p = parse_person('Barney Rubble')
    >>> assert p == ('Barney Rubble', None)
    >>> p = parse_person('<b@rubble.com>')
    >>> assert p == (None, 'b@rubble.com')
    """
    match = person_parser(person)
    if match:
        name = match.group('name')
    else:
        # No leading name: fall back to the email-only pattern.
        name = None
        match = person_parser_no_name(person)
    email = match.group('email')
    name = name.strip() if name else name
    email = email.strip('<> ') if email else email
    return name, email
| 28.590244 | 99 | 0.61696 |
90918aea55bbacc028653f4732ff48d1cf1a76ea | 10,268 | py | Python | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | #!/usr/bin/env python2.7
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
from datetime import datetime
import difflib
import functools
import os
import re
import subprocess
import sys
import traceback
from results import UnitResult
from helpers import *
_test_base = os.path.join(os.path.dirname(__file__), "..")
| 34.689189 | 79 | 0.602357 |
9091ee961b1819c72143e6265ce0d0dcec7d5ad2 | 19,042 | py | Python | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
] | 934 | 2020-08-13T15:32:30.000Z | 2022-03-31T20:41:21.000Z | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
] | 88 | 2020-08-13T18:59:11.000Z | 2022-03-31T23:48:18.000Z | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
] | 161 | 2020-08-13T17:28:03.000Z | 2022-03-19T14:56:34.000Z | from app import (
mythic,
links,
nginx_port,
listen_port,
mythic_admin_password,
mythic_admin_user,
default_operation_name,
mythic_db
)
import app
import asyncpg
import redis
from peewee_async import Manager
from sanic.response import json
from sanic import response
from sanic.exceptions import (
NotFound,
Unauthorized,
MethodNotSupported,
SanicException,
RequestTimeout,
)
import sys
from jinja2 import Environment, PackageLoader
from app.database_models.model import (
Operator,
Operation,
OperatorOperation,
ATTACK,
Artifact,
)
import datetime
import app.crypto as crypto
from sanic_jwt import BaseEndpoint, utils, exceptions
from sanic_jwt.decorators import scoped, inject_user
import ujson as js
from ipaddress import ip_address
from app.routes.authentication import invalidate_refresh_token
import app.database_models.model as db_model
from sanic.log import logger
from uuid import uuid4
import asyncio
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True)
class Login(BaseEndpoint):
# /static serves out static images and files
mythic.static("/static", "./app/static", name="shared_files")
mythic.static("/favicon.ico", "./app/static/favicon.ico", name="favicon")
mythic.static("/strict_time.png", "./app/static/strict_time.png", name="strict_time")
mythic.static(
"/grouped_output.png", "./app/static/grouped_output.png", name="grouped_output"
)
mythic.static(
"/no_cmd_output.png", "./app/static/no_cmd_output.png", name="no_cmd_output"
)
mythic.static("/add_comment.png", "./app/static/add_comment.png", name="add_comment")
# add links to the routes in this file at the bottom
links["index"] = mythic.url_for("index")
links["login"] = links["WEB_BASE"] + "/login"
links["logout"] = mythic.url_for("logout")
links["settings"] = mythic.url_for("settings")
| 41.21645 | 173 | 0.589014 |
9092b9fc5566c9c58a04dd93c04224cbbceb0b64 | 1,911 | py | Python | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 222 | 2017-08-19T00:51:59.000Z | 2022-02-05T19:39:33.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 103 | 2017-08-20T17:13:05.000Z | 2022-02-05T20:20:01.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 54 | 2017-08-20T17:13:00.000Z | 2022-01-14T23:51:13.000Z | from ctypes import c_int
from .dll import _bind
__all__ = [
# Enums
"SDL_BlendMode",
"SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
"SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",
"SDL_BlendOperation",
"SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
"SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
"SDL_BLENDOPERATION_MAXIMUM",
"SDL_BlendFactor",
"SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
"SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
"SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
"SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
"SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",
# Functions
"SDL_ComposeCustomBlendMode"
]
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA
SDL_ComposeCustomBlendMode = _bind("SDL_ComposeCustomBlendMode", [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation, SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation], SDL_BlendMode, added='2.0.6')
| 31.327869 | 204 | 0.791209 |
9093d4d8bd3bc3c9e386b961c6079deedbc45036 | 204 | py | Python | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
] | null | null | null | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
] | null | null | null | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
] | null | null | null | from .vizutils import viz_overlaymask, display_side2side, display_side2sidev1, stack_patches, figure2image, get_heatmap, visualize_probmaps
from .vizutils import get_heatmap_multiple, figure2image_save | 68 | 140 | 0.872549 |
90948ab3b394c7cb6e8df8160515b81630f1c311 | 4,510 | py | Python | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | [
"BSD-3-Clause"
] | null | null | null | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | [
"BSD-3-Clause"
] | null | null | null | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
site configuration for larch:
init_files: list of larch files run (in order) on startup
module_path: list of directories to search for larch code
history_file:
"""
from __future__ import print_function
import sys
import os
from os.path import exists, abspath, join
from .utils import get_homedir, nativepath
from .version import __version__ as larch_version
##
# set system-wide and local larch folders
# larchdir = sys.exec_prefix + 'share' + 'larch'
# usr_larchdir = get_homedir() + '.larch' (#unix)
# = get_homedir() + 'larch' (#win)
##
larchdir = pjoin(sys.exec_prefix, 'share', 'larch')
home_dir = get_homedir()
usr_larchdir = pjoin(home_dir, '.larch')
if os.name == 'nt':
usr_larchdir = pjoin(home_dir, 'larch')
if 'LARCHDIR' in os.environ:
usr_larchdir = nativepath(os.environ['LARCHDIR'])
##
## names (and loading order) for core plugin modules
core_plugins = ('std', 'math', 'io', 'wx', 'xray', 'xrf', 'xafs')
# frozen executables, as from cx_freeze, will have
# these paths to be altered...
if hasattr(sys, 'frozen'):
if os.name == 'nt':
try:
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = os.path.abspath(toplevel)
except:
pass
elif sys.platform.lower().startswith('darwin'):
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = pjoin(toplevel, 'Resources', 'larch')
modules_path = []
plugins_path = []
_path = [usr_larchdir, larchdir]
if 'LARCHPATH' in os.environ:
_path.extend([nativepath(s) for s in os.environ['LARCHPATH'].split(':')])
for pth in _path:
mdir = pjoin(pth, 'modules')
if exists(mdir) and mdir not in modules_path:
modules_path.append(mdir)
pdir = pjoin(pth, 'plugins')
if exists(pdir) and pdir not in plugins_path:
plugins_path.append(pdir)
# initialization larch files to be run on startup
init_files = [pjoin(usr_larchdir, 'init.lar')]
if 'LARCHSTARTUP' in os.environ:
startup = os.environ['LARCHSTARTUP']
if exists(startup):
init_files = [nativepath(startup)]
# history file:
history_file = pjoin(usr_larchdir, 'history.lar')
def make_user_larchdirs():
    """Create the user's Larch directory tree.

    Ensures ``usr_larchdir`` exists, seeds a few placeholder files in it,
    and creates the standard sub-directories, each with a short README.
    """
    placeholder_files = (
        ('init.lar', 'put custom startup larch commands:'),
        ('history.lar', 'history of larch commands:'),
        ('history_larchgui.lar', 'history of larch_gui commands:'),
    )
    sub_directories = (
        ('matplotlib', 'matplotlib may put files here'),
        ('dlls', 'put dlls here'),
        ('modules', 'put custom larch or python modules here'),
        ('plugins', 'put custom larch plugins here'),
    )

    make_dir(usr_larchdir)
    for file_name, description in placeholder_files:
        write_file(pjoin(usr_larchdir, file_name), description)
    for dir_name, description in sub_directories:
        target = pjoin(usr_larchdir, dir_name)
        make_dir(target)
        write_file(pjoin(target, 'README'), description)
def system_settings():
    """Apply system-specific environment settings and create user dirs.

    Run by the interpreter on startup.
    """
    # ubuntu / unity hack; short-circuit keeps os.uname() off non-Linux
    # platforms where it does not exist.
    on_linux = sys.platform.lower().startswith('linux')
    if on_linux and 'ubuntu' in os.uname()[3].lower():
        os.environ['UBUNTU_MENUPROXY'] = '0'
    make_user_larchdirs()
if __name__ == '__main__':
show_site_config()
| 29.86755 | 77 | 0.614634 |
909490610fb0cdfc5860262dba5b4c657bee2b6b | 2,898 | py | Python | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | [
"Apache-2.0"
] | 5 | 2020-03-22T20:21:47.000Z | 2022-03-08T07:50:25.000Z | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | [
"Apache-2.0"
] | 2 | 2020-04-09T10:49:26.000Z | 2022-03-08T04:37:27.000Z | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############
# GPathFinder: Identification of ligand pathways by a multi-objective
# genetic algorithm
#
# https://github.com/insilichem/gpathfinder
#
# Copyright 2019 Jos-Emilio Snchez Aparicio, Giuseppe Sciortino,
# Daniel Villadrich Herrmannsdoerfer, Pablo Orenes Chueca,
# Jaime Rodrguez-Guerra Pedregal and Jean-Didier Marchal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############
"""
This module contains the similarity functions that are used to discard
individuals that are not different enough.
This criterion of similarity will be applied in the case of two
``pathways`` individuals with the same score. Then, if they are similar
enough according to this module, one of them will be discarded.
"""
from __future__ import print_function, division
import logging
import numpy as np
logger = logging.getLogger(__name__)
def pathways_rmsd(ind1, ind2, subject, threshold, *args, **kwargs):
    """
    Calculate the RMSD between the positions of the ``pathways`` genes
    belonging to the two individuals under study. If the squared
    RMSD is less or equal than the squared threshold, we consider that
    the two pathways are identical and one of them will be discarded.

    Parameters
    ----------
    ind1 : gpath.base.Individual
    ind2 : gpath.base.Individual
    subject : str
        Name of Gpath ``pathway`` gene instance to measure.
    threshold : float
        Maximum RMSD value in Angstroms to consider two individuals as
        similar. If ``rmsd > threshold``, they are considered different.

    Returns
    -------
    bool
        True if ``rmsd`` is within threshold, False otherwise.
        It will always return False if the number of points of the pathway
        is not equal in the two Individuals.
    """
    coords1 = np.array([elem[:] for elem in
                        ind1.genes[subject].allele['positions']])
    coords2 = np.array([elem[:] for elem in
                        ind2.genes[subject].allele['positions']])
    # Pathways with different numbers of points cannot be superimposed
    # point-by-point -- treat them as different.
    if coords1.shape[0] != coords2.shape[0]:
        return False
    # Compare squared quantities to avoid the square root.
    rmsd_squared = _rmsd_squared(coords1, coords2)
    if rmsd_squared > threshold * threshold:
        return False
    return True
9096de4357058d79ebeafc310708bd4b4560fdc0 | 1,666 | py | Python | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | [
"MIT"
] | null | null | null | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | [
"MIT"
] | null | null | null | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from .views import (GroupListAPIView,
GroupCreateAPIView,
AgendaListAPIView,
AgendaDetailAPIView,
AgendaCreateAPIView,
AgendaPostAPIView,
agenda_create,
AgendaRefreshAPIView,
NumberInGroupAPIView,
GroupProfileDetailAPIView,
GroupProfileUpdateAPIView,
number_in_group)
urlpatterns = [
    # Group listing and creation.
    url(r'^group/$', GroupListAPIView.as_view(), name="group_list"),
    url(r'^group/create/$', GroupCreateAPIView.as_view(), name="group_create"),
    # NOTE(review): missing the leading '^' anchor -- this matches any URL
    # whose tail is "agenda-list/"; confirm whether that is intentional.
    url(r'agenda-list/$', AgendaListAPIView.as_view(), name="agenda_list"),
    # Agenda detail / creation / refresh, all scoped by group id.
    url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/detail/$', AgendaDetailAPIView.as_view(), name='agenda_detail'),
    # url(r'^create/$', AgendaCreateAPIView.as_view(), name='agenda_create'),
    url(r'^(?P<group_id>\d+)/post2/$', AgendaPostAPIView.as_view(), name='agenda_create2'), # recommended api
    url(r'^(?P<group_id>\d+)/post/$', agenda_create, name='agenda_create'),
    url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/refresh/$', AgendaRefreshAPIView.as_view(), name='agenda_refresh'),
    # Headcount endpoints (by group id, optionally scoped to a date).
    url(r'^(?P<id>\d+)/number/$', NumberInGroupAPIView.as_view(), name="number"),
    url(r'^(?P<group_id>\d+)/(?P<date>\d{4}-\d{2}-\d{2})/number/$', number_in_group, name="number2"),
    # Group profile read/update.
    url(r'^(?P<group_id>\d+)/group-profile/$', GroupProfileDetailAPIView.as_view(), name="group_profile"),
    url(r'^(?P<group_id>\d+)/group-profile/update/$', GroupProfileUpdateAPIView.as_view(), name="group_profile_update"),
]
| 57.448276 | 120 | 0.614046 |
90988846045a582c1eb61f51d1fdf6a5c9b664f2 | 312 | py | Python | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | [
"MIT"
] | null | null | null | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | [
"MIT"
] | null | null | null | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
#models Shop
from .models import Shop
from .models import Parsed_data
from .models import Img_data
from .models import Other
admin.site.register(Shop)
admin.site.register(Parsed_data)
admin.site.register(Img_data)
admin.site.register(Other)
| 22.285714 | 32 | 0.814103 |
909914b3df8b80013e491c569d64a1ce700cd6e4 | 630 | py | Python | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | [
"Apache-2.0"
] | 8 | 2022-01-19T20:47:36.000Z | 2022-03-20T05:11:04.000Z | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | [
"Apache-2.0"
] | 2 | 2022-02-17T06:14:25.000Z | 2022-02-17T08:43:57.000Z | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | [
"Apache-2.0"
] | 5 | 2022-02-15T04:16:27.000Z | 2022-03-29T01:21:41.000Z | import torch
from models.gta import GraphTemporalEmbedding
if __name__ == '__main__':
    # Pick GPU when available.  NOTE(review): ``device`` is computed but
    # never used below -- neither the model nor the tensors are moved to it.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Smoke test: a random batch of 32 sequences, 96 time steps, 122 features.
    x = torch.randn(32, 96, 122)
    model = GraphTemporalEmbedding(122, 96, 3)
    y = model(x)
    print(y.size())
    # Earlier AdaGraphSage experiment, kept commented out for reference:
    # model = AdaGraphSage(num_nodes=10, seq_len=96, label_len=48, out_len=24)
    # model = model.double().to(device)
    # x = torch.randn(32, 96, 10, requires_grad=True).double().to(device)
    # y = torch.randn(32, 48, 10, requires_grad=True).double().to(device)
    # out = model(x, y, None, None)
    # print(out.size())
909acbc6fed7077e7d615e7ea5b4fd6ba9538288 | 954 | py | Python | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | '''
Auteur : Jol Dendaletche
But : trac une figure gomtrique l'aide de la bibliothque Turtle
Le projet utilise l'objet file pour itrer le calcul de chaque nouveau point
Les coordonnes des points d'un polygone sont placs dans une file
l'algorithme consiste calculer les coordonnes d'un point pour tracer une droite qui part du premier points
de la file et passe par le deuxime en prolongeant le segment d'une fraction dtermine de la longueur entre les
deux points. Le deuxime point est remplac par le nouveau. A la prochaine itration, le segment va partir du
nouveau point pour passer par le suivant dans la file, qui sera remplac par le nouveau point et ainsi de
suite.
'''
import turtle

board = turtle.Turtle()
# Vertices of the starting polygon (a triangle).
# BUG FIX: the third vertex was missing its closing parenthesis
# ("(5, int(10*75**.5)]"), which made the file a SyntaxError.
listePoints = [(0, 0), (10, 0), (5, int(10 * 75 ** .5))]
print(listePoints)
# Move the turtle through each vertex, drawing the polygon's edges.
# NOTE(review): the module docstring describes a queue-based spiral
# algorithm that is not implemented here; only the seed polygon is drawn.
for x, y in listePoints:
    board.goto(x, y)
turtle.done()
909acc24e11a5c6671af7463f6c79ae6bbfe3286 | 20,420 | py | Python | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
| 41.588595 | 145 | 0.645495 |
909ad865d21f2537f3949dbc416292efd7136d09 | 45 | py | Python | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | [
"BSD-3-Clause"
] | null | null | null | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | [
"BSD-3-Clause"
] | null | null | null | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | [
"BSD-3-Clause"
] | null | null | null | from .model import DummyModel, ImageNetModel
| 22.5 | 44 | 0.844444 |
909b082c85db7f41252c1dd15a6d1058abd2c236 | 2,330 | py | Python | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | import numpy as np
from prml.dimreduction.pca import PCA
| 36.40625 | 101 | 0.529185 |
909b242da63999e1207271fb27d3b19ba2f0e8e9 | 11,492 | py | Python | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | [
"BSD-3-Clause"
] | null | null | null | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | [
"BSD-3-Clause"
] | 2 | 2016-02-27T13:43:15.000Z | 2018-07-18T19:44:45.000Z | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | [
"BSD-3-Clause"
] | 1 | 2017-03-05T20:44:07.000Z | 2017-03-05T20:44:07.000Z | # Authors : Alexandre Gramfort, alexandre.gramfort@telecom-paristech.fr (2011)
# Denis A. Engemann <denis.engemann@gmail.com>
# License : BSD 3-clause
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, _time_mask
from ..fixes import get_spectrogram
from .multitaper import psd_array_multitaper
def _psd_func(epoch, noverlap, n_per_seg, nfft, fs, freq_mask, func):
"""Aux function."""
return func(epoch, fs=fs, nperseg=n_per_seg, noverlap=noverlap,
nfft=nfft, window='hamming')[2][..., freq_mask, :]
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
    """Validate *inst* and pull out the ``(data, sfreq)`` pair for PSD work.

    *inst* must be a Raw, Epochs or Evoked instance; the returned array is
    restricted to the requested channels and the [tmin, tmax] window.
    """
    from ..io.base import BaseRaw
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
        raise ValueError('epochs must be an instance of Epochs, Raw, or'
                         'Evoked. Got type {0}'.format(type(inst)))

    keep_times = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
    if picks is None:
        picks = _pick_data_channels(inst.info, with_ref_meg=False)
    if proj:
        # Work on a copy so the caller's instance is not modified.
        inst = inst.copy().apply_proj()

    sfreq = inst.info['sfreq']
    if isinstance(inst, BaseRaw):
        kept_idx = np.where(keep_times)[0]
        start, stop = kept_idx[0], kept_idx[-1]
        rba = 'NaN' if reject_by_annotation else None
        data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
    elif isinstance(inst, BaseEpochs):
        data = inst.get_data()[:, picks][:, :, keep_times]
    else:  # Evoked
        data = inst.data[picks][:, keep_times]
    return data, sfreq
| 38.563758 | 79 | 0.62661 |
909b464aebeffe98a01bbc3d1080af46d979ef36 | 5,690 | py | Python | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | from zope.interface import implementer
from sqlalchemy import (
Column,
String,
Integer,
Float,
ForeignKey,
CheckConstraint,
)
from sqlalchemy.orm import relationship, backref
from clld import interfaces
from clld.db.meta import Base, CustomModelMixin
from clld.db.versioned import Versioned
from clld.db.models.common import (
Contribution, Parameter, IdNameDescriptionMixin, Language
)
from clld_glottologfamily_plugin.models import HasFamilyMixin, Family
from .interfaces import IDependency, ITransition, IStability, IDeepFamily, ISupport, IHasSupport
| 38.187919 | 100 | 0.727065 |
909b5fdd491dd149598afad1dcf2d6d1cdc7dcc1 | 600 | py | Python | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | [
"MIT"
] | 2 | 2021-08-01T13:02:41.000Z | 2021-08-01T14:39:44.000Z | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | [
"MIT"
] | null | null | null | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | [
"MIT"
] | 1 | 2021-08-01T14:39:45.000Z | 2021-08-01T14:39:45.000Z | import torch
import torch.nn as nn | 31.578947 | 99 | 0.576667 |
909b69d30b3ae1f1f238868bd4ff4b5d2afdace9 | 27,662 | py | Python | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | [
"Unlicense"
] | 5 | 2015-06-14T22:56:10.000Z | 2017-05-29T07:59:35.000Z | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | [
"Unlicense"
] | 5 | 2019-03-19T13:39:26.000Z | 2020-11-03T20:01:46.000Z | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | [
"Unlicense"
] | null | null | null | """ Twisted adapter for Kanone """
from twisted.python.failure import Failure
from twisted.internet import defer
from ..lib import Invalid
from ..util import varargs2kwargs
import logging, sys
log = logging.getLogger( __name__ )
# hacky and redundant, but it'll do for now ..
# TODO: move to proper twisted specific classes under .tx.*
# and get rid of the monkey
_python3 = sys.version_info[0]>=3
def monkeyPatch():
    """
    Patches Kanone so that any validation returns a Deferred, thus
    one can write asynchronous validators using Twisted's non-blocking API.

    Schema and ForEach fields are validated concurrently.

    Calling this more than once is a no-op: a marker attribute on the
    function itself guards against double-patching.
    """
    if getattr(monkeyPatch, '_isMonkeyPatched', False):
        return
    from ..lib import Context, PASS, MISSING
    from ..validator.core import Tag, Compose, Tmp, Item, Not, And, Or, Call, If
    from ..validator.check import Match
    from ..validator.schema import Schema, ForEach, Field
    from ..validator.web import MXLookup

    from twisted.names import client
    from twisted.names.dns import Record_MX
    from twisted.names.error import DNSNameError
    from twisted.internet.defer import TimeoutError

    # NOTE(review): ``mxLookup_resolver`` is a function-local that is never
    # referenced again in this function -- presumably mxLookup_on_value uses
    # a module-level resolver instead; confirm this is not dead code.
    mxLookup_resolver = client.Resolver('/etc/resolv.conf')

    Context.validate = context_validate
    Tag.validate = tag_validate
    # BUG FIX: was ``Compose.valdate`` (typo), which only created a stray
    # attribute and left the original Compose.validate unpatched.
    Compose.validate = compose_validate
    Tmp.validate = tmp_validate
    Item.validate = item_validate
    Not.validate = not_validate
    And.validate = and_validate
    Or.validate = or_validate
    Call.validate = call_validate
    Match.on_value = match_on_value
    If.validate = if_validate
    Schema._on_value = schema__on_value
    Schema._createContextChildren_on_value = schema__createContextChildren_on_value
    ForEach._on_value = forEach__on_value
    ForEach._createContextChildren_on_value = forEach__createContextChildren_on_value
    Field.validate = field_validate
    MXLookup.on_value = mxLookup_on_value

    monkeyPatch._isMonkeyPatched = True
from ..util import getArgSpec, getParameterNames
| 30.431243 | 132 | 0.521654 |
909bb64980267ae4a08d2d7a1f0a4d2581917497 | 1,579 | py | Python | sandbox/graph-size.py | maarten1983/khmer | 417aaa57f0659685c01887a6910de1c08d0a73e5 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T15:12:44.000Z | 2019-11-02T15:12:44.000Z | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
] | null | null | null | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import khmer
import sys
import screed
import os.path
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fasta_iter
K = 32
HASHTABLE_SIZE = int(4e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
GROUPSIZE = 100
###
if __name__ == '__main__':
main()
| 23.567164 | 76 | 0.664345 |
909c067225e930569a068986504ae450bf7106ff | 3,187 | py | Python | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 4 | 2020-11-12T19:37:06.000Z | 2021-12-14T01:38:39.000Z | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 96 | 2020-09-08T05:17:17.000Z | 2022-03-31T23:12:51.000Z | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 2 | 2021-03-03T23:02:40.000Z | 2021-06-17T23:33:05.000Z | """
Fetches demand statistics.
Modified from Dan Zhao
Original article:
https://yaledailynews.com/blog/2020/01/10/yales-most-popular-courses/
Github:
https://github.com/iamdanzhao/yale-popular-classes
README:
https://github.com/iamdanzhao/yale-popular-classes/blob/master/data-guide/course_data_guide.md
"""
import argparse
from multiprocessing import Pool
from typing import List, Tuple
import ujson
from ferry import config
from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg
from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates
from ferry.includes.tqdm import tqdm
def handle_season_subject_demand(demand_args: Tuple[str, str, List[str], List[str]]):
"""
Handler for fetching subject codes to be passed into Pool()
"""
demand_season, demand_subject_code, demand_subject_codes, demand_dates = demand_args
courses = fetch_season_subject_demand(
demand_season, demand_subject_code, demand_subject_codes, demand_dates
)
return courses
if __name__ == "__main__":
# Set season
# Pass using command line arguments
# Examples: 202001 = 2020 Spring, 201903 = 2019 Fall
# If no season is provided, the program will scrape all available seasons
parser = argparse.ArgumentParser(description="Import demand stats")
add_seasons_args(parser)
args = parser.parse_args()
# list of seasons previously from fetch_seasons.py
with open(f"{config.DATA_DIR}/demand_seasons.json", "r") as f:
all_viable_seasons = ujson.load(f)
seasons = parse_seasons_arg(args.seasons, all_viable_seasons)
print("Retrieving subjects list... ", end="")
with open(f"{config.DATA_DIR}/demand_subjects.json", "r") as f:
subjects = ujson.load(f)
subject_codes = sorted(list(subjects.keys()))
print("ok")
# set up parallel processing pool
with Pool(processes=64) as pool:
for season in seasons:
print(f"Retrieving demand by subject for season {season}")
dates = get_dates(season)
pool_args = [
(season, subject_code, subject_codes, dates)
for subject_code in subject_codes
]
season_courses = []
# use imap_unordered to report to tqdm
with tqdm(total=len(pool_args), desc="Subjects retrieved") as pbar:
for i, result in enumerate(
pool.imap_unordered(handle_season_subject_demand, pool_args)
):
pbar.update()
season_courses.append(result)
# flatten season courses
season_courses = [x for y in season_courses for x in y]
# sort courses by title (for consistency with ferry-data)
season_courses = sorted(season_courses, key=lambda x: x["title"])
with open(f"{config.DATA_DIR}/demand_stats/{season}_demand.json", "w") as f:
ujson.dump(season_courses, f, indent=4)
| 29.509259 | 94 | 0.671478 |
909dc9969f5cc018e88da564d8e3efacb5bc1be6 | 406 | py | Python | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
] | 2 | 2021-05-06T17:02:21.000Z | 2021-05-19T19:41:21.000Z | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
] | 9 | 2019-08-01T18:49:35.000Z | 2021-04-01T12:52:35.000Z | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
] | 18 | 2019-07-23T16:26:17.000Z | 2022-01-21T10:33:41.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from qxf2_scheduler import models
from qxf2_scheduler import db
from qxf2_scheduler.__init__ import app
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
migrate=Migrate(app, db,render_as_batch=True)
manager=Manager(app)
manager.add_command('db',MigrateCommand)
if __name__ == "__main__":
manager.run() | 25.375 | 48 | 0.830049 |
909e429cd3c93b342a1a4e97e4084847d6b07a78 | 3,476 | py | Python | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
] | 15 | 2019-07-31T11:48:28.000Z | 2022-02-25T13:55:23.000Z | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
] | null | null | null | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
] | 14 | 2019-02-20T21:44:39.000Z | 2022-02-16T11:35:27.000Z | # import packages
import requests
import pandas as pd
import time
from functions import *
# limit per sity
max_results_per_city = 100
# db of city
city_set = ['New+York','Toronto','Las+Vegas']
# job roles
job_set = ['business+analyst','data+scientist']
# file num
file = 1
# from where to skip
SKIPPER = 0
# loop on all cities
for city in city_set:
# for each job role
for job_qry in job_set:
# count
cnt = 0
startTime = time.time()
# skipper
if(file > SKIPPER):
# dataframe
df = pd.DataFrame(columns = ['unique_id', 'city', 'job_qry','job_title', 'company_name', 'location', 'summary', 'salary', 'link', 'date', 'full_text'])
# for results
for start in range(0, max_results_per_city, 10):
# get dom
page = requests.get('http://www.indeed.com/jobs?q=' + job_qry +'&l=' + str(city) + '&start=' + str(start))
#ensuring at least 1 second between page grabs
time.sleep(1)
#fetch data
soup = get_soup(page.text)
divs = soup.find_all(name="div", attrs={"class":"row"})
# if results exist
if(len(divs) == 0):
break
# for all jobs on a page
for div in divs:
#specifying row num for index of job posting in dataframe
num = (len(df) + 1)
cnt = cnt + 1
#job data after parsing
job_post = []
#append unique id
job_post.append(div['id'])
#append city name
job_post.append(city)
#append job qry
job_post.append(job_qry)
#grabbing job title
job_post.append(extract_job_title(div))
#grabbing company
job_post.append(extract_company(div))
#grabbing location name
job_post.append(extract_location(div))
#grabbing summary text
job_post.append(extract_summary(div))
#grabbing salary
job_post.append(extract_salary(div))
#grabbing link
link = extract_link(div)
job_post.append(link)
#grabbing date
job_post.append(extract_date(div))
#grabbing full_text
job_post.append(extract_fulltext(link))
#appending list of job post info to dataframe at index num
df.loc[num] = job_post
#debug add
write_logs(('Completed =>') + '\t' + city + '\t' + job_qry + '\t' + str(cnt) + '\t' + str(start) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
#saving df as a local csv file
df.to_csv('jobs_' + str(file) + '.csv', encoding='utf-8')
else:
#debug add
write_logs(('Skipped =>') + '\t' + city + '\t' + job_qry + '\t' + str(-1) + '\t' + str(-1) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
# increment file
file = file + 1
| 29.709402 | 183 | 0.467779 |
909f78a9a426fedd3532cae3c362b0e27f684e37 | 4,973 | py | Python | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
] | 5 | 2021-01-24T13:19:45.000Z | 2021-04-05T15:49:35.000Z | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
] | null | null | null | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
] | null | null | null | # Import Libraries
import numpy as np
import cv2
import argparse
import time
# Import User Libraries
import L0_helpers
# Image File Path
image_r = "images/flowers.jpg"
image_w = "out_serial.png"
# L0 minimization parameters
kappa = 2.0
_lambda = 2e-2
# Verbose output
verbose = False
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(
description="Serial implementation of image smoothing via L0 gradient minimization")
parser.add_argument('image_r', help="input image file")
parser.add_argument('image_w', help="output image file")
parser.add_argument('-k', type=float, default=2.0,
metavar='kappa', help='updating weight (default 2.0)')
parser.add_argument('-l', type=float, default=2e-2,
metavar='lambda', help='smoothing weight (default 2e-2)')
parser.add_argument('-v', '--verbose', action='store_true',
help='enable verbose logging for each iteration')
args = parser.parse_args()
L0_smooth(args.image_r, args.image_w, args.k, args.l, args.verbose)
| 26.593583 | 90 | 0.575508 |
909f8409bcfac0d98c71ec79e9110765c9b7b295 | 2,565 | py | Python | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
] | 9 | 2020-09-26T23:52:49.000Z | 2021-10-04T00:08:23.000Z | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
] | null | null | null | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
] | 2 | 2021-04-06T13:02:24.000Z | 2021-12-06T09:03:24.000Z | ### Interpretable cnn for big five personality traits using audio data ###
### This script downsamples 41000 kz signal into 4000 kz signal ###
from __future__ import absolute_import, division, print_function
import pathlib
import random
import csv
import numpy as np
from scipy.io import wavfile
import tensorflow as tf
import itertools
from scipy import stats
### functions for mapping ###
### down sample the data ###
data = []
labels = []
folder_path = '/...path/to/wav/data/folder/'
folder_path = pathlib.Path(folder_path)
files_path = list(folder_path.glob('*.wav'))
files_path = [str(path) for path in files_path]
no_of_samples = len(files_path)
### load data labels ###
with open('/...path/to/.csv/labels/file', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
data.append(row)
for i in range(len(files_path)):
file_1 = files_path[i]
file_1 = file_1.split("/")[5]
file_name_1 = file_1[:-4]
new_filename_1 = file_name_1 + '.mp4'
label_1 = []
label_2 = []
matching = [s for s in data if new_filename_1 in s]
label_1= np.delete(matching,[0],axis=1)
label_2 = label_1[0,:]
label_2 = [float(i) for i in label_2]
labels.append(label_2)
### dataset pipeline ###
ds = tf.data.Dataset.from_tensor_slices((files_path, labels))
data_ds = ds.map(get_wav)
ds = data_ds.shuffle(buffer_size=wavfiles_count)
ds = ds.repeat()
ds = ds.batch(1)
### prefetch the data batches in the background ###
ds = ds.prefetch(buffer_size=1)
iterator = ds.make_one_shot_iterator()
next_ele = iterator.get_next()
features_4k = []
labels_4k = []
with tf.Session() as sess:
for _ in range(len(files_path)):
t_features, t_labels = sess.run(next_ele)
features_4k.append(t_features)
labels_4k.append(t_labels)
np.save('.../save/path/',features_4k)
np.save('.../save/path/',labels_4k)
print('Completed')
| 29.482759 | 121 | 0.670955 |
90a1865751cb26e76fdfe2385bd5686fe05ca8bb | 1,858 | py | Python | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pprint
from abc import abstractmethod
class _ModelRepoObjectPrinter(object):
| 28.584615 | 91 | 0.697524 |
90a2c66069c33df69aa851c8c0f49466dd43d14e | 2,127 | py | Python | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
] | null | null | null | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
] | null | null | null | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for model_search.search.common."""
from absl.testing import parameterized
from model_search.search import common
import tensorflow.compat.v2 as tf
if __name__ == "__main__":
tf.enable_v2_behavior()
tf.test.main()
| 32.227273 | 74 | 0.649741 |
90a3029cbc5a3d0ba677696927ab7f1da401c62e | 588 | py | Python | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
] | 5 | 2016-05-13T15:23:41.000Z | 2019-05-29T08:23:25.000Z | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
] | null | null | null | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
] | 2 | 2016-06-08T08:22:42.000Z | 2019-02-25T08:46:54.000Z | import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used."""
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
| 34.588235 | 72 | 0.681973 |
90a3bca5369f1537b322d1766cb9151ec9a0af0c | 3,181 | py | Python | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | from torchvision.models import resnet18
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch
import pdb
##############################
# Encoder
##############################
##############################
# Generator
##############################
##############################
# Discriminator
##############################
| 30.586538 | 106 | 0.563345 |
90a433b0faab6ec973b072f69d11760a7c0bb8ef | 3,381 | py | Python | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
] | null | null | null | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
] | null | null | null | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
] | null | null | null | from oem_framework.models.core import ModelRegistry
from oem_framework.plugin import Plugin
from oem_framework.storage import ProviderStorage
from oem_storage_file.core.base import BaseFileStorage
from oem_storage_file.database import DatabaseFileStorage
import appdirs
import os
#
# Index methods
#
#
# Item methods
#
#
# Private methods
#
| 27.266129 | 89 | 0.615794 |
90a450c6bb8a1da60bd0c096428df1ba30321115 | 1,565 | py | Python | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T09:16:32.000Z | 2020-07-22T09:16:32.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
| 30.686275 | 79 | 0.709904 |
90a4ede6bfdb471d923545a3e19b34b37a9df384 | 7,038 | py | Python | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
] | null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
] | null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
] | null | null | null | from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
| 34.331707 | 90 | 0.596903 |
90a5135d7b2c7cb2a555e6f77c99a227c0fdaa11 | 2,386 | py | Python | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | 1 | 2018-05-08T09:26:45.000Z | 2018-05-08T09:26:45.000Z | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | null | null | null | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | 1 | 2020-12-13T18:04:00.000Z | 2020-12-13T18:04:00.000Z | import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
| 33.605634 | 79 | 0.723386 |
90a586abf2c437f6ccd419108bdf5f296a7fed74 | 5,630 | py | Python | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
] | null | null | null | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
] | null | null | null | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
] | null | null | null | from tests.base import TestCase, main, assets
from ocrd_models.ocrd_page import (
AlternativeImageType,
PcGtsType,
PageType,
TextRegionType,
TextLineType,
WordType,
GlyphType,
parseString,
parse,
to_xml
)
simple_page = """\
<PcGts xmlns="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15 http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15/pagecontent.xsd">
<Metadata>
<Creator>OCR-D</Creator>
<Created>2016-09-20T11:09:27.041+02:00</Created>
<LastChange>2018-04-25T17:44:49.605+01:00</LastChange>
</Metadata>
<Page
imageFilename="https://github.com/OCR-D/assets/raw/master/data/kant_aufklaerung_1784/data/OCR-D-IMG/INPUT_0017.tif"
imageWidth="1457"
imageHeight="2083"
type="content">
<TextRegion type="heading" id="r_1_1" custom="readingOrder {index:0;} structure {type:heading;}">
<Coords points="113,365 919,365 919,439 113,439"/>
<TextLine id="tl_1" primaryLanguage="German" custom="readingOrder {index:0;} textStyle {offset:0; length:26;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,366 918,366 918,438 114,438"/>
<Baseline points="114,429 918,429"/>
<Word id="w_w1aab1b1b2b1b1ab1" language="German" custom="readingOrder {index:0;} textStyle {offset:0; length:11;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,368 442,368 442,437 114,437"/>
<TextEquiv conf="0.987654321">
<Unicode>Berliniche</Unicode>
</TextEquiv>
</Word>
</TextLine>
</TextRegion>
</Page>
</PcGts>
"""
# pylint: disable=protected-access
if __name__ == '__main__':
main()
| 43.307692 | 298 | 0.649556 |
90a811a1c9219aef26a6c2b2f33c1210f92378af | 643 | py | Python | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
] | 19 | 2018-04-05T23:33:33.000Z | 2022-03-24T00:18:20.000Z | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
] | 13 | 2018-01-10T23:31:11.000Z | 2018-07-20T12:55:02.000Z | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
] | 5 | 2018-02-12T05:33:19.000Z | 2019-09-21T22:43:02.000Z | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 15:35:50 2014
@author: rich
"""
import networkx as nx
# assign component IDs to graph components, id=0 is giant component
| 26.791667 | 82 | 0.659409 |
90a821eadcd600fc9ceb85786e62d6539b2c7ae3 | 9,603 | py | Python | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | 5 | 2019-12-15T09:47:02.000Z | 2022-03-16T03:18:55.000Z | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | null | null | null | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | 1 | 2021-03-27T08:11:53.000Z | 2021-03-27T08:11:53.000Z | #!/usr/bin/python
"""composes the config from user definitions."""
import argparse
import os
import users
import users.__config__
import importlib
import csv
# file indicators
IND_DELIM = "_"
USER_INDICATOR = "user" + IND_DELIM
VLAN_INDICATOR = "vlan" + IND_DELIM
AUTH_PHASE_ONE = "PEAP"
AUTH_PHASE_TWO = "MSCHAPV2"
def _get_mod(name):
"""import the module dynamically."""
return importlib.import_module("users." + name)
def _get_by_indicator(indicator):
"""get by a file type indicator."""
return [x for x in sorted(users.__all__) if x.startswith(indicator)]
def _common_call(common, method, entity):
"""make a common mod call."""
obj = entity
if common is not None and method in dir(common):
call = getattr(common, method)
if call is not None:
obj = call(obj)
return obj
def check_object(obj):
"""Check an object."""
return obj.check()
def _process(output):
"""process the composition of users."""
common_mod = None
try:
common_mod = _get_mod("common")
print("loaded common definitions...")
except Exception as e:
print("defaults only...")
vlans = None
meta = ConfigMeta()
for v_name in _get_by_indicator(VLAN_INDICATOR):
print("loading vlan..." + v_name)
for obj in _load_objs(v_name, users.__config__.VLAN):
if vlans is None:
vlans = {}
if not check_object(obj):
exit(-1)
num_str = str(obj.num)
for vk in vlans.keys():
if num_str == vlans[vk]:
print("vlan number defined multiple times...")
exit(-1)
vlans[obj.name] = num_str
if obj.initiate is not None and len(obj.initiate) > 0:
for init_to in obj.initiate:
meta.vlan_to_vlan(init_to)
if vlans is None:
raise Exception("missing required config settings...")
meta.all_vlans = vlans.keys()
store = Store()
for f_name in _get_by_indicator(USER_INDICATOR):
print("composing..." + f_name)
for obj in _load_objs(f_name, users.__config__.Assignment):
obj = _common_call(common_mod, 'ready', obj)
key = f_name.replace(USER_INDICATOR, "")
if not key.isalnum():
print("does not meet naming requirements...")
exit(-1)
vlan = obj.vlan
if vlan not in vlans:
raise Exception("no vlan defined for " + key)
store.add_vlan(vlan, vlans[vlan])
meta.vlan_user(vlan, key)
fqdn = vlan + "." + key
if not check_object(obj):
print("did not pass check...")
exit(-1)
if obj.disabled:
print("account is disabled")
continue
macs = sorted(obj.macs)
password = obj.password
bypassed = sorted(obj.bypassed())
owned = sorted(obj.owns)
# meta checks
meta.user_macs(macs)
if not obj.inherits:
meta.password(password)
meta.extra(bypassed)
meta.extra(owned)
store.add_user(fqdn, macs, password)
if obj.mab_only:
store.set_mab(fqdn)
if len(bypassed) > 0:
for m in bypassed:
store.add_mab(m, obj.bypass_vlan(m))
user_all = []
for l in [obj.macs, obj.owns, bypassed]:
user_all += list(l)
store.add_audit(fqdn, sorted(set(user_all)))
meta.verify()
# audit outputs
with open(output + "audit.csv", 'w') as f:
csv_writer = csv.writer(f, lineterminator=os.linesep)
for a in sorted(store.get_tag(store.audit)):
p = a[0].split(".")
for m in a[1]:
csv_writer.writerow([p[1], p[0], m])
# eap_users and preauth
manifest = []
with open(output + "eap_users", 'w') as f:
for u in store.get_eap_user():
f.write('"{}" {}\n\n'.format(u[0], AUTH_PHASE_ONE))
f.write('"{}" {} hash:{} [2]\n'.format(u[0], AUTH_PHASE_TWO, u[1]))
write_vlan(f, u[2])
for u in store.get_eap_mab():
up = u[0].upper()
f.write('"{}" MD5 "{}"\n'.format(up, up))
write_vlan(f, u[1])
manifest.append((u[0], u[0]))
for u in store.get_tag(store.umac):
manifest.append((u[0], u[1]))
with open(output + "manifest", 'w') as f:
for m in sorted(manifest):
f.write("{}.{}\n".format(m[0], m[1]).lower())
def write_vlan(f, vlan_id):
"""Write vlan assignment for login."""
f.write('radius_accept_attr=64:d:13\n')
f.write('radius_accept_attr=65:d:6\n')
f.write('radius_accept_attr=81:s:{}\n\n'.format(vlan_id))
def main():
"""main entry."""
success = False
try:
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, required=True)
args = parser.parse_args()
_process(args.output)
success = True
except Exception as e:
print('unable to compose')
print(str(e))
if success:
print("success")
exit(0)
else:
print("failure")
exit(1)
if __name__ == "__main__":
main()
| 30.389241 | 79 | 0.53754 |
90a9c694ad7055aeb7e214346c75ba596c28d602 | 3,673 | py | Python | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | # Import the Twython class
from twython import Twython, TwythonStreamer
import json
# import pandas as pd
import csv
import datetime
# Create a class that inherits TwythonStreamer
if __name__ == "__main__":
main()
| 33.390909 | 88 | 0.58263 |
90aa48820bf97867a9816268e697f65885c29466 | 389 | py | Python | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
import sys;
from yaml import load, dump, load_all
from cassandra_attributes import *
if __name__ == "__main__":
main()
| 25.933333 | 109 | 0.637532 |
90aa5dbc6e140871e083e339d177b3478bf2b89d | 526 | py | Python | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
] | null | null | null | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
] | null | null | null | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
] | null | null | null | import pytest
import itertools
# Cartesian product of file names and extensions
# e.g. README.txt, README.md, CHANGELOG.txt, CHANGELOG.md ...
file_extensions = ['txt', 'md']
names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE', 'CODE_OF_CONDUCT']
exempt_files = [('.'.join(x)) for x in itertools.product(names, file_extensions)]
| 35.066667 | 81 | 0.709125 |
90aa7fec2a73694bfef5aa1b7617bf2c7fb170fa | 1,570 | py | Python | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
] | 76 | 2015-01-24T19:18:31.000Z | 2021-03-11T11:17:14.000Z | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
] | 8 | 2015-01-24T18:59:57.000Z | 2017-06-07T13:07:34.000Z | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
] | 21 | 2015-01-24T18:56:52.000Z | 2021-03-10T14:33:14.000Z | import sys
import unittest
sys.path.append("../main")
from sshtransport import *
| 32.708333 | 107 | 0.658599 |
90ab146abe91415bc0bc793fedf75c04fb9406e9 | 7,357 | py | Python | activity-classification/main_scenario_baseline.py | bstollnitz/grad-school-portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
] | 2 | 2019-10-24T16:40:44.000Z | 2020-06-21T03:56:18.000Z | activity-classification/main_scenario_baseline.py | bstollnitz/portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
] | null | null | null | activity-classification/main_scenario_baseline.py | bstollnitz/portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
] | null | null | null | import random
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import utils_graph
import utils_io
import utils_nn
from feed_forward import FeedForward
from hyperparameters import Hyperparameters
from signal_data import SignalData
from signal_dataset import SignalDataset
PLOTS_FOLDER = 'plots'
USE_CUDA = torch.cuda.is_available()
def _train_ff_network(hyperparameter_dict: dict,
data: SignalData) -> Tuple[FeedForward, List, List, List, List]:
"""Trains a feed-forward network using the specified hyperparameters.
"""
# Ensure reproducibility by giving PyTorch the same seed every time we train.
torch.manual_seed(1)
# Print hyperparameters.
print(f'Hyperparameters: {hyperparameter_dict}')
# Get hyperparameters.
learning_rate = hyperparameter_dict['learning_rate']
batch_size = hyperparameter_dict['batch_size']
optimizer_str = hyperparameter_dict['optimizer']
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
full_train_labels = data.train_labels - 1
# Get generators.
signal_dataset = SignalDataset(data.train_signals, full_train_labels)
(training_generator, validation_generator) = utils_nn.get_trainval_generators(
signal_dataset, batch_size, num_workers=0, training_fraction=0.8)
# Crete feed forward network.
input_size = data.num_timesteps * data.num_components
feed_forward = FeedForward(input_size, input_size, data.num_activity_labels)
print(feed_forward)
# Parameters should be moved to GPU before constructing the optimizer.
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
feed_forward = feed_forward.to(device)
# Get optimizer.
optimizer = None
if optimizer_str == 'adam':
optimizer = torch.optim.Adam(feed_forward.parameters(), lr=learning_rate)
elif optimizer_str == 'sgd':
optimizer = torch.optim.SGD(feed_forward.parameters(), lr=learning_rate)
else:
raise Exception(f'Specified optimizer not valid: {optimizer_str}')
training_accuracy_list = []
training_loss_list = []
validation_accuracy_list = []
validation_loss_list = []
max_epochs = 10
for epoch in range(max_epochs):
print(f'Epoch {epoch}')
# Training data.
(training_accuracy, training_loss) = utils_nn.fit(feed_forward,
training_generator, optimizer, USE_CUDA)
training_accuracy_list.append(training_accuracy)
training_loss_list.append(training_loss)
# Validation data.
(validation_accuracy, validation_loss) = utils_nn.evaluate(feed_forward,
validation_generator, 'Validation', USE_CUDA)
validation_accuracy_list.append(validation_accuracy)
validation_loss_list.append(validation_loss)
return (feed_forward, training_accuracy_list, training_loss_list,
validation_accuracy_list, validation_loss_list)
def _get_ff_hyperparameters() -> Hyperparameters:
"""Returns hyperparameters used to tune the feed-forward network.
"""
# First pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.1, 0.01, 0.001],
'batch_size': [32, 64, 128],
'optimizer': ['adam', 'sgd']
})
# Best:
# optimizer: sgd, batch size: 64, learning rate: 0.1
# Second pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.05, 0.1, 0.2],
'batch_size': [16, 32, 64],
'optimizer': ['sgd']
})
# Best:
# optimizer: sgd, batch size: 16, learning rate: 0.1
return hyperparameter_values
def _tune_ff_hyperparameters(data: SignalData) -> None:
"""Classifies temporal signals using a feed-forward network.
"""
print(' Tuning hyperparameters.')
start_time = time.time()
# Hyperparameters to tune.
hyperparameter_values = _get_ff_hyperparameters()
hyperparameter_combinations = hyperparameter_values.sample_combinations()
# Create Tensorboard writer.
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
# Hyperparameter loop.
for hyperparameter_dict in hyperparameter_combinations:
(_, _, _, validation_accuracy_list, _) = _train_ff_network(
hyperparameter_dict, data)
writer.add_hparams(hyperparameter_dict,
{'hparam/signals/validation_accuracy': validation_accuracy_list[-1]})
utils_io.print_elapsed_time(start_time, time.time())
def _test_ff_network(feed_forward: FeedForward, signal_data: SignalData,
hyperparameter_dict: dict) -> Tuple[float, float]:
"""Returns accuracy and loss of specified network for specified test data
and specified hyperparameters.
"""
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
test_labels = signal_data.test_labels - 1
# Get test generator.
batch_size = hyperparameter_dict['batch_size']
test_data = SignalDataset(signal_data.test_signals, test_labels)
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 0}
test_generator = data.DataLoader(test_data, **params)
(test_avg_accuracy, test_avg_loss) = utils_nn.evaluate(feed_forward,
test_generator, 'Test', USE_CUDA)
return (test_avg_accuracy, test_avg_loss)
def _test_best_ff_hyperparameters(data: SignalDataset) -> None:
"""Use network with best hyperparameters to predict labels for test data.
Produces accuracy and loss graphs for training and validation data, as
well as accuracy and loss values for test data.
"""
hyperparameter_dict = {
'learning_rate': 0.1,
'batch_size': 16,
'optimizer': 'sgd',
}
(feed_forward, training_accuracy_list,
training_loss_list,
validation_accuracy_list,
validation_loss_list) = _train_ff_network(hyperparameter_dict,
data)
utils_graph.graph_nn_results(training_accuracy_list, validation_accuracy_list,
f'Training and validation accuracy of classification of temporal signals',
'Accuracy', PLOTS_FOLDER, f'signals_accuracy.html')
utils_graph.graph_nn_results(training_loss_list, validation_loss_list,
f'Training and validation loss of classification of temporal signals',
'Loss', PLOTS_FOLDER, f'signals_loss.html')
_test_ff_network(feed_forward, data, hyperparameter_dict)
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
num_epochs_train_val = len(training_accuracy_list)
for i in range(num_epochs_train_val):
writer.add_scalars(f'signals/accuracy', {
'training': training_accuracy_list[i],
'validation': validation_accuracy_list[i]
}, i)
writer.add_scalars(f'signals/loss', {
'training': training_loss_list[i],
'validation': validation_loss_list[i]
}, i)
# Test accuracy: 87.25%
# Test loss: 0.45
def scenario1(data: SignalData) -> None:
"""Uses a simple feed forward network to classify the raw signal.
"""
print('Scenario 1: feed forward network on raw signal')
# _tune_ff_hyperparameters(data)
_test_best_ff_hyperparameters(data) | 35.887805 | 85 | 0.703819 |
90ab4c6f6273b660fe6334ebc9b6fb8fce97ce8e | 868 | py | Python | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | input = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
count = 0
for i in input.strip().split("\n\n"):
if validate(i):
count += 1
print(count) | 25.529412 | 122 | 0.615207 |
90ad0d873a774414aef935d258105887a6980e80 | 3,322 | py | Python | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
] | null | null | null | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
] | null | null | null | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
] | 1 | 2021-06-24T10:21:43.000Z | 2021-06-24T10:21:43.000Z | import os.path as osp
from unittest import TestCase
import pytest
from flit_core.common import (
Module, get_info_from_module, InvalidVersion, NoVersionError, check_version,
normalize_file_permissions, Metadata
)
samples_dir = osp.join(osp.dirname(__file__), 'samples')
| 35.340426 | 112 | 0.599639 |
90af463579adb14e899b746a24caf95a35d80b1b | 3,017 | py | Python | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | import datetime
import logging
from typing import Optional
from betfairlightweight.resources.bettingresources import MarketBook, MarketCatalogue
from .blotter import Blotter
from ..events import events
logger = logging.getLogger(__name__)
| 33.153846 | 88 | 0.670534 |
90b067d91d1317f4e26b80f4ccf8b819d42bc981 | 206 | py | Python | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
] | 1 | 2021-12-14T17:08:24.000Z | 2021-12-14T17:08:24.000Z | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
] | null | null | null | import pytest
from fastapi.testclient import TestClient
from {{cookiecutter.project_name}}.app import app
| 18.727273 | 49 | 0.757282 |
90b264bddefd9c5d8b81c5073da1b99d48704da6 | 2,228 | py | Python | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | 9 | 2020-01-02T23:07:36.000Z | 2022-01-26T10:04:04.000Z | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | null | null | null | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | 1 | 2020-01-02T23:08:26.000Z | 2020-01-02T23:08:26.000Z |
import itertools
import multiprocessing
import json
import numpy as np
from tqdm import tqdm
from lp_generators.features import coeff_features, solution_features
from lp_generators.performance import clp_simplex_performance
from search_operators import lp_column_neighbour, lp_row_neighbour
from seeds import cli_seeds
from search_common import condition, objective, start_instance
run()
| 33.253731 | 81 | 0.685817 |
90b42e2cf853da75296b6d0c2d2e8e3942e4a7bb | 1,066 | py | Python | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
] | 4 | 2018-06-05T14:07:56.000Z | 2021-04-17T12:09:23.000Z | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
] | 1 | 2018-07-10T19:53:54.000Z | 2018-07-10T19:58:38.000Z | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
] | 2 | 2020-01-29T10:51:11.000Z | 2021-11-11T21:37:24.000Z | import unittest
from iterable_collections import collect
| 29.611111 | 68 | 0.605066 |
90b5ceb756a46b298c1cfb2d69501dea6821b502 | 8,354 | py | Python | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
] | 1 | 2021-04-12T16:07:42.000Z | 2021-04-12T16:07:42.000Z | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
] | null | null | null | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
] | 1 | 2021-04-12T16:07:45.000Z | 2021-04-12T16:07:45.000Z | from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Variable
from parcels import AdvectionRK4, AdvectionEE, AdvectionRK45
from argparse import ArgumentParser
import numpy as np
import math # NOQA
import pytest
from datetime import timedelta as delta
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def peninsula_fieldset(xdim, ydim, mesh='flat'):
"""Construct a fieldset encapsulating the flow field around an
idealised peninsula.
:param xdim: Horizontal dimension of the generated fieldset
:param xdim: Vertical dimension of the generated fieldset
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical: Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat (default): No conversion, lat/lon are assumed to be in m.
The original test description can be found in Fig. 2.2.3 in:
North, E. W., Gallego, A., Petitgas, P. (Eds). 2009. Manual of
recommended practices for modelling physical - biological
interactions during fish early life.
ICES Cooperative Research Report No. 295. 111 pp.
http://archimer.ifremer.fr/doc/00157/26792/24888.pdf
To avoid accuracy problems with interpolation from A-grid
to C-grid, we return NetCDF files that are on an A-grid.
"""
# Set Parcels FieldSet variables
# Generate the original test setup on A-grid in m
domainsizeX, domainsizeY = (1.e5, 5.e4)
dx, dy = domainsizeX / xdim, domainsizeY / ydim
La = np.linspace(dx, 1.e5-dx, xdim, dtype=np.float32)
Wa = np.linspace(dy, 5.e4-dy, ydim, dtype=np.float32)
u0 = 1
x0 = domainsizeX / 2
R = 0.32 * domainsizeX / 2
# Create the fields
x, y = np.meshgrid(La, Wa, sparse=True, indexing='xy')
P = (u0*R**2*y/((x-x0)**2+y**2)-u0*y) / 1e3
U = u0-u0*R**2*((x-x0)**2-y**2)/(((x-x0)**2+y**2)**2)
V = -2*u0*R**2*((x-x0)*y)/(((x-x0)**2+y**2)**2)
# Set land points to NaN
landpoints = P >= 0.
P[landpoints] = np.nan
U[landpoints] = np.nan
V[landpoints] = np.nan
# Convert from m to lat/lon for spherical meshes
lon = La / 1852. / 60. if mesh == 'spherical' else La
lat = Wa / 1852. / 60. if mesh == 'spherical' else Wa
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat}
return FieldSet.from_data(data, dimensions, mesh=mesh)
else:
x = 3. * (1. / 1.852 / 60) # 3 km offset from boundary
y = (fieldset.U.lat[0] + x, fieldset.U.lat[-1] - x) # latitude range, including offsets
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle,
start=(x, y[0]), finish=(x, y[1]), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Advect the particles for 24h
time = delta(hours=24)
dt = delta(minutes=5)
k_adv = pset.Kernel(method)
k_p = pset.Kernel(UpdateP)
out = pset.ParticleFile(name="MyParticle", outputdt=delta(hours=1)) if output else None
print("Peninsula: Advecting %d particles for %s" % (npart, str(time)))
pset.execute(k_adv + k_p, runtime=time, dt=dt, output_file=out)
if verbose:
print("Final particle positions:\n%s" % pset)
return pset
def fieldsetfile(mesh):
"""Generate fieldset files for peninsula test"""
filename = 'peninsula'
fieldset = peninsula_fieldset(100, 50, mesh=mesh)
fieldset.write(filename)
return filename
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing RK4 computation')
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
p.add_argument('-d', '--degree', type=int, default=1,
help='Degree of spatial interpolation')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-o', '--nooutput', action='store_true', default=False,
help='Suppress trajectory output')
p.add_argument('--profiling', action='store_true', default=False,
help='Print profiling information after run')
p.add_argument('-f', '--fieldset', type=int, nargs=2, default=None,
help='Generate fieldset file with given dimensions')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
args = p.parse_args()
if args.fieldset is not None:
filename = 'peninsula'
fieldset = peninsula_fieldset(args.fieldset[0], args.fieldset[1], mesh='flat')
fieldset.write(filename)
# Open fieldset file set
fieldset = FieldSet.from_parcels('peninsula', extra_fields={'P': 'P'}, allow_time_extrapolation=True)
if args.profiling:
from cProfile import runctx
from pstats import Stats
runctx("pensinsula_example(fieldset, args.particles, mode=args.mode,\
degree=args.degree, verbose=args.verbose,\
output=not args.nooutput, method=method[args.method])",
globals(), locals(), "Profile.prof")
Stats("Profile.prof").strip_dirs().sort_stats("time").print_stats(10)
else:
pensinsula_example(fieldset, args.particles, mode=args.mode,
degree=args.degree, verbose=args.verbose,
output=not args.nooutput, method=method[args.method])
| 43.061856 | 112 | 0.649988 |
90b614eb6ed41d954f776b1b26da34eda803102b | 456 | py | Python | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
] | null | null | null | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
] | null | null | null | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
] | null | null | null | from httprunner import HttpRunner
import time
kwargs = {
"failfast":False,
#"dot_env_path": "/path/to/.env"
}
runner = HttpRunner(**kwargs)
#
runner.run("/Users/wangjianqing/PycharmProjects/HttpRunner-master/tests/testcases/Release/-.yml")
runner.gen_html_report(html_report_name="reportTestForBetaYunZS",html_report_template="/Users/wangjianqing/PycharmProjects/HttpRunner-master/httprunner/templates/default_report_template.html")
| 26.823529 | 192 | 0.800439 |
90b636cded4c580440a67538e3ed1bce323607f4 | 2,186 | py | Python | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | '''
Manage sensitivity classification recommendations.
'''
from ...... pyaz_utils import _call_az
def list(name, resource_group, workspace_name, filter=None, included_disabled=None, skip_token=None):
'''
List the recommended sensitivity classifications of a given SQL pool.
Required Parameters:
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The workspace name.
Optional Parameters:
- filter -- An OData filter expression that filters elements in the collection.
- included_disabled -- Indicates whether the result should include disabled recommendations
- skip_token -- An OData query option to indicate how many elements to skip in the collection.
'''
return _call_az("az synapse sql pool classification recommendation list", locals())
def enable(column, name, resource_group, schema, table, workspace_name):
'''
Enable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation enable", locals())
def disable(column, name, resource_group, schema, table, workspace_name):
'''
Disable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation disable", locals())
| 42.038462 | 128 | 0.718664 |
90b74a470408ddeb782e48bf20e39ffd4457275e | 1,755 | py | Python | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
] | null | null | null | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
] | null | null | null | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
] | null | null | null | """ Testing array utilities
"""
import sys
import numpy as np
from ..arrfuncs import as_native_array, pinv, eigh
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>'
SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<'
| 29.745763 | 77 | 0.616524 |
90b769e3d5d7b99ed6ee9f9dfa67655328ca1e58 | 1,571 | py | Python | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | 1 | 2017-09-09T13:53:09.000Z | 2017-09-09T13:53:09.000Z | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | null | null | null | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import sys
import math
#HOMEMADE WITHOUT ONLINE CODE by Aris
#LIENCE BY ARIS
| 29.092593 | 84 | 0.57352 |
90b7ba0980ae3d667866aa6f68a2acda5b4f0621 | 1,895 | py | Python | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 3 | 2018-07-09T12:15:46.000Z | 2020-12-03T07:02:23.000Z | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 1 | 2019-05-09T21:57:20.000Z | 2019-05-09T21:57:20.000Z | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 2 | 2018-07-23T12:49:21.000Z | 2021-06-03T11:00:44.000Z | """Rail network map
"""
import os
import sys
from collections import OrderedDict
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from vtra.utils import *
if __name__ == '__main__':
main()
| 28.712121 | 77 | 0.604749 |
90b801d343545a11009f0b5ecc8dd2af2c9f92ca | 3,189 | py | Python | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-27 20:02
from django.db import migrations, models
import django.db.models.deletion
| 41.415584 | 123 | 0.54688 |
90b9151bc28db99fb5989633cea86f3faad362ff | 4,471 | py | Python | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
] | null | null | null | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
] | null | null | null | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import os
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from .. import Pydlspec2dException
from ..spec1d import (HMF, findspec, spec_append, spec_path, template_metadata,
wavevector)
| 38.543103 | 79 | 0.47193 |
90b979db4f0ee9199884997c5ba3cb24bb11e60e | 7,800 | py | Python | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
] | null | null | null | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
] | null | null | null | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
from absl import flags
import numpy as np
import skimage.io as io
import cv2
import matplotlib.pyplot as plt
# import tensorflow as tf
# from psbody.mesh import Mesh
from smpl_webuser.serialization import load_model
import pyrender
import trimesh
from util import renderer as vis_util
from util import image as img_util
from flame import FLAME
from flame_config import get_config
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
import MyRingnet
# Input size: 2048 + 159, fc1_size: 512, fc2_size: 512, out_size: 159
config_img_size = 244
if __name__ == '__main__':
# read images and scale
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180424_03335_TA/multiview_neutral/IMG_0101.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180704_03355_TA/multiview_expressions/IMG_1948.jpg"
input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180427_03338_TA/multiview_expressions/IMG_0230.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180502_00145_TA/multiview_expressions/IMG_0407.jpg"
openpose = np.load(input_img_path.replace("iphone_pictures", "openpose").replace("jpg", "npy"), allow_pickle=True, encoding='latin1')
img = io.imread(input_img_path)
if np.max(img.shape[:2]) != config_img_size:
# print('Resizing so the max image size is %d..' % self.config_img_size)
scale = (float(config_img_size) / np.max(img.shape[:2]))
else:
scale = 1.0#scaling_factor
center = np.round(np.array(img.shape[:2]) / 2).astype(int)
# image center in (x,y)
center = center[::-1]
crop, proc_param = img_util.scale_and_crop(
img, scale, center, config_img_size)
print(proc_param)
#exit(0)
crop = torch.tensor(crop)
crop = crop.permute(2, 0, 1)
crop = crop[None, :, :, :].float().cuda()
# print(crop)
# build model
resnet50 = torch.load("./good_resnet50.pkl")
resnet50.cuda()
resnet50.fc = Identity()
# print(resnet50)
regression = torch.load("./good_model.pkl")
regression.cuda()
config = get_config()
config.batch_size = 1
flamelayer = FLAME(config)
flamelayer.requires_grad_ = False
flamelayer.cuda()
# run the model
res_output = resnet50(crop)
# Empty estimates as the initial value for concatenation
regress_estimates = torch.zeros([ res_output.shape[0], MyRingnet.regress_out_size ]).cuda()
# Regression model
for _ in range(MyRingnet.regress_iteration_cnt):
# Preprocess regression input - concatenation
regress_input = torch.cat([res_output, regress_estimates], 1)
regress_estimates = regression(regress_input)
regress_output = regress_estimates
# FLAME model
cam_params, pose_params = regress_output[0:, 0:3], regress_output[0:, 3:9]
shape_params, exp_params = regress_output[0:, 9:109], regress_output[0:, 109:159]
# pose_params[0,2] = 3.14/5
flame_vert, flame_lmk = flamelayer(shape_params, exp_params, pose_params)
# Render and display the mesh
print(flame_lmk, cam_params)
# flame_lmk[0]=cam_params[0]*-1
# a_params = cam_params[:,:]*-1
mesh_vertices, mesh_faces = flame_vert.detach().cpu().numpy().squeeze(), flamelayer.faces
mesh_vertices_colors = np.ones([mesh_vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, flame_lmk.detach().cpu().numpy().squeeze())
#renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, cam_params[0])
# flame_lmk[:, :, 1] *= -1
# cam_params[:,1]*=-1
# cam_params[:, 0] = 2
# cam_params[:, 1] = 0.2
# print(flame_lmk)
center = torch.tensor(center.copy()).cuda()
print(cam_params)
new_cam = MyRingnet.transform_cam(cam_params, 1. / scale, config_img_size, center[None, :])
projected_lmks = MyRingnet.project_points(flame_lmk, new_cam)
#op_pts = openpose[0,:68,:]
#ground_truth_weights = ((op_pts[:,2] > 0.41).astype(float))
#print(ground_truth_weights)
#print(op_pts)
# print(projected_lmks)
# print(openpose)
plt.figure
plt.imshow(img)
count = 0
cpu_lmks = projected_lmks.cpu()
#print(img.shape)
for i in cpu_lmks[0]:
x = i[0].int()
y = i[1].int()
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='red', marker='o')
count = count + 1
count = 0
#openpose[0] *= scale
for i in openpose[0]:
x = i[0]
y = i[1]
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='blue', marker='o')
count = count + 1
plt.show()
renderer = vis_util.SMPLRenderer(faces=mesh_faces)
print(img.shape[:2])
cam_for_render, vert_shifted = vis_util.get_original(
#proc_param, mesh_vertices, new_cam.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
proc_param, mesh_vertices, cam_params.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
)
print(cam_params, new_cam, cam_for_render)
#exit(0)
# rend_img_overlay = renderer(
# #vert_shifted * 1.0, cam=new_cam.squeeze().detach().cpu().numpy(), img=img, do_alpha=True
# #vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# )
rend_img_vp1 = renderer.rotated(
mesh_vertices, 30, cam=new_cam.squeeze().detach().cpu().numpy(), img_size=img.shape[:2]
#vert_shifted * 1.0, 30, cam=cam_for_render, img_size=img.shape[:2]
)
plt.imshow(rend_img_vp1)
plt.show()
| 34.513274 | 145 | 0.680385 |
90b9c9ce2f3208b12b35d5e78f9d7d9be8454378 | 92 | py | Python | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
] | 2 | 2021-11-19T10:40:07.000Z | 2022-02-28T16:39:49.000Z | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
] | null | null | null | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
os.system("sudo ./scan.py")
os.system("sudo ./enable-wifi.py")
| 15.333333 | 34 | 0.673913 |
90b9ca60618e207e4f11df9555b71806b45d69af | 1,538 | py | Python | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
] | 14 | 2021-08-24T12:36:37.000Z | 2022-03-18T12:14:36.000Z | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
] | null | null | null | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
] | 1 | 2021-10-21T20:22:55.000Z | 2021-10-21T20:22:55.000Z | from sklearn.ensemble import RandomForestClassifier
import xgboost
| 27.963636 | 71 | 0.683355 |
90ba1f62b3ac0c6dc5b223b48142b7f90d52dc27 | 4,958 | py | Python | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
] | null | null | null | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
] | null | null | null | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
] | null | null | null | from keras.optimizers import RMSprop
from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU
from keras.layers import concatenate, Reshape, SpatialDropout1D
from keras.models import Model
from keras import backend as K
from .AttentionWeightedAverage import AttentionWeightedAverage
def textgenrnn_model(num_classes, cfg, context_size=None,
weights_path=None,
dropout=0.0,
optimizer=RMSprop(lr=4e-3, rho=0.99)):
'''
Builds the model architecture for textgenrnn and
loads the specified weights for the model.
'''
input = Input(shape=(cfg['max_length'],), name='input')
embedded = Embedding(num_classes, cfg['dim_embeddings'],
input_length=cfg['max_length'],
name='embedding')(input)
if dropout > 0.0:
embedded = SpatialDropout1D(dropout, name='dropout')(embedded)
rnn_layer_list = []
for i in range(cfg['rnn_layers']):
prev_layer = embedded if i == 0 else rnn_layer_list[-1]
if cfg.get('rnn_type') == 'gru':
rnn_layer_list.append(new_rnn_gru(cfg, i + 1)(prev_layer))
else:
rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))
seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
attention = AttentionWeightedAverage(name='attention')(seq_concat)
output = Dense(num_classes, name='output', activation='softmax')(attention)
if context_size is None:
model = Model(inputs=[input], outputs=[output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
else:
context_input = Input(
shape=(context_size,), name='context_input')
context_reshape = Reshape((context_size,),
name='context_reshape')(context_input)
merged = concatenate([attention, context_reshape], name='concat')
main_output = Dense(num_classes, name='context_output',
activation='softmax')(merged)
model = Model(inputs=[input, context_input],
outputs=[main_output, output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
loss_weights=[0.8, 0.2])
return model
'''
Create a new LSTM layer per parameters. Unfortunately,
each combination of parameters must be hardcoded.
The normal LSTMs use sigmoid recurrent activations
for parity with CuDNNLSTM:
https://github.com/keras-team/keras/issues/8860
'''
| 40.308943 | 103 | 0.583905 |
90bbeed86ea6726d8cf4682e4d77c05a1d88ab5a | 121,331 | py | Python | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
] | 38 | 2015-11-30T10:11:42.000Z | 2022-02-10T18:31:44.000Z | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
] | 143 | 2015-12-10T19:00:42.000Z | 2020-08-20T13:51:42.000Z | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
] | 15 | 2015-12-14T23:03:30.000Z | 2019-01-15T19:35:45.000Z | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, is_, none, empty
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman.adapters.switches import brocade_factory_ssh, brocade_factory_telnet
from netman.adapters.switches.brocade import Brocade, parse_if_ranges
from netman.adapters.switches.util import SubShell
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownVlan, UnknownIP, UnknownAccessGroup, BadVlanNumber, \
BadVlanName, UnknownInterface, TrunkVlanNotSet, UnknownVrf, VlanVrfNotSet, VrrpAlreadyExistsForVlan, BadVrrpPriorityNumber, BadVrrpGroupNumber, \
BadVrrpTimers, BadVrrpTracking, NoIpOnVlanForVrrp, VrrpDoesNotExistForVlan, UnknownDhcpRelayServer, DhcpRelayServerAlreadyExists, \
VlanAlreadyExist, InvalidAccessGroupName, IPAlreadySet
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_descriptor import SwitchDescriptor
def vlan_with_vif_display(vlan_id, vif_id, name="[None]"):
return vlan_display(vlan_id, name, vif_id=vif_id)
def vlan_display(vlan_id=9, vlan_name="[None]", tagged_port_str=None, untagged_port_str=None, vif_id=None):
ret = [
"PORT-VLAN {}, Name {}, Priority Level -, Priority Force 0, Creation Type STATIC".format(vlan_id, vlan_name),
"Topo HW idx : 81 Topo SW idx: 257 Topo next vlan: 0",
"L2 protocols : STP",
]
if untagged_port_str:
ret.append("Untagged Ports : {}".format(untagged_port_str))
if tagged_port_str:
ret.append("Statically tagged Ports : {}".format(tagged_port_str))
ret.extend([
"Associated Virtual Interface Id: {}".format(vif_id or "NONE"),
"----------------------------------------------------------",
"No ports associated with VLAN",
"Arp Inspection: 0",
"DHCP Snooping: 0",
"IPv4 Multicast Snooping: Disabled",
"IPv6 Multicast Snooping: Disabled",
])
if vif_id:
ret.extend([
"Ve{} is down, line protocol is down".format(vif_id),
" Type is Vlan (Vlan Id: {})".format(vlan_id),
" Hardware is Virtual Ethernet, address is 748e.f8a7.1b01 (bia 748e.f8a7.1b01)",
" No port name",
" Vlan id: {}".format(vlan_id),
" Internet address is 0.0.0.0/0, IP MTU 1500 bytes, encapsulation ethernet",
" Configured BW 0 kbps",
])
else:
ret.append("No Virtual Interfaces configured for this vlan")
return ret
| 52.365559 | 170 | 0.645779 |
90bd59aae81c9889080df91dbd28e4a9b304ffd9 | 1,384 | py | Python | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 36 | 2019-02-22T23:07:14.000Z | 2022-02-10T13:24:27.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 717 | 2019-02-21T22:07:55.000Z | 2022-02-26T15:17:49.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 19 | 2019-04-14T14:37:56.000Z | 2022-02-14T22:05:16.000Z | import uuid
from authtools import models as authtools_models
from django.core.validators import URLValidator
from django.db import models
from django.utils import timezone
from solo.models import SingletonModel
| 28.833333 | 85 | 0.710983 |
90bee561f7ee7014b2253c39a50c061487d0ec34 | 2,106 | py | Python | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 96 | 2018-08-23T16:49:05.000Z | 2021-11-25T00:47:16.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 1,011 | 2018-08-17T12:25:21.000Z | 2021-11-18T09:30:19.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 65 | 2018-08-20T20:05:40.000Z | 2022-02-26T23:54:35.000Z | import numpy as np
types = ["int", "float", "double"]
rngs = {"int": randi, "float": np.random.randn, "double": np.random.randn}
embodiments = {
"function": "R.%s(A,B).AllClose(C)",
"op": "(A %s B).AllClose(C)",
"inline_op": "(R = A, R %s B).AllClose(C)",
"inline_function": "( R = A, R.%s(B) ).AllClose(C)"
}
tests = {
'+': ("Addition", "Add", [], []),
'*': ("Multiplication", "Multiply", [], []),
'-': ("Subtraction", "Subtract", [], []),
'/': ("Division", "Divide", ["int"], []),
'dp': ("Dot product", "Dot", [], ["op", "inline_op"])
}
for type in types:
rng = rngs[type]
for op, details in tests.iteritems():
test_title, function, exclude, ignore = details
if type in exclude:
break
iop = op + "="
ifunction = "Inline" + function
names = {
"function": function,
"op": op,
"inline_op": iop,
"inline_function": ifunction
}
n = 7
m = 7
A = rng(n, m)
B = rng(n, m)
if op == "+":
C = A + B
elif op == "/":
C = A / B
elif op == "-":
C = A - B
elif op == "*":
C = A * B
elif op == "dp":
C = np.dot(A, B)
m1 = " ;\n".join([" ".join([str(y) for y in x]) for x in A])
m2 = " ;\n".join([" ".join([str(y) for y in x]) for x in B])
m3 = " ;\n".join([" ".join([str(y) for y in x]) for x in C])
print """
SCENARIO("%s") {
_M<%s> A,B,C,R;
R.Resize( %d, %d );
A = _M<%s>(R\"(\n%s\n)\");
B = _M<%s>(R\"(\n%s\n)\");
C = _M<%s>(R\"(\n%s\n)\");
""" % (test_title + " for " + type, type, n, m, type, m1, type, m2, type, m3)
for method, emb in embodiments.iteritems():
if method in ignore:
continue
name = names[method]
tt = emb % name
print "EXPECT( %s );" % tt
print "};"
print
| 25.071429 | 85 | 0.417854 |
90c01fddb271dd8ab9c578d5f65f7244cd0b0416 | 2,824 | py | Python | Lab 2/utils/inference_utils.py | davedecoder/aws-deepcomposer-samples | 34f94a04436dc3fa0ded8c353e0f3260f1b3305e | [
"MIT-0"
] | 6 | 2021-10-11T12:39:01.000Z | 2022-03-27T16:01:41.000Z | notebooks/AWS DeepComposer/reinvent-labs/lab-2/utils/inference_utils.py | jesussantana/AWS-Machine-Learning-Foundations | 526eddb486fe8398cafcc30184c4ecce49df5816 | [
"MIT"
] | null | null | null | notebooks/AWS DeepComposer/reinvent-labs/lab-2/utils/inference_utils.py | jesussantana/AWS-Machine-Learning-Foundations | 526eddb486fe8398cafcc30184c4ecce49df5816 | [
"MIT"
] | 5 | 2020-05-16T13:06:52.000Z | 2020-11-14T11:56:26.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
import numpy as np
from utils import path_utils, midi_utils, display_utils
# --- local samples------------------------------------------------------------------
def load_melody_samples(n_sample=10):
"""Load the samples used for evaluation."""
sample_source_path = './dataset/eval.npy'
data = np.load(sample_source_path)
data = np.asarray(data, dtype=np.float32) # {-1, 1}
random_idx = np.random.choice(len(data), n_sample, replace=False)
sample_x = data[random_idx]
sample_z = tf.random.truncated_normal((n_sample, 2, 8, 512))
print("Loaded {} melody samples".format(len(sample_x)))
return sample_x, sample_z
# --- Training ------------------------------------------------------------------
| 46.295082 | 127 | 0.725921 |
90c06ceec71cc460139a2abcafcd42b40b0a56a8 | 315 | py | Python | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | from enum import Enum
from sys import stderr | 22.5 | 44 | 0.714286 |
90c0801975d3d3c99714cb7e0cfc32ffb8ce7205 | 251 | py | Python | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | [
"MIT"
] | null | null | null | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | [
"MIT"
] | 10 | 2021-04-04T19:07:41.000Z | 2022-03-12T00:54:50.000Z | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Image
| 35.857143 | 131 | 0.776892 |